Compare commits


2 Commits

Author: Colin Young
SHA1:   46abb06865
Date:   2026-03-17 15:20:29 -07:00

[Codex][Codex CLI] refine auth routing observability follow-up

Use the provider header map when deriving residency telemetry, remove the conversation-start base URL override fallback, and keep request-route telemetry owned by the actual request path.

Co-authored-by: Codex <noreply@openai.com>

Author: Colin Young
SHA1:   d02630eccc
Date:   2026-03-16 15:57:11 -07:00

[Codex][Codex CLI] Add auth routing observability follow-up

Checkpoint the PR2 remainder before rebasing onto main.

Co-authored-by: Codex <noreply@openai.com>
4312 changed files with 338058 additions and 588167 deletions

.bazelrc

@@ -20,6 +20,9 @@ common:windows --host_platform=//:local_windows
common --@rules_cc//cc/toolchains/args/archiver_flags:use_libtool_on_macos=False
common --@llvm//config:experimental_stub_libgcc_s
# We need to use the sh toolchain on windows so we don't send host bash paths to the linux executor.
common:windows --@rules_rust//rust/settings:experimental_use_sh_toolchain_for_bootstrap_process_wrapper
# TODO(zbarsky): rules_rust doesn't implement this flag properly with remote exec...
# common --@rules_rust//rust/settings:pipelined_compilation
@@ -29,13 +32,10 @@ common:linux --test_env=PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
common:macos --test_env=PATH=/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
# Pass through some env vars Windows needs to use powershell?
common:windows --test_env=PATH
common:windows --test_env=SYSTEMROOT
common:windows --test_env=COMSPEC
common:windows --test_env=WINDIR
# Rust's libtest harness runs test bodies on std-spawned threads. The default
# 2 MiB stack can be too small for large async test futures on Windows CI; see
# https://github.com/openai/codex/pull/19067 for the motivating failure.
common --test_env=RUST_MIN_STACK=8388608 # 8 MiB
common --test_output=errors
common --bes_results_url=https://app.buildbuddy.io/invocation/
@@ -47,7 +47,6 @@ common --remote_timeout=3600
common --noexperimental_throttle_remote_action_building
common --experimental_remote_execution_keepalive
common --grpc_keepalive_time=30s
common --experimental_remote_downloader=grpcs://remote.buildbuddy.io
# This limits both in-flight executions and concurrent downloads. Even with high number
# of jobs execution will still be limited by CPU cores, so this just pays a bit of
@@ -61,142 +60,3 @@ common:remote --jobs=800
# Enable pipelined compilation since we are not bound by local CPU count.
#common:remote --@rules_rust//rust/settings:pipelined_compilation
# GitHub Actions CI configs.
common:ci --remote_download_minimal
common:ci --keep_going
common:ci --verbose_failures
common:ci --build_metadata=REPO_URL=https://github.com/openai/codex.git
common:ci --build_metadata=ROLE=CI
common:ci --build_metadata=VISIBILITY=PUBLIC
# rules_rust derives debug level from Bazel toolchain/compilation-mode settings,
# not Cargo profiles. Keep CI Rust actions explicit and lean.
common:ci --@rules_rust//rust/settings:extra_rustc_flag=-Cdebuginfo=0
common:ci --@rules_rust//rust/settings:extra_exec_rustc_flag=-Cdebuginfo=0
# Disable disk cache in CI since we have a remote one and aren't using persistent workers.
common:ci --disk_cache=
# Shared config for the main Bazel CI workflow.
common:ci-bazel --config=ci
common:ci-bazel --build_metadata=TAG_workflow=bazel
# Bazel CI cross-compiles in several legs, and the V8-backed code-mode tests
# are not stable in that setup yet. Keep running the rest of the Rust
# integration suites through the workspace-root launcher.
common:ci-bazel --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::
# Shared config for Bazel-backed Rust linting.
build:clippy --aspects=@rules_rust//rust:defs.bzl%rust_clippy_aspect
build:clippy --output_groups=+clippy_checks
build:clippy --@rules_rust//rust/settings:clippy.toml=//codex-rs:clippy.toml
# Keep this deny-list in sync with `codex-rs/Cargo.toml` `[workspace.lints.clippy]`.
# Cargo applies those lint levels to member crates that opt into `[lints] workspace = true`
# in their own `Cargo.toml`, but `rules_rust` Bazel clippy does not read Cargo lint levels.
# `clippy.toml` can configure lint behavior, but it cannot set allow/warn/deny/forbid levels.
build:clippy --@rules_rust//rust/settings:clippy_flag=-Dwarnings
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::await_holding_invalid_type
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::await_holding_lock
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::expect_used
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::identity_op
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_clamp
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_filter
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_find
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_flatten
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_map
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_memcpy
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_non_exhaustive
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_ok_or
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_range_contains
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_retain
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_strip
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_try_fold
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_unwrap_or
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_borrow
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_borrowed_reference
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_collect
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_late_init
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_option_as_deref
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_question_mark
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_update
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::redundant_clone
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::redundant_closure
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::redundant_closure_for_method_calls
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::redundant_static_lifetimes
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::trivially_copy_pass_by_ref
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::uninlined_format_args
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unnecessary_filter_map
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unnecessary_lazy_evaluations
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unnecessary_sort_by
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unnecessary_to_owned
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unwrap_used
# Shared config for Bazel-backed argument-comment-lint.
build:argument-comment-lint --aspects=//tools/argument-comment-lint:lint_aspect.bzl%rust_argument_comment_lint_aspect
build:argument-comment-lint --output_groups=argument_comment_lint_checks
build:argument-comment-lint --@rules_rust//rust/toolchain/channel=nightly
# Rearrange caches on Windows so they're on the same volume as the checkout.
common:ci-windows --config=ci-bazel
common:ci-windows --build_metadata=TAG_os=windows
common:ci-windows --repo_contents_cache=D:/a/.cache/bazel-repo-contents-cache
# We prefer to run the build actions entirely remotely so we can dial up the concurrency.
# We have platform-specific tests, so we want to execute the tests on all platforms using the strongest sandboxing available on each platform.
# On linux, we can do a full remote build/test, by targeting the right (x86/arm) runners, so we have coverage of both.
# Linux crossbuilds don't work until we untangle the libc constraint mess.
common:ci-linux --config=ci-bazel
common:ci-linux --build_metadata=TAG_os=linux
common:ci-linux --config=remote
common:ci-linux --strategy=remote
common:ci-linux --platforms=//:rbe
# On mac, we can run all the build actions remotely but test actions locally.
common:ci-macos --config=ci-bazel
common:ci-macos --build_metadata=TAG_os=macos
common:ci-macos --config=remote
common:ci-macos --strategy=remote
common:ci-macos --strategy=TestRunner=darwin-sandbox,local
# On Windows, use Linux remote execution for build actions but keep test actions
# on the Windows runner so Bazel's normal test sharding and flaky-test retries
# still run against Windows binaries.
common:ci-windows-cross --config=ci-windows
common:ci-windows-cross --build_metadata=TAG_windows_cross_compile=true
common:ci-windows-cross --config=remote
common:ci-windows-cross --host_platform=//:rbe
common:ci-windows-cross --strategy=remote
common:ci-windows-cross --strategy=TestRunner=local
common:ci-windows-cross --local_test_jobs=4
common:ci-windows-cross --test_env=RUST_TEST_THREADS=1
# Native Windows CI still covers the PowerShell tests. The cross-built gnullvm
# binaries currently hang in PowerShell AST parser tests when those binaries are
# run on the Windows runner.
common:ci-windows-cross --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::,powershell
common:ci-windows-cross --platforms=//:windows_x86_64_gnullvm
common:ci-windows-cross --extra_execution_platforms=//:rbe,//:windows_x86_64_msvc
common:ci-windows-cross --extra_toolchains=//:windows_gnullvm_tests_on_msvc_host_toolchain
# Linux-only V8 CI config.
common:ci-v8 --config=ci
common:ci-v8 --build_metadata=TAG_workflow=v8
common:ci-v8 --build_metadata=TAG_os=linux
common:ci-v8 --config=remote
common:ci-v8 --strategy=remote
# Source-built Bazel V8 artifacts use the in-process sandbox by default. This
# does not affect Cargo's default prebuilt rusty_v8 path.
common --@v8//:v8_enable_pointer_compression=True
common --@v8//:v8_enable_sandbox=True
# Keep currently published rusty_v8 release artifacts non-sandboxed until the
# artifact migration ships matching Rust feature selection for Cargo consumers.
common:v8-release-compat --@v8//:v8_enable_pointer_compression=False
common:v8-release-compat --@v8//:v8_enable_sandbox=False
# Match rusty_v8's upstream GN release contract for published artifacts: every
# target object uses Chromium's custom libc++ headers and the archive folds in
# the matching runtime objects.
common:rusty-v8-upstream-libcxx --@v8//:v8_use_rusty_v8_custom_libcxx=True
# Optional per-user local overrides.
try-import %workspace%/user.bazelrc


@@ -1,6 +1,5 @@
iTerm
iTerm2
psuedo
SOM
te
TE


@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new,*meriyah.umd.min.js
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm,te,TE,PASE,SEH
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm,te,TE
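As a quick illustration of what that `ignore-regex` skips — a minimal Python sketch that applies the pattern to a single line the way codespell does (the sample strings are hypothetical):

```python
import re

# First alternative skips notebook image-output lines; second skips the
# Jest `afterAll` hook so codespell does not flag it.
ignore = re.compile(r'^\s*"image/\S+": ".*|\b(afterAll)\b')

assert ignore.search('    "image/png": "iVBORw0KGgoAAAANS..."')
assert ignore.search("afterAll(() => server.close());")
assert not ignore.search("ordinary prose is still checked")
```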


@@ -1,11 +0,0 @@
# THIS IS AUTOGENERATED. DO NOT EDIT MANUALLY
version = 1
name = "codex"
[setup]
script = ""
[[actions]]
name = "Run"
icon = "run"
command = "cargo +1.93.0 run --manifest-path=codex-rs/Cargo.toml --bin codex -- -c mcp_oauth_credentials_store=file"


@@ -1,6 +1,6 @@
---
name: babysit-pr
description: Babysit a GitHub pull request after creation by continuously polling review comments, CI checks/workflow runs, and mergeability state until the PR is merged/closed or user help is required. Diagnose failures, retry likely flaky failures up to 3 times, auto-fix/push branch-related issues when appropriate, and keep watching open PRs so fresh review feedback is surfaced promptly. Use when the user asks Codex to monitor a PR, watch CI, handle review comments, or keep an eye on failures and feedback on an open PR.
description: Babysit a GitHub pull request after creation by continuously polling CI checks/workflow runs, new review comments, and mergeability state until the PR is ready to merge (or merged/closed). Diagnose failures, retry likely flaky failures up to 3 times, auto-fix/push branch-related issues when appropriate, and stop only when user help is required (for example CI infrastructure issues, exhausted flaky retries, or ambiguous/blocking situations). Use when the user asks Codex to monitor a PR, watch CI, handle review comments, or keep an eye on failures and feedback on an open PR.
---
# PR Babysitter
@@ -9,8 +9,8 @@ description: Babysit a GitHub pull request after creation by continuously pollin
Babysit a PR persistently until one of these terminal outcomes occurs:
- The PR is merged or closed.
- CI is successful, there are no unaddressed review comments surfaced by the watcher, required review approval is not blocking merge, and there are no potential merge conflicts (PR is mergeable / not reporting conflict risk).
- A situation requires user help (for example CI infrastructure issues, repeated flaky failures after retry budget is exhausted, permission problems, or ambiguity that cannot be resolved safely).
- Optional handoff milestone: the PR is currently green + mergeable + review-clean. Treat this as a progress state, not a watcher stop, so late-arriving review comments are still surfaced promptly while the PR remains open.
Do not stop merely because a single snapshot returns `idle` while checks are still pending.
@@ -24,20 +24,19 @@ Accept any of the following:
## Core Workflow
1. When the user asks to "monitor"/"watch"/"babysit" a PR, start with the watcher's continuous mode (`--watch`) unless you are intentionally doing a one-shot diagnostic snapshot.
2. Run the watcher script to snapshot PR/review/CI state (or consume each streamed snapshot from `--watch`).
2. Run the watcher script to snapshot PR/CI/review state (or consume each streamed snapshot from `--watch`).
3. Inspect the `actions` list in the JSON response.
4. If `diagnose_ci_failure` is present, inspect failed run logs and classify the failure.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push. Do not patch random flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are unrelated to the branch.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push.
6. If `process_review_comment` is present, inspect surfaced review items and decide whether to address them.
7. If a review item is actionable and correct, patch code locally, commit, push, and then mark the associated review thread/comment as resolved once the fix is on GitHub.
8. Do not post replies to human-authored review comments/threads unless the user explicitly confirms the exact response. If a human review item is non-actionable, already addressed, or not valid, surface the item and recommended response to the user instead of replying on GitHub.
9. If the failure is likely flaky/unrelated and `retry_failed_checks` is present, rerun failed jobs with `--retry-failed-now`.
10. If both actionable review feedback and `retry_failed_checks` are present, prioritize review feedback first; a new commit will retrigger CI, so avoid rerunning flaky checks on the old SHA unless you intentionally defer the review change.
11. On every loop, look for newly surfaced review feedback before acting on CI failures or mergeability state, then verify mergeability / merge-conflict status (for example via `gh pr view`) alongside CI.
12. After any push or rerun action, immediately return to step 1 and continue polling on the updated SHA/state.
13. If you had been using `--watch` before pausing to patch/commit/push, relaunch `--watch` yourself in the same turn immediately after the push (do not wait for the user to re-invoke the skill).
14. Repeat polling until `stop_pr_closed` appears or a user-help-required blocker is reached. A green + review-clean + mergeable PR is a progress milestone, not a reason to stop the watcher while the PR is still open.
15. Maintain terminal/session ownership: while babysitting is active, keep consuming watcher output in the same turn; do not leave a detached `--watch` process running and then end the turn as if monitoring were complete.
7. If a review item is actionable and correct, patch code locally, commit, and push.
8. If the failure is likely flaky/unrelated and `retry_failed_checks` is present, rerun failed jobs with `--retry-failed-now`.
9. If both actionable review feedback and `retry_failed_checks` are present, prioritize review feedback first; a new commit will retrigger CI, so avoid rerunning flaky checks on the old SHA unless you intentionally defer the review change.
10. On every loop, verify mergeability / merge-conflict status (for example via `gh pr view`) in addition to CI and review state.
11. After any push or rerun action, immediately return to step 1 and continue polling on the updated SHA/state.
12. If you had been using `--watch` before pausing to patch/commit/push, relaunch `--watch` yourself in the same turn immediately after the push (do not wait for the user to re-invoke the skill).
13. Repeat polling until the PR is green + review-clean + mergeable, `stop_pr_closed` appears, or a user-help-required blocker is reached.
14. Maintain terminal/session ownership: while babysitting is active, keep consuming watcher output in the same turn; do not leave a detached `--watch` process running and then end the turn as if monitoring were complete.
## Commands
@@ -69,18 +68,12 @@ python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr <number-or-url> --o
Use `gh` commands to inspect failed runs before deciding to rerun.
- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh api repos/<owner>/<repo>/actions/runs/<run-id>/jobs -X GET -f per_page=100`
- `gh api repos/<owner>/<repo>/actions/jobs/<job-id>/logs > /tmp/codex-gh-job-<job-id>-logs.zip`
- `gh run view <run-id> --log-failed` as a fallback after the overall workflow run is complete
- `gh run view <run-id> --log-failed`
`gh run view --log-failed` is workflow-run scoped and may not expose failed-job logs until the overall run finishes. For faster diagnosis, poll the run's jobs first and, as soon as a specific job has failed, fetch that job's logs directly from the Actions job logs endpoint. The watcher includes a `failed_jobs` list with each failed job's `job_id` and `logs_endpoint` when GitHub exposes one.
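A minimal sketch of that fast path, assuming the `failed_jobs` schema this diff touches elsewhere (`job_id` plus `logs_endpoint`) and the `gh` CLI on `PATH`:

```python
import subprocess

def download_failed_job_logs(snapshot, dest_dir="/tmp"):
    """Fetch logs for each failed job as soon as the watcher surfaces it."""
    paths = []
    for job in snapshot.get("failed_jobs", []):
        endpoint = job.get("logs_endpoint")
        if not endpoint:
            continue  # GitHub did not expose a logs endpoint for this job
        dest = f"{dest_dir}/codex-gh-job-{job['job_id']}-logs.zip"
        with open(dest, "wb") as fh:
            # `gh api` follows the redirect GitHub returns for job logs.
            subprocess.run(["gh", "api", endpoint], stdout=fh, check=True)
        paths.append(dest)
    return paths
```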
Prefer treating failures as branch-related when failed-job logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).
Prefer treating failures as branch-related when logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).
Prefer treating failures as flaky/unrelated when logs show transient infra/external issues (timeouts, runner provisioning failures, registry/network outages, GitHub Actions infra errors).
Do not attempt to fix flaky/unrelated failures by changing tests, build scripts, CI configuration, dependency pins, or infrastructure-adjacent code unless the logs clearly connect the failure to the PR branch. For flaky/unrelated failures, rerun only when the watcher recommends `retry_failed_checks`; otherwise wait or stop for user help.
If classification is ambiguous, perform one manual diagnosis attempt before choosing rerun.
Read `.codex/skills/babysit-pr/references/heuristics.md` for a concise checklist.
@@ -101,12 +94,10 @@ When you agree with a comment and it is actionable:
1. Patch code locally.
2. Commit with `codex: address PR review feedback (#<n>)`.
3. Push to the PR head branch.
4. After the push succeeds, mark the associated GitHub review thread/comment as resolved.
5. Resume watching on the new SHA immediately (do not stop after reporting the push).
6. If monitoring was running in `--watch` mode, restart `--watch` immediately after the push in the same turn; do not wait for the user to ask again.
4. Resume watching on the new SHA immediately (do not stop after reporting the push).
5. If monitoring was running in `--watch` mode, restart `--watch` immediately after the push in the same turn; do not wait for the user to ask again.
Do not post replies to human-authored GitHub review comments/threads automatically. If you disagree with a human comment, believe it is non-actionable/already addressed, or need to answer a question, report the item to the user with a suggested response and wait for explicit confirmation before posting anything on GitHub. If the user approves a response, prefix it with `[codex]` so it is clear the response is automated and not from the human user.
If the watcher later surfaces your own approved reply because the authenticated operator is treated as a trusted review author, treat that self-authored item as already handled and do not reply again.
If you disagree or the comment is non-actionable/already addressed, record it as handled by continuing the watcher loop (the script de-duplicates surfaced items via state after surfacing them).
If a code review comment/thread is already marked as resolved in GitHub, treat it as non-actionable and safely ignore it unless new unresolved follow-up feedback appears.
## Git Safety Rules
@@ -132,15 +123,14 @@ Use this loop in a live Codex session:
2. Read `actions`.
3. First check whether the PR is now merged or otherwise closed; if so, report that terminal state and stop polling immediately.
4. Check CI summary, new review items, and mergeability/conflict status.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated. If the overall run is still pending but `failed_jobs` already includes a failed job, fetch that job's logs and diagnose immediately instead of waiting for the whole workflow run to finish. Patch only when the failure is branch-related.
6. For each surfaced review item from another author, patch/commit/push and then resolve it if it is actionable. If it is non-actionable, already addressed, or requires a written answer, surface it to the user with a suggested response instead of posting automatically. If a later snapshot surfaces your own approved reply, treat it as informational and continue without responding again.
7. Process actionable review comments before flaky reruns when both are present; if a review fix requires a commit, push it and skip rerunning failed checks on the old SHA.
8. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit. Do not make code changes for unrelated flakes or infrastructure failures just to get CI green.
9. If you pushed a commit, resolved a review thread, or triggered a rerun, report the action briefly and continue polling (do not stop). If a human review comment needs a written GitHub response, stop and ask for confirmation before posting.
10. After a review-fix push, proactively restart continuous monitoring (`--watch`) in the same turn unless a strict stop condition has already been reached.
11. If everything is passing, mergeable, not blocked on required review approval, and there are no unaddressed review items, report that the PR is currently ready to merge but keep the watcher running so new review comments are surfaced quickly while the PR remains open.
12. If blocked on a user-help-required issue (infra outage, exhausted flaky retries, unclear reviewer request, permissions), report the blocker and stop.
13. Otherwise sleep according to the polling cadence below and repeat.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated.
6. Process actionable review comments before flaky reruns when both are present; if a review fix requires a commit, push it and skip rerunning failed checks on the old SHA.
7. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit.
8. If you pushed a commit or triggered a rerun, report the action briefly and continue polling (do not stop).
9. After a review-fix push, proactively restart continuous monitoring (`--watch`) in the same turn unless a strict stop condition has already been reached.
10. If everything is passing, mergeable, not blocked on required review approval, and there are no unaddressed review items, report success and stop.
11. If blocked on a user-help-required issue (infra outage, exhausted flaky retries, unclear reviewer request, permissions), report the blocker and stop.
12. Otherwise sleep according to the polling cadence below and repeat (a condensed sketch of this loop follows this section).
When the user explicitly asks to monitor/watch/babysit a PR, prefer `--watch` so polling continues autonomously in one command. Use repeated `--once` snapshots only for debugging, local testing, or when the user explicitly asks for a one-shot check.
Do not stop to ask the user whether to continue polling; continue autonomously until a strict stop condition is met or the user explicitly interrupts.
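A condensed, non-authoritative sketch of that loop — the handler callables are hypothetical, while the action names come from the watcher's JSON output:

```python
import time

def babysit(take_snapshot, fix_branch_failure, handle_review_item,
            retry_failed, next_poll_seconds):
    """Illustrative control flow only; the real loop lives in the Codex session."""
    while True:
        snap = take_snapshot()
        actions = snap["actions"]
        if "stop_pr_closed" in actions:
            return "pr_closed"
        if "stop_exhausted_retries" in actions:
            return "needs_user_help"
        pushed = False
        if "process_review_comment" in actions:
            for item in snap["new_review_items"]:
                pushed |= bool(handle_review_item(item))  # may commit + push
        if "diagnose_ci_failure" in actions:
            pushed |= bool(fix_branch_failure(snap))  # branch-related failures only
        if "retry_failed_checks" in actions and not pushed:
            retry_failed(snap)  # flaky rerun only if the SHA is not about to change
        if "stop_ready_to_merge" in actions:
            return "ready_to_merge"
        time.sleep(next_poll_seconds(snap))
```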
@@ -148,18 +138,19 @@ Do not hand control back to the user after a review-fix push just because a new
If a `--watch` process is still running and no strict stop condition has been reached, the babysitting task is still in progress; keep streaming/consuming watcher output instead of ending the turn.
## Polling Cadence
Keep review polling aggressive and continue monitoring even after CI turns green:
Use adaptive polling and continue monitoring even after CI turns green:
- While CI is not green (pending/running/queued or failing): poll every 1 minute.
- After CI turns green: keep polling at the base cadence while the PR remains open so newly posted review comments are surfaced promptly instead of waiting on a long green-state backoff.
- Reset the cadence immediately whenever anything changes (new commit/SHA, check status changes, new review comments, mergeability changes, review decision changes).
- If CI stops being green again (new commit, rerun, or regression): stay on the base polling cadence.
- After CI turns green: start at every 1 minute, then back off exponentially when there is no change (for example 1m, 2m, 4m, 8m, 16m, 32m), capping at every 1 hour; a sketch follows this list.
- Reset the green-state polling interval back to 1 minute whenever anything changes (new commit/SHA, check status changes, new review comments, mergeability changes, review decision changes).
- If CI stops being green again (new commit, rerun, or regression): return to 1-minute polling.
- If any poll shows the PR is merged or otherwise closed: stop polling immediately and report the terminal state.
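The green-state backoff reduces to a few lines; this mirrors the `run_watch` change that appears later in this diff:

```python
GREEN_STATE_MAX_POLL_SECONDS = 60 * 60  # cap green-state polling at one hour

def next_poll_seconds(prev_seconds, green, changed, base_seconds=60):
    """Stay on the base cadence until CI is green and nothing has changed,
    then double the interval up to the cap."""
    if not green or changed:
        return base_seconds
    return min(prev_seconds * 2, GREEN_STATE_MAX_POLL_SECONDS)

# 60 -> 120 -> 240 -> 480 -> ... -> 3600, resetting to 60 on any change.
```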
## Stop Conditions (Strict)
Stop only when one of the following is true:
- PR merged or closed (stop as soon as a poll/snapshot confirms this).
- PR is ready to merge: CI succeeded, no surfaced unaddressed review comments, not blocked on required review approval, and no merge conflict risk.
- User intervention is required and Codex cannot safely proceed alone.
Keep polling when:
@@ -168,14 +159,14 @@ Keep polling when:
- CI is still running/queued.
- Review state is quiet but CI is not terminal.
- CI is green but mergeability is unknown/pending.
- CI is green and mergeable, but the PR is still open and you are waiting for possible new review comments or merge-conflict changes.
- The PR is green but blocked on review approval (`REVIEW_REQUIRED` / similar); continue polling at the base cadence and surface any new review comments without asking for confirmation to keep watching.
- CI is green and mergeable, but the PR is still open and you are waiting for possible new review comments or merge-conflict changes per the green-state cadence.
- The PR is green but blocked on review approval (`REVIEW_REQUIRED` / similar); continue polling on the green-state cadence and surface any new review comments without asking for confirmation to keep watching.
## Output Expectations
Provide concise progress updates while monitoring and a final summary that includes:
- During long unchanged monitoring periods, avoid emitting a full update on every poll; summarize only status changes plus occasional heartbeat updates.
- Treat push confirmations, intermediate CI snapshots, ready-to-merge snapshots, and review-action updates as progress updates only; do not emit the final summary or end the babysitting session unless a strict stop condition is met.
- Treat push confirmations, intermediate CI snapshots, and review-action updates as progress updates only; do not emit the final summary or end the babysitting session unless a strict stop condition is met.
- A user request to "monitor" is not satisfied by a couple of sample polls; remain in the loop until a strict stop condition or an explicit user interruption.
- A review-fix commit + push is not a completion event; immediately resume live monitoring (`--watch`) in the same turn and continue reporting progress updates.
- When CI first transitions to all green for the current SHA, emit a one-time celebratory progress update (do not repeat it on every green poll). Preferred style: `🚀 CI is all green! 33/33 passed. Still on watch for review approval.`


@@ -1,4 +1,4 @@
interface:
display_name: "PR Babysitter"
short_description: "Watch PR review comments, CI, and merge conflicts"
default_prompt: "Babysit the current PR: monitor reviewer comments, CI, and merge-conflict status (prefer the watchers --watch mode for live monitoring); surface new review feedback before acting on CI or mergeability work, fix valid issues, push updates, and rerun flaky failures up to 3 times. Do not post replies to human-authored review comments unless the user explicitly confirms the exact response. Do not patch unrelated flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are not caused by the branch. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Do not treat a green + mergeable PR as a terminal stop while it is still open; continue polling autonomously after any push/rerun so newly posted review comments are surfaced until a strict terminal stop condition is reached or the user interrupts."
short_description: "Watch PR CI, reviews, and merge conflicts"
default_prompt: "Babysit the current PR: monitor CI, reviewer comments, and merge-conflict status (prefer the watchers --watch mode for live monitoring); fix valid issues, push updates, and rerun flaky failures up to 3 times. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Continue polling autonomously after any push/rerun until a strict terminal stop condition is reached or the user interrupts."


@@ -23,11 +23,9 @@ Used to discover failed workflow runs and rerunnable run IDs.
### Failed log inspection
- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh api repos/{owner}/{repo}/actions/runs/{run_id}/jobs -X GET -f per_page=100`
- `gh api repos/{owner}/{repo}/actions/jobs/{job_id}/logs > /tmp/codex-gh-job-{job_id}-logs.zip`
- `gh run view <run-id> --log-failed`
Used by Codex to classify branch-related vs flaky/unrelated failures. Prefer the direct job log endpoint as soon as a job has failed because `gh run view --log-failed` may not produce failed-job logs until the overall workflow run completes.
Used by Codex to classify branch-related vs flaky/unrelated failures.
### Retry failed jobs only
@@ -72,11 +70,3 @@ Reruns only failed jobs (and dependencies) for a workflow run.
- `conclusion`
- `html_url`
- `head_sha`
### Actions run jobs API (`jobs[]`)
- `id`
- `name`
- `status`
- `conclusion`
- `html_url`


@@ -18,8 +18,6 @@ Treat as **likely flaky or unrelated** when evidence points to transient or exte
- Cloud/service rate limits or transient API outages
- Non-deterministic failures in unrelated integration tests with known flake patterns
Do not patch likely flaky/unrelated failures. Use the retry budget for rerunnable failures, wait for pending jobs, or stop and report the blocker when the failure is persistent or infrastructure-owned.
If uncertain, inspect failed logs once before choosing rerun.
## Decision tree (fix vs rerun vs stop)
@@ -27,11 +25,9 @@ If uncertain, inspect failed logs once before choosing rerun.
1. If PR is merged/closed: stop.
2. If there are failed checks:
- Diagnose first.
- If checks are still pending but an individual job has already failed: fetch that job's logs and diagnose now.
- If branch-related: fix locally, commit, push.
- If likely flaky/unrelated and all checks for the current SHA are terminal: rerun failed jobs.
- If likely flaky/unrelated and not safely rerunnable: stop and report the blocker; do not edit unrelated tests, build scripts, CI configuration, dependency pins, or infrastructure code.
- If checks are still pending and no failed job is available yet: wait.
- If checks are still pending: wait.
3. If flaky reruns for the same SHA reach the configured limit (default 3): stop and report persistent failure.
4. Independently, process any new human review comments.
@@ -44,15 +40,12 @@ Address the comment when:
- The requested change does not conflict with the user's intent or recent guidance.
- The change can be made safely without unrelated refactors.
Fix valid human review feedback in code when possible, but do not post a GitHub reply to a human-authored comment/thread unless the user explicitly confirms the exact response.
Do not auto-fix when:
- The comment is ambiguous and needs clarification.
- The request conflicts with explicit user instructions.
- The proposed change requires product/design decisions the user has not made.
- The codebase is in a dirty/unrelated state that makes safe editing uncertain.
- The comment only needs a written answer or disagreement response; propose the reply to the user instead of posting it automatically.
## Stop-and-ask conditions
@@ -63,4 +56,3 @@ Stop and ask the user instead of continuing automatically when:
- The PR branch cannot be pushed.
- CI failures persist after the flaky retry budget.
- Reviewer feedback requires a product decision or cross-team coordination.
- A human review comment requires a written GitHub reply instead of a code change.


@@ -45,6 +45,7 @@ MERGE_CONFLICT_OR_BLOCKING_STATES = {
"DRAFT",
"UNKNOWN",
}
GREEN_STATE_MAX_POLL_SECONDS = 60 * 60
class GhCommandError(RuntimeError):
@@ -338,66 +339,6 @@ def failed_runs_from_workflow_runs(runs, head_sha):
return failed_runs
def get_jobs_for_run(repo, run_id):
endpoint = f"repos/{repo}/actions/runs/{run_id}/jobs"
data = gh_json(["api", endpoint, "-X", "GET", "-f", "per_page=100"], repo=repo)
if not isinstance(data, dict):
raise GhCommandError("Unexpected payload from actions run jobs API")
jobs = data.get("jobs") or []
if not isinstance(jobs, list):
raise GhCommandError("Expected `jobs` to be a list")
return jobs
def failed_jobs_from_workflow_runs(repo, runs, head_sha):
failed_jobs = []
for run in runs:
if not isinstance(run, dict):
continue
if str(run.get("head_sha") or "") != head_sha:
continue
run_id = run.get("id")
if run_id in (None, ""):
continue
run_status = str(run.get("status") or "")
run_conclusion = str(run.get("conclusion") or "")
if run_status.lower() == "completed" and run_conclusion not in FAILED_RUN_CONCLUSIONS:
continue
jobs = get_jobs_for_run(repo, run_id)
for job in jobs:
if not isinstance(job, dict):
continue
conclusion = str(job.get("conclusion") or "")
if conclusion not in FAILED_RUN_CONCLUSIONS:
continue
job_id = job.get("id")
logs_endpoint = None
if job_id not in (None, ""):
logs_endpoint = f"repos/{repo}/actions/jobs/{job_id}/logs"
failed_jobs.append(
{
"run_id": run_id,
"workflow_name": run.get("name") or run.get("display_title") or "",
"run_status": run_status,
"run_conclusion": run_conclusion,
"job_id": job_id,
"job_name": str(job.get("name") or ""),
"status": str(job.get("status") or ""),
"conclusion": conclusion,
"html_url": str(job.get("html_url") or ""),
"logs_endpoint": logs_endpoint,
}
)
failed_jobs.sort(
key=lambda item: (
str(item.get("workflow_name") or ""),
str(item.get("job_name") or ""),
str(item.get("job_id") or ""),
)
)
return failed_jobs
def get_authenticated_login():
data = gh_json(["api", "user"])
if not isinstance(data, dict) or not data.get("login"):
@@ -628,7 +569,7 @@ def is_pr_ready_to_merge(pr, checks_summary, new_review_items):
return True
def recommend_actions(pr, checks_summary, failed_runs, failed_jobs, new_review_items, retries_used, max_retries):
def recommend_actions(pr, checks_summary, failed_runs, new_review_items, retries_used, max_retries):
actions = []
if pr["closed"] or pr["merged"]:
if new_review_items:
@@ -637,13 +578,13 @@ def recommend_actions(pr, checks_summary, failed_runs, failed_jobs, new_review_i
return unique_actions(actions)
if is_pr_ready_to_merge(pr, checks_summary, new_review_items):
actions.append("ready_to_merge")
actions.append("stop_ready_to_merge")
return unique_actions(actions)
if new_review_items:
actions.append("process_review_comment")
has_failed_pr_checks = checks_summary["failed_count"] > 0 or bool(failed_jobs)
has_failed_pr_checks = checks_summary["failed_count"] > 0
if has_failed_pr_checks:
if checks_summary["all_terminal"] and retries_used >= max_retries:
actions.append("stop_exhausted_retries")
@@ -665,6 +606,12 @@ def collect_snapshot(args):
if not state.get("started_at"):
state["started_at"] = int(time.time())
# `gh pr checks -R <repo>` requires an explicit PR/branch/url argument.
# After resolving `--pr auto`, reuse the concrete PR number.
checks = get_pr_checks(str(pr["number"]), repo=pr["repo"])
checks_summary = summarize_checks(checks)
workflow_runs = get_workflow_runs_for_sha(pr["repo"], pr["head_sha"])
failed_runs = failed_runs_from_workflow_runs(workflow_runs, pr["head_sha"])
authenticated_login = get_authenticated_login()
new_review_items = fetch_new_review_items(
pr,
@@ -672,23 +619,12 @@ def collect_snapshot(args):
fresh_state=fresh_state,
authenticated_login=authenticated_login,
)
# Surface review feedback before drilling into CI and mergeability details.
# That keeps the babysitter responsive to new comments even when other
# actions are also available.
# `gh pr checks -R <repo>` requires an explicit PR/branch/url argument.
# After resolving `--pr auto`, reuse the concrete PR number.
checks = get_pr_checks(str(pr["number"]), repo=pr["repo"])
checks_summary = summarize_checks(checks)
workflow_runs = get_workflow_runs_for_sha(pr["repo"], pr["head_sha"])
failed_runs = failed_runs_from_workflow_runs(workflow_runs, pr["head_sha"])
failed_jobs = failed_jobs_from_workflow_runs(pr["repo"], workflow_runs, pr["head_sha"])
retries_used = current_retry_count(state, pr["head_sha"])
actions = recommend_actions(
pr,
checks_summary,
failed_runs,
failed_jobs,
new_review_items,
retries_used,
args.max_flaky_retries,
@@ -703,7 +639,6 @@ def collect_snapshot(args):
"pr": pr,
"checks": checks_summary,
"failed_runs": failed_runs,
"failed_jobs": failed_jobs,
"new_review_items": new_review_items,
"actions": actions,
"retry_state": {
@@ -826,6 +761,7 @@ def run_watch(args):
if (
"stop_pr_closed" in actions
or "stop_exhausted_retries" in actions
or "stop_ready_to_merge" in actions
):
print_event("stop", {"actions": snapshot.get("actions"), "pr": snapshot.get("pr")})
return 0
@@ -833,13 +769,13 @@ def run_watch(args):
current_change_key = snapshot_change_key(snapshot)
changed = current_change_key != last_change_key
green = is_ci_green(snapshot)
pr = snapshot.get("pr") or {}
pr_open = not bool(pr.get("closed")) and not bool(pr.get("merged"))
if not green or pr_open:
if not green:
poll_seconds = args.poll_seconds
elif changed or last_change_key is None:
poll_seconds = args.poll_seconds
else:
poll_seconds = min(poll_seconds * 2, GREEN_STATE_MAX_POLL_SECONDS)
last_change_key = current_change_key
time.sleep(poll_seconds)


@@ -1,217 +0,0 @@
import argparse
import importlib.util
from pathlib import Path
import pytest
MODULE_PATH = Path(__file__).with_name("gh_pr_watch.py")
MODULE_SPEC = importlib.util.spec_from_file_location("gh_pr_watch", MODULE_PATH)
gh_pr_watch = importlib.util.module_from_spec(MODULE_SPEC)
assert MODULE_SPEC.loader is not None
MODULE_SPEC.loader.exec_module(gh_pr_watch)
def sample_pr():
return {
"number": 123,
"url": "https://github.com/openai/codex/pull/123",
"repo": "openai/codex",
"head_sha": "abc123",
"head_branch": "feature",
"state": "OPEN",
"merged": False,
"closed": False,
"mergeable": "MERGEABLE",
"merge_state_status": "CLEAN",
"review_decision": "",
}
def sample_checks(**overrides):
checks = {
"pending_count": 0,
"failed_count": 0,
"passed_count": 12,
"all_terminal": True,
}
checks.update(overrides)
return checks
def test_collect_snapshot_fetches_review_items_before_ci(monkeypatch, tmp_path):
call_order = []
pr = sample_pr()
monkeypatch.setattr(gh_pr_watch, "resolve_pr", lambda *args, **kwargs: pr)
monkeypatch.setattr(gh_pr_watch, "load_state", lambda path: ({}, True))
monkeypatch.setattr(
gh_pr_watch,
"get_authenticated_login",
lambda: call_order.append("auth") or "octocat",
)
monkeypatch.setattr(
gh_pr_watch,
"fetch_new_review_items",
lambda *args, **kwargs: call_order.append("review") or [],
)
monkeypatch.setattr(
gh_pr_watch,
"get_pr_checks",
lambda *args, **kwargs: call_order.append("checks") or [],
)
monkeypatch.setattr(
gh_pr_watch,
"summarize_checks",
lambda checks: call_order.append("summarize") or sample_checks(),
)
monkeypatch.setattr(
gh_pr_watch,
"get_workflow_runs_for_sha",
lambda *args, **kwargs: call_order.append("workflow") or [],
)
monkeypatch.setattr(
gh_pr_watch,
"failed_runs_from_workflow_runs",
lambda *args, **kwargs: call_order.append("failed_runs") or [],
)
monkeypatch.setattr(
gh_pr_watch,
"failed_jobs_from_workflow_runs",
lambda *args, **kwargs: call_order.append("failed_jobs") or [],
)
monkeypatch.setattr(
gh_pr_watch,
"recommend_actions",
lambda *args, **kwargs: call_order.append("recommend") or ["idle"],
)
monkeypatch.setattr(gh_pr_watch, "save_state", lambda *args, **kwargs: None)
args = argparse.Namespace(
pr="123",
repo=None,
state_file=str(tmp_path / "watcher-state.json"),
max_flaky_retries=3,
)
gh_pr_watch.collect_snapshot(args)
assert call_order.index("review") < call_order.index("checks")
assert call_order.index("review") < call_order.index("workflow")
def test_recommend_actions_prioritizes_review_comments():
actions = gh_pr_watch.recommend_actions(
sample_pr(),
sample_checks(failed_count=1),
[{"run_id": 99}],
[],
[{"kind": "review_comment", "id": "1"}],
0,
3,
)
assert actions == [
"process_review_comment",
"diagnose_ci_failure",
"retry_failed_checks",
]
def test_run_watch_keeps_polling_open_ready_to_merge_pr(monkeypatch):
sleeps = []
events = []
snapshot = {
"pr": sample_pr(),
"checks": sample_checks(),
"failed_runs": [],
"failed_jobs": [],
"new_review_items": [],
"actions": ["ready_to_merge"],
"retry_state": {
"current_sha_retries_used": 0,
"max_flaky_retries": 3,
},
}
monkeypatch.setattr(
gh_pr_watch,
"collect_snapshot",
lambda args: (snapshot, Path("/tmp/codex-babysit-pr-state.json")),
)
monkeypatch.setattr(
gh_pr_watch,
"print_event",
lambda event, payload: events.append((event, payload)),
)
class StopWatch(Exception):
pass
def fake_sleep(seconds):
sleeps.append(seconds)
if len(sleeps) >= 2:
raise StopWatch
monkeypatch.setattr(gh_pr_watch.time, "sleep", fake_sleep)
with pytest.raises(StopWatch):
gh_pr_watch.run_watch(argparse.Namespace(poll_seconds=30))
assert sleeps == [30, 30]
assert [event for event, _ in events] == ["snapshot", "snapshot"]
def test_failed_jobs_include_direct_logs_endpoint(monkeypatch):
jobs_by_run = {
99: [
{
"id": 555,
"name": "unit tests",
"status": "completed",
"conclusion": "failure",
"html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
},
{
"id": 556,
"name": "lint",
"status": "completed",
"conclusion": "success",
},
]
}
monkeypatch.setattr(
gh_pr_watch,
"get_jobs_for_run",
lambda repo, run_id: jobs_by_run[run_id],
)
failed_jobs = gh_pr_watch.failed_jobs_from_workflow_runs(
"openai/codex",
[
{
"id": 99,
"name": "CI",
"status": "in_progress",
"conclusion": "",
"head_sha": "abc123",
}
],
"abc123",
)
assert failed_jobs == [
{
"run_id": 99,
"workflow_name": "CI",
"run_status": "in_progress",
"run_conclusion": "",
"job_id": 555,
"job_name": "unit tests",
"status": "completed",
"conclusion": "failure",
"html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
"logs_endpoint": "repos/openai/codex/actions/jobs/555/logs",
}
]


@@ -1,12 +0,0 @@
---
name: code-breaking-changes
description: Breaking changes
---
Search for breaking changes in external integration surfaces:
- app-server APIs
- CLI parameters
- configuration loading
- resuming sessions from existing rollouts
Do not stop after finding one issue; analyze all possible ways breaking changes can happen.


@@ -1,11 +0,0 @@
---
name: code-review-change-size
description: Change size guidance (800 lines)
---
Unless the change is mechanical, the total number of changed lines should not exceed 800 lines.
For complex logic changes, the size should be under 500 lines.
If the change is larger, explain whether it can be split into reviewable stages and identify the smallest coherent stage to land first.
Base the staging suggestion on the actual diff, dependencies, and affected call sites.


@@ -1,13 +0,0 @@
---
name: code-review-context
description: Model visible context
---
Codex maintains a context (history of messages) that is sent to the model in inference requests.
1. No history rewrite - the context must be built up incrementally.
2. Avoid frequent changes to context that cause cache misses.
3. No unbounded items - everything injected in the model context must have a bounded size and a hard cap.
4. No items larger than 10K tokens.
5. Highlight new individual items that can cross >1k tokens as P0. These need an additional manual review.
6. All injected fragments must be defined as structs in `core/context` and implement the `ContextualUserFragment` trait.


@@ -1,14 +0,0 @@
---
name: code-review-testing
description: Test authoring guidance
---
For agent changes prefer integration tests over unit tests. Integration tests are under `core/suite` and use `test_codex` to set up a test instance of codex.
Features that change the agent logic MUST add an integration test:
- Provide a list of major logic changes and user-facing behaviors that need to be tested.
If unit tests are needed, put them in a dedicated test file (*_tests.rs).
Avoid test-only functions in the main implementation.
Check whether there are existing helpers to make tests more streamlined and readable.


@@ -1,14 +0,0 @@
---
name: code-review
description: Run a final code review on a pull request
---
Use subagents to review code using all code-review-* skills in this repository other than this orchestrator. One subagent per skill. Pass full skill path to subagents. Use xhigh reasoning.
You must return every single issue from every subagent. You can return an unlimited number of findings.
Use raw Markdown to report findings.
Number findings for ease of reference.
Each finding must include a specific file path and line number.
If the GitHub user running the review is the owner of the pull request add a `code-reviewed` label.
Do not leave GitHub comments unless explicitly asked.


@@ -1,48 +0,0 @@
---
name: codex-bug
description: Diagnose GitHub bug reports in openai/codex. Use when given a GitHub issue URL from openai/codex and asked to decide next steps such as verifying against the repo, requesting more info, or explaining why it is not a bug; follow any additional user-provided instructions.
---
# Codex Bug
## Overview
Diagnose a Codex GitHub bug report and decide the next action: verify against sources, request more info, or explain why it is not a bug.
## Workflow
1. Confirm the input
- Require a GitHub issue URL that points to `github.com/openai/codex/issues/…`.
- If the URL is missing or not in the right repo, ask the user for the correct link.
2. Network access
- Always access the issue over the network immediately, even if you think access is blocked or unavailable.
- Prefer the GitHub API over HTML pages because the HTML is noisy (a fetch sketch follows this workflow):
- Issue: `https://api.github.com/repos/openai/codex/issues/<number>`
- Comments: `https://api.github.com/repos/openai/codex/issues/<number>/comments`
- If the environment requires explicit approval, request it on demand via the tool and continue without additional user prompting.
- Only if the network attempt fails after requesting approval, explain what you can do offline (e.g., draft a response template) and ask how to proceed.
3. Read the issue
- Use the GitHub API responses (issue + comments) as the source of truth rather than scraping the HTML issue page.
- Extract: title, body, repro steps, expected vs actual, environment, logs, and any attachments.
- Note whether the report already includes logs or session details.
- If the report includes a thread ID, mention it in the summary and use it to look up the logs and session details if you have access to them.
4. Summarize the bug before investigating
- Before inspecting code, docs, or logs in depth, write a short summary of the report in your own words.
- Include the reported behavior, expected behavior, repro steps, environment, and what evidence is already attached or missing.
5. Decide the course of action
- **Verify with sources** when the report is specific and likely reproducible. Inspect relevant Codex files (or mention the files to inspect if access is unavailable).
- **Request more information** when the report is vague, missing repro steps, or lacks logs/environment.
- **Explain not a bug** when the report contradicts current behavior or documented constraints (cite the evidence from the issue and any local sources you checked).
6. Respond
- Provide a concise report of your findings and next steps.
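A minimal fetch sketch for step 2, using only the standard library (an unauthenticated request; real usage may need a token header):

```python
import json
import urllib.request

def fetch_issue_and_comments(number: int):
    """Fetch an openai/codex issue plus its comments from the GitHub API."""
    base = f"https://api.github.com/repos/openai/codex/issues/{number}"

    def get(url):
        req = urllib.request.Request(
            url, headers={"Accept": "application/vnd.github+json"}
        )
        with urllib.request.urlopen(req) as resp:
            return json.load(resp)

    return get(base), get(f"{base}/comments")
```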


@@ -1,127 +0,0 @@
---
name: codex-issue-digest
description: Run a GitHub issue digest for openai/codex by feature-area labels, all areas, and configurable time windows. Use when asked to summarize recent Codex bug reports or enhancement requests, especially for owner-specific labels such as tui, exec, app, or similar areas.
---
# Codex Issue Digest
## Objective
Produce a headline-first, insight-oriented digest of `openai/codex` issues for the requested feature-area labels over the previous 24 hours by default. Honor a different duration when the user asks for one, for example "past week" or "48 hours". Default to a summary-only response; include details only when requested.
Include only issues that currently have `bug` or `enhancement` plus at least one requested owner label. If the user asks for all areas or all labels, collect `bug`/`enhancement` issues across all labels.
## Inputs
- Feature-area labels, for example `tui exec`
- `all areas` / `all labels` to scan all current feature labels
- Optional repo override, default `openai/codex`
- Optional time window, default previous 24 hours; examples: `48h`, `7d`, `1w`, `past week`
## Workflow
1. Run the collector from a current Codex repo checkout:
```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --labels tui exec --window-hours 24
```
Use `--window "past week"` or `--window-hours 168` when the user asks for a non-default duration. Use `--all-labels` when the user says all areas or all labels.
2. Use the JSON as the source of truth. It includes new issues, new issue comments, new reactions/upvotes, current labels, current reaction counts, model-ready `summary_inputs`, and detailed `digest_rows`.
3. Choose the output mode from the user's request:
- Default mode: start the report with `## Summary` and do not emit `## Details`.
- Details-upfront mode: if the user asks for details, a table, a full digest, "include details", or similar, start with `## Summary`, then include `## Details`.
- Follow-up details mode: if the user asks for more detail after a summary-only digest, produce `## Details` from the existing collector JSON when it is still available; otherwise rerun the collector.
4. In `## Summary`, write a headline-first executive summary:
- The first nonblank line under `## Summary` must be a single-line headline or judgment, not a bullet. It should be useful even if the reader stops there.
- On quiet days, prefer exactly: `No major issues reported by users.` Use this when there are no elevated rows, no newly repeated theme, and nothing that needs owner action.
- When users are surfacing notable issues, make the headline name the count or theme, for example `Two issues are being surfaced by users:`.
- Immediately under an active headline, list only the issues or themes driving attention, ordered by importance. Start each line with the row's `attention_marker` when present, then a concise owner-readable description and inline issue refs.
- Treat `🔥🔥` as headline-worthy and `🔥` as elevated. Do not add fire emoji yourself; only copy the row's `attention_marker`.
- Keep any extra summary detail after the headline to 1-3 terse lines, only when it adds a decision-relevant caveat, repeated theme, or owner action.
- Do not include routine counts, broad stats, or low-signal table summaries in `## Summary` unless they change the headline. Put metadata and optional counts in `## Details` or the footer.
- In default mode, end the report with a concise prompt such as `Want details? I can expand this into the issue table.` Keep this separate from the summary headline so the headline stays clean.
- Cluster and name themes yourself from `summary_inputs`; the collector intentionally does not hard-code issue categories.
- Use a cluster only when the issues genuinely share the same product problem. If several issues merely share a broad platform or label, describe them individually.
- Do not omit a repeated theme just because its individual issues fall below the details table cutoff. Several similar reports should be called out as a repeated customer concern.
- For single-issue rows, summarize the concern directly instead of calling it a cluster.
- Use inline numbered issue links from each relevant row's `ref_markdown`.
- Example quiet summary:
```markdown
## Summary
No major issues reported by users.
Source: collector v5, git `abc123def456`, window `2026-04-27T00:00:00Z` to `2026-04-28T00:00:00Z`.
Want details? I can expand this into the issue table.
```
- Example active summary:
```markdown
## Summary
Two issues are being surfaced by users:
🔥🔥 Terminal launch hangs on startup [1](https://github.com/openai/codex/issues/123)
🔥 Resume switches model providers unexpectedly [2](https://github.com/openai/codex/issues/456)
Source: collector v5, git `abc123def456`, window `2026-04-27T00:00:00Z` to `2026-04-28T00:00:00Z`.
Want details? I can expand this into the issue table.
```
5. In `## Details`, when details are requested, include a compact table only when useful:
- Prefer rows from `digest_rows`; include a `Refs` column using each row's `ref_markdown`.
- Keep the table short; omit low-signal rows when the summary already covers them.
- Use compact columns such as marker, area, type, description, interactions, and refs.
- The `Description` cell should be a short owner-readable phrase. Use row `description`, title, body excerpts, and recent comments, but do not mechanically copy the raw GitHub issue title when it contains incidental details.
- A clear quiet/no-concern sentence when there is no meaningful signal.
6. Use the JSON `attention_marker` exactly. It is empty for normal rows, `🔥` for elevated rows, and `🔥🔥` for very high-attention rows. The actual cutoffs are in `attention_thresholds`.
7. Use inline numbered references where a row or bullet points to issues, for example `Compaction bugs [1](https://github.com/openai/codex/issues/123), [2](https://github.com/openai/codex/issues/456)`. Do not add a separate footnotes section.
8. Label `interactions` as `Interactions`; it counts unique human GitHub users who created a new issue, added a new comment, or reacted during the requested window. Multiple posts/reactions from the same user on the same issue count once.
9. Mention the collector `script_version`, repo checkout `git_head`, and time window in one compact source line. In default mode, put this before the details prompt so the final line still asks whether the user wants details. In details-upfront mode, it can be the footer.
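For reference, here is what a details table consistent with these columns might look like (markers, descriptions, and refs copied from the active-summary example above; areas and counts are illustrative):
```markdown
## Details
| Marker | Area | Type | Description | Interactions | Refs |
| --- | --- | --- | --- | --- | --- |
| 🔥🔥 | tui | bug | Terminal launch hangs on startup | 12 | [1](https://github.com/openai/codex/issues/123) |
| 🔥 | core | bug | Resume switches model providers unexpectedly | 6 | [2](https://github.com/openai/codex/issues/456) |
```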
## Reaction Handling
The collector uses GitHub reactions endpoints, which include `created_at`, to count reactions created during the digest window for hydrated issues. It reports both in-window reaction counts and current reaction totals. Treat current reaction totals as standing engagement, and treat `new_reactions` / `new_upvotes` as windowed activity.
By default, the collector fetches issue comments with `since=<window start>` and caps the number of comment pages per issue. This keeps very long historical threads from dominating a digest run and focuses the report on recent posts. Use `--fetch-all-comments` only when exhaustive comment history is more important than runtime.
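Concretely, the default comment fetch issues paged `gh api` calls of this shape, mirroring the collector's `fetch_comments` helper (the issue number and timestamp here are illustrative):
```bash
gh api "repos/openai/codex/issues/123/comments?since=2026-04-25T00%3A00%3A00Z&per_page=100&page=1"
```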
GitHub issue search is still seeded by issue `updated_at`, so a purely reaction-only issue may be missed if reactions do not bump `updated_at`. Covering every reaction-only case would require either a persisted snapshot store or a broader scan of labeled issues.
## Attention Markers
The collector scales attention markers by the requested time window. The baseline is 5 unique human users for `🔥` and 10 unique human users for `🔥🔥` over 24 hours; longer or shorter windows scale those cutoffs linearly and round up. For example, a one-week report uses 35 and 70 interactions. Unique human users are users who authored a new issue, authored a new comment, or reacted during the window, including upvotes. Multiple actions from the same user on the same issue count once. Bot posts and bot reactions are excluded. In prose, explain this as high user interaction rather than naming the emoji.
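A minimal sketch of these rules, mirroring the collector's `attention_thresholds_for_window` and `attention_marker_for` helpers; the dedupe helper is illustrative and omits the per-event window checks the real collector performs:
```python
import math

def attention_thresholds_for_window(window_hours: int) -> dict[str, int]:
    # Baseline: 5 unique human users for elevated and 10 for very high per
    # 24 hours, scaled linearly with the window length and rounded up.
    scale = window_hours / 24
    return {
        "elevated": math.ceil(5 * scale),
        "very_high": math.ceil(10 * scale),
    }

def attention_marker_for(interactions: int, thresholds: dict[str, int]) -> str:
    if interactions >= thresholds["very_high"]:
        return "🔥🔥"
    if interactions >= thresholds["elevated"]:
        return "🔥"
    return ""

def unique_human_users(logins: list[str]) -> int:
    # Bots are excluded; repeat actions by the same user count once.
    return len(
        {login.casefold() for login in logins if login and not login.endswith("[bot]")}
    )
```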
## Freshness
The automation should run from a repo checkout that contains this skill. For shared daily use, prefer one of these patterns:
- Run the automation in a checkout that is refreshed before the automation starts, for example with `git pull --ff-only`.
- If the automation cannot safely mutate the checkout, have it report the current `git_head` from the collector output so readers know which skill/script version produced the digest.
## Sample Owner Prompt
```text
Use $codex-issue-digest to run the Codex issue digest for labels tui and exec over the previous 24 hours.
```
```text
Use $codex-issue-digest to run the Codex issue digest for all areas over the past week.
```
## Validation
Dry run the collector against recent issues:
```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --labels tui exec --window-hours 24
```
```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --all-labels --window "past week" --limit-issues 10
```
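To spot-check the emitted rows, assuming the collector prints its JSON digest to stdout (the `digest_rows` field name comes from the report guidance above):
```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --labels tui --window-hours 24 | jq '.digest_rows[:3]'
```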
Run the focused script tests:
```bash
pytest .codex/skills/codex-issue-digest/scripts/test_collect_issue_digest.py
```

View File

@@ -1,4 +0,0 @@
interface:
display_name: "Codex Issue Digest"
short_description: "Summarize Codex issues by labels or all areas"
default_prompt: "Use $codex-issue-digest to run the Codex issue digest for labels tui and exec over the previous 24 hours."

View File

@@ -1,749 +0,0 @@
import importlib.util
from datetime import timezone
from pathlib import Path
MODULE_PATH = Path(__file__).with_name("collect_issue_digest.py")
MODULE_SPEC = importlib.util.spec_from_file_location(
"collect_issue_digest", MODULE_PATH
)
collect_issue_digest = importlib.util.module_from_spec(MODULE_SPEC)
assert MODULE_SPEC.loader is not None
MODULE_SPEC.loader.exec_module(collect_issue_digest)
def test_build_search_queries_uses_each_owner_and_kind_label():
since = collect_issue_digest.parse_timestamp("2026-04-25T12:34:56Z", "--since")
queries = collect_issue_digest.build_search_queries(
"openai/codex", ["tui", "exec"], since
)
assert queries == [
"repo:openai/codex is:issue updated:>=2026-04-25 label:tui label:bug",
"repo:openai/codex is:issue updated:>=2026-04-25 label:tui label:enhancement",
"repo:openai/codex is:issue updated:>=2026-04-25 label:exec label:bug",
"repo:openai/codex is:issue updated:>=2026-04-25 label:exec label:enhancement",
]
def test_build_search_queries_can_scan_all_labels():
since = collect_issue_digest.parse_timestamp("2026-04-25T12:34:56Z", "--since")
queries = collect_issue_digest.build_search_queries(
"openai/codex", [], since, all_labels=True
)
assert queries == [
"repo:openai/codex is:issue updated:>=2026-04-25 label:bug",
"repo:openai/codex is:issue updated:>=2026-04-25 label:enhancement",
]
def test_normalize_requested_labels_accepts_all_area_phrases():
assert collect_issue_digest.normalize_requested_labels(["all", "areas"]) == (
[],
True,
)
assert collect_issue_digest.normalize_requested_labels(["all-labels"]) == (
[],
True,
)
def test_search_issue_numbers_requests_updated_sort(monkeypatch):
calls = []
def fake_gh_json(args):
calls.append(args)
return {
"items": [
{"number": 1, "updated_at": "2026-04-25T00:00:00Z"},
]
}
monkeypatch.setattr(collect_issue_digest, "gh_json", fake_gh_json)
assert collect_issue_digest.search_issue_numbers(["query"], limit=10) == [1]
assert "-f" in calls[0]
assert "sort=updated" in calls[0]
assert "order=desc" in calls[0]
def test_search_issue_numbers_applies_limit_per_query(monkeypatch):
calls = []
def fake_gh_json(args):
calls.append(args)
query = next(
value.removeprefix("q=") for value in args if value.startswith("q=")
)
page = int(
next(
value.removeprefix("page=")
for value in args
if value.startswith("page=")
)
)
base = 10_000 if query == "first" else 20_000
offset = (page - 1) * 100
return {
"items": [
{
"number": base + offset + idx,
"updated_at": f"2026-04-25T00:{idx:02d}:00Z",
}
for idx in range(100)
]
}
monkeypatch.setattr(collect_issue_digest, "gh_json", fake_gh_json)
collect_issue_digest.search_issue_numbers(["first", "second"], limit=150)
queried_pages = [
(
next(
value.removeprefix("q=") for value in args if value.startswith("q=")
),
next(
value.removeprefix("page=")
for value in args
if value.startswith("page=")
),
)
for args in calls
]
assert queried_pages == [
("first", "1"),
("first", "2"),
("second", "1"),
("second", "2"),
]
def test_summarize_issue_keeps_new_comments_and_reaction_signals():
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
issue = {
"number": 123,
"title": "TUI does not redraw",
"html_url": "https://github.com/openai/codex/issues/123",
"state": "open",
"created_at": "2026-04-24T20:00:00Z",
"updated_at": "2026-04-25T10:00:00Z",
"user": {"login": "alice"},
"author_association": "NONE",
"comments": 2,
"body": "The terminal freezes after resize.",
"labels": [{"name": "bug"}, {"name": "tui"}],
"reactions": {"total_count": 3, "+1": 2, "rocket": 1},
}
comments = [
{
"id": 1,
"created_at": "2026-04-25T11:00:00Z",
"updated_at": "2026-04-25T11:00:00Z",
"html_url": "https://github.com/openai/codex/issues/123#issuecomment-1",
"user": {"login": "bob"},
"author_association": "MEMBER",
"body": "I can reproduce this on main.",
"reactions": {"total_count": 4, "heart": 1, "+1": 3},
},
{
"id": 2,
"created_at": "2026-04-24T11:00:00Z",
"updated_at": "2026-04-24T11:00:00Z",
"html_url": "https://github.com/openai/codex/issues/123#issuecomment-2",
"user": {"login": "carol"},
"author_association": "NONE",
"body": "Older comment.",
"reactions": {"total_count": 1, "eyes": 1},
},
]
summary = collect_issue_digest.summarize_issue(
issue,
comments,
["tui", "exec"],
since,
until,
body_chars=200,
comment_chars=200,
)
assert summary == {
"number": 123,
"title": "TUI does not redraw",
"description": "TUI does not redraw",
"url": "https://github.com/openai/codex/issues/123",
"state": "open",
"author": "alice",
"author_association": "NONE",
"created_at": "2026-04-24T20:00:00Z",
"updated_at": "2026-04-25T10:00:00Z",
"labels": ["bug", "tui"],
"kind_labels": ["bug"],
"owner_labels": ["tui"],
"comments_total": 2,
"comments_hydration": {
"fetched": 2,
"since": None,
"truncated": False,
"max_pages": None,
},
"issue_reactions": {"+1": 2, "rocket": 1},
"issue_reaction_total": 3,
"comment_reaction_total": 5,
"new_comment_reaction_total": 4,
"new_issue_reactions": 0,
"new_issue_upvotes": 0,
"new_comment_reactions": 0,
"new_comment_upvotes": 0,
"new_reactions": 0,
"new_upvotes": 0,
"user_interactions": 1,
"attention": False,
"attention_level": 0,
"attention_marker": "",
"engagement_score": 12,
"activity": {
"new_issue": False,
"new_comments": 1,
"new_human_comments": 1,
"new_reactions": 0,
"new_upvotes": 0,
"updated_without_visible_new_post": False,
},
"body_excerpt": "The terminal freezes after resize.",
"new_comments": [
{
"id": 1,
"author": "bob",
"author_association": "MEMBER",
"created_at": "2026-04-25T11:00:00Z",
"updated_at": "2026-04-25T11:00:00Z",
"url": "https://github.com/openai/codex/issues/123#issuecomment-1",
"human_user_interaction": True,
"reactions": {"+1": 3, "heart": 1},
"reaction_total": 4,
"new_reactions": 0,
"new_upvotes": 0,
"new_reaction_counts": {},
"body_excerpt": "I can reproduce this on main.",
}
],
}
def test_summarize_issue_filters_non_owner_or_non_kind_labels():
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
base_issue = {
"number": 1,
"title": "Question",
"created_at": "2026-04-25T01:00:00Z",
"updated_at": "2026-04-25T01:00:00Z",
"labels": [{"name": "question"}, {"name": "tui"}],
}
assert (
collect_issue_digest.summarize_issue(
base_issue,
[],
["tui"],
since,
until,
body_chars=100,
comment_chars=100,
)
is None
)
issue_without_owner = dict(base_issue)
issue_without_owner["labels"] = [{"name": "bug"}, {"name": "app"}]
assert (
collect_issue_digest.summarize_issue(
issue_without_owner,
[],
["tui"],
since,
until,
body_chars=100,
comment_chars=100,
)
is None
)
def test_resolve_window_defaults_to_previous_hours():
class Args:
since = None
until = "2026-04-26T12:00:00Z"
window_hours = 24
since, until = collect_issue_digest.resolve_window(Args())
assert since.isoformat() == "2026-04-25T12:00:00+00:00"
assert until.tzinfo == timezone.utc
def test_parse_duration_hours_accepts_common_phrases():
assert collect_issue_digest.parse_duration_hours("past week") == 168
assert collect_issue_digest.parse_duration_hours("48h") == 48
assert collect_issue_digest.parse_duration_hours("2 days") == 48
assert collect_issue_digest.parse_duration_hours("1w") == 168
def test_attention_thresholds_scale_by_window_length():
one_day = collect_issue_digest.attention_thresholds_for_window(24)
assert one_day["elevated"] == 5
assert one_day["very_high"] == 10
half_day = collect_issue_digest.attention_thresholds_for_window(12)
assert half_day["elevated"] == 3
assert half_day["very_high"] == 5
week = collect_issue_digest.attention_thresholds_for_window(168)
assert week["elevated"] == 35
assert week["very_high"] == 70
assert collect_issue_digest.attention_marker_for(34, week) == ""
assert collect_issue_digest.attention_marker_for(35, week) == "🔥"
assert collect_issue_digest.attention_marker_for(70, week) == "🔥🔥"
def test_fetch_comments_uses_since_filter_and_page_cap(monkeypatch):
calls = []
def fake_gh_json(args):
calls.append(args)
return [{"id": idx} for idx in range(100)]
monkeypatch.setattr(collect_issue_digest, "gh_json", fake_gh_json)
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
payload = collect_issue_digest.fetch_comments(
"openai/codex", 123, since=since, max_pages=1
)
assert len(payload["items"]) == 100
assert payload["truncated"] is True
assert payload["max_pages"] == 1
assert calls == [
[
"api",
"repos/openai/codex/issues/123/comments?since=2026-04-25T00%3A00%3A00Z&per_page=100&page=1",
]
]
def test_issue_description_prefers_title_over_body_noise():
issue = {
"title": "Codex.app GUI: MCP child processes not reaped after task completion",
"body": "A later crash mention should not override the title-level symptom.",
"labels": [{"name": "app"}, {"name": "bug"}],
}
description = collect_issue_digest.issue_description(issue)
assert "MCP child processes" in description
assert "crash" not in description.casefold()
def test_attention_markers_count_human_user_interactions():
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
issue = {
"number": 456,
"title": "Agent context is exploding",
"html_url": "https://github.com/openai/codex/issues/456",
"state": "open",
"created_at": "2026-04-25T01:00:00Z",
"updated_at": "2026-04-25T12:00:00Z",
"user": {"login": "alice"},
"labels": [{"name": "bug"}, {"name": "agent"}],
}
comments = [
{
"id": idx,
"created_at": "2026-04-25T02:00:00Z",
"updated_at": "2026-04-25T02:00:00Z",
"user": {"login": f"user-{idx}"},
"body": "same here",
}
for idx in range(4)
]
comments.append(
{
"id": 99,
"created_at": "2026-04-25T02:00:00Z",
"updated_at": "2026-04-25T02:00:00Z",
"user": {"login": "github-actions[bot]"},
"body": "duplicate bot note",
}
)
summary = collect_issue_digest.summarize_issue(
issue,
comments,
["agent"],
since,
until,
body_chars=100,
comment_chars=100,
)
assert summary["user_interactions"] == 5
assert summary["activity"]["new_human_comments"] == 4
assert summary["attention"] is True
assert summary["attention_level"] == 1
assert summary["attention_marker"] == "🔥"
issue["created_at"] = "2026-04-24T01:00:00Z"
comments.extend(
{
"id": idx,
"created_at": "2026-04-25T03:00:00Z",
"updated_at": "2026-04-25T03:00:00Z",
"user": {"login": f"extra-user-{idx}"},
"body": "also seeing this",
}
for idx in range(100, 106)
)
summary = collect_issue_digest.summarize_issue(
issue,
comments,
["agent"],
since,
until,
body_chars=100,
comment_chars=100,
)
assert summary["user_interactions"] == 10
assert summary["attention_level"] == 2
assert summary["attention_marker"] == "🔥🔥"
def test_reactions_count_toward_attention_markers():
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
issue = {
"number": 789,
"title": "Support 1M token context",
"html_url": "https://github.com/openai/codex/issues/789",
"state": "open",
"created_at": "2026-04-24T01:00:00Z",
"updated_at": "2026-04-25T12:00:00Z",
"user": {"login": "alice"},
"labels": [{"name": "enhancement"}, {"name": "context"}],
"reactions": {"total_count": 20, "+1": 20},
}
comments = [
{
"id": 1,
"created_at": "2026-04-25T02:00:00Z",
"updated_at": "2026-04-25T02:00:00Z",
"user": {"login": "commenter"},
"body": "please",
"reactions": {"total_count": 2, "+1": 2},
}
]
issue_reactions = [
{
"content": "+1",
"created_at": "2026-04-25T03:00:00Z",
"user": {"login": f"reactor-{idx}"},
}
for idx in range(18)
]
comment_reactions_by_id = {
1: [
{
"content": "heart",
"created_at": "2026-04-25T04:00:00Z",
"user": {"login": "human-reactor"},
},
{
"content": "+1",
"created_at": "2026-04-25T04:00:00Z",
"user": {"login": "github-actions[bot]"},
},
]
}
summary = collect_issue_digest.summarize_issue(
issue,
comments,
["context"],
since,
until,
body_chars=100,
comment_chars=100,
issue_reaction_events=issue_reactions,
comment_reactions_by_id=comment_reactions_by_id,
)
assert summary["new_reactions"] == 19
assert summary["new_upvotes"] == 18
assert summary["user_interactions"] == 20
assert summary["attention_level"] == 2
assert summary["attention_marker"] == "🔥🔥"
assert summary["new_comments"][0]["new_reactions"] == 1
assert summary["new_comments"][0]["new_upvotes"] == 0
def test_user_interactions_are_deduped_by_human_login():
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
def comment(comment_id, login):
return {
"id": comment_id,
"created_at": f"2026-04-25T0{comment_id + 1}:00:00Z",
"updated_at": f"2026-04-25T0{comment_id + 1}:00:00Z",
"user": {"login": login},
"body": "same issue",
}
def reaction(content, login, created_at="2026-04-25T10:00:00Z"):
return {
"content": content,
"created_at": created_at,
"user": {"login": login},
}
issue = {
"number": 790,
"title": "Repeated pings should not boost attention",
"html_url": "https://github.com/openai/codex/issues/790",
"state": "open",
"created_at": "2026-04-25T01:00:00Z",
"updated_at": "2026-04-25T12:00:00Z",
"user": {"login": "Alice"},
"labels": [{"name": "bug"}, {"name": "tui"}],
}
comments = [comment(1, "alice"), comment(2, "ALICE"), comment(3, "bob")]
comments.append(comment(4, "github-actions[bot]"))
issue_reactions = [
reaction("+1", "alice"),
reaction("rocket", "Alice"),
reaction("+1", "bob"),
reaction("+1", "github-actions[bot]"),
reaction("+1", "carol", created_at="2026-04-24T23:00:00Z"),
]
comment_reactions_by_id = {
1: [reaction("heart", "alice")],
2: [reaction("+1", "bob")],
3: [reaction("eyes", "carol")],
}
summary = collect_issue_digest.summarize_issue(
issue,
comments,
["tui"],
since,
until,
body_chars=100,
comment_chars=100,
issue_reaction_events=issue_reactions,
comment_reactions_by_id=comment_reactions_by_id,
)
assert summary["activity"]["new_human_comments"] == 3
assert summary["new_reactions"] == 6
assert summary["user_interactions"] == 3
assert summary["attention"] is False
assert summary["attention_marker"] == ""
def test_digest_rows_are_table_ready_with_concise_descriptions():
rows = collect_issue_digest.digest_rows(
[
{
"number": 1,
"title": "Quiet bug",
"description": "Quiet bug",
"url": "https://github.com/openai/codex/issues/1",
"owner_labels": ["context"],
"kind_labels": ["bug"],
"state": "open",
"attention": False,
"attention_level": 0,
"attention_marker": "",
"user_interactions": 1,
"new_reactions": 0,
"new_upvotes": 0,
"engagement_score": 3,
"issue_reaction_total": 0,
"comment_reaction_total": 0,
"updated_at": "2026-04-25T01:00:00Z",
"activity": {
"new_issue": True,
"new_comments": 0,
"new_reactions": 0,
"updated_without_visible_new_post": False,
},
},
{
"number": 2,
"title": "Busy bug",
"description": "High-volume bug report",
"url": "https://github.com/openai/codex/issues/2",
"owner_labels": ["agent"],
"kind_labels": ["bug"],
"state": "open",
"attention": True,
"attention_level": 1,
"attention_marker": "🔥",
"user_interactions": 17,
"new_reactions": 3,
"new_upvotes": 2,
"engagement_score": 20,
"issue_reaction_total": 5,
"comment_reaction_total": 2,
"updated_at": "2026-04-25T02:00:00Z",
"activity": {
"new_issue": False,
"new_comments": 16,
"new_reactions": 3,
"updated_without_visible_new_post": False,
},
},
]
)
assert rows[0] == {
"ref": 1,
"ref_markdown": "[1](https://github.com/openai/codex/issues/2)",
"marker": "🔥",
"attention_marker": "🔥",
"number": 2,
"description": "High-volume bug report",
"title": "Busy bug",
"url": "https://github.com/openai/codex/issues/2",
"area": "agent",
"kind": "bug",
"state": "open",
"interactions": 17,
"user_interactions": 17,
"new_reactions": 3,
"new_upvotes": 2,
"current_reactions": 7,
}
def test_summary_inputs_are_model_ready_without_preclustering():
issues = [
{
"number": 20,
"title": "Windows app Browser Use external navigation fails",
"description": "Browser Use navigation or app-server failure",
"url": "https://github.com/openai/codex/issues/20",
"labels": ["app", "bug"],
"owner_labels": ["app"],
"kind_labels": ["bug"],
"attention": False,
"attention_level": 0,
"attention_marker": "",
"user_interactions": 3,
"new_reactions": 1,
"engagement_score": 8,
"updated_at": "2026-04-25T04:00:00Z",
"activity": {"new_comments": 2},
},
{
"number": 21,
"title": "On Windows, cmake output waits until timeout",
"description": "Windows command timeout/capture problem",
"url": "https://github.com/openai/codex/issues/21",
"labels": ["app", "bug"],
"owner_labels": ["app"],
"kind_labels": ["bug"],
"attention": False,
"attention_level": 0,
"attention_marker": "",
"user_interactions": 3,
"new_reactions": 0,
"engagement_score": 7,
"updated_at": "2026-04-25T03:00:00Z",
"activity": {"new_comments": 3},
},
{
"number": 22,
"title": "Windows computer use tool fails to click buttons",
"description": "Computer-use workflow failure",
"url": "https://github.com/openai/codex/issues/22",
"labels": ["app", "bug"],
"owner_labels": ["app"],
"kind_labels": ["bug"],
"attention": False,
"attention_level": 0,
"attention_marker": "",
"user_interactions": 3,
"new_reactions": 0,
"engagement_score": 6,
"updated_at": "2026-04-25T02:00:00Z",
"activity": {"new_comments": 3},
},
]
rows = collect_issue_digest.summary_inputs(issues, ref_map={20: 1, 21: 2, 22: 3})
assert rows == [
{
"ref": 1,
"ref_markdown": "[1](https://github.com/openai/codex/issues/20)",
"number": 20,
"title": "Windows app Browser Use external navigation fails",
"description": "Browser Use navigation or app-server failure",
"url": "https://github.com/openai/codex/issues/20",
"labels": ["app", "bug"],
"owner_labels": ["app"],
"kind_labels": ["bug"],
"state": "",
"attention_marker": "",
"interactions": 3,
"new_comments": 2,
"new_reactions": 1,
"new_upvotes": 0,
"current_reactions": 0,
},
{
"ref": 2,
"ref_markdown": "[2](https://github.com/openai/codex/issues/21)",
"number": 21,
"title": "On Windows, cmake output waits until timeout",
"description": "Windows command timeout/capture problem",
"url": "https://github.com/openai/codex/issues/21",
"labels": ["app", "bug"],
"owner_labels": ["app"],
"kind_labels": ["bug"],
"state": "",
"attention_marker": "",
"interactions": 3,
"new_comments": 3,
"new_reactions": 0,
"new_upvotes": 0,
"current_reactions": 0,
},
{
"ref": 3,
"ref_markdown": "[3](https://github.com/openai/codex/issues/22)",
"number": 22,
"title": "Windows computer use tool fails to click buttons",
"description": "Computer-use workflow failure",
"url": "https://github.com/openai/codex/issues/22",
"labels": ["app", "bug"],
"owner_labels": ["app"],
"kind_labels": ["bug"],
"state": "",
"attention_marker": "",
"interactions": 3,
"new_comments": 3,
"new_reactions": 0,
"new_upvotes": 0,
"current_reactions": 0,
},
]

View File

@@ -1,59 +0,0 @@
---
name: codex-pr-body
description: Update the title and body of one or more pull requests.
---
## Determining the PR(s)
When this skill is invoked, the PR(s) to update may be specified explicitly, but in the common case, the PR(s) to update will be inferred from the branch / commit that the user is currently working on. For ordinary Git usage (i.e., not Sapling as discussed below), you may have to use a combination of `git branch` and `gh pr view <branch> --repo openai/codex --json number --jq '.number'` to determine the PR associated with the current branch / commit.
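A hedged sketch of that lookup for ordinary Git usage; `git branch --show-current` is one common way to get the branch name:
```shell
branch="$(git branch --show-current)"
gh pr view "$branch" --repo openai/codex --json number --jq '.number'
```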
## PR Body Contents
When invoked, use `gh` to edit the pull request body and title to reflect the contents of the specified PR. Make sure to check the existing pull request body to see if there is key information that should be preserved. For example, NEVER remove an image in the existing pull request body, as the author may have no way to recover it if you remove it.
It is critically important to explain _why_ the change is being made. If the current conversation in which this skill is invoked has discussed the motivation, be sure to capture this in the pull request body.
The body should also explain _what_ changed, but this should appear after the _why_.
Limit discussion to the _net change_ of the commit. It is generally frowned upon to discuss changes that were attempted but later undone in the course of the development of the pull request. When rewriting the pull request body, you may need to eliminate details such as these when they are no longer appropriate / of interest to future readers.
Avoid references to absolute paths on my local disk. When talking about a path that is within the repository, simply use the repo-relative path.
It is generally helpful to discuss how the change was verified. That said, it is unnecessary to mention things that CI checks automatically, e.g., do not include "ran `just fmt`" as part of the test plan. Though identifying the new tests that were purposely introduced to verify the new behavior introduced by the pull request is often appropriate.
Make use of Markdown to format the pull request professionally. Ensure "code things" appear in single backticks when referenced inline. Fenced code blocks are useful when referencing code or showing a shell transcript. Also, make use of GitHub permalinks when citing existing pieces of code that are relevant to the change.
Make sure to reference any relevant pull requests or issues, though there should be no need to reference the pull request in its own PR body.
If there is documentation that should be updated on https://developers.openai.com/codex as a result of this change, please note that in a separate section near the end of the pull request. Omit this section if there is no documentation that needs to be updated.
## Working with Stacks
Sometimes a pull request is composed of a stack of commits that build on one another. In these cases, the PR body should reflect the _net_ change introduced by the stack as a whole, rather than the individual commits that make up the stack.
Similarly, sometimes a user may be using a tool like Sapling to leverage _stacked pull requests_, in which case the `base` of the PR may be a branch that is the `head` of another PR in the stack rather than `main`. In this case, be sure to discuss only the net change between the `base` and `head` of the PR that is being opened against that stacked base, rather than the changes relative to `main`.
## Sapling
If `.git/sl/store` is present, then this Git repository is governed by Sapling SCM (https://sapling-scm.com).
In Sapling, run the following to see if there is a GitHub pull request associated with the current revision:
```shell
sl log --template '{github_pull_request_url}' -r .
```
Alternatively, you can run `sl sl` to see the current development branch and whether there is a GitHub pull request associated with the current commit. For example, if the output were:
```
@ cb032b31cf 72 minutes ago mbolin #11412
╭─╯ tui: show non-file layer content in /debug-config
o fdd0cd1de9 Today at 20:09 origin/main
~
```
- `@` indicates the current commit is `cb032b31cf`
- it is a development branch containing a single commit branched off of `origin/main`
- it is associated with GitHub pull request #11412

View File

@@ -1,16 +0,0 @@
---
name: remote-tests
description: How to run tests using remote executor.
---
Some Codex integration tests support running against a remote executor.
When the `CODEX_TEST_REMOTE_ENV` environment variable is set, they attempt to start an executor process in the Docker container that `CODEX_TEST_REMOTE_ENV` points to and use it in tests.
The Docker container is built and initialized via `./scripts/test-remote-env.sh`.
Running remote tests is currently supported only on Linux, so use a devbox to run them.
You can list devboxes via `applied_devbox ls`, pick the one with `codex` in the name.
Connect to devbox via `ssh <devbox_name>`.
Reuse the same checkout of codex in `~/code/codex`. Reset files if needed. Multiple checkouts take longer to build and take up more space.
Check whether the SHA and modified files are in sync between remote and local.
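A rough shape of a run on the devbox; the `CODEX_TEST_REMOTE_ENV` value format and the `cargo test` target here are assumptions, not confirmed by this document:
```shell
./scripts/test-remote-env.sh
CODEX_TEST_REMOTE_ENV=<container> cargo test -p codex-core
```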

View File

@@ -1,72 +0,0 @@
---
name: update-v8-version
description: Update Codex's pinned `v8` / `rusty_v8` versions, validate the release-candidate path, and investigate failed V8 canary or artifact builds. Use when asked to bump V8, update `rusty_v8` artifacts, prepare or validate a V8 release candidate, check `v8-canary`, or diagnose why a V8 version update no longer builds.
---
# Update V8 Version
## Core Workflow
1. Read `third_party/v8/README.md` and follow its version-bump sequence. Treat
that document as the release-process source of truth.
2. Inspect and update the concrete repo surfaces that carry the pin:
- `codex-rs/Cargo.toml`
- `codex-rs/Cargo.lock`
- `MODULE.bazel`
- `third_party/v8/BUILD.bazel`
- `third_party/v8/README.md`
- the matching `third_party/v8/rusty_v8_<version>.sha256` manifest when the
remaining prebuilt inputs change
3. Keep the existing checksum helpers in the loop:
```bash
python3 .github/scripts/rusty_v8_bazel.py update-module-bazel
python3 .github/scripts/rusty_v8_bazel.py check-module-bazel
python3 -m unittest discover -s .github/scripts -p test_rusty_v8_bazel.py
```
4. Validate the release-candidate path before broadening the work:
- Prefer checking the `v8-canary` CI result for the candidate branch or PR
     when one exists, using GitHub check tooling or `gh` as appropriate (see the sketch after this list).
- If CI is unavailable or the user asked for a local-only check, run the
closest local validation that is practical for the changed surface and say
explicitly that it is a local substitute, not the full hosted canary.
5. If the canary path passes, stop there. Summarize the result and encourage the
user to commit the candidate changes or proceed with the release flow they
requested. Do not publish tags, releases, or pushes unless the user asked.
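For step 4, one way to check the hosted canary with `gh` (the workflow file name comes from `.github/workflows/v8-canary.yml` referenced below; the branch name is illustrative):
```bash
gh run list --workflow v8-canary.yml --branch my-v8-bump --limit 5
```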
## Failure Path
Enter this path only when the canary or local build path fails.
1. Capture the failing target, workflow job, and first actionable error.
2. Compare the currently pinned version with the target version at the relevant
upstream tag or SHA. Inspect both:
- `denoland/rusty_v8`
- upstream V8 source at the target Bazel-pinned version
3. Track build-relevant deltas rather than broad source churn:
- generated binding layout changes
- archive or asset naming changes
- GN/Bazel target changes
- custom libc++ / libc++abi / llvm-libc inputs
- sandbox or pointer-compression feature relationships
- patch hunks in `patches/` that no longer apply or no longer match upstream
4. Trace each failing delta back into Codex's build graph:
- `MODULE.bazel`
- `third_party/v8/BUILD.bazel`
- `.github/scripts/rusty_v8_bazel.py`
- `.github/workflows/v8-canary.yml`
- `.github/workflows/rusty-v8-release.yml`
5. Update only the pieces required to restore the target version's build and
artifact contract. Keep patch explanations and doc changes close to the
affected files.
6. Re-run the focused validation. If it becomes green, return to the normal
workflow and stop with a concise summary plus the remaining release step.
## Reporting
- Say whether validation came from hosted `v8-canary` or from a local
substitute.
- Distinguish "version bump complete" from "release published".
- When blocked, report the upstream delta that matters, the Codex file it hits,
and the next concrete fix to try.

View File

@@ -1,4 +0,0 @@
interface:
display_name: "Update V8 Version"
short_description: "Guide V8 bumps and release validation"
default_prompt: "Use $update-v8-version to update Codex to a new v8 release and validate the release-candidate path."

View File

@@ -1,82 +0,0 @@
FROM mcr.microsoft.com/devcontainers/base:ubuntu-24.04
ARG TZ
ARG DEBIAN_FRONTEND=noninteractive
ARG NODE_MAJOR=22
ARG RUST_TOOLCHAIN=1.92.0
# Keep this in sync with .devcontainer/codex-install/package.json and pnpm-lock.yaml.
ARG CODEX_NPM_VERSION=0.121.0
ENV TZ="$TZ"
ENV COREPACK_ENABLE_DOWNLOAD_PROMPT=0
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# Devcontainers run as a non-root user, so enable bubblewrap's setuid mode.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
build-essential \
curl \
git \
ca-certificates \
pkg-config \
clang \
musl-tools \
libssl-dev \
libsqlite3-dev \
just \
python3 \
python3-pip \
jq \
less \
man-db \
unzip \
ripgrep \
fzf \
fd-find \
zsh \
dnsutils \
iproute2 \
ipset \
iptables \
aggregate \
bubblewrap \
&& chmod u+s /usr/bin/bwrap \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY .devcontainer/codex-install/package.json \
.devcontainer/codex-install/pnpm-lock.yaml \
.devcontainer/codex-install/pnpm-workspace.yaml \
/opt/codex-install/
RUN curl -fsSL "https://deb.nodesource.com/setup_${NODE_MAJOR}.x" | bash - \
&& apt-get update \
&& apt-get install -y --no-install-recommends nodejs \
&& test "$(node -p "require('/opt/codex-install/package.json').dependencies['@openai/codex']")" = "${CODEX_NPM_VERSION}" \
&& cd /opt/codex-install \
&& corepack pnpm install --prod --frozen-lockfile \
&& ln -s /opt/codex-install/node_modules/.bin/codex /usr/local/bin/codex \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY .devcontainer/init-firewall.sh /usr/local/bin/init-firewall.sh
COPY .devcontainer/post_install.py /opt/post_install.py
COPY .devcontainer/post-start.sh /opt/post_start.sh
RUN chmod 500 /usr/local/bin/init-firewall.sh \
&& chmod 755 /opt/post_start.sh \
&& chmod 644 /opt/post_install.py \
&& chown vscode:vscode /opt/post_install.py
RUN install -d -m 0775 -o vscode -g vscode /commandhistory /workspace \
&& touch /commandhistory/.bash_history /commandhistory/.zsh_history \
&& chown vscode:vscode /commandhistory/.bash_history /commandhistory/.zsh_history
USER vscode
ENV PATH="/home/vscode/.cargo/bin:${PATH}"
WORKDIR /workspace
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain "${RUST_TOOLCHAIN}" \
&& rustup component add clippy rustfmt rust-src \
&& rustup target add x86_64-unknown-linux-musl aarch64-unknown-linux-musl

View File

@@ -1,38 +1,10 @@
# Containerized Development
We provide two container paths:
- `devcontainer.json` keeps the existing Codex contributor setup for working on this repository.
- `devcontainer.secure.json` adds a customer-oriented profile with stricter outbound network controls.
## Codex contributor profile
Use `devcontainer.json` when you are developing Codex itself. This is the same lightweight arm64 container that already exists in the repo.
## Secure customer profile
Use `devcontainer.secure.json` when you want a stricter runtime profile for running Codex inside a project container:
- installs the Codex CLI plus common build tools
- installs bubblewrap in setuid mode for Codex's Linux sandbox
- disables Docker's outer seccomp and AppArmor profiles so bubblewrap can construct Codex's inner sandbox
- enables firewall startup with an allowlist-driven outbound policy
- blocks IPv6 by default so the allowlist cannot be bypassed over AAAA routes
- requires `NET_ADMIN` and `NET_RAW` so the firewall can be installed at startup
This profile keeps the stricter networking isolated to the customer path instead of changing the default Codex contributor container.
Start it from the CLI with:
```bash
devcontainer up --workspace-folder . --config .devcontainer/devcontainer.secure.json
```
In VS Code, choose **Dev Containers: Open Folder in Container...** and select `.devcontainer/devcontainer.secure.json`.
We provide the following options to facilitate Codex development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.
## Docker
To build the contributor image locally for x64 and then run it with the repo mounted under `/workspace`:
To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:
```shell
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
@@ -42,8 +14,17 @@ docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-
Note that `/workspace/target` will contain the binaries built for your host platform, so we include `-e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64` in the `docker run` command so that the binaries built inside your container are written to a separate directory.
For arm64, specify `--platform=linux/arm64` instead for both `docker build` and `docker run`.
For arm64, specify `--platform=linux/amd64` instead for both `docker build` and `docker run`.
Currently, the contributor `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
Currently, the `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
The secure profile's capability, seccomp, and AppArmor options are required when you want Codex's bubblewrap sandbox to run inside Docker as the non-root devcontainer user. Without them, Docker's default runtime profile can block bubblewrap's namespace setup before Codex's own seccomp filter is installed. This keeps the Docker relaxation explicit in the profile that is meant to run Codex inside a project container, while the default contributor profile stays lightweight.
## VS Code
VS Code recognizes the `devcontainer.json` file and gives you the option to develop Codex in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.
From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):
```shell
cargo build --target aarch64-unknown-linux-musl
cargo build --target aarch64-unknown-linux-gnu
```

View File

@@ -1,13 +0,0 @@
{
"name": "codex-devcontainer-install",
"private": true,
"description": "Locked Codex CLI install boundary for the secure devcontainer.",
"dependencies": {
"@openai/codex": "0.121.0"
},
"engines": {
"node": ">=22",
"pnpm": ">=10.33.0"
},
"packageManager": "pnpm@10.33.0+sha512.10568bb4a6afb58c9eb3630da90cc9516417abebd3fabbe6739f0ae795728da1491e9db5a544c76ad8eb7570f5c4bb3d6c637b2cb41bfdcdb47fa823c8649319"
}

View File

@@ -1,85 +0,0 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
importers:
.:
dependencies:
'@openai/codex':
specifier: 0.121.0
version: 0.121.0
packages:
'@openai/codex@0.121.0':
resolution: {integrity: sha512-kCJ2NeATd4QBQRmqV04ymdN1ZU3MSwnJQDm/KzjpuzGvCuUVEn7no/T2mRyxQ2x77AACqriNOyPPoM/yufyvNg==}
engines: {node: '>=16'}
hasBin: true
'@openai/codex@0.121.0-darwin-arm64':
resolution: {integrity: sha512-ZyBqIB6Fb4I0hGb/h65Vu7ePYjHSmGiqqfm+/1djEuxDPkqjfi4wkxYxNYNY+6najyNGN4UijOSTTf19eDCrqw==}
engines: {node: '>=16'}
cpu: [arm64]
os: [darwin]
'@openai/codex@0.121.0-darwin-x64':
resolution: {integrity: sha512-1/OAtdkAZ5yPI3xqaEFlHuPziS1yCqL2gOZdswE7HTmmwpIxi6Z3FCo60JWDPluIp89z4tftdjq73/OCN0YVcw==}
engines: {node: '>=16'}
cpu: [x64]
os: [darwin]
'@openai/codex@0.121.0-linux-arm64':
resolution: {integrity: sha512-2UgMmdo237o7SCMsfb529cOSEM2HFUgN6OBkv5SBLwfNY1NO2Ex6JnUjlppEXlX6/4cXfZ5qjDghVz5j/+B9zw==}
engines: {node: '>=16'}
cpu: [arm64]
os: [linux]
'@openai/codex@0.121.0-linux-x64':
resolution: {integrity: sha512-vlpNJXIqss800J+32Vy7TUZzv31n61b45OLxmsVQGFkTNLJcjFrj9jDUC7I62eC4F16gLioilefNfv4CdJQOEw==}
engines: {node: '>=16'}
cpu: [x64]
os: [linux]
'@openai/codex@0.121.0-win32-arm64':
resolution: {integrity: sha512-m88q4f3XI5npn1t6OG0nWGHWWAjO5FgjRwxh4hdujbLO6t9CiCNfhfPZIOSsoATbrCNwLC+6S77m3cjbNToPNg==}
engines: {node: '>=16'}
cpu: [arm64]
os: [win32]
'@openai/codex@0.121.0-win32-x64':
resolution: {integrity: sha512-Fp0ecVOyM+VcBi/y4HVvRzhifO9YqRiHzhV3rhtAppC7flh22WPguLC4kmvXYAR0p3RPzbo35M2CedWnkOT+cw==}
engines: {node: '>=16'}
cpu: [x64]
os: [win32]
snapshots:
'@openai/codex@0.121.0':
optionalDependencies:
'@openai/codex-darwin-arm64': '@openai/codex@0.121.0-darwin-arm64'
'@openai/codex-darwin-x64': '@openai/codex@0.121.0-darwin-x64'
'@openai/codex-linux-arm64': '@openai/codex@0.121.0-linux-arm64'
'@openai/codex-linux-x64': '@openai/codex@0.121.0-linux-x64'
'@openai/codex-win32-arm64': '@openai/codex@0.121.0-win32-arm64'
'@openai/codex-win32-x64': '@openai/codex@0.121.0-win32-x64'
'@openai/codex@0.121.0-darwin-arm64':
optional: true
'@openai/codex@0.121.0-darwin-x64':
optional: true
'@openai/codex@0.121.0-linux-arm64':
optional: true
'@openai/codex@0.121.0-linux-x64':
optional: true
'@openai/codex@0.121.0-win32-arm64':
optional: true
'@openai/codex@0.121.0-win32-x64':
optional: true

View File

@@ -1,12 +0,0 @@
packages:
- "."
minimumReleaseAge: 10080
minimumReleaseAgeExclude: []
blockExoticSubdeps: true
strictDepBuilds: true
trustPolicy: no-downgrade
trustPolicyIgnoreAfter: 10080
trustPolicyExclude: []
allowBuilds: {}

View File

@@ -1,83 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json",
"name": "Codex (Secure)",
"build": {
"dockerfile": "Dockerfile.secure",
"context": "..",
"args": {
"TZ": "${localEnv:TZ:UTC}",
"NODE_MAJOR": "22",
"RUST_TOOLCHAIN": "1.92.0",
"CODEX_NPM_VERSION": "0.121.0"
}
},
"runArgs": [
"--cap-add=SYS_ADMIN",
"--cap-add=SYS_CHROOT",
"--cap-add=SETUID",
"--cap-add=SETGID",
"--cap-add=SYS_PTRACE",
"--security-opt=seccomp=unconfined",
"--security-opt=apparmor=unconfined",
"--cap-add=NET_ADMIN",
"--cap-add=NET_RAW"
],
"init": true,
"updateRemoteUserUID": true,
"remoteUser": "vscode",
"workspaceMount": "source=${localWorkspaceFolder},target=/workspace,type=bind,consistency=delegated",
"workspaceFolder": "/workspace",
"mounts": [
"source=codex-commandhistory-${devcontainerId},target=/commandhistory,type=volume",
"source=codex-home-${devcontainerId},target=/home/vscode/.codex,type=volume",
"source=codex-gh-${devcontainerId},target=/home/vscode/.config/gh,type=volume",
"source=codex-cargo-registry-${devcontainerId},target=/home/vscode/.cargo/registry,type=volume",
"source=codex-cargo-git-${devcontainerId},target=/home/vscode/.cargo/git,type=volume",
"source=codex-rustup-${devcontainerId},target=/home/vscode/.rustup,type=volume",
"source=${localEnv:HOME}/.gitconfig,target=/home/vscode/.gitconfig,type=bind,readonly"
],
"containerEnv": {
"RUST_BACKTRACE": "1",
"CODEX_UNSAFE_ALLOW_NO_SANDBOX": "1",
"CODEX_ENABLE_FIREWALL": "1",
"CODEX_INCLUDE_GITHUB_META_RANGES": "1",
"OPENAI_ALLOWED_DOMAINS": "api.openai.com auth.openai.com github.com api.github.com codeload.github.com raw.githubusercontent.com objects.githubusercontent.com crates.io index.crates.io static.crates.io static.rust-lang.org registry.npmjs.org pypi.org files.pythonhosted.org",
"CARGO_TARGET_DIR": "/workspace/.cache/cargo-target",
"GIT_CONFIG_GLOBAL": "/home/vscode/.gitconfig.local",
"COREPACK_ENABLE_DOWNLOAD_PROMPT": "0",
"PYTHONDONTWRITEBYTECODE": "1",
"PIP_DISABLE_PIP_VERSION_CHECK": "1"
},
"remoteEnv": {
"OPENAI_API_KEY": "${localEnv:OPENAI_API_KEY}"
},
"postCreateCommand": "python3 /opt/post_install.py",
"postStartCommand": "bash /opt/post_start.sh",
"waitFor": "postStartCommand",
"customizations": {
"vscode": {
"settings": {
"terminal.integrated.defaultProfile.linux": "zsh",
"terminal.integrated.profiles.linux": {
"bash": {
"path": "bash",
"icon": "terminal-bash"
},
"zsh": {
"path": "zsh"
}
},
"files.trimTrailingWhitespace": true,
"files.insertFinalNewline": true,
"files.trimFinalNewlines": true
},
"extensions": [
"openai.chatgpt",
"rust-lang.rust-analyzer",
"tamasfe.even-better-toml",
"vadimcn.vscode-lldb",
"ms-azuretools.vscode-docker"
]
}
}
}

View File

@@ -1,170 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
allowed_domains_file="/etc/codex/allowed_domains.txt"
include_github_meta_ranges="${CODEX_INCLUDE_GITHUB_META_RANGES:-1}"
if [ -f "$allowed_domains_file" ]; then
mapfile -t allowed_domains < <(sed '/^\s*#/d;/^\s*$/d' "$allowed_domains_file")
else
allowed_domains=("api.openai.com")
fi
if [ "${#allowed_domains[@]}" -eq 0 ]; then
echo "ERROR: No allowed domains configured"
exit 1
fi
add_ipv4_cidr_to_allowlist() {
local source="$1"
local cidr="$2"
if [[ ! "$cidr" =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}/[0-9]{1,2}$ ]]; then
echo "ERROR: Invalid ${source} CIDR range: $cidr"
exit 1
fi
ipset add allowed-domains "$cidr" -exist
}
configure_ipv6_default_deny() {
if ! command -v ip6tables >/dev/null 2>&1; then
echo "ERROR: ip6tables is required to enforce IPv6 default-deny policy"
exit 1
fi
ip6tables -F
ip6tables -X
ip6tables -t mangle -F
ip6tables -t mangle -X
ip6tables -t nat -F 2>/dev/null || true
ip6tables -t nat -X 2>/dev/null || true
ip6tables -A INPUT -i lo -j ACCEPT
ip6tables -A OUTPUT -o lo -j ACCEPT
ip6tables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
ip6tables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
ip6tables -P INPUT DROP
ip6tables -P FORWARD DROP
ip6tables -P OUTPUT DROP
echo "IPv6 firewall policy configured (default-deny)"
}
# Preserve docker-managed DNS NAT rules before clearing tables.
docker_dns_rules="$(iptables-save -t nat | grep "127\\.0\\.0\\.11" || true)"
iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
ipset destroy allowed-domains 2>/dev/null || true
if [ -n "$docker_dns_rules" ]; then
echo "Restoring Docker DNS NAT rules"
iptables -t nat -N DOCKER_OUTPUT 2>/dev/null || true
iptables -t nat -N DOCKER_POSTROUTING 2>/dev/null || true
while IFS= read -r rule; do
[ -z "$rule" ] && continue
iptables -t nat $rule
done <<< "$docker_dns_rules"
fi
# Allow DNS resolution and localhost communication.
iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
iptables -A INPUT -p udp --sport 53 -j ACCEPT
iptables -A INPUT -p tcp --sport 53 -j ACCEPT
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
ipset create allowed-domains hash:net
for domain in "${allowed_domains[@]}"; do
echo "Resolving $domain"
ips="$(dig +short A "$domain" | sed '/^\s*$/d')"
if [ -z "$ips" ]; then
echo "ERROR: Failed to resolve $domain"
exit 1
fi
while IFS= read -r ip; do
if [[ ! "$ip" =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}$ ]]; then
echo "ERROR: Invalid IPv4 address from DNS for $domain: $ip"
exit 1
fi
ipset add allowed-domains "$ip" -exist
done <<< "$ips"
done
if [ "$include_github_meta_ranges" = "1" ]; then
echo "Fetching GitHub meta ranges"
github_meta="$(curl -fsSL --connect-timeout 10 https://api.github.com/meta)"
if ! echo "$github_meta" | jq -e '.web and .api and .git' >/dev/null; then
echo "ERROR: GitHub meta response missing expected fields"
exit 1
fi
while IFS= read -r cidr; do
[ -z "$cidr" ] && continue
if [[ "$cidr" == *:* ]]; then
# Current policy enforces IPv4-only ipset entries.
continue
fi
add_ipv4_cidr_to_allowlist "GitHub" "$cidr"
done < <(echo "$github_meta" | jq -r '((.web // []) + (.api // []) + (.git // []))[]' | sort -u)
fi
host_ip="$(ip route | awk '/default/ {print $3; exit}')"
if [ -z "$host_ip" ]; then
echo "ERROR: Failed to detect host IP"
exit 1
fi
host_network="$(echo "$host_ip" | sed 's/\.[0-9]*$/.0\/24/')"
iptables -A INPUT -s "$host_network" -j ACCEPT
iptables -A OUTPUT -d "$host_network" -j ACCEPT
iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT DROP
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m set --match-set allowed-domains dst -j ACCEPT
# Reject rather than silently drop to make policy failures obvious.
iptables -A INPUT -j REJECT --reject-with icmp-admin-prohibited
iptables -A OUTPUT -j REJECT --reject-with icmp-admin-prohibited
iptables -A FORWARD -j REJECT --reject-with icmp-admin-prohibited
configure_ipv6_default_deny
echo "Firewall configuration complete"
if curl --connect-timeout 5 https://example.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - was able to reach https://example.com"
exit 1
fi
if ! curl --connect-timeout 5 https://api.openai.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - unable to reach https://api.openai.com"
exit 1
fi
if [ "$include_github_meta_ranges" = "1" ] && ! curl --connect-timeout 5 https://api.github.com/zen >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - unable to reach https://api.github.com"
exit 1
fi
if curl --connect-timeout 5 -6 https://example.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - was able to reach https://example.com over IPv6"
exit 1
fi
echo "Firewall verification passed"

View File

@@ -1,36 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
if [ "${CODEX_ENABLE_FIREWALL:-1}" != "1" ]; then
echo "[devcontainer] Firewall mode: permissive (CODEX_ENABLE_FIREWALL=${CODEX_ENABLE_FIREWALL:-unset})."
exit 0
fi
echo "[devcontainer] Firewall mode: strict"
domains_raw="${OPENAI_ALLOWED_DOMAINS:-api.openai.com}"
mapfile -t domains < <(printf '%s\n' "$domains_raw" | tr ', ' '\n\n' | sed '/^$/d' | sort -u)
if [ "${#domains[@]}" -eq 0 ]; then
echo "[devcontainer] No allowed domains configured."
exit 1
fi
tmp_file="$(mktemp)"
for domain in "${domains[@]}"; do
if [[ ! "$domain" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]*\.[a-zA-Z]{2,}$ ]]; then
echo "[devcontainer] Invalid domain in OPENAI_ALLOWED_DOMAINS: $domain"
rm -f "$tmp_file"
exit 1
fi
printf '%s\n' "$domain" >> "$tmp_file"
done
sudo install -d -m 0755 /etc/codex
sudo cp "$tmp_file" /etc/codex/allowed_domains.txt
sudo chown root:root /etc/codex/allowed_domains.txt
sudo chmod 0444 /etc/codex/allowed_domains.txt
rm -f "$tmp_file"
echo "[devcontainer] Applying firewall policy for domains: ${domains[*]}"
sudo --preserve-env=CODEX_INCLUDE_GITHUB_META_RANGES /usr/local/bin/init-firewall.sh

View File

@@ -1,113 +0,0 @@
#!/usr/bin/env python3
"""Post-install configuration for the Codex devcontainer."""
from __future__ import annotations
import os
import subprocess
import sys
from pathlib import Path
def ensure_history_files() -> None:
command_history_dir = Path("/commandhistory")
command_history_dir.mkdir(parents=True, exist_ok=True)
for filename in (".bash_history", ".zsh_history"):
(command_history_dir / filename).touch(exist_ok=True)
def fix_directory_ownership() -> None:
uid = os.getuid()
gid = os.getgid()
paths = [
Path.home() / ".codex",
Path.home() / ".config" / "gh",
Path.home() / ".cargo",
Path.home() / ".rustup",
Path("/commandhistory"),
]
for path in paths:
if not path.exists():
continue
stat_info = path.stat()
if stat_info.st_uid == uid and stat_info.st_gid == gid:
continue
try:
subprocess.run(
["sudo", "chown", "-R", f"{uid}:{gid}", str(path)],
check=True,
capture_output=True,
text=True,
)
print(f"[post_install] fixed ownership: {path}", file=sys.stderr)
except subprocess.CalledProcessError as err:
print(
f"[post_install] warning: could not fix ownership of {path}: {err.stderr.strip()}",
file=sys.stderr,
)
def setup_git_config() -> None:
home = Path.home()
host_gitconfig = home / ".gitconfig"
local_gitconfig = home / ".gitconfig.local"
gitignore_global = home / ".gitignore_global"
gitignore_global.write_text(
"""# Codex
.codex/
# Rust
/target/
# Node
node_modules/
# Python
__pycache__/
*.pyc
# Editors
.vscode/
.idea/
# macOS
.DS_Store
""",
encoding="utf-8",
)
include_line = (
f"[include]\n path = {host_gitconfig}\n\n" if host_gitconfig.exists() else ""
)
local_gitconfig.write_text(
f"""# Container-local git configuration
{include_line}[core]
excludesfile = {gitignore_global}
[merge]
conflictstyle = diff3
[diff]
colorMoved = default
""",
encoding="utf-8",
)
def main() -> None:
print("[post_install] configuring devcontainer...", file=sys.stderr)
ensure_history_files()
fix_directory_ownership()
setup_git_config()
print("[post_install] complete", file=sys.stderr)
if __name__ == "__main__":
main()

.gitattributes vendored
View File

@@ -1,2 +0,0 @@
codex-rs/app-server-protocol/schema/** linguist-generated
codex-rs/hooks/schema/generated/** linguist-generated

.github/CODEOWNERS vendored
View File

@@ -1,5 +0,0 @@
# Core crate ownership.
/codex-rs/core/ @openai/codex-core-agent-team
# Keep ownership changes reviewed by the same team.
/.github/CODEOWNERS @openai/codex-core-agent-team

View File

@@ -2,6 +2,7 @@ name: 💻 CLI Bug
description: Report an issue in the Codex CLI
labels:
- bug
- needs triage
body:
- type: markdown
attributes:
@@ -40,9 +41,9 @@ body:
id: terminal
attributes:
label: What terminal emulator and version are you using (if applicable)?
description: Also note any multiplexer in use (screen / tmux / zellij)
description: |
Also note any multiplexer in use (screen / tmux / zellij).
E.g., VS Code, Terminal.app, iTerm2, Ghostty, Windows Terminal (WSL / PowerShell)
E.g, VSCode, Terminal.app, iTerm2, Ghostty, Windows Terminal (WSL / PowerShell)
- type: textarea
id: actual
attributes:

View File

@@ -10,7 +10,7 @@ body:
Before you submit a feature:
1. Search existing issues for similar features. If you find one, 👍 it rather than opening a new one.
2. The Codex team will try to balance the varying needs of the community when prioritizing or rejecting new features. Not all features will be accepted. See [Contributing](https://github.com/openai/codex/blob/main/docs/contributing.md) for more details.
2. The Codex team will try to balance the varying needs of the community when prioritizing or rejecting new features. Not all features will be accepted. See [Contributing](https://github.com/openai/codex#contributing) for more details.
- type: input
id: variant

View File

@@ -1,6 +1,6 @@
name: 📗 Documentation Issue
description: Tell us if there is missing or incorrect documentation
labels: [documentation]
labels: [docs]
body:
- type: markdown
attributes:
@@ -24,4 +24,4 @@ body:
- type: textarea
attributes:
label: Where did you find it?
description: If possible, please provide the URL(s) where you found this issue.
description: If possible, please provide the URL(s) where you found this issue.

View File

@@ -7,21 +7,16 @@ inputs:
artifacts-dir:
description: Absolute path to the directory containing built binaries to sign.
required: true
binaries:
description: Space-delimited binary basenames to sign.
default: "codex codex-responses-api-proxy"
runs:
using: composite
steps:
- name: Install cosign
uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0
uses: sigstore/cosign-installer@v3.7.0
- name: Cosign Linux artifacts
shell: bash
env:
ARTIFACTS_DIR: ${{ inputs.artifacts-dir }}
BINARIES: ${{ inputs.binaries }}
COSIGN_EXPERIMENTAL: "1"
COSIGN_YES: "true"
COSIGN_OIDC_CLIENT_ID: "sigstore"
@@ -29,13 +24,13 @@ runs:
run: |
set -euo pipefail
dest="$ARTIFACTS_DIR"
dest="${{ inputs.artifacts-dir }}"
if [[ ! -d "$dest" ]]; then
echo "Destination $dest does not exist"
exit 1
fi
for binary in ${BINARIES}; do
for binary in codex codex-responses-api-proxy; do
artifact="${dest}/${binary}"
if [[ ! -f "$artifact" ]]; then
echo "Binary $artifact not found"

View File

@@ -4,9 +4,6 @@ inputs:
target:
description: Rust compilation target triple (e.g. aarch64-apple-darwin).
required: true
binaries:
description: Space-delimited binary basenames to sign and notarize.
default: "codex codex-responses-api-proxy"
sign-binaries:
description: Whether to sign and notarize the macOS binaries.
required: false
@@ -120,9 +117,6 @@ runs:
- name: Sign macOS binaries
if: ${{ inputs.sign-binaries == 'true' }}
shell: bash
env:
TARGET: ${{ inputs.target }}
BINARIES: ${{ inputs.binaries }}
run: |
set -euo pipefail
@@ -136,19 +130,15 @@ runs:
keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
fi
entitlements_path="$GITHUB_ACTION_PATH/codex.entitlements.plist"
for binary in ${BINARIES}; do
path="codex-rs/target/${TARGET}/release/${binary}"
codesign --force --options runtime --timestamp --entitlements "$entitlements_path" --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
for binary in codex codex-responses-api-proxy; do
path="codex-rs/target/${{ inputs.target }}/release/${binary}"
codesign --force --options runtime --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
done
- name: Notarize macOS binaries
if: ${{ inputs.sign-binaries == 'true' }}
shell: bash
env:
TARGET: ${{ inputs.target }}
BINARIES: ${{ inputs.binaries }}
APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
APPLE_NOTARIZATION_KEY_ID: ${{ inputs.apple-notarization-key-id }}
APPLE_NOTARIZATION_ISSUER_ID: ${{ inputs.apple-notarization-issuer-id }}
@@ -173,7 +163,7 @@ runs:
notarize_binary() {
local binary="$1"
local source_path="codex-rs/target/${TARGET}/release/${binary}"
local source_path="codex-rs/target/${{ inputs.target }}/release/${binary}"
local archive_path="${RUNNER_TEMP}/${binary}.zip"
if [[ ! -f "$source_path" ]]; then
@@ -187,15 +177,13 @@ runs:
notarize_submission "$binary" "$archive_path" "$notary_key_path"
}
for binary in ${BINARIES}; do
notarize_binary "${binary}"
done
notarize_binary "codex"
notarize_binary "codex-responses-api-proxy"
- name: Sign and notarize macOS dmg
if: ${{ inputs.sign-dmg == 'true' }}
shell: bash
env:
TARGET: ${{ inputs.target }}
APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
APPLE_NOTARIZATION_KEY_ID: ${{ inputs.apple-notarization-key-id }}
APPLE_NOTARIZATION_ISSUER_ID: ${{ inputs.apple-notarization-issuer-id }}
@@ -218,8 +206,7 @@ runs:
source "$GITHUB_ACTION_PATH/notary_helpers.sh"
dmg_name="codex-${TARGET}.dmg"
dmg_path="codex-rs/target/${TARGET}/release/${dmg_name}"
dmg_path="codex-rs/target/${{ inputs.target }}/release/codex-${{ inputs.target }}.dmg"
if [[ ! -f "$dmg_path" ]]; then
echo "dmg $dmg_path not found"
@@ -232,7 +219,7 @@ runs:
fi
codesign --force --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$dmg_path"
notarize_submission "$dmg_name" "$dmg_path" "$notary_key_path"
notarize_submission "codex-${{ inputs.target }}.dmg" "$dmg_path" "$notary_key_path"
xcrun stapler staple "$dmg_path"
- name: Remove signing keychain
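
`notarize_submission` is defined in the sourced `notary_helpers.sh`, which this diff does not show. A helper of that shape typically wraps `xcrun notarytool submit`; the sketch below uses the standard notarytool flags plus the env vars from the steps above, and is illustrative rather than the helper's actual body:

# Hypothetical core of a notarize_submission-style helper.
notarize_submission() {
  local name="$1" path="$2" key_path="$3"
  echo "Submitting ${name} for notarization"
  xcrun notarytool submit "$path" \
    --key "$key_path" \
    --key-id "$APPLE_NOTARIZATION_KEY_ID" \
    --issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
    --wait
}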

View File

@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>com.apple.security.cs.allow-jit</key>
<true/>
</dict>
</plist>

View File

@@ -1,64 +0,0 @@
name: prepare-bazel-ci
description: Prepare a Bazel CI job with shared setup, repository cache restore, and execution logs.
inputs:
target:
description: Target triple used for setup and cache namespacing.
required: true
cache-scope:
description: Logical namespace used to keep concurrent Bazel jobs from reserving the same repository cache key.
required: true
install-test-prereqs:
description: Install DotSlash for Bazel-backed test jobs.
required: false
default: "false"
outputs:
repository-cache-path:
description: Filesystem path used for the Bazel repository cache.
value: ${{ steps.setup_bazel.outputs.repository-cache-path }}
repository-cache-key:
description: Primary actions/cache key for the Bazel repository cache.
value: ${{ steps.cache_bazel_repository_key.outputs.repository-cache-key }}
repository-cache-hit:
description: Whether the Bazel repository cache restore found an exact key match.
value: ${{ steps.cache_bazel_repository_restore.outputs.cache-hit }}
runs:
using: composite
steps:
- name: Set up Bazel CI
id: setup_bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ inputs.target }}
install-test-prereqs: ${{ inputs.install-test-prereqs }}
- name: Compute bazel repository cache key
id: cache_bazel_repository_key
shell: bash
env:
CACHE_SCOPE: ${{ inputs.cache-scope }}
TARGET: ${{ inputs.target }}
CACHE_HASH: ${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}
run: |
echo "repository-cache-key=bazel-cache-${CACHE_SCOPE}-${TARGET}-${CACHE_HASH}" >> "${GITHUB_OUTPUT}"
echo "repository-cache-restore-key=bazel-cache-${CACHE_SCOPE}-${TARGET}-" >> "${GITHUB_OUTPUT}"
# Restore the Bazel repository cache explicitly so external dependencies
# do not need to be re-downloaded on every CI run. Keep restore failures
# non-fatal so transient cache-service errors degrade to a cold build
# instead of failing the job.
- name: Restore bazel repository cache
id: cache_bazel_repository_restore
continue-on-error: true
uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: ${{ steps.setup_bazel.outputs.repository-cache-path }}
key: ${{ steps.cache_bazel_repository_key.outputs.repository-cache-key }}
restore-keys: |
${{ steps.cache_bazel_repository_key.outputs.repository-cache-restore-key }}
- name: Set up Bazel execution logs
shell: bash
run: |
mkdir -p "${RUNNER_TEMP}/bazel-execution-logs"
echo "CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR=${RUNNER_TEMP}/bazel-execution-logs" >> "${GITHUB_ENV}"

View File

@@ -1,54 +0,0 @@
name: Run argument comment lint
description: Run argument-comment-lint on codex-rs via Bazel.
inputs:
target:
description: Runner target passed to setup-bazel-ci.
required: true
buildbuddy-api-key:
description: BuildBuddy API key used by Bazel CI.
required: false
default: ""
runs:
using: composite
steps:
- uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ inputs.target }}
install-test-prereqs: true
- name: Install Linux sandbox build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
sudo DEBIAN_FRONTEND=noninteractive apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os != 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ inputs.buildbuddy-api-key }}
shell: bash
run: |
bazel_targets="$(./tools/argument-comment-lint/list-bazel-targets.sh)"
./.github/scripts/run-bazel-ci.sh \
-- \
build \
--config=argument-comment-lint \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA} \
-- \
${bazel_targets}
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os == 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ inputs.buildbuddy-api-key }}
shell: bash
run: |
./.github/scripts/run-argument-comment-lint-bazel.sh \
--config=argument-comment-lint \
--platforms=//:local_windows \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA}

View File

@@ -1,127 +0,0 @@
name: setup-bazel-ci
description: Prepare a Bazel CI runner with shared caches and optional test prerequisites.
inputs:
target:
description: Target triple used for cache namespacing.
required: true
install-test-prereqs:
description: Install DotSlash for Bazel-backed test jobs.
required: false
default: "false"
outputs:
repository-cache-path:
description: Filesystem path used for the Bazel repository cache.
value: ${{ steps.configure_bazel_repository_cache.outputs.repository-cache-path }}
runs:
using: composite
steps:
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
if: inputs.install-test-prereqs == 'true'
uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2
- name: Make DotSlash available in PATH (Unix)
if: inputs.install-test-prereqs == 'true' && runner.os != 'Windows'
shell: bash
run: cp "$(which dotslash)" /usr/local/bin
- name: Make DotSlash available in PATH (Windows)
if: inputs.install-test-prereqs == 'true' && runner.os == 'Windows'
shell: pwsh
run: Copy-Item (Get-Command dotslash).Source -Destination "$env:LOCALAPPDATA\Microsoft\WindowsApps\dotslash.exe"
- name: Set up Bazel
uses: bazel-contrib/setup-bazel@c5acdfb288317d0b5c0bbd7a396a3dc868bb0f86 # 0.19.0
- name: Configure Bazel repository cache
id: configure_bazel_repository_cache
shell: pwsh
run: |
# Keep the repository cache under HOME on all runners. Windows `D:\a`
# cache paths match `.bazelrc`, but `actions/cache/restore` currently
# returns HTTP 400 for that path in the Windows clippy job.
$repositoryCachePath = Join-Path $HOME '.cache/bazel-repo-cache'
"repository-cache-path=$repositoryCachePath" | Out-File -FilePath $env:GITHUB_OUTPUT -Encoding utf8 -Append
"BAZEL_REPOSITORY_CACHE=$repositoryCachePath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: Configure Bazel output root (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
# Use the shortest available drive to reduce argv/path length issues,
# but avoid the drive root because some Windows test launchers mishandle
# MANIFEST paths there.
$hasDDrive = Test-Path 'D:\'
$bazelOutputUserRoot = if ($hasDDrive) { 'D:\b' } else { 'C:\b' }
$repoContentsCache = Join-Path $env:RUNNER_TEMP "bazel-repo-contents-cache-$env:GITHUB_RUN_ID-$env:GITHUB_JOB"
"BAZEL_OUTPUT_USER_ROOT=$bazelOutputUserRoot" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
"BAZEL_REPO_CONTENTS_CACHE=$repoContentsCache" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: Expose MSVC SDK environment (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
# Bazel exec-side Rust build scripts do not reliably inherit the MSVC developer
# shell on GitHub-hosted Windows runners, so discover the latest VS install and
# ask `VsDevCmd.bat` to materialize the x64/x64 compiler + SDK environment.
$vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe"
if (-not (Test-Path $vswhere)) {
throw "vswhere.exe not found"
}
$installPath = & $vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath 2>$null
if (-not $installPath) {
throw "Could not locate a Visual Studio installation with VC tools"
}
$vsDevCmd = Join-Path $installPath 'Common7\Tools\VsDevCmd.bat'
if (-not (Test-Path $vsDevCmd)) {
throw "VsDevCmd.bat not found at $vsDevCmd"
}
# Keep the export surface explicit: these are the paths and SDK roots that the
# MSVC toolchain probes need later when Bazel runs Windows exec-platform build
# scripts such as `aws-lc-sys`.
$varsToExport = @(
'INCLUDE',
'LIB',
'LIBPATH',
'PATH',
'UCRTVersion',
'UniversalCRTSdkDir',
'VCINSTALLDIR',
'VCToolsInstallDir',
'WindowsLibPath',
'WindowsSdkBinPath',
'WindowsSdkDir',
'WindowsSDKLibVersion',
'WindowsSDKVersion'
)
# `VsDevCmd.bat` is a batch file, so invoke it under `cmd.exe`, suppress its
# banner, then dump the resulting environment with `set`. Re-export only the
# approved keys into `GITHUB_ENV` so later steps inherit the same MSVC context.
$envLines = & cmd.exe /c ('"{0}" -no_logo -arch=x64 -host_arch=x64 >nul && set' -f $vsDevCmd)
foreach ($line in $envLines) {
if ($line -notmatch '^(.*?)=(.*)$') {
continue
}
$name = $matches[1]
$value = $matches[2]
if ($varsToExport -contains $name) {
"$name=$value" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
}
}
- name: Compute cache-stable Windows Bazel PATH
if: runner.os == 'Windows'
shell: pwsh
run: ./.github/scripts/compute-bazel-windows-path.ps1
- name: Enable Git long paths (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: git config --global core.longpaths true

View File

@@ -1,47 +0,0 @@
name: setup-rusty-v8-musl
description: Download and verify musl rusty_v8 artifacts for Cargo builds.
inputs:
target:
description: Rust musl target triple.
required: true
runs:
using: composite
steps:
- name: Configure musl rusty_v8 artifact overrides and verify checksums
shell: bash
env:
TARGET: ${{ inputs.target }}
run: |
set -euo pipefail
case "${TARGET}" in
x86_64-unknown-linux-musl|aarch64-unknown-linux-musl)
;;
*)
echo "Unsupported musl rusty_v8 target: ${TARGET}" >&2
exit 1
;;
esac
version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)"
release_tag="rusty-v8-v${version}"
base_url="https://github.com/openai/codex/releases/download/${release_tag}"
binding_dir="${RUNNER_TEMP}/rusty_v8"
archive_path="${binding_dir}/librusty_v8_release_${TARGET}.a.gz"
binding_path="${binding_dir}/src_binding_release_${TARGET}.rs"
checksums_path="${binding_dir}/rusty_v8_release_${TARGET}.sha256"
mkdir -p "${binding_dir}"
curl -fsSL "${base_url}/librusty_v8_release_${TARGET}.a.gz" -o "${archive_path}"
curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}"
curl -fsSL "${base_url}/rusty_v8_release_${TARGET}.sha256" -o "${checksums_path}"
if [[ "$(wc -l < "${checksums_path}")" -ne 2 ]]; then
echo "Expected exactly two checksums for ${TARGET} in ${checksums_path}" >&2
exit 1
fi
(cd "${binding_dir}" && sha256sum -c "${checksums_path}")
echo "RUSTY_V8_ARCHIVE=${archive_path}" >> "${GITHUB_ENV}"
echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "${GITHUB_ENV}"

View File

@@ -4,9 +4,6 @@ inputs:
target:
description: Target triple for the artifacts to sign.
required: true
binaries:
description: Space-delimited binary basenames to sign.
default: "codex codex-responses-api-proxy codex-windows-sandbox-setup codex-command-runner"
client-id:
description: Azure Trusted Signing client ID.
required: true
@@ -30,31 +27,14 @@ runs:
using: composite
steps:
- name: Azure login for Trusted Signing (OIDC)
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
uses: azure/login@v2
with:
client-id: ${{ inputs.client-id }}
tenant-id: ${{ inputs.tenant-id }}
subscription-id: ${{ inputs.subscription-id }}
- name: Prepare file list
id: prepare
shell: bash
env:
TARGET: ${{ inputs.target }}
BINARIES: ${{ inputs.binaries }}
run: |
set -euo pipefail
{
echo "files<<EOF"
for binary in ${BINARIES}; do
echo "${GITHUB_WORKSPACE}/codex-rs/target/${TARGET}/release/${binary}.exe"
done
echo "EOF"
} >> "$GITHUB_OUTPUT"
- name: Sign Windows binaries with Azure Trusted Signing
uses: azure/trusted-signing-action@1d365fec12862c4aa68fcac418143d73f0cea293 # v0.5.11
uses: azure/trusted-signing-action@v0
with:
endpoint: ${{ inputs.endpoint }}
trusted-signing-account-name: ${{ inputs.account-name }}
@@ -70,4 +50,8 @@ runs:
exclude-azure-developer-cli-credential: true
exclude-interactive-browser-credential: true
cache-dependencies: false
files: ${{ steps.prepare.outputs.files }}
files: |
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex.exe
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-responses-api-proxy.exe
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-windows-sandbox-setup.exe
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-command-runner.exe

View File

@@ -7,4 +7,3 @@ codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json
codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json
codex-rs/tui/tests/fixtures/oss-story.jsonl
codex-rs/tui_app_server/tests/fixtures/oss-story.jsonl
codex-rs/tui/src/app.rs

View File

@@ -6,37 +6,25 @@ updates:
directory: .github/actions/codex
schedule:
interval: weekly
cooldown:
default-days: 7
- package-ecosystem: cargo
directories:
- codex-rs
- codex-rs/*
schedule:
interval: weekly
cooldown:
default-days: 7
- package-ecosystem: devcontainers
directory: /
schedule:
interval: weekly
cooldown:
default-days: 7
- package-ecosystem: docker
directory: codex-cli
schedule:
interval: weekly
cooldown:
default-days: 7
- package-ecosystem: github-actions
directory: /
schedule:
interval: weekly
cooldown:
default-days: 7
- package-ecosystem: rust-toolchain
directory: codex-rs
schedule:
interval: weekly
cooldown:
default-days: 7

View File

@@ -1,24 +0,0 @@
{
"outputs": {
"argument-comment-lint": {
"platforms": {
"macos-aarch64": {
"regex": "^argument-comment-lint-aarch64-apple-darwin\\.tar\\.gz$",
"path": "argument-comment-lint/bin/argument-comment-lint"
},
"linux-x86_64": {
"regex": "^argument-comment-lint-x86_64-unknown-linux-gnu\\.tar\\.gz$",
"path": "argument-comment-lint/bin/argument-comment-lint"
},
"linux-aarch64": {
"regex": "^argument-comment-lint-aarch64-unknown-linux-gnu\\.tar\\.gz$",
"path": "argument-comment-lint/bin/argument-comment-lint"
},
"windows-x86_64": {
"regex": "^argument-comment-lint-x86_64-pc-windows-msvc\\.zip$",
"path": "argument-comment-lint/bin/argument-comment-lint.exe"
}
}
}
}
}

View File

@@ -11,11 +11,11 @@
"path": "codex"
},
"linux-x86_64": {
"regex": "^codex-x86_64-unknown-linux-musl-bundle\\.tar\\.zst$",
"regex": "^codex-x86_64-unknown-linux-musl\\.zst$",
"path": "codex"
},
"linux-aarch64": {
"regex": "^codex-aarch64-unknown-linux-musl-bundle\\.tar\\.zst$",
"regex": "^codex-aarch64-unknown-linux-musl\\.zst$",
"path": "codex"
},
"windows-x86_64": {
@@ -28,34 +28,6 @@
}
}
},
"codex-app-server": {
"platforms": {
"macos-aarch64": {
"regex": "^codex-app-server-aarch64-apple-darwin\\.zst$",
"path": "codex-app-server"
},
"macos-x86_64": {
"regex": "^codex-app-server-x86_64-apple-darwin\\.zst$",
"path": "codex-app-server"
},
"linux-x86_64": {
"regex": "^codex-app-server-x86_64-unknown-linux-musl\\.zst$",
"path": "codex-app-server"
},
"linux-aarch64": {
"regex": "^codex-app-server-aarch64-unknown-linux-musl\\.zst$",
"path": "codex-app-server"
},
"windows-x86_64": {
"regex": "^codex-app-server-x86_64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-app-server.exe"
},
"windows-aarch64": {
"regex": "^codex-app-server-aarch64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-app-server.exe"
}
}
},
"codex-responses-api-proxy": {
"platforms": {
"macos-aarch64": {
@@ -84,18 +56,6 @@
}
}
},
"bwrap": {
"platforms": {
"linux-x86_64": {
"regex": "^bwrap-x86_64-unknown-linux-musl\\.zst$",
"path": "bwrap"
},
"linux-aarch64": {
"regex": "^bwrap-aarch64-unknown-linux-musl\\.zst$",
"path": "bwrap"
}
}
},
"codex-command-runner": {
"platforms": {
"windows-x86_64": {

View File

@@ -1,23 +0,0 @@
{
"outputs": {
"codex-zsh": {
"platforms": {
"macos-aarch64": {
"name": "codex-zsh-aarch64-apple-darwin.tar.gz",
"format": "tar.gz",
"path": "codex-zsh/bin/zsh"
},
"linux-x86_64": {
"name": "codex-zsh-x86_64-unknown-linux-musl.tar.gz",
"format": "tar.gz",
"path": "codex-zsh/bin/zsh"
},
"linux-aarch64": {
"name": "codex-zsh-aarch64-unknown-linux-musl.tar.gz",
"format": "tar.gz",
"path": "codex-zsh/bin/zsh"
}
}
}
}
}

View File

@@ -1,6 +1,6 @@
# External (non-OpenAI) Pull Request Requirements
External code contributions are by invitation only. Please read the dedicated "Contributing" markdown file for details:
Before opening this Pull Request, please read the dedicated "Contributing" markdown file or your PR may be closed:
https://github.com/openai/codex/blob/main/docs/contributing.md
If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.

View File

@@ -1,61 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
if [[ "$#" -ne 1 ]]; then
echo "usage: $0 <archive-path>" >&2
exit 1
fi
archive_path="$1"
workspace="${GITHUB_WORKSPACE:?missing GITHUB_WORKSPACE}"
zsh_commit="${ZSH_COMMIT:?missing ZSH_COMMIT}"
zsh_patch="${ZSH_PATCH:?missing ZSH_PATCH}"
temp_root="${RUNNER_TEMP:-/tmp}"
work_root="$(mktemp -d "${temp_root%/}/codex-zsh-release.XXXXXX")"
trap 'rm -rf "$work_root"' EXIT
source_root="${work_root}/zsh"
package_root="${work_root}/codex-zsh"
wrapper_path="${work_root}/exec-wrapper"
stdout_path="${work_root}/stdout.txt"
wrapper_log_path="${work_root}/wrapper.log"
git clone https://git.code.sf.net/p/zsh/code "$source_root"
cd "$source_root"
git checkout "$zsh_commit"
git apply "${workspace}/${zsh_patch}"
./Util/preconfig
./configure
cores="$(command -v nproc >/dev/null 2>&1 && nproc || getconf _NPROCESSORS_ONLN)"
make -j"${cores}"
cat > "$wrapper_path" <<'EOF'
#!/usr/bin/env bash
set -euo pipefail
: "${CODEX_WRAPPER_LOG:?missing CODEX_WRAPPER_LOG}"
printf '%s\n' "$@" > "$CODEX_WRAPPER_LOG"
file="$1"
shift
if [[ "$#" -eq 0 ]]; then
exec "$file"
fi
arg0="$1"
shift
exec -a "$arg0" "$file" "$@"
EOF
chmod +x "$wrapper_path"
CODEX_WRAPPER_LOG="$wrapper_log_path" \
EXEC_WRAPPER="$wrapper_path" \
"${source_root}/Src/zsh" -fc '/bin/echo smoke-zsh' > "$stdout_path"
grep -Fx "smoke-zsh" "$stdout_path"
grep -Fx "/bin/echo" "$wrapper_log_path"
mkdir -p "$package_root/bin" "$(dirname "${workspace}/${archive_path}")"
cp "${source_root}/Src/zsh" "$package_root/bin/zsh"
chmod +x "$package_root/bin/zsh"
(cd "$work_root" && tar -czf "${workspace}/${archive_path}" codex-zsh)

View File

@@ -1,113 +0,0 @@
<#
BuildBuddy cache keys include the action and test environment, so Bazel should
not inherit the full hosted-runner PATH on Windows. That PATH includes volatile
tool entries, such as Maven, that can change independently of this repo and
cause avoidable cache misses.
This script derives a smaller, cache-stable PATH that keeps the Windows
toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths,
MinGW runtime DLL paths for gnullvm-built tests, Git, PowerShell, Node, Python,
DotSlash, and the standard Windows system directories.
`setup-bazel-ci` runs this after exporting the MSVC environment, and the script
publishes the result via `GITHUB_ENV` as `CODEX_BAZEL_WINDOWS_PATH` so later
steps can pass that explicit PATH to Bazel.
#>
$stablePathEntries = New-Object System.Collections.Generic.List[string]
$seenEntries = [System.Collections.Generic.HashSet[string]]::new([System.StringComparer]::OrdinalIgnoreCase)
$windowsAppsPath = if ([string]::IsNullOrWhiteSpace($env:LOCALAPPDATA)) {
$null
} else {
"$($env:LOCALAPPDATA)\Microsoft\WindowsApps"
}
$windowsDir = if ($env:WINDIR) {
$env:WINDIR
} elseif ($env:SystemRoot) {
$env:SystemRoot
} else {
$null
}
function Add-StablePathEntry {
param([string]$PathEntry)
if ([string]::IsNullOrWhiteSpace($PathEntry)) {
return
}
if ($seenEntries.Add($PathEntry)) {
[void]$stablePathEntries.Add($PathEntry)
}
}
foreach ($pathEntry in ($env:PATH -split ';')) {
if ([string]::IsNullOrWhiteSpace($pathEntry)) {
continue
}
if (
$pathEntry -like '*Microsoft Visual Studio*' -or
$pathEntry -like '*Windows Kits*' -or
$pathEntry -like '*Microsoft SDKs*' -or
$pathEntry -eq 'C:\mingw64\bin' -or
$pathEntry -like 'C:\msys64\*\bin' -or
$pathEntry -like 'C:\Program Files\Git\*' -or
$pathEntry -like 'C:\Program Files\PowerShell\*' -or
$pathEntry -like 'C:\hostedtoolcache\windows\node\*' -or
$pathEntry -like 'C:\hostedtoolcache\windows\Python\*' -or
$pathEntry -eq 'D:\a\_temp\install-dotslash\bin' -or
($windowsDir -and ($pathEntry -eq $windowsDir -or $pathEntry -like "${windowsDir}\*"))
) {
Add-StablePathEntry $pathEntry
}
}
$gitCommand = Get-Command git -ErrorAction SilentlyContinue
if ($gitCommand) {
Add-StablePathEntry (Split-Path $gitCommand.Source -Parent)
}
$nodeCommand = Get-Command node -ErrorAction SilentlyContinue
if ($nodeCommand) {
Add-StablePathEntry (Split-Path $nodeCommand.Source -Parent)
}
$python3Command = Get-Command python3 -ErrorAction SilentlyContinue
if ($python3Command) {
Add-StablePathEntry (Split-Path $python3Command.Source -Parent)
}
$pythonCommand = Get-Command python -ErrorAction SilentlyContinue
if ($pythonCommand) {
Add-StablePathEntry (Split-Path $pythonCommand.Source -Parent)
}
$pwshCommand = Get-Command pwsh -ErrorAction SilentlyContinue
if ($pwshCommand) {
Add-StablePathEntry (Split-Path $pwshCommand.Source -Parent)
}
foreach ($mingwPath in @('C:\mingw64\bin', 'C:\msys64\mingw64\bin', 'C:\msys64\ucrt64\bin')) {
if (Test-Path $mingwPath) {
Add-StablePathEntry $mingwPath
}
}
if ($windowsAppsPath) {
Add-StablePathEntry $windowsAppsPath
}
if ($stablePathEntries.Count -eq 0) {
throw 'Failed to derive cache-stable Windows PATH.'
}
if ([string]::IsNullOrWhiteSpace($env:GITHUB_ENV)) {
throw 'GITHUB_ENV must be set.'
}
$stablePath = $stablePathEntries -join ';'
Write-Host 'Derived CODEX_BAZEL_WINDOWS_PATH entries:'
foreach ($pathEntry in $stablePathEntries) {
Write-Host " $pathEntry"
}
"CODEX_BAZEL_WINDOWS_PATH=$stablePath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
bazel_lint_args=("$@")
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
has_host_platform_override=0
for arg in "${bazel_lint_args[@]}"; do
if [[ "$arg" == --host_platform=* ]]; then
has_host_platform_override=1
break
fi
done
if [[ $has_host_platform_override -eq 0 ]]; then
# The nightly Windows lint toolchain is registered with an MSVC exec
# platform even though the lint target platform stays on `windows-gnullvm`.
# Override the host platform here so the exec-side helper binaries actually
# match the registered toolchain set.
bazel_lint_args+=("--host_platform=//:local_windows_msvc")
fi
# Native Windows lint runs need exec-side Rust helper binaries and proc-macros
# to use rust-lld instead of the C++ linker path. The default `none`
# preference resolves to `cc` when a cc_toolchain is present, which currently
# routes these exec actions through clang++ with an argument shape it cannot
# consume.
bazel_lint_args+=("--@rules_rust//rust/settings:toolchain_linker_preference=rust")
# Some Rust top-level targets are still intentionally incompatible with the
# local Windows MSVC exec platform. Skip those explicit targets so the native
# lint aspect can run across the compatible crate graph instead of failing the
# whole build after analysis.
bazel_lint_args+=("--skip_incompatible_explicit_targets")
fi
read_query_labels() {
local query="$1"
local query_stdout
local query_stderr
query_stdout="$(mktemp)"
query_stderr="$(mktemp)"
if ! ./.github/scripts/run-bazel-query-ci.sh \
--keep_going \
--output=label \
-- "$query" >"$query_stdout" 2>"$query_stderr"; then
cat "$query_stderr" >&2
rm -f "$query_stdout" "$query_stderr"
exit 1
fi
cat "$query_stdout"
rm -f "$query_stdout" "$query_stderr"
}
final_build_targets=(//codex-rs/...)
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
# Bazel's local Windows platform currently lacks a default test toolchain for
# `rust_test`, so target the concrete Rust crate rules directly. The lint
# aspect still walks their crate graph, which preserves incremental reuse for
# non-test code while avoiding non-Rust wrapper targets such as platform_data.
final_build_targets=()
while IFS= read -r label; do
[[ -n "$label" ]] || continue
final_build_targets+=("$label")
done < <(read_query_labels 'kind("rust_(library|binary|proc_macro) rule", //codex-rs/...)')
if [[ ${#final_build_targets[@]} -eq 0 ]]; then
echo "Failed to discover Windows Bazel lint targets." >&2
exit 1
fi
fi
./.github/scripts/run-bazel-ci.sh \
-- \
build \
"${bazel_lint_args[@]}" \
-- \
"${final_build_targets[@]}"

View File

@@ -1,453 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
print_failed_bazel_test_logs=0
print_failed_bazel_action_summary=0
remote_download_toplevel=0
windows_msvc_host_platform=0
windows_cross_compile=0
while [[ $# -gt 0 ]]; do
case "$1" in
--print-failed-test-logs)
print_failed_bazel_test_logs=1
shift
;;
--print-failed-action-summary)
print_failed_bazel_action_summary=1
shift
;;
--remote-download-toplevel)
remote_download_toplevel=1
shift
;;
--windows-msvc-host-platform)
windows_msvc_host_platform=1
shift
;;
--windows-cross-compile)
windows_cross_compile=1
shift
;;
--)
shift
break
;;
*)
echo "Unknown option: $1" >&2
exit 1
;;
esac
done
if [[ $# -eq 0 ]]; then
echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] [--windows-cross-compile] -- <bazel args> -- <targets>" >&2
exit 1
fi
bazel_startup_args=()
if [[ -n "${BAZEL_OUTPUT_USER_ROOT:-}" ]]; then
bazel_startup_args+=("--output_user_root=${BAZEL_OUTPUT_USER_ROOT}")
fi
run_bazel() {
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
MSYS2_ARG_CONV_EXCL='*' bazel "$@"
return
fi
bazel "$@"
}
ci_config=ci-linux
case "${RUNNER_OS:-}" in
macOS)
ci_config=ci-macos
;;
Windows)
if [[ $windows_cross_compile -eq 1 ]]; then
ci_config=ci-windows-cross
else
ci_config=ci-windows
fi
;;
esac
print_bazel_test_log_tails() {
local console_log="$1"
local testlogs_dir
local -a bazel_info_cmd=(bazel)
local -a bazel_info_args=(info)
if (( ${#bazel_startup_args[@]} > 0 )); then
bazel_info_cmd+=("${bazel_startup_args[@]}")
fi
# `bazel info` needs the same CI config as the failed test invocation so
# platform-specific output roots match. On Windows, omitting `ci-windows`
# would point at `local_windows-fastbuild` even when the test ran with the
# MSVC host platform under `local_windows_msvc-fastbuild`.
if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
bazel_info_args+=(
"--config=${ci_config}"
"--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
)
fi
# Only pass flags that affect Bazel's output-root selection or repository
# lookup. Test/build-only flags such as execution logs or remote download
# mode can make `bazel info` fail, which would hide the real test log path.
for arg in "${post_config_bazel_args[@]}"; do
case "$arg" in
--host_platform=* | --repo_contents_cache=* | --repository_cache=*)
bazel_info_args+=("$arg")
;;
esac
done
testlogs_dir="$(run_bazel "${bazel_info_cmd[@]:1}" \
--noexperimental_remote_repo_contents_cache \
"${bazel_info_args[@]}" \
bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
local failed_targets=()
while IFS= read -r target; do
failed_targets+=("$target")
done < <(
grep -E '^(FAIL: //|ERROR: .* Testing //)' "$console_log" \
| sed -E 's#^FAIL: (//[^ ]+).*#\1#; s#^ERROR: .* Testing (//[^ ]+) failed:.*#\1#' \
| sort -u
)
if [[ ${#failed_targets[@]} -eq 0 ]]; then
echo "No failed Bazel test targets were found in console output."
return
fi
for target in "${failed_targets[@]}"; do
local rel_path="${target#//}"
rel_path="${rel_path/://}"
local test_log="${testlogs_dir}/${rel_path}/test.log"
local reported_test_log
reported_test_log="$(grep -F "FAIL: ${target} " "$console_log" | sed -nE 's#.* \(see (.*[\\/]test\.log)\).*#\1#p' | head -n 1 || true)"
if [[ -n "$reported_test_log" ]]; then
reported_test_log="${reported_test_log//\\//}"
test_log="$reported_test_log"
fi
echo "::group::Bazel test log tail for ${target}"
if [[ -f "$test_log" ]]; then
tail -n 200 "$test_log"
else
echo "Missing test log: $test_log"
fi
echo "::endgroup::"
done
}
print_bazel_action_failure_summary() {
local console_log="$1"
local escaped_summary
local summary
summary="$(
awk '
function clean(line) {
gsub(sprintf("%c", 27) "\\[[0-9;]*m", "", line)
sub(/^.*\t[^\t]*\t[0-9TZ:._-]+ /, "", line)
return line
}
function is_diagnostic(line) {
return line ~ /^(error(\[[^]]+\])?:|warning:|note:|help:)/ ||
line ~ /^[[:space:]]+-->/ ||
line ~ /^[[:space:]]*[0-9]+[[:space:]]+\|/ ||
line ~ /^[[:space:]]*\|/ ||
line ~ /^[[:space:]]+= (note|help):/ ||
line ~ /^[[:space:]]*\^[[:space:]^~-]*$/ ||
line ~ /^For more information/ ||
line ~ /^error: aborting/
}
{
line = clean($0)
}
line ~ /^ERROR: .* failed:/ {
if (printed) {
print ""
}
print line
in_failure = 1
seen_diagnostic = 0
printed = 1
next
}
in_failure && is_diagnostic(line) {
print line
seen_diagnostic = 1
next
}
in_failure && seen_diagnostic && line == "" {
print ""
next
}
in_failure && seen_diagnostic {
in_failure = 0
seen_diagnostic = 0
next
}
' "$console_log"
)"
if [[ -z "$summary" ]]; then
summary="$(grep -E '^ERROR: |^FAILED: ' "$console_log" | tail -n 50 || true)"
fi
if [[ -z "$summary" ]]; then
echo "No Bazel action failures were found in the captured console output."
return
fi
if [[ "${GITHUB_ACTIONS:-}" == "true" ]]; then
escaped_summary="$(
printf '%s' "$summary" \
| awk 'BEGIN { ORS = "" } {
gsub(/%/, "%25")
gsub(/\r/, "%0D")
print sep $0
sep = "%0A"
}'
)"
echo "::error title=Bazel failed action diagnostics::${escaped_summary}"
fi
echo
echo "Bazel failed action diagnostics:"
echo "--------------------------------"
printf '%s\n' "$summary"
echo "--------------------------------"
}
bazel_args=()
bazel_targets=()
found_target_separator=0
for arg in "$@"; do
if [[ "$arg" == "--" && $found_target_separator -eq 0 ]]; then
found_target_separator=1
continue
fi
if [[ $found_target_separator -eq 0 ]]; then
bazel_args+=("$arg")
else
bazel_targets+=("$arg")
fi
done
if [[ ${#bazel_args[@]} -eq 0 || ${#bazel_targets[@]} -eq 0 ]]; then
echo "Expected Bazel args and targets separated by --" >&2
exit 1
fi
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
# Fork PRs do not receive the BuildBuddy secret needed for the remote
# cross-compile config. Preserve the previous local Windows build shape.
windows_msvc_host_platform=1
fi
post_config_bazel_args=()
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_msvc_host_platform -eq 1 ]]; then
has_host_platform_override=0
for arg in "${bazel_args[@]}"; do
if [[ "$arg" == --host_platform=* ]]; then
has_host_platform_override=1
break
fi
done
if [[ $has_host_platform_override -eq 0 ]]; then
# Use the MSVC Windows platform for jobs that need helper binaries like
# Rust test wrappers and V8 generators to resolve a compatible toolchain.
# Callers that need a different Windows target platform should pass an
# explicit `--platforms=...` flag.
post_config_bazel_args+=("--host_platform=//:local_windows_msvc")
fi
fi
if [[ $remote_download_toplevel -eq 1 ]]; then
# Override the CI config's remote_download_minimal setting when callers need
# the built artifact to exist on disk after the command completes.
post_config_bazel_args+=(--remote_download_toplevel)
fi
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
# `--enable_platform_specific_config` expands `common:windows` on Windows
# hosts after ordinary rc configs, which can override `ci-windows-cross`'s
# RBE host platform. Repeat the host platform on the command line so V8 and
# other genrules execute on Linux RBE workers instead of Git Bash locally.
#
# Bazel also derives the default genrule shell from the client host. Without
# an explicit shell executable, remote Linux actions can be asked to run
# `C:\Program Files\Git\usr\bin\bash.exe`.
post_config_bazel_args+=(--host_platform=//:rbe --shell_executable=/bin/bash)
fi
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
# The Windows cross-compile config depends on remote execution. Fork PRs do
# not receive the BuildBuddy secret, so fall back to the existing local build
# shape and keep its lower concurrency cap.
post_config_bazel_args+=(--jobs=8)
fi
if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
# Windows self-hosted runners can run multiple Bazel jobs concurrently. Give
# each job its own repo contents cache so they do not fight over the shared
# path configured in `ci-windows`.
post_config_bazel_args+=("--repo_contents_cache=${BAZEL_REPO_CONTENTS_CACHE}")
fi
if [[ -n "${BAZEL_REPOSITORY_CACHE:-}" ]]; then
post_config_bazel_args+=("--repository_cache=${BAZEL_REPOSITORY_CACHE}")
fi
if [[ -n "${CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR:-}" ]]; then
post_config_bazel_args+=(
"--execution_log_compact_file=${CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR}/execution-log-${bazel_args[0]}-${GITHUB_JOB:-local}-$$.zst"
)
fi
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
pass_windows_build_env=1
if [[ $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
# Remote build actions execute on Linux RBE workers. Passing the Windows
# runner's build environment there makes Bazel genrules try to execute
# C:\Program Files\Git\usr\bin\bash.exe on Linux.
pass_windows_build_env=0
fi
if [[ $pass_windows_build_env -eq 1 ]]; then
windows_action_env_vars=(
INCLUDE
LIB
LIBPATH
UCRTVersion
UniversalCRTSdkDir
VCINSTALLDIR
VCToolsInstallDir
WindowsLibPath
WindowsSdkBinPath
WindowsSdkDir
WindowsSDKLibVersion
WindowsSDKVersion
)
for env_var in "${windows_action_env_vars[@]}"; do
if [[ -n "${!env_var:-}" ]]; then
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
fi
done
fi
if [[ -z "${CODEX_BAZEL_WINDOWS_PATH:-}" ]]; then
echo "CODEX_BAZEL_WINDOWS_PATH must be set for Windows Bazel CI." >&2
exit 1
fi
if [[ $pass_windows_build_env -eq 1 ]]; then
post_config_bazel_args+=(
"--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
"--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
)
elif [[ $windows_cross_compile -eq 1 ]]; then
# Remote build actions run on Linux RBE workers. Give their shell snippets
# a Linux PATH while preserving CODEX_BAZEL_WINDOWS_PATH below for local
# Windows test execution.
post_config_bazel_args+=(
"--action_env=PATH=/usr/bin:/bin"
"--host_action_env=PATH=/usr/bin:/bin"
)
fi
post_config_bazel_args+=("--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}")
fi
bazel_console_log="$(mktemp)"
trap 'rm -f "$bazel_console_log"' EXIT
bazel_cmd=(bazel)
if (( ${#bazel_startup_args[@]} > 0 )); then
bazel_cmd+=("${bazel_startup_args[@]}")
fi
if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
echo "BuildBuddy API key is available; using remote Bazel configuration."
# Work around Bazel 9 remote repo contents cache / overlay materialization failures
# seen in CI (for example "is not a symlink" or permission errors while
# materializing external repos such as rules_perl). We still use BuildBuddy for
# remote execution/cache; this only disables the startup-level repo contents cache.
bazel_run_args=(
"${bazel_args[@]}"
"--config=${ci_config}"
"--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
)
if (( ${#post_config_bazel_args[@]} > 0 )); then
bazel_run_args+=("${post_config_bazel_args[@]}")
fi
set +e
run_bazel "${bazel_cmd[@]:1}" \
--noexperimental_remote_repo_contents_cache \
"${bazel_run_args[@]}" \
-- \
"${bazel_targets[@]}" \
2>&1 | tee "$bazel_console_log"
bazel_status=${PIPESTATUS[0]}
set -e
else
echo "BuildBuddy API key is not available; using local Bazel configuration."
# Keep fork/community PRs on Bazel but disable remote services that are
# configured in .bazelrc and require auth.
#
# Flag docs:
# - Command-line reference: https://bazel.build/reference/command-line-reference
# - Remote caching overview: https://bazel.build/remote/caching
# - Remote execution overview: https://bazel.build/remote/rbe
# - Build Event Protocol overview: https://bazel.build/remote/bep
#
# --noexperimental_remote_repo_contents_cache:
# disable remote repo contents cache enabled in .bazelrc startup options.
# https://bazel.build/reference/command-line-reference#startup_options-flag--experimental_remote_repo_contents_cache
# --remote_cache= and --remote_executor=:
# clear remote cache/execution endpoints configured in .bazelrc.
# https://bazel.build/reference/command-line-reference#common_options-flag--remote_cache
# https://bazel.build/reference/command-line-reference#common_options-flag--remote_executor
bazel_run_args=(
"${bazel_args[@]}"
--remote_cache=
--remote_executor=
)
if (( ${#post_config_bazel_args[@]} > 0 )); then
bazel_run_args+=("${post_config_bazel_args[@]}")
fi
set +e
run_bazel "${bazel_cmd[@]:1}" \
--noexperimental_remote_repo_contents_cache \
"${bazel_run_args[@]}" \
-- \
"${bazel_targets[@]}" \
2>&1 | tee "$bazel_console_log"
bazel_status=${PIPESTATUS[0]}
set -e
fi
if [[ ${bazel_status:-0} -ne 0 ]]; then
if [[ $print_failed_bazel_action_summary -eq 1 ]]; then
print_bazel_action_failure_summary "$bazel_console_log"
fi
if [[ $print_failed_bazel_test_logs -eq 1 ]]; then
print_bazel_test_log_tails "$bazel_console_log"
fi
exit "$bazel_status"
fi
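
Putting the argument parsing together, a test invocation that opts into the failure diagnostics above looks like this (targets illustrative; the `--` separators match the usage string):

./.github/scripts/run-bazel-ci.sh \
  --print-failed-test-logs \
  --print-failed-action-summary \
  -- \
  test \
  --keep_going \
  -- \
  //codex-rs/...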

View File

@@ -1,84 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Run Bazel queries with the same CI startup settings as the main build/test
# invocation so target-discovery queries can reuse the same Bazel server.
query_args=()
windows_cross_compile=0
while [[ $# -gt 0 ]]; do
case "$1" in
--windows-cross-compile)
windows_cross_compile=1
shift
;;
--)
shift
break
;;
*)
query_args+=("$1")
shift
;;
esac
done
if [[ $# -ne 1 ]]; then
echo "Usage: $0 [--windows-cross-compile] [<bazel query args>...] -- <query expression>" >&2
exit 1
fi
query_expression="$1"
ci_config=ci-linux
case "${RUNNER_OS:-}" in
macOS)
ci_config=ci-macos
;;
Windows)
if [[ $windows_cross_compile -eq 1 ]]; then
ci_config=ci-windows-cross
else
ci_config=ci-windows
fi
;;
esac
bazel_startup_args=()
if [[ -n "${BAZEL_OUTPUT_USER_ROOT:-}" ]]; then
bazel_startup_args+=("--output_user_root=${BAZEL_OUTPUT_USER_ROOT}")
fi
run_bazel() {
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
MSYS2_ARG_CONV_EXCL='*' bazel "$@"
return
fi
bazel "$@"
}
bazel_query_args=(--noexperimental_remote_repo_contents_cache query)
if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
bazel_query_args+=(
"--config=${ci_config}"
"--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
)
fi
if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
bazel_query_args+=("--repo_contents_cache=${BAZEL_REPO_CONTENTS_CACHE}")
fi
if [[ -n "${BAZEL_REPOSITORY_CACHE:-}" ]]; then
bazel_query_args+=("--repository_cache=${BAZEL_REPOSITORY_CACHE}")
fi
bazel_query_args+=("${query_args[@]}" "$query_expression")
if (( ${#bazel_startup_args[@]} > 0 )); then
run_bazel "${bazel_startup_args[@]}" "${bazel_query_args[@]}"
else
run_bazel "${bazel_query_args[@]}"
fi
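
For example, the Windows lint wrapper above uses this script to discover crate targets; an equivalent standalone call looks like this (query expression illustrative):

./.github/scripts/run-bazel-query-ci.sh \
  --keep_going \
  --output=label \
  -- \
  'kind("rust_library rule", //codex-rs/...)'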

View File

@@ -1,412 +0,0 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import gzip
import hashlib
import os
import re
import shutil
import subprocess
import sys
import tomllib
from pathlib import Path
from rusty_v8_module_bazel import (
RustyV8ChecksumError,
check_module_bazel,
rusty_v8_http_file_versions,
update_module_bazel,
)
ROOT = Path(__file__).resolve().parents[2]
MODULE_BAZEL = ROOT / "MODULE.bazel"
RUSTY_V8_CHECKSUMS_DIR = ROOT / "third_party" / "v8"
RELEASE_ARTIFACT_PROFILE = "release"
SANDBOX_ARTIFACT_PROFILE = "ptrcomp_sandbox_release"
ARTIFACT_BAZEL_CONFIGS = ["rusty-v8-upstream-libcxx"]
def bazel_remote_args() -> list[str]:
buildbuddy_api_key = os.environ.get("BUILDBUDDY_API_KEY")
if not buildbuddy_api_key:
return []
return [f"--remote_header=x-buildbuddy-api-key={buildbuddy_api_key}"]
def bazel_execroot() -> Path:
result = subprocess.run(
["bazel", "info", "execution_root"],
cwd=ROOT,
check=True,
capture_output=True,
text=True,
)
return Path(result.stdout.strip())
def bazel_output_base() -> Path:
result = subprocess.run(
["bazel", "info", "output_base"],
cwd=ROOT,
check=True,
capture_output=True,
text=True,
)
return Path(result.stdout.strip())
def bazel_output_path(path: str) -> Path:
if path.startswith("external/"):
return bazel_output_base() / path
return bazel_execroot() / path
def bazel_output_files(
platform: str,
labels: list[str],
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
) -> list[Path]:
expression = "set(" + " ".join(labels) + ")"
bazel_configs = bazel_configs or []
result = subprocess.run(
[
"bazel",
"cquery",
"-c",
compilation_mode,
f"--platforms=@llvm//platforms:{platform}",
*[f"--config={config}" for config in bazel_configs],
*bazel_remote_args(),
"--output=files",
expression,
],
cwd=ROOT,
check=True,
capture_output=True,
text=True,
)
return [bazel_output_path(line.strip()) for line in result.stdout.splitlines() if line.strip()]
def bazel_build(
platform: str,
labels: list[str],
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
download_toplevel: bool = False,
) -> None:
bazel_configs = bazel_configs or []
download_args = ["--remote_download_toplevel"] if download_toplevel else []
subprocess.run(
[
"bazel",
"build",
"-c",
compilation_mode,
f"--platforms=@llvm//platforms:{platform}",
*[f"--config={config}" for config in bazel_configs],
*bazel_remote_args(),
*download_args,
*labels,
],
cwd=ROOT,
check=True,
)
def ensure_bazel_output_files(
platform: str,
labels: list[str],
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
) -> list[Path]:
# Bazel output paths can be reused across config flips, so existence alone
# does not prove the files match the requested flags.
bazel_build(
platform,
labels,
compilation_mode,
bazel_configs,
download_toplevel=True,
)
outputs = bazel_output_files(platform, labels, compilation_mode, bazel_configs)
missing = [str(path) for path in outputs if not path.exists()]
if missing:
raise SystemExit(f"missing built outputs for {labels}: {missing}")
return outputs
def artifact_bazel_configs(bazel_configs: list[str] | None = None) -> list[str]:
configured = list(ARTIFACT_BAZEL_CONFIGS)
for config in bazel_configs or []:
if config not in configured:
configured.append(config)
return configured
def release_pair_label(target: str, sandbox: bool = False) -> str:
target_suffix = target.replace("-", "_")
pair_kind = "sandbox_release_pair" if sandbox else "release_pair"
return f"//third_party/v8:rusty_v8_{pair_kind}_{target_suffix}"
def resolved_v8_crate_version() -> str:
cargo_lock = tomllib.loads((ROOT / "codex-rs" / "Cargo.lock").read_text())
versions = sorted(
{
package["version"]
for package in cargo_lock["package"]
if package["name"] == "v8"
}
)
if len(versions) == 1:
return versions[0]
if len(versions) > 1:
raise SystemExit(f"expected exactly one resolved v8 version, found: {versions}")
module_bazel = (ROOT / "MODULE.bazel").read_text()
matches = sorted(
set(
re.findall(
r'https://static\.crates\.io/crates/v8/v8-([0-9]+\.[0-9]+\.[0-9]+)\.crate',
module_bazel,
)
)
)
if len(matches) != 1:
raise SystemExit(
"expected exactly one pinned v8 crate version in MODULE.bazel, "
f"found: {matches}"
)
return matches[0]
def rusty_v8_checksum_manifest_path(version: str) -> Path:
return RUSTY_V8_CHECKSUMS_DIR / f"rusty_v8_{version.replace('.', '_')}.sha256"
def command_version(version: str | None) -> str:
if version is not None:
return version
manifest_versions = rusty_v8_http_file_versions(MODULE_BAZEL.read_text())
if len(manifest_versions) == 1:
return manifest_versions[0]
if len(manifest_versions) > 1:
raise SystemExit(
"expected at most one rusty_v8 http_file version in MODULE.bazel, "
f"found: {manifest_versions}; pass --version explicitly"
)
return resolved_v8_crate_version()
def command_manifest_path(manifest: Path | None, version: str) -> Path:
if manifest is None:
return rusty_v8_checksum_manifest_path(version)
if manifest.is_absolute():
return manifest
return ROOT / manifest
def staged_archive_name(target: str, source_path: Path, artifact_profile: str) -> str:
if target.endswith("-pc-windows-msvc"):
return f"rusty_v8_{artifact_profile}_{target}.lib.gz"
return f"librusty_v8_{artifact_profile}_{target}.a.gz"
def staged_binding_name(target: str, artifact_profile: str) -> str:
return f"src_binding_{artifact_profile}_{target}.rs"
def staged_checksums_name(target: str, artifact_profile: str) -> str:
return f"rusty_v8_{artifact_profile}_{target}.sha256"
def stage_artifacts(
target: str,
lib_path: Path,
binding_path: Path,
output_dir: Path,
sandbox: bool,
) -> None:
missing_paths = [str(path) for path in [lib_path, binding_path] if not path.exists()]
if missing_paths:
raise SystemExit(f"missing release outputs for {target}: {missing_paths}")
output_dir.mkdir(parents=True, exist_ok=True)
artifact_profile = SANDBOX_ARTIFACT_PROFILE if sandbox else RELEASE_ARTIFACT_PROFILE
staged_library = output_dir / staged_archive_name(target, lib_path, artifact_profile)
staged_binding = output_dir / staged_binding_name(target, artifact_profile)
with lib_path.open("rb") as src, staged_library.open("wb") as dst:
with gzip.GzipFile(
filename="",
mode="wb",
fileobj=dst,
compresslevel=6,
mtime=0,
) as gz:
shutil.copyfileobj(src, gz)
shutil.copyfile(binding_path, staged_binding)
staged_checksums = output_dir / staged_checksums_name(target, artifact_profile)
with staged_checksums.open("w", encoding="utf-8") as checksums:
for path in [staged_library, staged_binding]:
digest = hashlib.sha256()
with path.open("rb") as artifact:
for chunk in iter(lambda: artifact.read(1024 * 1024), b""):
digest.update(chunk)
checksums.write(f"{digest.hexdigest()} {path.name}\n")
print(staged_library)
print(staged_binding)
print(staged_checksums)
def upstream_release_pair_paths(source_root: Path, target: str) -> tuple[Path, Path]:
lib_name = "rusty_v8.lib" if target.endswith("-pc-windows-msvc") else "librusty_v8.a"
gn_out = source_root / "target" / target / "release" / "gn_out"
return gn_out / "obj" / lib_name, gn_out / "src_binding.rs"
def stage_upstream_release_pair(
source_root: Path,
target: str,
output_dir: Path,
sandbox: bool = False,
) -> None:
lib_path, binding_path = upstream_release_pair_paths(source_root, target)
stage_artifacts(target, lib_path, binding_path, output_dir, sandbox)
def stage_release_pair(
platform: str,
target: str,
output_dir: Path,
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
sandbox: bool = False,
) -> None:
bazel_configs = artifact_bazel_configs(bazel_configs)
outputs = ensure_bazel_output_files(
platform,
[release_pair_label(target, sandbox)],
compilation_mode,
bazel_configs,
)
try:
lib_path = next(path for path in outputs if path.suffix in {".a", ".lib"})
except StopIteration as exc:
raise SystemExit(f"missing static library output for {target}") from exc
try:
binding_path = next(path for path in outputs if path.suffix == ".rs")
except StopIteration as exc:
raise SystemExit(f"missing Rust binding output for {target}") from exc
stage_artifacts(target, lib_path, binding_path, output_dir, sandbox)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="command", required=True)
stage_release_pair_parser = subparsers.add_parser("stage-release-pair")
stage_release_pair_parser.add_argument("--platform", required=True)
stage_release_pair_parser.add_argument("--target", required=True)
stage_release_pair_parser.add_argument("--output-dir", required=True)
stage_release_pair_parser.add_argument("--sandbox", action="store_true")
stage_release_pair_parser.add_argument(
"--bazel-config",
action="append",
default=[],
dest="bazel_configs",
)
stage_release_pair_parser.add_argument(
"--compilation-mode",
default="fastbuild",
choices=["fastbuild", "opt", "dbg"],
)
stage_upstream_release_pair_parser = subparsers.add_parser(
"stage-upstream-release-pair"
)
stage_upstream_release_pair_parser.add_argument("--source-root", type=Path, required=True)
stage_upstream_release_pair_parser.add_argument("--target", required=True)
stage_upstream_release_pair_parser.add_argument("--output-dir", required=True)
stage_upstream_release_pair_parser.add_argument("--sandbox", action="store_true")
subparsers.add_parser("resolved-v8-crate-version")
check_module_bazel_parser = subparsers.add_parser("check-module-bazel")
check_module_bazel_parser.add_argument("--version")
check_module_bazel_parser.add_argument("--manifest", type=Path)
check_module_bazel_parser.add_argument(
"--module-bazel",
type=Path,
default=MODULE_BAZEL,
)
update_module_bazel_parser = subparsers.add_parser("update-module-bazel")
update_module_bazel_parser.add_argument("--version")
update_module_bazel_parser.add_argument("--manifest", type=Path)
update_module_bazel_parser.add_argument(
"--module-bazel",
type=Path,
default=MODULE_BAZEL,
)
return parser.parse_args()
def main() -> int:
args = parse_args()
if args.command == "stage-release-pair":
stage_release_pair(
platform=args.platform,
target=args.target,
output_dir=Path(args.output_dir),
compilation_mode=args.compilation_mode,
bazel_configs=args.bazel_configs,
sandbox=args.sandbox,
)
return 0
if args.command == "stage-upstream-release-pair":
stage_upstream_release_pair(
source_root=args.source_root,
target=args.target,
output_dir=Path(args.output_dir),
sandbox=args.sandbox,
)
return 0
if args.command == "resolved-v8-crate-version":
print(resolved_v8_crate_version())
return 0
if args.command == "check-module-bazel":
version = command_version(args.version)
manifest_path = command_manifest_path(args.manifest, version)
try:
check_module_bazel(args.module_bazel, manifest_path, version)
except RustyV8ChecksumError as exc:
raise SystemExit(str(exc)) from exc
return 0
if args.command == "update-module-bazel":
version = command_version(args.version)
manifest_path = command_manifest_path(args.manifest, version)
try:
update_module_bazel(args.module_bazel, manifest_path, version)
except RustyV8ChecksumError as exc:
raise SystemExit(str(exc)) from exc
return 0
raise SystemExit(f"unsupported command: {args.command}")
if __name__ == "__main__":
sys.exit(main())
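
The subcommands wired up in parse_args can also be exercised directly; the calls below are illustrative (the version value is an example, and the platform label is a hypothetical @llvm//platforms entry):

python3 .github/scripts/rusty_v8_bazel.py resolved-v8-crate-version
python3 .github/scripts/rusty_v8_bazel.py check-module-bazel --version 146.4.0
python3 .github/scripts/rusty_v8_bazel.py stage-release-pair \
  --platform linux_x86_64 \
  --target x86_64-unknown-linux-musl \
  --output-dir "$RUNNER_TEMP/rusty_v8_staging"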

View File

@@ -1,243 +0,0 @@
#!/usr/bin/env python3
from __future__ import annotations
import re
from dataclasses import dataclass
from pathlib import Path
SHA256_RE = re.compile(r"[0-9a-f]{64}")
HTTP_FILE_BLOCK_RE = re.compile(r"(?ms)^http_file\(\n.*?^\)\n?")
HTTP_FILE_VERSION_RE = re.compile(r"^rusty_v8_([0-9]+)_([0-9]+)_([0-9]+)_")
class RustyV8ChecksumError(ValueError):
pass
@dataclass(frozen=True)
class RustyV8HttpFile:
start: int
end: int
block: str
name: str
downloaded_file_path: str
sha256: str | None
def parse_checksum_manifest(path: Path) -> dict[str, str]:
try:
lines = path.read_text(encoding="utf-8").splitlines()
except FileNotFoundError as exc:
raise RustyV8ChecksumError(f"missing checksum manifest: {path}") from exc
checksums: dict[str, str] = {}
for line_number, line in enumerate(lines, 1):
if not line.strip():
continue
parts = line.split()
if len(parts) != 2:
raise RustyV8ChecksumError(
f"{path}:{line_number}: expected '<sha256> <filename>'"
)
checksum, filename = parts
if not SHA256_RE.fullmatch(checksum):
raise RustyV8ChecksumError(
f"{path}:{line_number}: invalid SHA-256 digest for {filename}"
)
if not filename or filename in {".", ".."} or "/" in filename:
raise RustyV8ChecksumError(
f"{path}:{line_number}: expected a bare artifact filename"
)
if filename in checksums:
raise RustyV8ChecksumError(
f"{path}:{line_number}: duplicate checksum for {filename}"
)
checksums[filename] = checksum
if not checksums:
raise RustyV8ChecksumError(f"empty checksum manifest: {path}")
return checksums
def string_field(block: str, field: str) -> str | None:
# Matches one-line string fields inside http_file blocks, e.g. `sha256 = "...",`.
match = re.search(rf'^\s*{re.escape(field)}\s*=\s*"([^"]+)",\s*$', block, re.M)
if match:
return match.group(1)
return None
def rusty_v8_http_files(module_bazel: str, version: str) -> list[RustyV8HttpFile]:
version_slug = version.replace(".", "_")
name_prefix = f"rusty_v8_{version_slug}_"
entries = []
for match in HTTP_FILE_BLOCK_RE.finditer(module_bazel):
block = match.group(0)
name = string_field(block, "name")
if not name or not name.startswith(name_prefix):
continue
downloaded_file_path = string_field(block, "downloaded_file_path")
if not downloaded_file_path:
raise RustyV8ChecksumError(
f"MODULE.bazel {name} is missing downloaded_file_path"
)
entries.append(
RustyV8HttpFile(
start=match.start(),
end=match.end(),
block=block,
name=name,
downloaded_file_path=downloaded_file_path,
sha256=string_field(block, "sha256"),
)
)
return entries
def rusty_v8_http_file_versions(module_bazel: str) -> list[str]:
versions = set()
for match in HTTP_FILE_BLOCK_RE.finditer(module_bazel):
name = string_field(match.group(0), "name")
if not name:
continue
version_match = HTTP_FILE_VERSION_RE.match(name)
if version_match:
versions.add(".".join(version_match.groups()))
return sorted(versions)
def module_entry_set_errors(
entries: list[RustyV8HttpFile],
checksums: dict[str, str],
version: str,
) -> list[str]:
errors = []
if not entries:
errors.append(f"MODULE.bazel has no rusty_v8 http_file entries for {version}")
return errors
module_files: dict[str, RustyV8HttpFile] = {}
duplicate_files = set()
for entry in entries:
if entry.downloaded_file_path in module_files:
duplicate_files.add(entry.downloaded_file_path)
module_files[entry.downloaded_file_path] = entry
for filename in sorted(duplicate_files):
errors.append(f"MODULE.bazel has duplicate http_file entries for {filename}")
for filename in sorted(set(module_files) - set(checksums)):
entry = module_files[filename]
errors.append(f"MODULE.bazel {entry.name} has no checksum in the manifest")
for filename in sorted(set(checksums) - set(module_files)):
errors.append(f"manifest has {filename}, but MODULE.bazel has no http_file")
return errors
def module_checksum_errors(
entries: list[RustyV8HttpFile],
checksums: dict[str, str],
) -> list[str]:
errors = []
for entry in entries:
expected = checksums.get(entry.downloaded_file_path)
if expected is None:
continue
if entry.sha256 is None:
errors.append(f"MODULE.bazel {entry.name} is missing sha256")
elif entry.sha256 != expected:
errors.append(
f"MODULE.bazel {entry.name} has sha256 {entry.sha256}, "
f"expected {expected}"
)
return errors
def raise_checksum_errors(message: str, errors: list[str]) -> None:
if errors:
formatted_errors = "\n".join(f"- {error}" for error in errors)
raise RustyV8ChecksumError(f"{message}:\n{formatted_errors}")
def check_module_bazel_text(
module_bazel: str,
checksums: dict[str, str],
version: str,
) -> None:
entries = rusty_v8_http_files(module_bazel, version)
errors = [
*module_entry_set_errors(entries, checksums, version),
*module_checksum_errors(entries, checksums),
]
raise_checksum_errors("rusty_v8 MODULE.bazel checksum drift", errors)
def block_with_sha256(block: str, checksum: str) -> str:
sha256_line_re = re.compile(r'(?m)^(\s*)sha256\s*=\s*"[0-9a-f]+",\s*$')
if sha256_line_re.search(block):
return sha256_line_re.sub(
lambda match: f'{match.group(1)}sha256 = "{checksum}",',
block,
count=1,
)
downloaded_file_path_match = re.search(
r'(?m)^(\s*)downloaded_file_path\s*=\s*"[^"]+",\n',
block,
)
if not downloaded_file_path_match:
raise RustyV8ChecksumError("http_file block is missing downloaded_file_path")
insert_at = downloaded_file_path_match.end()
indent = downloaded_file_path_match.group(1)
return f'{block[:insert_at]}{indent}sha256 = "{checksum}",\n{block[insert_at:]}'
def update_module_bazel_text(
module_bazel: str,
checksums: dict[str, str],
version: str,
) -> str:
entries = rusty_v8_http_files(module_bazel, version)
errors = module_entry_set_errors(entries, checksums, version)
raise_checksum_errors("cannot update rusty_v8 MODULE.bazel checksums", errors)
updated = []
previous_end = 0
for entry in entries:
updated.append(module_bazel[previous_end : entry.start])
updated.append(
block_with_sha256(entry.block, checksums[entry.downloaded_file_path])
)
previous_end = entry.end
updated.append(module_bazel[previous_end:])
return "".join(updated)
def check_module_bazel(
module_bazel_path: Path,
manifest_path: Path,
version: str,
) -> None:
checksums = parse_checksum_manifest(manifest_path)
module_bazel = module_bazel_path.read_text(encoding="utf-8")
check_module_bazel_text(module_bazel, checksums, version)
print(f"{module_bazel_path} rusty_v8 {version} checksums match {manifest_path}")
def update_module_bazel(
module_bazel_path: Path,
manifest_path: Path,
version: str,
) -> None:
checksums = parse_checksum_manifest(manifest_path)
module_bazel = module_bazel_path.read_text(encoding="utf-8")
updated_module_bazel = update_module_bazel_text(module_bazel, checksums, version)
if updated_module_bazel == module_bazel:
print(f"{module_bazel_path} rusty_v8 {version} checksums are already current")
return
module_bazel_path.write_text(updated_module_bazel, encoding="utf-8")
print(f"updated {module_bazel_path} rusty_v8 {version} checksums")

View File

@@ -1,413 +0,0 @@
#!/usr/bin/env python3
from __future__ import annotations
import textwrap
import unittest
from os import environ
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import patch
import rusty_v8_bazel
import rusty_v8_module_bazel
class RustyV8BazelTest(unittest.TestCase):
def test_consumer_selectors_track_resolved_crate_version(self) -> None:
build_bazel = (
rusty_v8_bazel.ROOT / "third_party" / "v8" / "BUILD.bazel"
).read_text()
version_suffix = rusty_v8_bazel.resolved_v8_crate_version().replace(".", "_")
for selector in [
"aarch64_apple_darwin_bazel",
"aarch64_pc_windows_gnullvm",
"aarch64_pc_windows_msvc",
"aarch64_unknown_linux_gnu_bazel",
"aarch64_unknown_linux_musl_release_base",
"x86_64_apple_darwin_bazel",
"x86_64_pc_windows_gnullvm",
"x86_64_pc_windows_msvc",
"x86_64_unknown_linux_gnu_bazel",
"x86_64_unknown_linux_musl_release",
]:
self.assertIn(
f":v8_{version_suffix}_{selector}",
build_bazel,
)
for selector in [
"aarch64_apple_darwin",
"aarch64_pc_windows_gnullvm",
"aarch64_pc_windows_msvc",
"aarch64_unknown_linux_gnu",
"aarch64_unknown_linux_musl",
"x86_64_apple_darwin",
"x86_64_pc_windows_gnullvm",
"x86_64_pc_windows_msvc",
"x86_64_unknown_linux_gnu",
"x86_64_unknown_linux_musl",
]:
self.assertIn(
f":src_binding_release_{selector}_{version_suffix}_release",
build_bazel,
)
def test_command_version_tracks_remaining_http_file_assets(self) -> None:
with TemporaryDirectory() as temp_dir:
module_bazel = Path(temp_dir) / "MODULE.bazel"
module_bazel.write_text(
textwrap.dedent(
"""\
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
urls = ["https://example.test/archive.gz"],
)
"""
)
)
with patch.object(rusty_v8_bazel, "MODULE_BAZEL", module_bazel):
self.assertEqual("146.4.0", rusty_v8_bazel.command_version(None))
def test_artifact_bazel_configs_always_enable_upstream_libcxx(self) -> None:
self.assertEqual(
["rusty-v8-upstream-libcxx"],
rusty_v8_bazel.artifact_bazel_configs(),
)
self.assertEqual(
["rusty-v8-upstream-libcxx", "v8-release-compat"],
rusty_v8_bazel.artifact_bazel_configs(["v8-release-compat"]),
)
self.assertEqual(
["rusty-v8-upstream-libcxx", "v8-release-compat"],
rusty_v8_bazel.artifact_bazel_configs(
["rusty-v8-upstream-libcxx", "v8-release-compat"]
),
)
def test_bazel_remote_args_include_buildbuddy_header_when_present(self) -> None:
with patch.dict(environ, {"BUILDBUDDY_API_KEY": "token"}, clear=False):
self.assertEqual(
["--remote_header=x-buildbuddy-api-key=token"],
rusty_v8_bazel.bazel_remote_args(),
)
with patch.dict(environ, {}, clear=True):
self.assertEqual([], rusty_v8_bazel.bazel_remote_args())
def test_release_pair_labels_and_staged_names_distinguish_sandbox_artifacts(self) -> None:
self.assertEqual(
"//third_party/v8:rusty_v8_release_pair_x86_64_unknown_linux_musl",
rusty_v8_bazel.release_pair_label("x86_64-unknown-linux-musl"),
)
self.assertEqual(
"//third_party/v8:rusty_v8_sandbox_release_pair_x86_64_unknown_linux_musl",
rusty_v8_bazel.release_pair_label("x86_64-unknown-linux-musl", sandbox=True),
)
self.assertEqual(
"//third_party/v8:rusty_v8_sandbox_release_pair_x86_64_apple_darwin",
rusty_v8_bazel.release_pair_label("x86_64-apple-darwin", sandbox=True),
)
self.assertEqual(
"librusty_v8_release_x86_64-unknown-linux-musl.a.gz",
rusty_v8_bazel.staged_archive_name(
"x86_64-unknown-linux-musl",
Path("libv8.a"),
rusty_v8_bazel.RELEASE_ARTIFACT_PROFILE,
),
)
self.assertEqual(
"rusty_v8_ptrcomp_sandbox_release_x86_64-pc-windows-msvc.lib.gz",
rusty_v8_bazel.staged_archive_name(
"x86_64-pc-windows-msvc",
Path("v8.a"),
rusty_v8_bazel.SANDBOX_ARTIFACT_PROFILE,
),
)
self.assertEqual(
"src_binding_ptrcomp_sandbox_release_x86_64-unknown-linux-musl.rs",
rusty_v8_bazel.staged_binding_name(
"x86_64-unknown-linux-musl",
rusty_v8_bazel.SANDBOX_ARTIFACT_PROFILE,
),
)
self.assertEqual(
"rusty_v8_ptrcomp_sandbox_release_x86_64-unknown-linux-musl.sha256",
rusty_v8_bazel.staged_checksums_name(
"x86_64-unknown-linux-musl",
rusty_v8_bazel.SANDBOX_ARTIFACT_PROFILE,
),
)
def test_stage_artifacts(self) -> None:
with TemporaryDirectory() as source_dir, TemporaryDirectory() as output_dir:
source_root = Path(source_dir)
archive = source_root / "librusty_v8.a"
binding = source_root / "src_binding.rs"
archive.write_bytes(b"archive")
binding.write_text("binding")
rusty_v8_bazel.stage_artifacts(
"aarch64-apple-darwin",
archive,
binding,
Path(output_dir),
sandbox=True,
)
self.assertEqual(
{
"librusty_v8_ptrcomp_sandbox_release_aarch64-apple-darwin.a.gz",
"src_binding_ptrcomp_sandbox_release_aarch64-apple-darwin.rs",
"rusty_v8_ptrcomp_sandbox_release_aarch64-apple-darwin.sha256",
},
{path.name for path in Path(output_dir).iterdir()},
)
def test_upstream_release_pair_paths(self) -> None:
self.assertEqual(
(
Path(
"/tmp/rusty_v8/target/x86_64-apple-darwin/release/gn_out/obj/"
"librusty_v8.a"
),
Path(
"/tmp/rusty_v8/target/x86_64-apple-darwin/release/gn_out/"
"src_binding.rs"
),
),
rusty_v8_bazel.upstream_release_pair_paths(
Path("/tmp/rusty_v8"),
"x86_64-apple-darwin",
),
)
self.assertEqual(
(
Path(
"/tmp/rusty_v8/target/x86_64-pc-windows-msvc/release/gn_out/"
"obj/rusty_v8.lib"
),
Path(
"/tmp/rusty_v8/target/x86_64-pc-windows-msvc/release/gn_out/"
"src_binding.rs"
),
),
rusty_v8_bazel.upstream_release_pair_paths(
Path("/tmp/rusty_v8"),
"x86_64-pc-windows-msvc",
),
)
def test_stage_upstream_release_pair(self) -> None:
with TemporaryDirectory() as source_dir, TemporaryDirectory() as output_dir:
source_root = Path(source_dir)
gn_out = (
source_root
/ "target"
/ "x86_64-pc-windows-msvc"
/ "release"
/ "gn_out"
)
(gn_out / "obj").mkdir(parents=True)
(gn_out / "obj" / "rusty_v8.lib").write_bytes(b"archive")
(gn_out / "src_binding.rs").write_text("binding")
rusty_v8_bazel.stage_upstream_release_pair(
source_root,
"x86_64-pc-windows-msvc",
Path(output_dir),
sandbox=True,
)
self.assertEqual(
{
"rusty_v8_ptrcomp_sandbox_release_x86_64-pc-windows-msvc.lib.gz",
"src_binding_ptrcomp_sandbox_release_x86_64-pc-windows-msvc.rs",
"rusty_v8_ptrcomp_sandbox_release_x86_64-pc-windows-msvc.sha256",
},
{path.name for path in Path(output_dir).iterdir()},
)
def test_ensure_bazel_output_files_rebuilds_existing_outputs(self) -> None:
with TemporaryDirectory() as output_dir:
output = Path(output_dir) / "libv8.a"
output.write_bytes(b"archive")
with (
patch.object(rusty_v8_bazel, "bazel_build") as bazel_build,
patch.object(
rusty_v8_bazel,
"bazel_output_files",
return_value=[output],
) as bazel_output_files,
):
self.assertEqual(
[output],
rusty_v8_bazel.ensure_bazel_output_files(
"macos_arm64",
["//third_party/v8:pair"],
"opt",
["rusty-v8-upstream-libcxx"],
),
)
bazel_build.assert_called_once_with(
"macos_arm64",
["//third_party/v8:pair"],
"opt",
["rusty-v8-upstream-libcxx"],
download_toplevel=True,
)
bazel_output_files.assert_called_once_with(
"macos_arm64",
["//third_party/v8:pair"],
"opt",
["rusty-v8-upstream-libcxx"],
)
def test_update_module_bazel_replaces_and_inserts_sha256(self) -> None:
module_bazel = textwrap.dedent(
"""\
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "0000000000000000000000000000000000000000000000000000000000000000",
urls = [
"https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",
urls = [
"https://example.test/src_binding_release_x86_64-unknown-linux-musl.rs",
],
)
http_file(
name = "rusty_v8_145_0_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
urls = [
"https://example.test/old.gz",
],
)
"""
)
checksums = {
"librusty_v8_release_x86_64-unknown-linux-gnu.a.gz": (
"1111111111111111111111111111111111111111111111111111111111111111"
),
"src_binding_release_x86_64-unknown-linux-musl.rs": (
"2222222222222222222222222222222222222222222222222222222222222222"
),
}
updated = rusty_v8_module_bazel.update_module_bazel_text(
module_bazel,
checksums,
"146.4.0",
)
self.assertEqual(
textwrap.dedent(
"""\
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "1111111111111111111111111111111111111111111111111111111111111111",
urls = [
"https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",
sha256 = "2222222222222222222222222222222222222222222222222222222222222222",
urls = [
"https://example.test/src_binding_release_x86_64-unknown-linux-musl.rs",
],
)
http_file(
name = "rusty_v8_145_0_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
urls = [
"https://example.test/old.gz",
],
)
"""
),
updated,
)
rusty_v8_module_bazel.check_module_bazel_text(updated, checksums, "146.4.0")
def test_check_module_bazel_rejects_manifest_drift(self) -> None:
module_bazel = textwrap.dedent(
"""\
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "1111111111111111111111111111111111111111111111111111111111111111",
urls = [
"https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
],
)
"""
)
checksums = {
"librusty_v8_release_x86_64-unknown-linux-gnu.a.gz": (
"1111111111111111111111111111111111111111111111111111111111111111"
),
"orphan.gz": (
"2222222222222222222222222222222222222222222222222222222222222222"
),
}
with self.assertRaisesRegex(
rusty_v8_module_bazel.RustyV8ChecksumError,
"manifest has orphan.gz",
):
rusty_v8_module_bazel.check_module_bazel_text(
module_bazel,
checksums,
"146.4.0",
)
def test_rusty_v8_http_file_versions(self) -> None:
module_bazel = textwrap.dedent(
"""\
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "archive.gz",
urls = ["https://example.test/archive.gz"],
)
http_file(
name = "rusty_v8_147_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "new-archive.gz",
urls = ["https://example.test/new-archive.gz"],
)
http_file(
name = "unrelated_archive",
downloaded_file_path = "other.gz",
urls = ["https://example.test/other.gz"],
)
"""
)
self.assertEqual(
["146.4.0", "147.4.0"],
rusty_v8_module_bazel.rusty_v8_http_file_versions(module_bazel),
)
if __name__ == "__main__":
unittest.main()

View File

@@ -1,234 +0,0 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import re
import sys
import tomllib
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
DEFAULT_CARGO_TOML = ROOT / "codex-rs" / "Cargo.toml"
DEFAULT_BAZELRC = ROOT / ".bazelrc"
BAZEL_CLIPPY_FLAG_PREFIX = "build:clippy --@rules_rust//rust/settings:clippy_flag="
BAZEL_SPECIAL_FLAGS = {"-Dwarnings"}
VALID_LEVELS = {"allow", "warn", "deny", "forbid"}
LONG_FLAG_RE = re.compile(
r"^--(?P<level>allow|warn|deny|forbid)=clippy::(?P<lint>[a-z0-9_]+)$"
)
SHORT_FLAG_RE = re.compile(r"^-(?P<level>[AWDF])clippy::(?P<lint>[a-z0-9_]+)$")
SHORT_LEVEL_NAMES = {
"A": "allow",
"W": "warn",
"D": "deny",
"F": "forbid",
}
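# Flag shapes the patterns above accept (illustrative examples):
#   --deny=clippy::unwrap_used  -> ("unwrap_used", "deny")
#   -Wclippy::todo              -> ("todo", "warn")
# Anything else, such as rustc lints without the clippy:: prefix, is skipped.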
def main() -> int:
parser = argparse.ArgumentParser(
description=(
"Verify that Bazel clippy flags in .bazelrc stay in sync with "
"codex-rs/Cargo.toml [workspace.lints.clippy]."
)
)
parser.add_argument(
"--cargo-toml",
type=Path,
default=DEFAULT_CARGO_TOML,
help="Path to the workspace Cargo.toml to inspect.",
)
parser.add_argument(
"--bazelrc",
type=Path,
default=DEFAULT_BAZELRC,
help="Path to the .bazelrc file to inspect.",
)
args = parser.parse_args()
cargo_toml = args.cargo_toml.resolve()
bazelrc = args.bazelrc.resolve()
cargo_lints = load_workspace_clippy_lints(cargo_toml)
bazel_lints = load_bazel_clippy_lints(bazelrc)
missing = sorted(cargo_lints.keys() - bazel_lints.keys())
extra = sorted(bazel_lints.keys() - cargo_lints.keys())
mismatched = sorted(
lint
for lint in cargo_lints.keys() & bazel_lints.keys()
if cargo_lints[lint] != bazel_lints[lint]
)
if missing or extra or mismatched:
print_sync_error(
cargo_toml=cargo_toml,
bazelrc=bazelrc,
cargo_lints=cargo_lints,
bazel_lints=bazel_lints,
missing=missing,
extra=extra,
mismatched=mismatched,
)
return 1
print(
"Bazel clippy flags in "
f"{display_path(bazelrc)} match "
f"{display_path(cargo_toml)} [workspace.lints.clippy]."
)
return 0
def load_workspace_clippy_lints(cargo_toml: Path) -> dict[str, str]:
workspace = tomllib.loads(cargo_toml.read_text())["workspace"]
clippy_lints = workspace["lints"]["clippy"]
parsed: dict[str, str] = {}
for lint, level in clippy_lints.items():
if not isinstance(level, str):
raise SystemExit(
f"expected string lint level for clippy::{lint} in {cargo_toml}, got {level!r}"
)
normalized = level.strip().lower()
if normalized not in VALID_LEVELS:
raise SystemExit(
f"unsupported lint level {level!r} for clippy::{lint} in {cargo_toml}"
)
parsed[lint] = normalized
return parsed
def load_bazel_clippy_lints(bazelrc: Path) -> dict[str, str]:
parsed: dict[str, str] = {}
line_numbers: dict[str, int] = {}
for lineno, line in enumerate(bazelrc.read_text().splitlines(), start=1):
if not line.startswith(BAZEL_CLIPPY_FLAG_PREFIX):
continue
flag = line.removeprefix(BAZEL_CLIPPY_FLAG_PREFIX).strip()
if flag in BAZEL_SPECIAL_FLAGS:
continue
parsed_flag = parse_bazel_lint_flag(flag)
if parsed_flag is None:
continue
lint, level = parsed_flag
if lint in parsed:
raise SystemExit(
f"duplicate Bazel clippy entry for clippy::{lint} at "
f"{bazelrc}:{line_numbers[lint]} and {bazelrc}:{lineno}"
)
parsed[lint] = level
line_numbers[lint] = lineno
return parsed
def parse_bazel_lint_flag(flag: str) -> tuple[str, str] | None:
long_match = LONG_FLAG_RE.match(flag)
if long_match:
return long_match["lint"], long_match["level"]
short_match = SHORT_FLAG_RE.match(flag)
if short_match:
return short_match["lint"], SHORT_LEVEL_NAMES[short_match["level"]]
return None
def print_sync_error(
*,
cargo_toml: Path,
bazelrc: Path,
cargo_lints: dict[str, str],
bazel_lints: dict[str, str],
missing: list[str],
extra: list[str],
mismatched: list[str],
) -> None:
cargo_toml_display = display_path(cargo_toml)
bazelrc_display = display_path(bazelrc)
example_manifest = find_workspace_lints_example_manifest()
print(
"ERROR: Bazel clippy flags are out of sync with Cargo workspace clippy lints.",
file=sys.stderr,
)
print(file=sys.stderr)
print(
f"Cargo defines the source of truth in {cargo_toml_display} "
"[workspace.lints.clippy].",
file=sys.stderr,
)
if example_manifest is not None:
print(
"Cargo applies those lint levels to member crates that opt into "
f"`[lints] workspace = true`, for example {example_manifest}.",
file=sys.stderr,
)
print(
"Bazel clippy does not ingest Cargo lint levels automatically, and "
"`clippy.toml` can configure lint behavior but cannot set allow/warn/deny/forbid.",
file=sys.stderr,
)
print(
f"Update {bazelrc_display} so its `build:clippy` "
"`clippy_flag` entries match Cargo.",
file=sys.stderr,
)
if missing:
print(file=sys.stderr)
print("Missing Bazel entries:", file=sys.stderr)
for lint in missing:
print(f" {render_bazelrc_line(lint, cargo_lints[lint])}", file=sys.stderr)
if mismatched:
print(file=sys.stderr)
print("Mismatched lint levels:", file=sys.stderr)
for lint in mismatched:
cargo_level = cargo_lints[lint]
bazel_level = bazel_lints[lint]
print(
f" clippy::{lint}: Cargo has {cargo_level}, Bazel has {bazel_level}",
file=sys.stderr,
)
print(
f" expected: {render_bazelrc_line(lint, cargo_level)}",
file=sys.stderr,
)
if extra:
print(file=sys.stderr)
print("Extra Bazel entries with no Cargo counterpart:", file=sys.stderr)
for lint in extra:
print(f" {render_bazelrc_line(lint, bazel_lints[lint])}", file=sys.stderr)
def render_bazelrc_line(lint: str, level: str) -> str:
return f"{BAZEL_CLIPPY_FLAG_PREFIX}--{level}=clippy::{lint}"
def display_path(path: Path) -> str:
try:
return str(path.relative_to(ROOT))
except ValueError:
return str(path)
def find_workspace_lints_example_manifest() -> str | None:
for cargo_toml in sorted((ROOT / "codex-rs").glob("**/Cargo.toml")):
if cargo_toml == DEFAULT_CARGO_TOML:
continue
data = tomllib.loads(cargo_toml.read_text())
if data.get("lints", {}).get("workspace") is True:
return str(cargo_toml.relative_to(ROOT))
return None
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,391 +0,0 @@
#!/usr/bin/env python3
"""Verify that codex-rs Cargo manifests follow workspace manifest policy.
Checks:
- Crates inherit `[workspace.package]` metadata.
- Crates opt into `[lints] workspace = true`.
- Crate names follow the codex-rs directory naming conventions.
- Workspace manifests do not introduce workspace crate feature toggles.
"""
from __future__ import annotations
import sys
import tomllib
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
CARGO_RS_ROOT = ROOT / "codex-rs"
WORKSPACE_PACKAGE_FIELDS = ("version", "edition", "license")
TOP_LEVEL_NAME_EXCEPTIONS = {
"windows-sandbox-rs": "codex-windows-sandbox",
}
UTILITY_NAME_EXCEPTIONS = {
"path-utils": "codex-utils-path",
}
MANIFEST_FEATURE_EXCEPTIONS = {
"codex-rs/code-mode/Cargo.toml": {"sandbox": ("v8/v8_enable_sandbox",)},
"codex-rs/v8-poc/Cargo.toml": {"sandbox": ("v8/v8_enable_sandbox",)},
}
OPTIONAL_DEPENDENCY_EXCEPTIONS = set()
INTERNAL_DEPENDENCY_FEATURE_EXCEPTIONS = {}
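# For reference, a member manifest that satisfies these checks looks roughly
# like this (an illustrative sketch, not a real crate in the repo):
#   [package]
#   name = "codex-example"
#   version.workspace = true
#   edition.workspace = true
#   license.workspace = true
#
#   [lints]
#   workspace = true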
def main() -> int:
internal_package_names = workspace_package_names()
used_manifest_feature_exceptions: set[str] = set()
used_optional_dependency_exceptions: set[tuple[str, str, str]] = set()
used_internal_dependency_feature_exceptions: set[tuple[str, str, str]] = set()
failures_by_path: dict[str, list[str]] = {}
for path in manifests_to_verify():
if errors := manifest_errors(
path,
internal_package_names,
used_manifest_feature_exceptions,
used_optional_dependency_exceptions,
used_internal_dependency_feature_exceptions,
):
failures_by_path[manifest_key(path)] = errors
add_unused_exception_errors(
failures_by_path,
used_manifest_feature_exceptions,
used_optional_dependency_exceptions,
used_internal_dependency_feature_exceptions,
)
if not failures_by_path:
return 0
print(
"Cargo manifests under codex-rs must inherit workspace package metadata, "
"opt into workspace lints, and avoid introducing new workspace crate "
"features."
)
print(
"Workspace crate features are disallowed because our Bazel build setup "
"does not honor them today, which can let issues hidden behind feature "
"gates go unnoticed, and because they add extra crate build "
"permutations we want to avoid."
)
print(
"Cargo only applies `codex-rs/Cargo.toml` `[workspace.lints.clippy]` "
"entries to a crate when that crate declares:"
)
print()
print("[lints]")
print("workspace = true")
print()
print(
"Without that opt-in, `cargo clippy` can miss violations that Bazel clippy "
"catches."
)
print()
print(
"Package-name checks apply to `codex-rs/<crate>/Cargo.toml` and "
"`codex-rs/utils/<crate>/Cargo.toml`."
)
print(
"Workspace crate features are forbidden; add a targeted exception here "
"only if there is a deliberate temporary migration in flight."
)
print()
for path in sorted(failures_by_path):
errors = failures_by_path[path]
print(f"{path}:")
for error in errors:
print(f" - {error}")
return 1
def manifest_errors(
path: Path,
internal_package_names: set[str],
used_manifest_feature_exceptions: set[str],
used_optional_dependency_exceptions: set[tuple[str, str, str]],
used_internal_dependency_feature_exceptions: set[tuple[str, str, str]],
) -> list[str]:
manifest = load_manifest(path)
package = manifest.get("package")
if not isinstance(package, dict) and path != CARGO_RS_ROOT / "Cargo.toml":
return []
errors = []
if isinstance(package, dict):
for field in WORKSPACE_PACKAGE_FIELDS:
if not is_workspace_reference(package.get(field)):
errors.append(f"set `{field}.workspace = true` in `[package]`")
lints = manifest.get("lints")
if not (isinstance(lints, dict) and lints.get("workspace") is True):
errors.append("add `[lints]` with `workspace = true`")
expected_name = expected_package_name(path)
if expected_name is not None:
actual_name = package.get("name")
if actual_name != expected_name:
errors.append(
f"set `[package].name` to `{expected_name}` (found `{actual_name}`)"
)
path_key = manifest_key(path)
features = manifest.get("features")
if features is not None:
normalized_features = normalize_feature_mapping(features)
expected_features = MANIFEST_FEATURE_EXCEPTIONS.get(path_key)
if expected_features is None:
errors.append(
"remove `[features]`; new workspace crate features are not allowed"
)
else:
used_manifest_feature_exceptions.add(path_key)
if normalized_features != expected_features:
errors.append(
"limit `[features]` to the existing exception list while "
"workspace crate features are being removed "
f"(expected {render_feature_mapping(expected_features)})"
)
for section_name, dependencies in dependency_sections(manifest):
for dependency_name, dependency in dependencies.items():
if not isinstance(dependency, dict):
continue
if dependency.get("optional") is True:
exception_key = (path_key, section_name, dependency_name)
if exception_key in OPTIONAL_DEPENDENCY_EXCEPTIONS:
used_optional_dependency_exceptions.add(exception_key)
else:
errors.append(
"remove `optional = true` from "
f"`{dependency_entry_label(section_name, dependency_name)}`; "
"new optional dependencies are not allowed because they "
"create crate features"
)
if not is_internal_dependency(path, dependency_name, dependency, internal_package_names):
continue
dependency_features = dependency.get("features")
if dependency_features is not None:
normalized_dependency_features = normalize_string_list(
dependency_features
)
exception_key = (path_key, section_name, dependency_name)
expected_dependency_features = (
INTERNAL_DEPENDENCY_FEATURE_EXCEPTIONS.get(exception_key)
)
if expected_dependency_features is None:
errors.append(
"remove `features = [...]` from workspace dependency "
f"`{dependency_entry_label(section_name, dependency_name)}`; "
"new workspace crate feature activations are not allowed"
)
else:
used_internal_dependency_feature_exceptions.add(exception_key)
if normalized_dependency_features != expected_dependency_features:
errors.append(
"limit workspace dependency features on "
f"`{dependency_entry_label(section_name, dependency_name)}` "
"to the existing exception list while workspace crate "
"features are being removed "
f"(expected {render_string_list(expected_dependency_features)})"
)
if dependency.get("default-features") is False:
errors.append(
"remove `default-features = false` from workspace dependency "
f"`{dependency_entry_label(section_name, dependency_name)}`; "
"new workspace crate feature toggles are not allowed"
)
return errors
def expected_package_name(path: Path) -> str | None:
parts = path.relative_to(CARGO_RS_ROOT).parts
if len(parts) == 2 and parts[1] == "Cargo.toml":
directory = parts[0]
return TOP_LEVEL_NAME_EXCEPTIONS.get(
directory,
directory if directory.startswith("codex-") else f"codex-{directory}",
)
if len(parts) == 3 and parts[0] == "utils" and parts[2] == "Cargo.toml":
directory = parts[1]
return UTILITY_NAME_EXCEPTIONS.get(directory, f"codex-utils-{directory}")
return None
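# Illustrative mappings for hypothetical manifest paths:
#   codex-rs/core/Cargo.toml               -> codex-core
#   codex-rs/utils/string/Cargo.toml       -> codex-utils-string
#   codex-rs/windows-sandbox-rs/Cargo.toml -> codex-windows-sandbox (exception)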
def is_workspace_reference(value: object) -> bool:
return isinstance(value, dict) and value.get("workspace") is True
def manifest_key(path: Path) -> str:
return str(path.relative_to(ROOT))
def normalize_feature_mapping(value: object) -> dict[str, tuple[str, ...]] | None:
if not isinstance(value, dict):
return None
normalized = {}
for key, features in value.items():
if not isinstance(key, str):
return None
normalized_features = normalize_string_list(features)
if normalized_features is None:
return None
normalized[key] = normalized_features
return normalized
def normalize_string_list(value: object) -> tuple[str, ...] | None:
if not isinstance(value, list) or not all(isinstance(item, str) for item in value):
return None
return tuple(value)
def render_feature_mapping(features: dict[str, tuple[str, ...]]) -> str:
entries = [
f"{name} = {render_string_list(items)}" for name, items in features.items()
]
return ", ".join(entries)
def render_string_list(items: tuple[str, ...]) -> str:
return "[" + ", ".join(f'"{item}"' for item in items) + "]"
def dependency_sections(manifest: dict) -> list[tuple[str, dict]]:
sections = []
for section_name in ("dependencies", "dev-dependencies", "build-dependencies"):
dependencies = manifest.get(section_name)
if isinstance(dependencies, dict):
sections.append((section_name, dependencies))
workspace = manifest.get("workspace")
if isinstance(workspace, dict):
workspace_dependencies = workspace.get("dependencies")
if isinstance(workspace_dependencies, dict):
sections.append(("workspace.dependencies", workspace_dependencies))
target = manifest.get("target")
if not isinstance(target, dict):
return sections
for target_name, tables in target.items():
if not isinstance(tables, dict):
continue
for section_name in ("dependencies", "dev-dependencies", "build-dependencies"):
dependencies = tables.get(section_name)
if isinstance(dependencies, dict):
sections.append((f"target.{target_name}.{section_name}", dependencies))
return sections
def dependency_entry_label(section_name: str, dependency_name: str) -> str:
return f"[{section_name}].{dependency_name}"
def is_internal_dependency(
manifest_path: Path,
dependency_name: str,
dependency: dict,
internal_package_names: set[str],
) -> bool:
package_name = dependency.get("package", dependency_name)
if isinstance(package_name, str) and package_name in internal_package_names:
return True
dependency_path = dependency.get("path")
if not isinstance(dependency_path, str):
return False
resolved_dependency_path = (manifest_path.parent / dependency_path).resolve()
try:
resolved_dependency_path.relative_to(CARGO_RS_ROOT)
except ValueError:
return False
return True
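# Illustrative: a dependency declared as `{ path = "../core" }` in a manifest
# under codex-rs/ resolves inside the workspace and counts as internal, while
# a path dependency that escapes codex-rs/ does not.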
def add_unused_exception_errors(
failures_by_path: dict[str, list[str]],
used_manifest_feature_exceptions: set[str],
used_optional_dependency_exceptions: set[tuple[str, str, str]],
used_internal_dependency_feature_exceptions: set[tuple[str, str, str]],
) -> None:
for path_key in sorted(
set(MANIFEST_FEATURE_EXCEPTIONS) - used_manifest_feature_exceptions
):
add_failure(
failures_by_path,
path_key,
"remove the stale `[features]` exception from "
"`MANIFEST_FEATURE_EXCEPTIONS`",
)
for path_key, section_name, dependency_name in sorted(
OPTIONAL_DEPENDENCY_EXCEPTIONS - used_optional_dependency_exceptions
):
add_failure(
failures_by_path,
path_key,
"remove the stale optional-dependency exception for "
f"`{dependency_entry_label(section_name, dependency_name)}` from "
"`OPTIONAL_DEPENDENCY_EXCEPTIONS`",
)
for path_key, section_name, dependency_name in sorted(
set(INTERNAL_DEPENDENCY_FEATURE_EXCEPTIONS)
- used_internal_dependency_feature_exceptions
):
add_failure(
failures_by_path,
path_key,
"remove the stale internal dependency feature exception for "
f"`{dependency_entry_label(section_name, dependency_name)}` from "
"`INTERNAL_DEPENDENCY_FEATURE_EXCEPTIONS`",
)
def add_failure(failures_by_path: dict[str, list[str]], path_key: str, error: str) -> None:
failures_by_path.setdefault(path_key, []).append(error)
def workspace_package_names() -> set[str]:
package_names = set()
for path in cargo_manifests():
manifest = load_manifest(path)
package = manifest.get("package")
if not isinstance(package, dict):
continue
package_name = package.get("name")
if isinstance(package_name, str):
package_names.add(package_name)
return package_names
def load_manifest(path: Path) -> dict:
return tomllib.loads(path.read_text())
def cargo_manifests() -> list[Path]:
return sorted(
path
for path in CARGO_RS_ROOT.rglob("Cargo.toml")
if path != CARGO_RS_ROOT / "Cargo.toml"
)
def manifests_to_verify() -> list[Path]:
return [CARGO_RS_ROOT / "Cargo.toml", *cargo_manifests()]
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,89 +0,0 @@
#!/usr/bin/env python3
"""Verify codex-tui does not depend on or import codex-core directly."""
from __future__ import annotations
import re
import sys
import tomllib
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
TUI_ROOT = ROOT / "codex-rs" / "tui"
TUI_MANIFEST = TUI_ROOT / "Cargo.toml"
FORBIDDEN_PACKAGE = "codex-core"
FORBIDDEN_SOURCE_PATTERNS = (
re.compile(r"\bcodex_core::"),
re.compile(r"\buse\s+codex_core\b"),
re.compile(r"\bextern\s+crate\s+codex_core\b"),
)
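# Lines these patterns are meant to flag (illustrative):
#   use codex_core::config::Config;
#   let cfg = codex_core::config::load();
#   extern crate codex_core;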
def main() -> int:
failures = []
failures.extend(manifest_failures())
failures.extend(source_failures())
if not failures:
return 0
print("codex-tui must not depend on or import codex-core directly.")
print(
"Use the app-server protocol/client boundary instead; temporary embedded "
"startup gaps belong behind codex_app_server_client::legacy_core."
)
print()
for failure in failures:
print(f"- {failure}")
return 1
def manifest_failures() -> list[str]:
manifest = tomllib.loads(TUI_MANIFEST.read_text())
failures = []
for section_name, dependencies in dependency_sections(manifest):
if FORBIDDEN_PACKAGE in dependencies:
failures.append(
f"{relative_path(TUI_MANIFEST)} declares `{FORBIDDEN_PACKAGE}` "
f"in `[{section_name}]`"
)
return failures
def dependency_sections(manifest: dict) -> list[tuple[str, dict]]:
sections: list[tuple[str, dict]] = []
for section_name in ("dependencies", "dev-dependencies", "build-dependencies"):
dependencies = manifest.get(section_name)
if isinstance(dependencies, dict):
sections.append((section_name, dependencies))
for target_name, target in manifest.get("target", {}).items():
if not isinstance(target, dict):
continue
for section_name in ("dependencies", "dev-dependencies", "build-dependencies"):
dependencies = target.get(section_name)
if isinstance(dependencies, dict):
sections.append((f'target.{target_name}.{section_name}', dependencies))
return sections
def source_failures() -> list[str]:
failures = []
for path in sorted(TUI_ROOT.glob("**/*.rs")):
text = path.read_text()
for line_number, line in enumerate(text.splitlines(), start=1):
if any(pattern.search(line) for pattern in FORBIDDEN_SOURCE_PATTERNS):
failures.append(f"{relative_path(path)}:{line_number} imports `codex_core`")
return failures
def relative_path(path: Path) -> str:
return str(path.relative_to(ROOT))
if __name__ == "__main__":
sys.exit(main())

View File

@@ -8,9 +8,25 @@ FROM ubuntu:24.04
RUN apt-get update && \
apt-get install -y --no-install-recommends \
curl git python3 ca-certificates && \
curl git python3 ca-certificates xz-utils && \
rm -rf /var/lib/apt/lists/*
COPY codex-rs/node-version.txt /tmp/node-version.txt
RUN set -eux; \
node_arch="$(dpkg --print-architecture)"; \
case "${node_arch}" in \
amd64) node_dist_arch="x64" ;; \
arm64) node_dist_arch="arm64" ;; \
*) echo "unsupported architecture: ${node_arch}"; exit 1 ;; \
esac; \
node_version="$(tr -d '[:space:]' </tmp/node-version.txt)"; \
curl -fsSLO "https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-${node_dist_arch}.tar.xz"; \
tar -xJf "node-v${node_version}-linux-${node_dist_arch}.tar.xz" -C /usr/local --strip-components=1; \
rm "node-v${node_version}-linux-${node_dist_arch}.tar.xz" /tmp/node-version.txt; \
node --version; \
npm --version
# Install dotslash.
RUN curl -LSfs "https://github.com/facebook/dotslash/releases/download/v0.5.8/dotslash-ubuntu-22.04.$(uname -m).tar.gz" | tar fxz - -C /usr/local/bin

View File

@@ -1,33 +0,0 @@
# Workflow Strategy
The workflows in this directory are split so that pull requests get fast, review-friendly signal while `main` still gets the full cross-platform verification pass.
## Pull Requests
- `bazel.yml` is the main pre-merge verification path for Rust code.
It runs Bazel `test` and Bazel `clippy` on the supported Bazel targets,
including the generated Rust test binaries needed to lint inline `#[cfg(test)]`
code.
- `rust-ci.yml` keeps the Cargo-native PR checks intentionally small:
- `cargo fmt --check`
- `cargo shear`
- `argument-comment-lint` on Linux, macOS, and Windows
- `tools/argument-comment-lint` package tests when the lint or its workflow wiring changes
## Post-Merge On `main`
- `bazel.yml` also runs on pushes to `main`.
This re-verifies the merged Bazel path and helps keep the BuildBuddy caches warm.
- `rust-ci-full.yml` is the full Cargo-native verification workflow.
It keeps the heavier checks off the PR path while still validating them after merge:
- the full Cargo `clippy` matrix
- the full Cargo `nextest` matrix
- release-profile Cargo builds
- cross-platform `argument-comment-lint`
- Linux remote-env tests
## Rule Of Thumb
- If a build/test/clippy check can be expressed in Bazel, prefer putting the PR-time version in `bazel.yml`.
- Keep `rust-ci.yml` fast enough that it usually does not dominate PR latency.
- Reserve `rust-ci-full.yml` for heavyweight Cargo-native coverage that Bazel does not replace yet.

View File

@@ -1,4 +1,4 @@
name: Bazel
name: Bazel (experimental)
# Note this workflow was originally derived from:
# https://github.com/cerisier/toolchains_llvm_bootstrapped/blob/main/.github/workflows/ci.yaml
@@ -17,11 +17,6 @@ concurrency:
cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
test:
# PRs use a fast Windows cross-compiled test leg for pre-merge signal.
# Post-merge pushes to main also run the native Windows test job below for
# broader Windows signal without putting PR latency back on the critical
# path. Cargo CI owns V8/code-mode test coverage for now.
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
@@ -44,376 +39,183 @@ jobs:
# - os: ubuntu-24.04-arm
# target: aarch64-unknown-linux-gnu
# Windows fast path: build the windows-gnullvm binaries with Linux
# RBE, then run the resulting Windows tests on the Windows runner.
# Cargo CI preserves V8/code-mode coverage while Bazel CI keeps broad
# non-code-mode signal.
- os: windows-latest
target: x86_64-pc-windows-gnullvm
# TODO: Enable Windows once we fix the toolchain issues there.
#- os: windows-latest
# target: x86_64-pc-windows-gnullvm
runs-on: ${{ matrix.os }}
# Configure a human readable name for each job
name: Bazel test on ${{ matrix.os }} for ${{ matrix.target }}
name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- uses: actions/checkout@v6
- name: Check rusty_v8 MODULE.bazel checksums
if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu'
shell: bash
run: |
python3 .github/scripts/rusty_v8_bazel.py check-module-bazel
python3 -m unittest discover -s .github/scripts -p test_rusty_v8_bazel.py
- name: Prepare Bazel CI
id: prepare_bazel
uses: ./.github/actions/prepare-bazel-ci
- name: Set up Node.js for js_repl tests
uses: actions/setup-node@v6
with:
target: ${{ matrix.target }}
cache-scope: bazel-${{ github.job }}
install-test-prereqs: "true"
node-version-file: codex-rs/node-version.txt
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- name: Make DotSlash available in PATH (Unix)
if: runner.os != 'Windows'
run: cp "$(which dotslash)" /usr/local/bin
- name: Make DotSlash available in PATH (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: Copy-Item (Get-Command dotslash).Source -Destination "$env:LOCALAPPDATA\Microsoft\WindowsApps\dotslash.exe"
# Install Bazel via Bazelisk
- name: Set up Bazel
uses: bazelbuild/setup-bazelisk@v3
- name: Check MODULE.bazel.lock is up to date
if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu'
shell: bash
run: ./scripts/check-module-bazel-lock.sh
# TODO(mbolin): Bring this back once we have caching working. Currently,
# we never seem to get a cache hit but we still end up paying the cost of
# uploading at the end of the build, which takes over a minute!
#
# Cache build and external artifacts so that the next ci build is incremental.
# Because github action caches cannot be updated after a build, we need to
# store the contents of each build in a unique cache key, then fall back to loading
# it on the next ci run. We use hashFiles(...) in the key and restore-keys- with
# the prefix to load the most recent cache for the branch on a cache miss. You
# should customize the contents of hashFiles to capture any bazel input sources,
# although this doesn't need to be perfect. If none of the input sources change
# then a cache hit will load an existing cache and bazel won't have to do any work.
# In the case of a cache miss, you want the fallback cache to contain most of the
# previously built artifacts to minimize build time. The more precise you are with
# hashFiles sources the less work bazel will have to do.
# - name: Mount bazel caches
# uses: actions/cache@v5
# with:
# path: |
# ~/.cache/bazel-repo-cache
# ~/.cache/bazel-repo-contents-cache
# key: bazel-cache-${{ matrix.os }}-${{ hashFiles('**/BUILD.bazel', '**/*.bzl', 'MODULE.bazel') }}
# restore-keys: |
# bazel-cache-${{ matrix.os }}
- name: Configure Bazel startup args (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
# Use a very short path to reduce argv/path length issues.
"BAZEL_STARTUP_ARGS=--output_user_root=C:\" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: bazel test //...
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
bazel_targets=(
//...
# Keep standalone V8 library targets out of the ordinary Bazel CI
# path. V8 consumers under `//codex-rs/...` still participate
# transitively through `//...`.
-//third_party/v8:all
# V8-backed code-mode tests are covered by Cargo CI. Bazel CI
# cross-compiles in several legs, and those tests are not stable in
# that setup yet.
-//codex-rs/code-mode:code-mode-unit-tests
-//codex-rs/v8-poc:v8-poc-unit-tests
)
set -o pipefail
bazel_wrapper_args=(
--print-failed-action-summary
--print-failed-test-logs
)
bazel_test_args=(
test
--test_tag_filters=-argument-comment-lint
--test_verbose_timeout_warnings
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
)
if [[ "${RUNNER_OS}" == "Windows" ]]; then
bazel_wrapper_args+=(
--windows-cross-compile
--remote-download-toplevel
bazel_console_log="$(mktemp)"
print_failed_bazel_test_logs() {
local console_log="$1"
local testlogs_dir
testlogs_dir="$(bazel $BAZEL_STARTUP_ARGS info bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
local failed_targets=()
while IFS= read -r target; do
failed_targets+=("$target")
done < <(
grep -E '^FAIL: //' "$console_log" \
| sed -E 's#^FAIL: (//[^ ]+).*#\1#' \
| sort -u
)
fi
./.github/scripts/run-bazel-ci.sh \
"${bazel_wrapper_args[@]}" \
-- \
"${bazel_test_args[@]}" \
-- \
"${bazel_targets[@]}"
- name: Upload Bazel execution logs
if: always() && !cancelled()
continue-on-error: true
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: bazel-execution-logs-test-${{ matrix.target }}
path: ${{ runner.temp }}/bazel-execution-logs
if-no-files-found: ignore
# Save the job-scoped Bazel repository cache after cache misses. Keep the
# upload non-fatal so cache service issues never fail the job itself.
- name: Save bazel repository cache
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
test-windows-native-main:
# Native Windows Bazel tests are slower and frequently approach the
# 30-minute PR budget. Run this only for post-merge commits to main and give
# it a larger timeout.
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
timeout-minutes: 40
runs-on: windows-latest
name: Bazel test on windows-latest for x86_64-pc-windows-gnullvm (native main)
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- name: Prepare Bazel CI
id: prepare_bazel
uses: ./.github/actions/prepare-bazel-ci
with:
target: x86_64-pc-windows-gnullvm
cache-scope: bazel-${{ github.job }}
install-test-prereqs: "true"
- name: bazel test //...
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
bazel_targets=(
//...
# Keep standalone V8 library targets out of the ordinary Bazel CI
# path. V8 consumers under `//codex-rs/...` still participate
# transitively through `//...`.
-//third_party/v8:all
# Keep this aligned with the main Bazel job. The native Windows
# job preserves broad post-merge coverage, but code-mode/V8 tests
# are covered by Cargo CI rather than Bazel for now.
-//codex-rs/code-mode:code-mode-unit-tests
-//codex-rs/v8-poc:v8-poc-unit-tests
)
bazel_test_args=(
test
--test_tag_filters=-argument-comment-lint
--test_verbose_timeout_warnings
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
--build_metadata=TAG_windows_native_main=true
)
./.github/scripts/run-bazel-ci.sh \
--print-failed-action-summary \
--print-failed-test-logs \
-- \
"${bazel_test_args[@]}" \
-- \
"${bazel_targets[@]}"
- name: Upload Bazel execution logs
if: always() && !cancelled()
continue-on-error: true
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: bazel-execution-logs-test-windows-native-x86_64-pc-windows-gnullvm
path: ${{ runner.temp }}/bazel-execution-logs
if-no-files-found: ignore
# Save the job-scoped Bazel repository cache after cache misses. Keep the
# upload non-fatal so cache service issues never fail the job itself.
- name: Save bazel repository cache
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
clippy:
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
# Keep Linux lint coverage on x64 and add the arm64 macOS path that
# the Bazel test job already exercises. Add Windows gnullvm as well
# so PRs get Bazel-native lint signal on the same Windows toolchain
# that the Bazel test job uses.
- os: ubuntu-24.04
target: x86_64-unknown-linux-gnu
- os: macos-15-xlarge
target: aarch64-apple-darwin
- os: windows-latest
target: x86_64-pc-windows-gnullvm
runs-on: ${{ matrix.os }}
name: Bazel clippy on ${{ matrix.os }} for ${{ matrix.target }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- name: Prepare Bazel CI
id: prepare_bazel
uses: ./.github/actions/prepare-bazel-ci
with:
target: ${{ matrix.target }}
cache-scope: bazel-${{ github.job }}
- name: bazel build --config=clippy lint targets
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
bazel_clippy_args=(
--config=clippy
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
--build_metadata=TAG_job=clippy
)
bazel_wrapper_args=()
bazel_target_list_args=()
if [[ "${RUNNER_OS}" == "Windows" ]]; then
# Keep this aligned with the fast Windows Bazel test job: use
# Linux RBE for clippy build actions while targeting Windows
# gnullvm. Fork/community PRs without the BuildBuddy secret fall
# back inside `run-bazel-ci.sh` to the previous local Windows MSVC
# host-platform shape.
bazel_wrapper_args+=(--windows-cross-compile)
bazel_target_list_args+=(--windows-cross-compile)
if [[ -z "${BUILDBUDDY_API_KEY:-}" ]]; then
# The fork fallback can see incompatible explicit Windows-cross
# internal test binaries in the generated target list. Preserve
# the old local-fallback behavior there.
bazel_clippy_args+=(--skip_incompatible_explicit_targets)
if [[ ${#failed_targets[@]} -eq 0 ]]; then
echo "No failed Bazel test targets were found in console output."
return
fi
fi
bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh "${bazel_target_list_args[@]}")"
bazel_targets=()
while IFS= read -r target; do
bazel_targets+=("${target}")
done <<< "${bazel_target_lines}"
for target in "${failed_targets[@]}"; do
local rel_path="${target#//}"
rel_path="${rel_path/:/\/}"
local test_log="${testlogs_dir}/${rel_path}/test.log"
./.github/scripts/run-bazel-ci.sh \
--print-failed-action-summary \
"${bazel_wrapper_args[@]}" \
-- \
build \
"${bazel_clippy_args[@]}" \
-- \
"${bazel_targets[@]}"
echo "::group::Bazel test log tail for ${target}"
if [[ -f "$test_log" ]]; then
tail -n 200 "$test_log"
else
echo "Missing test log: $test_log"
fi
echo "::endgroup::"
done
}
- name: Upload Bazel execution logs
if: always() && !cancelled()
continue-on-error: true
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: bazel-execution-logs-clippy-${{ matrix.target }}
path: ${{ runner.temp }}/bazel-execution-logs
if-no-files-found: ignore
# Save the job-scoped Bazel repository cache after cache misses. Keep the
# upload non-fatal so cache service issues never fail the job itself.
- name: Save bazel repository cache
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
verify-release-build:
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-24.04
target: x86_64-unknown-linux-gnu
- os: macos-15-xlarge
target: aarch64-apple-darwin
- os: windows-latest
target: x86_64-pc-windows-gnullvm
runs-on: ${{ matrix.os }}
name: Verify release build on ${{ matrix.os }} for ${{ matrix.target }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- name: Prepare Bazel CI
id: prepare_bazel
uses: ./.github/actions/prepare-bazel-ci
with:
target: ${{ matrix.target }}
cache-scope: bazel-${{ github.job }}
- name: bazel build verify-release-build targets
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
# This job exists to compile Rust code behind
# `cfg(not(debug_assertions))` so PR CI catches failures that would
# otherwise show up only in a release build. We do not need the full
# optimizer and debug-info work that normally comes with a release
# build to get that signal, so keep Bazel in `fastbuild` and disable
# Rust debug assertions explicitly.
bazel_wrapper_args=()
if [[ "${RUNNER_OS}" == "Windows" ]]; then
# This is build-only signal, so use the same Linux-RBE
# cross-compile path as the fast Windows test and clippy jobs.
# Fork/community PRs without the BuildBuddy secret fall back
# inside `run-bazel-ci.sh` to the previous local Windows MSVC
# host-platform shape.
bazel_wrapper_args+=(--windows-cross-compile)
fi
bazel_build_args=(
--compilation_mode=fastbuild
--@rules_rust//rust/settings:extra_rustc_flag=-Cdebug-assertions=no
--@rules_rust//rust/settings:extra_exec_rustc_flag=-Cdebug-assertions=no
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
--build_metadata=TAG_job=verify-release-build
--build_metadata=TAG_rust_debug_assertions=off
bazel_args=(
test
//...
--test_verbose_timeout_warnings
--build_metadata=REPO_URL=https://github.com/openai/codex.git
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
--build_metadata=ROLE=CI
--build_metadata=VISIBILITY=PUBLIC
)
bazel_target_lines="$(bash ./scripts/list-bazel-release-targets.sh)"
bazel_targets=()
while IFS= read -r target; do
bazel_targets+=("${target}")
done <<< "${bazel_target_lines}"
if [[ "${RUNNER_OS:-}" != "Windows" ]]; then
# Bazel test sandboxes on macOS may resolve an older Homebrew `node`
# before the `actions/setup-node` runtime on PATH.
node_bin="$(which node)"
bazel_args+=("--test_env=CODEX_JS_REPL_NODE_PATH=${node_bin}")
fi
./.github/scripts/run-bazel-ci.sh \
"${bazel_wrapper_args[@]}" \
-- \
build \
"${bazel_build_args[@]}" \
-- \
"${bazel_targets[@]}"
if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
echo "BuildBuddy API key is available; using remote Bazel configuration."
# Work around Bazel 9 remote repo contents cache / overlay materialization failures
# seen in CI (for example "is not a symlink" or permission errors while
# materializing external repos such as rules_perl). We still use BuildBuddy for
# remote execution/cache; this only disables the startup-level repo contents cache.
set +e
bazel $BAZEL_STARTUP_ARGS \
--noexperimental_remote_repo_contents_cache \
--bazelrc=.github/workflows/ci.bazelrc \
"${bazel_args[@]}" \
"--remote_header=x-buildbuddy-api-key=$BUILDBUDDY_API_KEY" \
2>&1 | tee "$bazel_console_log"
bazel_status=${PIPESTATUS[0]}
set -e
else
echo "BuildBuddy API key is not available; using local Bazel configuration."
# Keep fork/community PRs on Bazel but disable remote services that are
# configured in .bazelrc and require auth.
#
# Flag docs:
# - Command-line reference: https://bazel.build/reference/command-line-reference
# - Remote caching overview: https://bazel.build/remote/caching
# - Remote execution overview: https://bazel.build/remote/rbe
# - Build Event Protocol overview: https://bazel.build/remote/bep
#
# --noexperimental_remote_repo_contents_cache:
# disable remote repo contents cache enabled in .bazelrc startup options.
# https://bazel.build/reference/command-line-reference#startup_options-flag--experimental_remote_repo_contents_cache
# --remote_cache= and --remote_executor=:
# clear remote cache/execution endpoints configured in .bazelrc.
# https://bazel.build/reference/command-line-reference#common_options-flag--remote_cache
# https://bazel.build/reference/command-line-reference#common_options-flag--remote_executor
set +e
bazel $BAZEL_STARTUP_ARGS \
--noexperimental_remote_repo_contents_cache \
"${bazel_args[@]}" \
--remote_cache= \
--remote_executor= \
2>&1 | tee "$bazel_console_log"
bazel_status=${PIPESTATUS[0]}
set -e
fi
- name: Verify Bazel builds bwrap
if: runner.os == 'Linux'
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
./.github/scripts/run-bazel-ci.sh \
--remote-download-toplevel \
--print-failed-action-summary \
-- \
build \
--build_metadata=COMMIT_SHA=${GITHUB_SHA} \
--build_metadata=TAG_job=verify-bwrap \
-- \
//codex-rs/bwrap:bwrap
- name: Upload Bazel execution logs
if: always() && !cancelled()
continue-on-error: true
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: bazel-execution-logs-verify-release-build-${{ matrix.target }}
path: ${{ runner.temp }}/bazel-execution-logs
if-no-files-found: ignore
# Save the job-scoped Bazel repository cache after cache misses. Keep the
# upload non-fatal so cache service issues never fail the job itself.
- name: Save bazel repository cache
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
if [[ ${bazel_status:-0} -ne 0 ]]; then
print_failed_bazel_test_logs "$bazel_console_log"
exit "$bazel_status"
fi

View File

@@ -8,27 +8,22 @@ jobs:
name: Blob size policy
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@v6
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
fetch-depth: 0
persist-credentials: false
- name: Determine PR comparison range
id: range
shell: bash
run: |
set -euo pipefail
echo "base=${{ github.event.pull_request.base.sha }}" >> "$GITHUB_OUTPUT"
echo "head=${{ github.event.pull_request.head.sha }}" >> "$GITHUB_OUTPUT"
echo "base=$(git rev-parse HEAD^1)" >> "$GITHUB_OUTPUT"
echo "head=$(git rev-parse HEAD^2)" >> "$GITHUB_OUTPUT"
- name: Check changed blob sizes
env:
BASE_SHA: ${{ steps.range.outputs.base }}
HEAD_SHA: ${{ steps.range.outputs.head }}
run: |
python3 scripts/check_blob_size.py \
--base "$BASE_SHA" \
--head "$HEAD_SHA" \
--base "${{ steps.range.outputs.base }}" \
--head "${{ steps.range.outputs.head }}" \
--max-bytes 512000 \
--allowlist .github/blob-size-allowlist.txt

View File

@@ -14,16 +14,13 @@ jobs:
working-directory: ./codex-rs
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
uses: actions/checkout@v6
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
uses: dtolnay/rust-toolchain@stable
- name: Run cargo-deny
uses: EmbarkStudios/cargo-deny-action@82eb9f621fbc699dd0918f3ea06864c14cc84246 # v2
uses: EmbarkStudios/cargo-deny-action@v2
with:
rust-version: 1.93.0
rust-version: stable
manifest-path: ./codex-rs/Cargo.toml

27
.github/workflows/ci.bazelrc vendored Normal file
View File

@@ -0,0 +1,27 @@
common --remote_download_minimal
common --keep_going
common --verbose_failures
# Disable disk cache since we have a remote one and aren't using persistent workers.
common --disk_cache=
# Rearrange caches on Windows so they're on the same volume as the checkout.
common:windows --repo_contents_cache=D:/a/.cache/bazel-repo-contents-cache
common:windows --repository_cache=D:/a/.cache/bazel-repo-cache
# We prefer to run the build actions entirely remotely so we can dial up the concurrency.
# We have platform-specific tests, so we want to execute the tests on all platforms using the strongest sandboxing available on each platform.
# On Linux, we can do a full remote build/test by targeting the right (x86/arm) runners, so we have coverage of both.
# Linux crossbuilds don't work until we untangle the libc constraint mess.
common:linux --config=remote
common:linux --strategy=remote
common:linux --platforms=//:rbe
# On macOS, we can run all the build actions remotely but run test actions locally.
common:macos --config=remote
common:macos --strategy=remote
common:macos --strategy=TestRunner=darwin-sandbox,local
# On Windows we cannot cross-build the tests, so we run them locally, due to
# what appears to be a Bazel bug (Windows vs. Unix path confusion)

View File

@@ -12,27 +12,15 @@ jobs:
NODE_OPTIONS: --max-old-space-size=4096
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- name: Verify codex-rs Cargo manifests inherit workspace settings
run: python3 .github/scripts/verify_cargo_workspace_manifests.py
- name: Verify codex-tui does not import codex-core directly
run: python3 .github/scripts/verify_tui_core_boundary.py
- name: Verify Bazel clippy flags match Cargo workspace lints
run: python3 .github/scripts/verify_bazel_clippy_lints.py
uses: actions/checkout@v6
- name: Setup pnpm
uses: pnpm/action-setup@a8198c4bff370c8506180b035930dea56dbd5288 # v5
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
uses: actions/setup-node@v6
with:
node-version: 22
@@ -40,7 +28,7 @@ jobs:
run: pnpm install --frozen-lockfile
# stage_npm_packages.py requires DotSlash when staging releases.
- uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2
- uses: facebook/install-dotslash@v2
- name: Stage npm package
id: stage_npm_package
@@ -48,25 +36,18 @@ jobs:
GH_TOKEN: ${{ github.token }}
run: |
set -euo pipefail
# Use a recent successful rust-release run that published the full
# cross-platform native payload required by the npm package layout.
# Passing the workflow URL directly avoids relying on old rust-v*
# branches remaining discoverable via `gh run list --branch ...`.
CODEX_VERSION=0.125.0
WORKFLOW_URL="https://github.com/openai/codex/actions/runs/24901475298"
# Use a rust-release version that includes all native binaries.
CODEX_VERSION=0.74.0
OUTPUT_DIR="${RUNNER_TEMP}"
# This reused workflow predates the standalone bwrap artifact.
python3 ./scripts/stage_npm_packages.py \
--release-version "$CODEX_VERSION" \
--workflow-url "$WORKFLOW_URL" \
--package codex \
--allow-missing-native-component bwrap \
--output-dir "$OUTPUT_DIR"
PACK_OUTPUT="${OUTPUT_DIR}/codex-npm-${CODEX_VERSION}.tgz"
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
- name: Upload staged npm package artifact
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@v7
with:
name: codex-npm-staging
path: ${{ steps.stage_npm_package.outputs.pack_output }}
@@ -76,5 +57,10 @@ jobs:
- name: Check root README ToC
run: python3 scripts/readme_toc.py README.md
- name: Ensure codex-cli/README.md contains only ASCII and certain Unicode code points
run: ./scripts/asciicheck.py codex-cli/README.md
- name: Check codex-cli/README ToC
run: python3 scripts/readme_toc.py codex-cli/README.md
- name: Prettier (run `pnpm run format:fix` to fix)
run: pnpm run format


@@ -18,7 +18,7 @@ jobs:
if: ${{ github.repository_owner == 'openai' }}
runs-on: ubuntu-latest
steps:
- uses: contributor-assistant/github-action@ca4a40a7d1004f18d9960b404b97e5f30a505a08 # v2.6.1
- uses: contributor-assistant/github-action@v2.6.1
# Run on close only if the PR was merged. This will lock the PR to preserve
# the CLA agreement. We don't want to lock PRs that have been closed without
# merging because the contributor may want to respond with additional comments.


@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Close inactive PRs from contributors
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
uses: actions/github-script@v8
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |


@@ -18,12 +18,9 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
uses: actions/checkout@v6
- name: Annotate locations with typos
uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1.1.0
uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
- name: Codespell
uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 # v2.2
with:


@@ -19,9 +19,7 @@ jobs:
reason: ${{ steps.normalize-all.outputs.reason }}
has_matches: ${{ steps.normalize-all.outputs.has_matches }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: actions/checkout@v6
- name: Prepare Codex inputs
env:
@@ -63,7 +61,7 @@ jobs:
# .github/prompts/issue-deduplicator.txt file is obsolete and removed.
- id: codex-all
name: Find duplicates (pass 1, all issues)
uses: openai/codex-action@5c3f4ccdb2b8790f73d6b21751ac00e602aa0c02 # v1.7
uses: openai/codex-action@main
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
@@ -157,9 +155,7 @@ jobs:
reason: ${{ steps.normalize-open.outputs.reason }}
has_matches: ${{ steps.normalize-open.outputs.has_matches }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: actions/checkout@v6
- name: Prepare Codex inputs
env:
@@ -199,7 +195,7 @@ jobs:
- id: codex-open
name: Find duplicates (pass 2, open issues)
uses: openai/codex-action@5c3f4ccdb2b8790f73d6b21751ac00e602aa0c02 # v1.7
uses: openai/codex-action@main
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
@@ -346,7 +342,7 @@ jobs:
issues: write
steps:
- name: Comment on issue
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
uses: actions/github-script@v8
env:
CODEX_OUTPUT: ${{ needs.select-final.outputs.codex_output }}
with:
@@ -400,7 +396,6 @@ jobs:
env:
GH_TOKEN: ${{ github.token }}
GH_REPO: ${{ github.repository }}
ISSUE_NUMBER: ${{ github.event.issue.number }}
run: |
gh issue edit "$ISSUE_NUMBER" --remove-label codex-deduplicate || true
gh issue edit "${{ github.event.issue.number }}" --remove-label codex-deduplicate || true
echo "Attempted to remove label: codex-deduplicate"


@@ -17,12 +17,10 @@ jobs:
outputs:
codex_output: ${{ steps.codex.outputs.final-message }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: actions/checkout@v6
- id: codex
uses: openai/codex-action@5c3f4ccdb2b8790f73d6b21751ac00e602aa0c02 # v1.7
uses: openai/codex-action@main
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
@@ -46,7 +44,6 @@ jobs:
6. iOS — Issues with the Codex iOS app.
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
- For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior. Use "memory" for agentic memory storage/retrieval and "performance" for high process memory utilization or memory leaks.
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
2. mcp — Topics involving Model Context Protocol servers/clients.
3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
@@ -55,30 +52,13 @@ jobs:
6. code-review — Issues related to the code review feature or functionality.
7. safety-check - Issues related to cyber risk detection or trusted access verification.
8. auth - Problems related to authentication, login, or access tokens.
9. exec - Problems related to the "codex exec" command or functionality.
10. hooks - Problems related to event hooks
11. context - Problems related to compaction, context windows, or available context reporting.
12. skills - Problems related to skills or plugins
13. custom-model - Problems that involve using custom model providers, local models, or OSS models.
14. rate-limits - Problems related to token limits, rate limits, or token usage reporting.
15. sandbox - Issues related to local sandbox environments or tool call approvals to override sandbox restrictions.
16. tool-calls - Problems related to specific tool call invocations including unexpected errors, failures, or hangs.
17. TUI - Problems with the terminal user interface (TUI) including keyboard shortcuts, copy & pasting, menus, or screen update issues.
18. app-server - Issues involving the app-server protocol or interfaces, including SDK/API payloads, thread/* and turn/* RPCs, app-server launch behavior, external app/controller bridges, and app-server protocol/schema behavior.
19. connectivity - Network connectivity or endpoint issues, including reconnecting messages, stream dropped/disconnected errors, websocket/SSE/transport failures, timeout/network/VPN/proxy/API endpoint failures, and related retry behavior.
20. subagent - Issues involving subagents, sub-agents, or multi-agent behavior, including spawn_agent, wait_agent, close_agent, worker/explorer roles, delegation, agent teams, lifecycle, model/config inheritance, quotas, and orchestration.
21. session - Issues involving session or thread management, including resume, fork, archive, rename/title, thread history, rollout persistence, compaction, checkpoints, retention, and cross-session state.
22. config - Issues involving config.toml, config keys, config key merging, config updates, profiles, hooks config, project config, agent role TOMLs, instruction/personality config, and config schema behavior.
23. plan - Issues involving plan mode, planning workflows, or plan-specific tools/behavior.
24. computer-use - Issues involving agentic computer use or SkyComputerUseService.
25. browser - Issues involving agentic browser use, IAB, or the built-in browser within the Codex app.
26. memory - Issues involving agentic memory storage and retrieval.
27. imagen - Issues involving image generation.
28. remote - Issues involving remote access, remote control, or SSH.
29. performance - Issues involving slow, laggy performance, high memory utilization, or memory leaks.
30. automations - Issues involving scheduled automation tasks or heartbeats.
31. pets - Issues involving pets avatars and animations.
32. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, plan, computer-use, browser, memory, imagen, remote, performance, automations, or pets.
9. codex-exec - Problems related to the "codex exec" command or functionality.
10. context-management - Problems related to compaction, context windows, or available context reporting.
11. custom-model - Problems that involve using custom model providers, local models, or OSS models.
12. rate-limits - Problems related to token limits, rate limits, or token usage reporting.
13. sandbox - Issues related to local sandbox environments or tool call approvals to override sandbox restrictions.
14. tool-calls - Problems related to specific tool call invocations including unexpected errors, failures, or hangs.
15. TUI - Problems with the terminal user interface (TUI) including keyboard shortcuts, copy & pasting, menus, or screen update issues.
Issue number: ${{ github.event.issue.number }}


@@ -1,788 +0,0 @@
name: rust-ci-full
on:
push:
branches:
- main
- "**full-ci**"
workflow_dispatch:
# CI builds in debug (dev) for faster signal.
env:
# Cargo's libgit2 transport has been flaky on macOS when fetching git
# dependencies with nested submodules. Use the system git CLI, which has
# better network/proxy behavior and matches Cargo's own suggested fallback.
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
jobs:
# --- CI that doesn't need specific targets ---------------------------------
general:
name: Format / etc
runs-on: ubuntu-24.04
defaults:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
with:
components: rustfmt
- name: cargo fmt
run: cargo fmt -- --config imports_granularity=Item --check
cargo_shear:
name: cargo shear
runs-on: ubuntu-24.04
defaults:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with:
tool: cargo-shear@1.11.2
- name: cargo shear
run: cargo shear --deny-warnings
argument_comment_lint_package:
name: Argument comment lint package
runs-on: ubuntu-24.04
env:
CARGO_DYLINT_VERSION: 5.0.0
DYLINT_LINK_VERSION: 5.0.0
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
with:
toolchain: nightly-2025-09-18
components: llvm-tools-preview, rustc-dev, rust-src
- name: Cache cargo-dylint tooling
id: cargo_dylint_cache
uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: |
~/.cargo/bin/cargo-dylint
~/.cargo/bin/dylint-link
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: argument-comment-lint-${{ runner.os }}-${{ env.CARGO_DYLINT_VERSION }}-${{ env.DYLINT_LINK_VERSION }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
- name: Install cargo-dylint tooling
if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION"
cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION"
- name: Check Python wrapper syntax
run: python3 -m py_compile tools/argument-comment-lint/wrapper_common.py tools/argument-comment-lint/run.py tools/argument-comment-lint/run-prebuilt-linter.py tools/argument-comment-lint/test_wrapper_common.py
- name: Test Python wrapper helpers
run: python3 -m unittest discover -s tools/argument-comment-lint -p 'test_*.py'
- name: Test argument comment lint package
working-directory: tools/argument-comment-lint
run: cargo test
env:
RUST_MIN_STACK: "8388608" # 8 MiB
argument_comment_lint_prebuilt:
name: Argument comment lint - ${{ matrix.name }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- name: Linux
runner: ubuntu-24.04
- name: macOS
runner: macos-15-xlarge
- name: Windows
runner: windows-x64
runs_on:
group: codex-runners
labels: codex-windows-x64
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ runner.os }}
install-test-prereqs: true
- name: Install Linux sandbox build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
sudo DEBIAN_FRONTEND=noninteractive apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os != 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
bazel_targets="$(./tools/argument-comment-lint/list-bazel-targets.sh)"
./.github/scripts/run-bazel-ci.sh \
-- \
build \
--config=argument-comment-lint \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA} \
-- \
${bazel_targets}
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os == 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
./.github/scripts/run-argument-comment-lint-bazel.sh \
--config=argument-comment-lint \
--platforms=//:local_windows \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
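
Both branches ultimately feed a list of Bazel targets to the argument-comment-lint config. A hypothetical sketch of enumerating such targets with `bazel query`; the real list-bazel-targets.sh may use a different query or filtering:

import subprocess

def list_rust_targets(scope: str = "//codex-rs/...") -> list[str]:
    # kind() filters query results by rule class using a regex.
    out = subprocess.run(
        ["bazel", "query", f'kind("rust_(library|binary|test)", {scope})'],
        check=True, capture_output=True, text=True,
    ).stdout
    return out.splitlines()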
# --- CI to validate on different os/targets --------------------------------
lint_build:
name: Lint/Build — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: 30
defaults:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects, except on
# arm64 macOS runners cross-targeting x86_64 where ring/cc-rs can produce
# mixed-architecture archives under sccache.
USE_SCCACHE: ${{ (startsWith(matrix.runner, 'windows') || (matrix.runner == 'macos-15-xlarge' && matrix.target == 'x86_64-apple-darwin')) && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
# In rust-ci, representative release-profile checks use thin LTO for faster feedback.
CARGO_PROFILE_RELEASE_LTO: ${{ matrix.profile == 'release' && 'thin' || 'fat' }}
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
profile: dev
- runner: macos-15-xlarge
target: x86_64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
# Also run representative release builds on Mac and Linux because
# there could be release-only build errors we want to catch.
# Hopefully this also pre-populates the build cache to speed up
# releases.
- runner: macos-15-xlarge
target: aarch64-apple-darwin
profile: release
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: release
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
profile: release
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Install Linux build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
packages=(pkg-config libcap-dev)
if [[ "${{ matrix.target }}" == 'x86_64-unknown-linux-musl' || "${{ matrix.target }}" == 'aarch64-unknown-linux-musl' ]]; then
packages+=(libubsan1)
fi
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends "${packages[@]}"
fi
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
with:
targets: ${{ matrix.target }}
components: clippy
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
shell: bash
run: |
set -euo pipefail
echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
# Explicit cache restore: split cargo home vs target, so we can
# avoid caching the large target dir on the gnu-dev job.
- name: Restore cargo home cache
id: cache_cargo_home_restore
uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
# Install and restore sccache cache
- name: Install sccache
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
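
The backend choice keys off two env vars the Actions runtime injects (ACTIONS_CACHE_URL and ACTIONS_RUNTIME_TOKEN). A minimal Python sketch of the same decision, writing results through GITHUB_ENV:

import os

def configure_sccache(workspace: str) -> None:
    gha = bool(os.environ.get("ACTIONS_CACHE_URL")) and bool(
        os.environ.get("ACTIONS_RUNTIME_TOKEN"))
    with open(os.environ["GITHUB_ENV"], "a", encoding="utf-8") as env:
        if gha:
            env.write("SCCACHE_GHA_ENABLED=true\n")
        else:
            # Fall back to a local disk cache persisted via actions/cache.
            env.write("SCCACHE_GHA_ENABLED=false\n")
            env.write(f"SCCACHE_DIR={workspace}/.sccache\n")

configure_sccache(os.environ.get("GITHUB_WORKSPACE", "."))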
- name: Enable sccache wrapper
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV"
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Disable sccache wrapper (musl)
shell: bash
run: |
set -euo pipefail
echo "RUSTC_WRAPPER=" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Prepare APT cache directories (musl)
shell: bash
run: |
set -euo pipefail
sudo mkdir -p /var/cache/apt/archives /var/lib/apt/lists
sudo chown -R "$USER:$USER" /var/cache/apt /var/lib/apt/lists
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Restore APT cache (musl)
id: cache_apt_restore
uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@d1434d08867e3ee9daa34448df10607b98908d29 # v2.2.1
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
DEBIAN_FRONTEND: noninteractive
TARGET: ${{ matrix.target }}
APT_UPDATE_ARGS: -o Acquire::Retries=3
APT_INSTALL_ARGS: --no-install-recommends
shell: bash
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
name: Configure musl rusty_v8 artifact overrides and verify checksums
uses: ./.github/actions/setup-rusty-v8-musl
with:
target: ${{ matrix.target }}
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with:
tool: cargo-chef
version: 0.1.71
- name: Pre-warm dependency cache (cargo-chef)
if: ${{ matrix.profile == 'release' }}
shell: bash
run: |
set -euo pipefail
RECIPE="${RUNNER_TEMP}/chef-recipe.json"
cargo chef prepare --recipe-path "$RECIPE"
cargo chef cook --recipe-path "$RECIPE" --target ${{ matrix.target }} --release
- name: cargo clippy
run: cargo clippy --target ${{ matrix.target }} --tests --profile ${{ matrix.profile }} --timings -- -D warnings
- name: Upload Cargo timings (clippy)
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: cargo-timings-rust-ci-clippy-${{ matrix.target }}-${{ matrix.profile }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
# Save caches explicitly; make non-fatal so cache packaging
# never fails the overall job. Only save when key wasn't hit.
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
- name: sccache stats
if: always() && env.USE_SCCACHE == 'true'
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always() && env.USE_SCCACHE == 'true'
shell: bash
run: |
{
echo "### sccache stats — ${{ matrix.target }} (${{ matrix.profile }})";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
- name: Save APT cache (musl)
if: always() && !cancelled() && (matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl') && steps.cache_apt_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
tests:
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.remote_env == 'true' && ' (remote)' || '' }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
# Perhaps we can bring this back down to 30m once we finish the cutover
# from tui_app_server/ to tui/. Incidentally, windows-arm64 was the main
# offender for exceeding the timeout.
timeout-minutes: 45
defaults:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects, except on
# arm64 macOS runners cross-targeting x86_64 where ring/cc-rs can produce
# mixed-architecture archives under sccache.
USE_SCCACHE: ${{ (startsWith(matrix.runner, 'windows') || (matrix.runner == 'macos-15-xlarge' && matrix.target == 'x86_64-apple-darwin')) && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
remote_env: "true"
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Install Linux build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev bubblewrap
fi
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
with:
targets: ${{ matrix.target }}
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
shell: bash
run: |
set -euo pipefail
echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
- name: Restore cargo home cache
id: cache_cargo_home_restore
uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- name: Install sccache
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
- name: Enable sccache wrapper
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV"
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with:
tool: nextest
version: 0.9.103
- name: Enable unprivileged user namespaces (Linux)
if: runner.os == 'Linux'
run: |
# Required for bubblewrap to work on Linux CI runners.
sudo sysctl -w kernel.unprivileged_userns_clone=1
# Ubuntu 24.04+ can additionally gate unprivileged user namespaces
# behind AppArmor.
if sudo sysctl -a 2>/dev/null | grep -q '^kernel.apparmor_restrict_unprivileged_userns'; then
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
fi
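
The AppArmor gate probed above can also be read without shelling out, since sysctl keys map to /proc/sys paths. A read-only Python sketch (the workflow itself still needs sudo sysctl -w to change the value):

from pathlib import Path

def apparmor_userns_restricted() -> bool | None:
    # Ubuntu 24.04+ exposes this knob; older kernels simply lack the file.
    knob = Path("/proc/sys/kernel/apparmor_restrict_unprivileged_userns")
    if not knob.exists():
        return None
    return knob.read_text().strip() == "1"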
- name: Set up remote test env (Docker)
if: ${{ runner.os == 'Linux' && matrix.remote_env == 'true' }}
shell: bash
run: |
set -euo pipefail
export CODEX_TEST_REMOTE_ENV_CONTAINER_NAME=codex-remote-test-env
source "${GITHUB_WORKSPACE}/scripts/test-remote-env.sh"
echo "CODEX_TEST_REMOTE_ENV=${CODEX_TEST_REMOTE_ENV}" >> "$GITHUB_ENV"
echo "CODEX_TEST_REMOTE_EXEC_SERVER_URL=${CODEX_TEST_REMOTE_EXEC_SERVER_URL}" >> "$GITHUB_ENV"
- name: tests
id: test
run: cargo nextest run --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test --timings
env:
RUST_BACKTRACE: 1
RUST_MIN_STACK: "8388608" # 8 MiB
NEXTEST_STATUS_LEVEL: leak
- name: Upload Cargo timings (nextest)
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: cargo-timings-rust-ci-nextest-${{ matrix.target }}-${{ matrix.profile }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
- name: sccache stats
if: always() && env.USE_SCCACHE == 'true'
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always() && env.USE_SCCACHE == 'true'
shell: bash
run: |
{
echo "### sccache stats — ${{ matrix.target }} (tests)";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
- name: Tear down remote test env
if: ${{ always() && runner.os == 'Linux' && matrix.remote_env == 'true' }}
shell: bash
run: |
set +e
if [[ "${STEPS_TEST_OUTCOME}" != "success" ]]; then
docker logs codex-remote-test-env || true
fi
docker rm -f codex-remote-test-env >/dev/null 2>&1 || true
env:
STEPS_TEST_OUTCOME: ${{ steps.test.outcome }}
- name: verify tests passed
if: steps.test.outcome == 'failure'
run: |
echo "Tests failed. See logs for details."
exit 1
# --- Gatherer job for the full post-merge workflow --------------------------
results:
name: Full CI results
needs:
[
general,
cargo_shear,
argument_comment_lint_package,
argument_comment_lint_prebuilt,
lint_build,
tests,
]
if: always()
runs-on: ubuntu-24.04
steps:
- name: Summarize
shell: bash
run: |
echo "argpkg : ${{ needs.argument_comment_lint_package.result }}"
echo "arglint: ${{ needs.argument_comment_lint_prebuilt.result }}"
echo "general: ${{ needs.general.result }}"
echo "shear : ${{ needs.cargo_shear.result }}"
echo "lint : ${{ needs.lint_build.result }}"
echo "tests : ${{ needs.tests.result }}"
[[ '${{ needs.argument_comment_lint_package.result }}' == 'success' ]] || { echo 'argument_comment_lint_package failed'; exit 1; }
[[ '${{ needs.argument_comment_lint_prebuilt.result }}' == 'success' ]] || { echo 'argument_comment_lint_prebuilt failed'; exit 1; }
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
[[ '${{ needs.lint_build.result }}' == 'success' ]] || { echo 'lint_build failed'; exit 1; }
[[ '${{ needs.tests.result }}' == 'success' ]] || { echo 'tests failed'; exit 1; }
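
The gatherer reduces the needs results to a single pass/fail. The same aggregation as a Python sketch, using a subset of the job names above for brevity:

import sys

def summarize(results: dict[str, str]) -> int:
    failed = [job for job, result in results.items() if result != "success"]
    for job, result in results.items():
        print(f"{job:8}: {result}")
    if failed:
        print(f"failed: {', '.join(failed)}")
        return 1
    return 0

sys.exit(summarize({
    "general": "success",
    "shear": "success",
    "lint": "success",
    "tests": "success",
}))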
- name: sccache summary note
if: always()
run: |
echo "Per-job sccache stats are attached to each matrix job's Step Summary."


@@ -1,24 +1,25 @@
name: rust-ci
on:
pull_request: {}
push:
branches:
- main
workflow_dispatch:
# CI builds in debug (dev) for faster signal.
jobs:
# --- Detect what changed so the fast PR workflow only runs relevant jobs ----
# --- Detect what changed to decide which tests to run (always runs) -------------------------------------
changed:
name: Detect changed areas
runs-on: ubuntu-24.04
outputs:
argument_comment_lint: ${{ steps.detect.outputs.argument_comment_lint }}
argument_comment_lint_package: ${{ steps.detect.outputs.argument_comment_lint_package }}
codex: ${{ steps.detect.outputs.codex }}
workflows: ${{ steps.detect.outputs.workflows }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@v6
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
fetch-depth: 0
persist-credentials: false
- name: Detect changed paths (no external action)
id: detect
shell: bash
@@ -30,44 +31,35 @@ jobs:
HEAD_SHA='${{ github.event.pull_request.head.sha }}'
echo "Base SHA: $BASE_SHA"
echo "Head SHA: $HEAD_SHA"
# List files changed between base and PR head
mapfile -t files < <(git diff --name-only --no-renames "$BASE_SHA" "$HEAD_SHA")
else
# On manual runs, default to the full fast-PR bundle.
files=("codex-rs/force" "tools/argument-comment-lint/force" ".github/force")
# On push / manual runs, default to running everything
files=("codex-rs/force" ".github/force")
fi
codex=false
argument_comment_lint=false
argument_comment_lint_package=false
workflows=false
for f in "${files[@]}"; do
[[ $f == codex-rs/* ]] && codex=true
[[ $f == codex-rs/* || $f == tools/argument-comment-lint/* || $f == justfile ]] && argument_comment_lint=true
[[ $f == defs.bzl || $f == workspace_root_test_launcher.sh.tpl || $f == workspace_root_test_launcher.bat.tpl ]] && argument_comment_lint=true
[[ $f == tools/argument-comment-lint/* || $f == .github/workflows/rust-ci.yml || $f == .github/workflows/rust-ci-full.yml ]] && argument_comment_lint_package=true
[[ $f == .github/* ]] && workflows=true
done
echo "argument_comment_lint=$argument_comment_lint" >> "$GITHUB_OUTPUT"
echo "argument_comment_lint_package=$argument_comment_lint_package" >> "$GITHUB_OUTPUT"
echo "codex=$codex" >> "$GITHUB_OUTPUT"
echo "workflows=$workflows" >> "$GITHUB_OUTPUT"
# --- Fast Cargo-native PR checks -------------------------------------------
# --- CI that doesn't need specific targets ---------------------------------
general:
name: Format / etc
runs-on: ubuntu-24.04
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' }}
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93.0
with:
components: rustfmt
- name: cargo fmt
@@ -77,164 +69,620 @@ jobs:
name: cargo shear
runs-on: ubuntu-24.04
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' }}
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93.0
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with:
tool: cargo-shear@1.11.2
tool: cargo-shear
version: 1.5.1
- name: cargo shear
run: cargo shear --deny-warnings
run: cargo shear
argument_comment_lint_package:
name: Argument comment lint package
runs-on: ubuntu-24.04
needs: changed
if: ${{ needs.changed.outputs.argument_comment_lint_package == 'true' }}
env:
CARGO_DYLINT_VERSION: 5.0.0
DYLINT_LINK_VERSION: 5.0.0
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- name: Install nightly argument-comment-lint toolchain
shell: bash
run: |
rustup toolchain install nightly-2025-09-18 \
--profile minimal \
--component llvm-tools-preview \
--component rustc-dev \
--component rust-src \
--no-self-update
rustup default nightly-2025-09-18
- name: Cache cargo-dylint tooling
id: cargo_dylint_cache
uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: |
~/.cargo/bin/cargo-dylint
~/.cargo/bin/dylint-link
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: argument-comment-lint-${{ runner.os }}-${{ env.CARGO_DYLINT_VERSION }}-${{ env.DYLINT_LINK_VERSION }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
- name: Install cargo-dylint tooling
if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION"
cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION"
- name: Check Python wrapper syntax
run: python3 -m py_compile tools/argument-comment-lint/wrapper_common.py tools/argument-comment-lint/run.py tools/argument-comment-lint/run-prebuilt-linter.py tools/argument-comment-lint/test_wrapper_common.py
- name: Test Python wrapper helpers
run: python3 -m unittest discover -s tools/argument-comment-lint -p 'test_*.py'
- name: Test argument comment lint package
working-directory: tools/argument-comment-lint
run: cargo test
env:
RUST_MIN_STACK: "8388608" # 8 MiB
argument_comment_lint_prebuilt:
name: Argument comment lint - ${{ matrix.name }}
# --- CI to validate on different os/targets --------------------------------
lint_build:
name: Lint/Build — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: ${{ matrix.timeout_minutes }}
timeout-minutes: 30
needs: changed
# Keep job-level if to avoid spinning up runners when not needed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects (non-Windows).
USE_SCCACHE: ${{ startsWith(matrix.runner, 'windows') && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
# In rust-ci, representative release-profile checks use thin LTO for faster feedback.
CARGO_PROFILE_RELEASE_LTO: ${{ matrix.profile == 'release' && 'thin' || 'fat' }}
strategy:
fail-fast: false
matrix:
include:
- name: Linux
runner: ubuntu-24.04
timeout_minutes: 30
- name: macOS
runner: macos-15-xlarge
timeout_minutes: 30
- name: Windows
runner: windows-x64
timeout_minutes: 30
- runner: macos-15-xlarge
target: aarch64-apple-darwin
profile: dev
- runner: macos-15-xlarge
target: x86_64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
# Also run representative release builds on Mac and Linux because
# there could be release-only build errors we want to catch.
# Hopefully this also pre-populates the build cache to speed up
# releases.
- runner: macos-15-xlarge
target: aarch64-apple-darwin
profile: release
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: release
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
profile: release
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- name: Check whether argument comment lint should run
id: argument_comment_lint_gate
- uses: actions/checkout@v6
- name: Install Linux build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
env:
ARGUMENT_COMMENT_LINT: ${{ needs.changed.outputs.argument_comment_lint }}
WORKFLOWS: ${{ needs.changed.outputs.workflows }}
run: |
if [[ "$ARGUMENT_COMMENT_LINT" == "true" || "$WORKFLOWS" == "true" ]]; then
echo "run=true" >> "$GITHUB_OUTPUT"
exit 0
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
packages=(pkg-config libcap-dev)
if [[ "${{ matrix.target }}" == 'x86_64-unknown-linux-musl' || "${{ matrix.target }}" == 'aarch64-unknown-linux-musl' ]]; then
packages+=(libubsan1)
fi
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends "${packages[@]}"
fi
- uses: dtolnay/rust-toolchain@1.93.0
with:
targets: ${{ matrix.target }}
components: clippy
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
shell: bash
run: |
set -euo pipefail
echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
# Explicit cache restore: split cargo home vs target, so we can
# avoid caching the large target dir on the gnu-dev job.
- name: Restore cargo home cache
id: cache_cargo_home_restore
uses: actions/cache/restore@v5
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
# Install and restore sccache cache
- name: Install sccache
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
echo "No argument-comment-lint relevant changes."
echo "run=false" >> "$GITHUB_OUTPUT"
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
if: ${{ steps.argument_comment_lint_gate.outputs.run == 'true' }}
- name: Enable sccache wrapper
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV"
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
uses: actions/cache/restore@v5
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ steps.argument_comment_lint_gate.outputs.run == 'true' }}
uses: ./.github/actions/run-argument-comment-lint
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Disable sccache wrapper (musl)
shell: bash
run: |
set -euo pipefail
echo "RUSTC_WRAPPER=" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Prepare APT cache directories (musl)
shell: bash
run: |
set -euo pipefail
sudo mkdir -p /var/cache/apt/archives /var/lib/apt/lists
sudo chown -R "$USER:$USER" /var/cache/apt /var/lib/apt/lists
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Restore APT cache (musl)
id: cache_apt_restore
uses: actions/cache/restore@v5
with:
target: ${{ runner.os }}
buildbuddy-api-key: ${{ secrets.BUILDBUDDY_API_KEY }}
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
DEBIAN_FRONTEND: noninteractive
TARGET: ${{ matrix.target }}
APT_UPDATE_ARGS: -o Acquire::Retries=3
APT_INSTALL_ARGS: --no-install-recommends
shell: bash
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-chef
version: 0.1.71
- name: Pre-warm dependency cache (cargo-chef)
if: ${{ matrix.profile == 'release' }}
shell: bash
run: |
set -euo pipefail
RECIPE="${RUNNER_TEMP}/chef-recipe.json"
cargo chef prepare --recipe-path "$RECIPE"
cargo chef cook --recipe-path "$RECIPE" --target ${{ matrix.target }} --release --all-features
- name: cargo clippy
run: cargo clippy --target ${{ matrix.target }} --all-features --tests --profile ${{ matrix.profile }} --timings -- -D warnings
- name: Upload Cargo timings (clippy)
if: always()
uses: actions/upload-artifact@v7
with:
name: cargo-timings-rust-ci-clippy-${{ matrix.target }}-${{ matrix.profile }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
# Save caches explicitly; make non-fatal so cache packaging
# never fails the overall job. Only save when key wasn't hit.
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v5
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
uses: actions/cache/save@v5
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
- name: sccache stats
if: always() && env.USE_SCCACHE == 'true'
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always() && env.USE_SCCACHE == 'true'
shell: bash
run: |
{
echo "### sccache stats — ${{ matrix.target }} (${{ matrix.profile }})";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
- name: Save APT cache (musl)
if: always() && !cancelled() && (matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl') && steps.cache_apt_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v5
with:
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
tests:
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: 30
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects (non-Windows).
USE_SCCACHE: ${{ startsWith(matrix.runner, 'windows') && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
- name: Set up Node.js for js_repl tests
uses: actions/setup-node@v6
with:
node-version-file: codex-rs/node-version.txt
- name: Install Linux build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
fi
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- uses: dtolnay/rust-toolchain@1.93.0
with:
targets: ${{ matrix.target }}
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
shell: bash
run: |
set -euo pipefail
echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
- name: Restore cargo home cache
id: cache_cargo_home_restore
uses: actions/cache/restore@v5
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- name: Install sccache
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
- name: Enable sccache wrapper
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV"
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
uses: actions/cache/restore@v5
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: nextest
version: 0.9.103
- name: Enable unprivileged user namespaces (Linux)
if: runner.os == 'Linux'
run: |
# Required for bubblewrap to work on Linux CI runners.
sudo sysctl -w kernel.unprivileged_userns_clone=1
# Ubuntu 24.04+ can additionally gate unprivileged user namespaces
# behind AppArmor.
if sudo sysctl -a 2>/dev/null | grep -q '^kernel.apparmor_restrict_unprivileged_userns'; then
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
fi
- name: tests
id: test
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test --timings
env:
RUST_BACKTRACE: 1
NEXTEST_STATUS_LEVEL: leak
- name: Upload Cargo timings (nextest)
if: always()
uses: actions/upload-artifact@v7
with:
name: cargo-timings-rust-ci-nextest-${{ matrix.target }}-${{ matrix.profile }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v5
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
uses: actions/cache/save@v5
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
- name: sccache stats
if: always() && env.USE_SCCACHE == 'true'
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always() && env.USE_SCCACHE == 'true'
shell: bash
run: |
{
echo "### sccache stats — ${{ matrix.target }} (tests)";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
- name: verify tests passed
if: steps.test.outcome == 'failure'
run: |
echo "Tests failed. See logs for details."
exit 1
# --- Gatherer job that you mark as the ONLY required status -----------------
results:
name: CI results (required)
needs:
[
changed,
general,
cargo_shear,
argument_comment_lint_package,
argument_comment_lint_prebuilt,
]
needs: [changed, general, cargo_shear, lint_build, tests]
if: always()
runs-on: ubuntu-24.04
steps:
- name: Summarize
shell: bash
run: |
echo "argpkg : ${{ needs.argument_comment_lint_package.result }}"
echo "arglint: ${{ needs.argument_comment_lint_prebuilt.result }}"
echo "general: ${{ needs.general.result }}"
echo "shear : ${{ needs.cargo_shear.result }}"
echo "lint : ${{ needs.lint_build.result }}"
echo "tests : ${{ needs.tests.result }}"
# If nothing relevant changed (PR touching only root README, etc.),
# declare success regardless of other jobs.
if [[ "${NEEDS_CHANGED_OUTPUTS_ARGUMENT_COMMENT_LINT}" != 'true' && "${NEEDS_CHANGED_OUTPUTS_CODEX}" != 'true' && "${NEEDS_CHANGED_OUTPUTS_WORKFLOWS}" != 'true' ]]; then
if [[ '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
echo 'No relevant changes -> CI not required.'
exit 0
fi
if [[ "${NEEDS_CHANGED_OUTPUTS_ARGUMENT_COMMENT_LINT_PACKAGE}" == 'true' ]]; then
[[ '${{ needs.argument_comment_lint_package.result }}' == 'success' ]] || { echo 'argument_comment_lint_package failed'; exit 1; }
fi
# Otherwise require the jobs to have succeeded
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
[[ '${{ needs.lint_build.result }}' == 'success' ]] || { echo 'lint_build failed'; exit 1; }
[[ '${{ needs.tests.result }}' == 'success' ]] || { echo 'tests failed'; exit 1; }
if [[ "${NEEDS_CHANGED_OUTPUTS_ARGUMENT_COMMENT_LINT}" == 'true' || "${NEEDS_CHANGED_OUTPUTS_WORKFLOWS}" == 'true' ]]; then
[[ '${{ needs.argument_comment_lint_prebuilt.result }}' == 'success' ]] || { echo 'argument_comment_lint_prebuilt failed'; exit 1; }
fi
if [[ "${NEEDS_CHANGED_OUTPUTS_CODEX}" == 'true' || "${NEEDS_CHANGED_OUTPUTS_WORKFLOWS}" == 'true' ]]; then
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
fi
env:
NEEDS_CHANGED_OUTPUTS_ARGUMENT_COMMENT_LINT: ${{ needs.changed.outputs.argument_comment_lint }}
NEEDS_CHANGED_OUTPUTS_CODEX: ${{ needs.changed.outputs.codex }}
NEEDS_CHANGED_OUTPUTS_WORKFLOWS: ${{ needs.changed.outputs.workflows }}
NEEDS_CHANGED_OUTPUTS_ARGUMENT_COMMENT_LINT_PACKAGE: ${{ needs.changed.outputs.argument_comment_lint_package }}
- name: sccache summary note
if: always()
run: |
echo "Per-job sccache stats are attached to each matrix job's Step Summary."

View File

@@ -1,108 +0,0 @@
name: rust-release-argument-comment-lint
on:
workflow_call:
inputs:
publish:
required: true
type: boolean
jobs:
skip:
if: ${{ !inputs.publish }}
runs-on: ubuntu-latest
steps:
- run: echo "Skipping argument-comment-lint release assets for prerelease tag"
build:
if: ${{ inputs.publish }}
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: 60
env:
CARGO_DYLINT_VERSION: 5.0.0
DYLINT_LINK_VERSION: 5.0.0
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
archive_name: argument-comment-lint-aarch64-apple-darwin.tar.gz
lib_name: libargument_comment_lint@nightly-2025-09-18-aarch64-apple-darwin.dylib
runner_binary: argument-comment-lint
cargo_dylint_binary: cargo-dylint
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
archive_name: argument-comment-lint-x86_64-unknown-linux-gnu.tar.gz
lib_name: libargument_comment_lint@nightly-2025-09-18-x86_64-unknown-linux-gnu.so
runner_binary: argument-comment-lint
cargo_dylint_binary: cargo-dylint
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
archive_name: argument-comment-lint-aarch64-unknown-linux-gnu.tar.gz
lib_name: libargument_comment_lint@nightly-2025-09-18-aarch64-unknown-linux-gnu.so
runner_binary: argument-comment-lint
cargo_dylint_binary: cargo-dylint
- runner: windows-x64
target: x86_64-pc-windows-msvc
archive_name: argument-comment-lint-x86_64-pc-windows-msvc.zip
lib_name: argument_comment_lint@nightly-2025-09-18-x86_64-pc-windows-msvc.dll
runner_binary: argument-comment-lint.exe
cargo_dylint_binary: cargo-dylint.exe
runs_on:
group: codex-runners
labels: codex-windows-x64
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
with:
toolchain: nightly-2025-09-18
targets: ${{ matrix.target }}
components: llvm-tools-preview, rustc-dev, rust-src
- name: Install tooling
shell: bash
run: |
install_root="${RUNNER_TEMP}/argument-comment-lint-tools"
cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION" --root "$install_root"
cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION"
echo "INSTALL_ROOT=$install_root" >> "$GITHUB_ENV"
- name: Cargo build
working-directory: tools/argument-comment-lint
shell: bash
run: cargo build --release --target ${{ matrix.target }}
- name: Stage artifact
shell: bash
run: |
dest="dist/argument-comment-lint/${{ matrix.target }}"
mkdir -p "$dest"
package_root="${RUNNER_TEMP}/argument-comment-lint"
rm -rf "$package_root"
mkdir -p "$package_root/bin" "$package_root/lib"
cp "tools/argument-comment-lint/target/${{ matrix.target }}/release/${{ matrix.runner_binary }}" \
"$package_root/bin/${{ matrix.runner_binary }}"
cp "${INSTALL_ROOT}/bin/${{ matrix.cargo_dylint_binary }}" \
"$package_root/bin/${{ matrix.cargo_dylint_binary }}"
cp "tools/argument-comment-lint/target/${{ matrix.target }}/release/${{ matrix.lib_name }}" \
"$package_root/lib/${{ matrix.lib_name }}"
archive_path="$dest/${{ matrix.archive_name }}"
if [[ "${{ runner.os }}" == "Windows" ]]; then
(cd "${RUNNER_TEMP}" && 7z a "$GITHUB_WORKSPACE/$archive_path" argument-comment-lint >/dev/null)
else
(cd "${RUNNER_TEMP}" && tar -czf "$GITHUB_WORKSPACE/$archive_path" argument-comment-lint)
fi
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: argument-comment-lint-${{ matrix.target }}
path: dist/argument-comment-lint/${{ matrix.target }}/*
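The matrix above hard-codes one `lib_name` per target because dylint resolves lint libraries by a filename that embeds the exact nightly toolchain. A sketch of how those names decompose, derived only from the matrix values shown (no other naming rules are assumed):

    toolchain="nightly-2025-09-18"
    case "${TARGET}" in
      *apple-darwin) prefix="lib"; ext="dylib" ;;
      *windows-msvc) prefix="";    ext="dll"   ;;
      *)             prefix="lib"; ext="so"    ;;
    esac
    # e.g. libargument_comment_lint@nightly-2025-09-18-aarch64-apple-darwin.dylib
    echo "${prefix}argument_comment_lint@${toolchain}-${TARGET}.${ext}"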

View File

@@ -18,11 +18,10 @@ jobs:
if: github.repository == 'openai/codex'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@v6
with:
ref: main
fetch-depth: 0
persist-credentials: false
- name: Update models.json
env:
@@ -41,10 +40,10 @@ jobs:
)
url="${base_url%/}/models?client_version=${client_version}"
curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/models-manager/models.json
curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/core/models.json
- name: Open pull request (if changed)
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@v8
with:
commit-message: "Update models.json"
title: "Update models.json"

View File

@@ -24,9 +24,7 @@ jobs:
build-windows-binaries:
name: Build Windows binaries - ${{ matrix.runner }} - ${{ matrix.target }} - ${{ matrix.bundle }}
runs-on: ${{ matrix.runs_on }}
# Windows release builds can exceed an hour on fat-LTO mainline releases,
# so keep the timeout aligned with the top-level release build headroom.
timeout-minutes: 90
timeout-minutes: 60
permissions:
contents: read
defaults:
@@ -42,50 +40,34 @@ jobs:
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: primary
binaries: "codex codex-responses-api-proxy"
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: primary
binaries: "codex codex-responses-api-proxy"
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: helpers
binaries: "codex-windows-sandbox-setup codex-command-runner"
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: helpers
binaries: "codex-windows-sandbox-setup codex-command-runner"
runs_on:
group: codex-runners
labels: codex-windows-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: app-server
binaries: "codex-app-server"
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: app-server
binaries: "codex-app-server"
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: actions/checkout@v6
- name: Print runner specs (Windows)
shell: powershell
run: |
@@ -100,21 +82,17 @@ jobs:
Write-Host "Total RAM: $ramGiB GiB"
Write-Host "Disk usage:"
Get-PSDrive -PSProvider FileSystem | Format-Table -AutoSize Name, @{Name='Size(GB)';Expression={[math]::Round(($_.Used + $_.Free) / 1GB, 1)}}, @{Name='Free(GB)';Expression={[math]::Round($_.Free / 1GB, 1)}}
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: dtolnay/rust-toolchain@1.93.0
with:
targets: ${{ matrix.target }}
- name: Cargo build (Windows binaries)
shell: bash
run: |
build_args=()
for binary in ${{ matrix.binaries }}; do
build_args+=(--bin "$binary")
done
cargo build --target ${{ matrix.target }} --release --timings "${build_args[@]}"
cargo build --target ${{ matrix.target }} --release --timings ${{ matrix.build_args }}
- name: Upload Cargo timings
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@v7
with:
name: cargo-timings-rust-release-windows-${{ matrix.target }}-${{ matrix.bundle }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
@@ -125,12 +103,16 @@ jobs:
run: |
output_dir="target/${{ matrix.target }}/release/staged-${{ matrix.bundle }}"
mkdir -p "$output_dir"
for binary in ${{ matrix.binaries }}; do
cp "target/${{ matrix.target }}/release/${binary}.exe" "$output_dir/${binary}.exe"
done
if [[ "${{ matrix.bundle }}" == "primary" ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$output_dir/codex.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$output_dir/codex-responses-api-proxy.exe"
else
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$output_dir/codex-windows-sandbox-setup.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$output_dir/codex-command-runner.exe"
fi
- name: Upload Windows binaries
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@v7
with:
name: windows-binaries-${{ matrix.target }}-${{ matrix.bundle }}
path: |
@@ -141,15 +123,13 @@ jobs:
- build-windows-binaries
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on }}
timeout-minutes: 90
timeout-minutes: 60
permissions:
contents: read
id-token: write
defaults:
run:
working-directory: codex-rs
env:
WINDOWS_BINARIES: "codex codex-responses-api-proxy codex-windows-sandbox-setup codex-command-runner codex-app-server"
strategy:
fail-fast: false
@@ -167,41 +147,33 @@ jobs:
labels: codex-windows-arm64
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: actions/checkout@v6
- name: Download prebuilt Windows primary binaries
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
uses: actions/download-artifact@v8
with:
name: windows-binaries-${{ matrix.target }}-primary
path: codex-rs/target/${{ matrix.target }}/release
- name: Download prebuilt Windows helper binaries
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
uses: actions/download-artifact@v8
with:
name: windows-binaries-${{ matrix.target }}-helpers
path: codex-rs/target/${{ matrix.target }}/release
- name: Download prebuilt Windows app-server binary
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
name: windows-binaries-${{ matrix.target }}-app-server
path: codex-rs/target/${{ matrix.target }}/release
- name: Verify binaries
shell: bash
run: |
set -euo pipefail
for binary in ${WINDOWS_BINARIES}; do
ls -lh "target/${{ matrix.target }}/release/${binary}.exe"
done
ls -lh target/${{ matrix.target }}/release/codex.exe
ls -lh target/${{ matrix.target }}/release/codex-responses-api-proxy.exe
ls -lh target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe
ls -lh target/${{ matrix.target }}/release/codex-command-runner.exe
- name: Sign Windows binaries with Azure Trusted Signing
uses: ./.github/actions/windows-code-sign
with:
target: ${{ matrix.target }}
binaries: ${{ env.WINDOWS_BINARIES }}
client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID }}
@@ -215,55 +187,13 @@ jobs:
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
for binary in ${WINDOWS_BINARIES}; do
cp "target/${{ matrix.target }}/release/${binary}.exe" \
"$dest/${binary}-${{ matrix.target }}.exe"
done
- name: Build Python runtime wheel
shell: bash
run: |
set -euo pipefail
case "${{ matrix.target }}" in
aarch64-pc-windows-msvc)
platform_tag="win_arm64"
;;
x86_64-pc-windows-msvc)
platform_tag="win_amd64"
;;
*)
echo "No Python runtime wheel platform tag for ${{ matrix.target }}"
exit 1
;;
esac
python -m venv "${RUNNER_TEMP}/python-runtime-build-venv"
"${RUNNER_TEMP}/python-runtime-build-venv/Scripts/python.exe" -m pip install build
stage_dir="${RUNNER_TEMP}/openai-codex-cli-bin-${{ matrix.target }}"
wheel_dir="${GITHUB_WORKSPACE}/python-runtime-dist/${{ matrix.target }}"
# Keep the helpers next to codex.exe in the runtime wheel so Windows
# sandbox/elevation lookup matches the standalone release zip.
python "${GITHUB_WORKSPACE}/sdk/python/scripts/update_sdk_artifacts.py" \
stage-runtime \
"$stage_dir" \
"${GITHUB_WORKSPACE}/codex-rs/target/${{ matrix.target }}/release/codex.exe" \
--codex-version "${GITHUB_REF_NAME}" \
--platform-tag "$platform_tag" \
--resource-binary "${GITHUB_WORKSPACE}/codex-rs/target/${{ matrix.target }}/release/codex-command-runner.exe" \
--resource-binary "${GITHUB_WORKSPACE}/codex-rs/target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe"
"${RUNNER_TEMP}/python-runtime-build-venv/Scripts/python.exe" -m build --wheel --outdir "$wheel_dir" "$stage_dir"
- name: Upload Python runtime wheel
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: python-runtime-wheel-${{ matrix.target }}
path: python-runtime-dist/${{ matrix.target }}/*.whl
if-no-files-found: error
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$dest/codex-command-runner-${{ matrix.target }}.exe"
- name: Install DotSlash
uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2
uses: facebook/install-dotslash@v2
- name: Compress artifacts
shell: bash
@@ -327,7 +257,7 @@ jobs:
"${GITHUB_WORKSPACE}/.github/workflows/zstd" -T0 -19 "$dest/$base"
done
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
- uses: actions/upload-artifact@v7
with:
name: ${{ matrix.target }}
path: |
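Both sides of the `build_args` hunk above solve the same problem: turning a per-bundle binary list into `cargo build` flags. The removed side derived the argv from a space-separated `binaries` value; condensed, that derivation looks like this (list values copied from the matrix):

    binaries="codex codex-responses-api-proxy"
    build_args=()
    # Unquoted expansion is intentional here: split the list on spaces.
    for binary in ${binaries}; do
      build_args+=(--bin "${binary}")
    done
    # Yields: cargo build --release --bin codex --bin codex-responses-api-proxy
    printf 'cargo build --release %s\n' "${build_args[*]}"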

View File

@@ -1,99 +0,0 @@
name: rust-release-zsh
on:
workflow_call:
env:
ZSH_COMMIT: 77045ef899e53b9598bebc5a41db93a548a40ca6
ZSH_PATCH: codex-rs/shell-escalation/patches/zsh-exec-wrapper.patch
jobs:
linux:
name: Build zsh (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
container:
image: ${{ matrix.image }}
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: ubuntu-24.04
image: ubuntu:24.04
archive_name: codex-zsh-x86_64-unknown-linux-musl.tar.gz
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-24.04
image: arm64v8/ubuntu:24.04
archive_name: codex-zsh-aarch64-unknown-linux-musl.tar.gz
steps:
- name: Install build prerequisites
shell: bash
run: |
set -euo pipefail
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get install -y \
autoconf \
bison \
build-essential \
ca-certificates \
gettext \
git \
libncursesw5-dev
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Build, smoke-test, and stage zsh artifact
shell: bash
run: |
"${GITHUB_WORKSPACE}/.github/scripts/build-zsh-release-artifact.sh" \
"dist/zsh/${{ matrix.target }}/${{ matrix.archive_name }}"
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: codex-zsh-${{ matrix.target }}
path: dist/zsh/${{ matrix.target }}/*
darwin:
name: Build zsh (macOS) - ${{ matrix.variant }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
variant: macos-15
archive_name: codex-zsh-aarch64-apple-darwin.tar.gz
steps:
- name: Install build prerequisites
shell: bash
run: |
set -euo pipefail
if ! command -v autoconf >/dev/null 2>&1; then
brew install autoconf
fi
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Build, smoke-test, and stage zsh artifact
shell: bash
run: |
"${GITHUB_WORKSPACE}/.github/scripts/build-zsh-release-artifact.sh" \
"dist/zsh/${{ matrix.target }}/${{ matrix.archive_name }}"
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: codex-zsh-${{ matrix.target }}
path: dist/zsh/${{ matrix.target }}/*
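The deleted zsh workflow pins both the source revision (`ZSH_COMMIT`) and a vendored exec-wrapper patch; the build itself lives in `.github/scripts/build-zsh-release-artifact.sh`, which is not reproduced here. A hedged sketch of the provenance checks such a script would need, with `zsh_src` as a hypothetical checkout path:

    set -euo pipefail
    ZSH_COMMIT=77045ef899e53b9598bebc5a41db93a548a40ca6
    cd "${zsh_src}"
    [[ "$(git rev-parse HEAD)" == "${ZSH_COMMIT}" ]] || {
      echo "zsh checkout is not at the pinned commit" >&2
      exit 1
    }
    # Dry-run the patch so upstream drift fails fast, before any build work.
    git apply --check "${GITHUB_WORKSPACE}/codex-rs/shell-escalation/patches/zsh-exec-wrapper.patch"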

View File

@@ -19,10 +19,8 @@ jobs:
tag-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
@@ -49,11 +47,9 @@ jobs:
build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }} - ${{ matrix.bundle }}
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
# Release builds can take a long time, so leave some headroom to avoid
# having to restart the full workflow due to a timeout.
timeout-minutes: 90
timeout-minutes: 60
permissions:
contents: read
id-token: write
@@ -71,58 +67,19 @@ jobs:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
bundle: primary
artifact_name: aarch64-apple-darwin
binaries: "codex codex-responses-api-proxy"
build_dmg: "true"
- runner: macos-15-xlarge
target: aarch64-apple-darwin
bundle: app-server
artifact_name: aarch64-apple-darwin-app-server
binaries: "codex-app-server"
build_dmg: "false"
- runner: macos-15-xlarge
target: x86_64-apple-darwin
bundle: primary
artifact_name: x86_64-apple-darwin
binaries: "codex codex-responses-api-proxy"
build_dmg: "true"
- runner: macos-15-xlarge
target: x86_64-apple-darwin
bundle: app-server
artifact_name: x86_64-apple-darwin-app-server
binaries: "codex-app-server"
build_dmg: "false"
# Release artifacts intentionally ship MUSL-linked Linux binaries.
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
bundle: primary
artifact_name: x86_64-unknown-linux-musl
binaries: "codex codex-responses-api-proxy bwrap"
build_dmg: "false"
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
bundle: app-server
artifact_name: x86_64-unknown-linux-musl-app-server
binaries: "codex-app-server"
build_dmg: "false"
target: x86_64-unknown-linux-gnu
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
bundle: primary
artifact_name: aarch64-unknown-linux-musl
binaries: "codex codex-responses-api-proxy bwrap"
build_dmg: "false"
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
bundle: app-server
artifact_name: aarch64-unknown-linux-musl-app-server
binaries: "codex-app-server"
build_dmg: "false"
target: aarch64-unknown-linux-gnu
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: actions/checkout@v6
- name: Print runner specs (Linux)
if: ${{ runner.os == 'Linux' }}
shell: bash
@@ -168,7 +125,7 @@ jobs:
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: dtolnay/rust-toolchain@1.93.0
with:
targets: ${{ matrix.target }}
@@ -185,10 +142,9 @@ jobs:
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@d1434d08867e3ee9daa34448df10607b98908d29 # v2.2.1
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
use-cache: false
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
@@ -254,44 +210,16 @@ jobs:
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
name: Configure musl rusty_v8 artifact overrides and verify checksums
uses: ./.github/actions/setup-rusty-v8-musl
with:
target: ${{ matrix.target }}
- if: ${{ contains(matrix.target, 'linux') && matrix.bundle == 'primary' }}
name: Build bwrap and export digest
shell: bash
run: |
set -euo pipefail
target="${{ matrix.target }}"
cargo build --target "$target" --release --timings --bin bwrap
bwrap_path="target/${target}/release/bwrap"
if [[ ! -f "$bwrap_path" ]]; then
echo "bwrap binary ${bwrap_path} not found"
exit 1
fi
digest="$(sha256sum "$bwrap_path" | awk '{print $1}')"
echo "CODEX_BWRAP_SHA256=${digest}" >> "$GITHUB_ENV"
echo "Built bwrap ${bwrap_path} with sha256:${digest}"
- name: Cargo build
shell: bash
run: |
build_args=()
for binary in ${{ matrix.binaries }}; do
build_args+=(--bin "$binary")
done
echo "CARGO_PROFILE_RELEASE_LTO: ${CARGO_PROFILE_RELEASE_LTO}"
cargo build --target ${{ matrix.target }} --release --timings "${build_args[@]}"
cargo build --target ${{ matrix.target }} --release --timings --bin codex --bin codex-responses-api-proxy
- name: Upload Cargo timings
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@v7
with:
name: cargo-timings-rust-release-${{ matrix.target }}-${{ matrix.bundle }}
name: cargo-timings-rust-release-${{ matrix.target }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
@@ -301,14 +229,12 @@ jobs:
with:
target: ${{ matrix.target }}
artifacts-dir: ${{ github.workspace }}/codex-rs/target/${{ matrix.target }}/release
binaries: ${{ matrix.binaries }}
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (binaries)
uses: ./.github/actions/macos-code-sign
with:
target: ${{ matrix.target }}
binaries: ${{ matrix.binaries }}
sign-binaries: "true"
sign-dmg: "false"
apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
@@ -317,7 +243,7 @@ jobs:
apple-notarization-key-id: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
apple-notarization-issuer-id: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
- if: ${{ runner.os == 'macOS' && matrix.build_dmg == 'true' }}
- if: ${{ runner.os == 'macOS' }}
name: Build macOS dmg
shell: bash
run: |
@@ -332,17 +258,23 @@ jobs:
# The previous "MacOS code signing (binaries)" step signs + notarizes the
# built artifacts in `${release_dir}`. This step packages *those same*
# signed binaries into a dmg.
codex_binary_path="${release_dir}/codex"
proxy_binary_path="${release_dir}/codex-responses-api-proxy"
rm -rf "$dmg_root"
mkdir -p "$dmg_root"
for binary in ${{ matrix.binaries }}; do
binary_path="${release_dir}/${binary}"
if [[ ! -f "${binary_path}" ]]; then
echo "Binary ${binary_path} not found"
exit 1
fi
ditto "${binary_path}" "${dmg_root}/${binary}"
done
if [[ ! -f "$codex_binary_path" ]]; then
echo "Binary $codex_binary_path not found"
exit 1
fi
if [[ ! -f "$proxy_binary_path" ]]; then
echo "Binary $proxy_binary_path not found"
exit 1
fi
ditto "$codex_binary_path" "${dmg_root}/codex"
ditto "$proxy_binary_path" "${dmg_root}/codex-responses-api-proxy"
rm -f "$dmg_path"
hdiutil create \
@@ -357,7 +289,7 @@ jobs:
exit 1
fi
- if: ${{ runner.os == 'macOS' && matrix.build_dmg == 'true' }}
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (dmg)
uses: ./.github/actions/macos-code-sign
with:
@@ -376,88 +308,18 @@ jobs:
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
for binary in ${{ matrix.binaries }}; do
cp "target/${{ matrix.target }}/release/${binary}" "$dest/${binary}-${{ matrix.target }}"
if [[ "${{ matrix.target }}" == *linux* ]]; then
cp "target/${{ matrix.target }}/release/${binary}.sigstore" \
"$dest/${binary}-${{ matrix.target }}.sigstore"
fi
done
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
if [[ "${{ matrix.target }}" == *linux* && "${{ matrix.bundle }}" == "primary" ]]; then
bundle_root="${RUNNER_TEMP}/codex-${{ matrix.target }}-bundle"
rm -rf "$bundle_root"
mkdir -p "$bundle_root/codex-resources"
cp "$dest/codex-${{ matrix.target }}" "$bundle_root/codex"
cp "$dest/bwrap-${{ matrix.target }}" "$bundle_root/codex-resources/bwrap"
chmod 0755 "$bundle_root/codex" "$bundle_root/codex-resources/bwrap"
tar -C "$bundle_root" -cf - codex codex-resources/bwrap |
zstd -T0 -19 -o "$dest/codex-${{ matrix.target }}-bundle.tar.zst"
if [[ "${{ matrix.target }}" == *linux* ]]; then
cp target/${{ matrix.target }}/release/codex.sigstore "$dest/codex-${{ matrix.target }}.sigstore"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.sigstore "$dest/codex-responses-api-proxy-${{ matrix.target }}.sigstore"
fi
if [[ "${{ matrix.build_dmg }}" == "true" ]]; then
if [[ "${{ matrix.target }}" == *apple-darwin ]]; then
cp target/${{ matrix.target }}/release/codex-${{ matrix.target }}.dmg "$dest/codex-${{ matrix.target }}.dmg"
fi
- name: Build Python runtime wheel
if: ${{ matrix.bundle == 'primary' }}
shell: bash
run: |
set -euo pipefail
case "${{ matrix.target }}" in
aarch64-apple-darwin)
platform_tag="macosx_11_0_arm64"
;;
x86_64-apple-darwin)
platform_tag="macosx_10_9_x86_64"
;;
aarch64-unknown-linux-musl)
platform_tag="musllinux_1_1_aarch64"
;;
x86_64-unknown-linux-musl)
platform_tag="musllinux_1_1_x86_64"
;;
*)
echo "No Python runtime wheel platform tag for ${{ matrix.target }}"
exit 1
;;
esac
python3 -m venv "${RUNNER_TEMP}/python-runtime-build-venv"
# Do not install into the runner's system Python; macOS runners mark
# the Homebrew Python as externally managed under PEP 668.
"${RUNNER_TEMP}/python-runtime-build-venv/bin/python" -m pip install build
stage_dir="${RUNNER_TEMP}/openai-codex-cli-bin-${{ matrix.target }}"
wheel_dir="${GITHUB_WORKSPACE}/python-runtime-dist/${{ matrix.target }}"
stage_runtime_args=(
"${GITHUB_WORKSPACE}/sdk/python/scripts/update_sdk_artifacts.py"
stage-runtime
"$stage_dir"
"${GITHUB_WORKSPACE}/codex-rs/target/${{ matrix.target }}/release/codex"
--codex-version "${GITHUB_REF_NAME}"
--platform-tag "$platform_tag"
)
if [[ "${{ matrix.target }}" == *linux* ]]; then
# Keep bwrap in the runtime wheel so Linux sandbox fallback behavior
# matches the standalone release bundle on hosts without system bwrap.
stage_runtime_args+=(
--resource-binary
"${GITHUB_WORKSPACE}/codex-rs/target/${{ matrix.target }}/release/bwrap"
)
fi
python3 "${stage_runtime_args[@]}"
"${RUNNER_TEMP}/python-runtime-build-venv/bin/python" -m build --wheel --outdir "$wheel_dir" "$stage_dir"
- name: Upload Python runtime wheel
if: ${{ matrix.bundle == 'primary' }}
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: python-runtime-wheel-${{ matrix.target }}
path: python-runtime-dist/${{ matrix.target }}/*.whl
if-no-files-found: error
- name: Compress artifacts
shell: bash
run: |
@@ -477,7 +339,7 @@ jobs:
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz || "$base" == *.tar.zst || "$base" == *.zip || "$base" == *.dmg ]]; then
if [[ "$base" == *.tar.gz || "$base" == *.zip || "$base" == *.dmg ]]; then
continue
fi
@@ -494,11 +356,11 @@ jobs:
zstd -T0 -19 --rm "$dest/$base"
done
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
- uses: actions/upload-artifact@v7
with:
name: ${{ matrix.artifact_name }}
# Upload the per-binary .zst files, .tar.gz equivalents, and any
# prebuilt archives staged above.
name: ${{ matrix.target }}
# Upload the per-binary .zst files as well as the new .tar.gz
# equivalents we generated in the previous step.
path: |
codex-rs/dist/${{ matrix.target }}/*
@@ -509,24 +371,20 @@ jobs:
release-lto: ${{ contains(github.ref_name, '-alpha') && 'thin' || 'fat' }}
secrets: inherit
argument-comment-lint-release-assets:
name: argument-comment-lint release assets
shell-tool-mcp:
name: shell-tool-mcp
needs: tag-check
uses: ./.github/workflows/rust-release-argument-comment-lint.yml
uses: ./.github/workflows/shell-tool-mcp.yml
with:
release-tag: ${{ github.ref_name }}
publish: true
zsh-release-assets:
name: zsh release assets
needs: tag-check
uses: ./.github/workflows/rust-release-zsh.yml
secrets: inherit
release:
needs:
- build
- build-windows
- argument-comment-lint-release-assets
- zsh-release-assets
- shell-tool-mcp
name: release
runs-on: ubuntu-latest
permissions:
@@ -537,13 +395,10 @@ jobs:
tag: ${{ github.ref_name }}
should_publish_npm: ${{ steps.npm_publish_settings.outputs.should_publish }}
npm_tag: ${{ steps.npm_publish_settings.outputs.npm_tag }}
should_publish_python_runtime: ${{ steps.python_runtime_publish_settings.outputs.should_publish }}
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
uses: actions/checkout@v6
- name: Generate release notes from tag commit message
id: release_notes
@@ -565,15 +420,18 @@ jobs:
echo "path=${notes_path}" >> "${GITHUB_OUTPUT}"
- uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
- uses: actions/download-artifact@v8
with:
path: dist
- name: List
run: ls -R dist/
# This is a temporary fix: we should modify shell-tool-mcp.yml so these
# files do not end up in dist/ in the first place.
- name: Delete entries from dist/ that should not go in the release
run: |
rm -rf dist/shell-tool-mcp*
rm -rf dist/windows-binaries*
# cargo-timing.html appears under multiple target-specific directories.
# If included in files: dist/**, release upload races on duplicate
@@ -614,29 +472,13 @@ jobs:
echo "npm_tag=" >> "$GITHUB_OUTPUT"
fi
- name: Determine Python runtime publish settings
id: python_runtime_publish_settings
env:
VERSION: ${{ steps.release_name.outputs.name }}
run: |
set -euo pipefail
version="${VERSION}"
if [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "should_publish=true" >> "$GITHUB_OUTPUT"
elif [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
echo "should_publish=true" >> "$GITHUB_OUTPUT"
else
echo "should_publish=false" >> "$GITHUB_OUTPUT"
fi
- name: Setup pnpm
uses: pnpm/action-setup@a8198c4bff370c8506180b035930dea56dbd5288 # v5
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js for npm packaging
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
uses: actions/setup-node@v6
with:
node-version: 22
@@ -644,14 +486,13 @@ jobs:
run: pnpm install --frozen-lockfile
# stage_npm_packages.py requires DotSlash when staging releases.
- uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2
- uses: facebook/install-dotslash@v2
- name: Stage npm packages
env:
GH_TOKEN: ${{ github.token }}
RELEASE_VERSION: ${{ steps.release_name.outputs.name }}
run: |
./scripts/stage_npm_packages.py \
--release-version "$RELEASE_VERSION" \
--release-version "${{ steps.release_name.outputs.name }}" \
--package codex \
--package codex-responses-api-proxy \
--package codex-sdk
@@ -662,7 +503,7 @@ jobs:
cp scripts/install/install.ps1 dist/install.ps1
- name: Create GitHub Release
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2.6.1
uses: softprops/action-gh-release@v2
with:
name: ${{ steps.release_name.outputs.name }}
tag_name: ${{ github.ref_name }}
@@ -672,27 +513,13 @@ jobs:
# (e.g. -alpha, -beta). Otherwise publish a normal release.
prerelease: ${{ contains(steps.release_name.outputs.name, '-') }}
- uses: facebook/dotslash-publish-release@9c9ec027515c34db9282a09a25a9cab5880b2c52 # v2
- uses: facebook/dotslash-publish-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json
- uses: facebook/dotslash-publish-release@9c9ec027515c34db9282a09a25a9cab5880b2c52 # v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-zsh-config.json
- uses: facebook/dotslash-publish-release@9c9ec027515c34db9282a09a25a9cab5880b2c52 # v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-argument-comment-lint-config.json
- name: Trigger developers.openai.com deploy
# Only trigger the deploy if the release is not a pre-release.
# The deploy is used to update the developers.openai.com website with the new config schema json file.
@@ -721,22 +548,23 @@ jobs:
steps:
- name: Setup Node.js
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
uses: actions/setup-node@v6
with:
# Node 24 bundles npm >= 11.5.1, which trusted publishing requires.
node-version: 24
node-version: 22
registry-url: "https://registry.npmjs.org"
scope: "@openai"
# Trusted publishing requires npm CLI version 11.5.1 or later.
- name: Update npm
run: npm install -g npm@latest
- name: Download npm tarballs from release
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
RELEASE_TAG: ${{ needs.release.outputs.tag }}
RELEASE_VERSION: ${{ needs.release.outputs.version }}
run: |
set -euo pipefail
version="$RELEASE_VERSION"
tag="$RELEASE_TAG"
version="${{ needs.release.outputs.version }}"
tag="${{ needs.release.outputs.tag }}"
mkdir -p dist/npm
patterns=(
"codex-npm-${version}.tgz"
@@ -765,59 +593,11 @@ jobs:
prefix="${NPM_TAG}-"
fi
root_tarball="dist/npm/codex-npm-${VERSION}.tgz"
sdk_tarball="dist/npm/codex-sdk-npm-${VERSION}.tgz"
# Keep this list in sync with CODEX_PLATFORM_PACKAGES in
# codex-cli/scripts/build_npm_package.py. The root wrapper advances
# @openai/codex@latest as soon as it publishes, so every platform
# package it aliases must already exist in the registry first.
platform_tarballs=(
"dist/npm/codex-npm-linux-x64-${VERSION}.tgz"
"dist/npm/codex-npm-linux-arm64-${VERSION}.tgz"
"dist/npm/codex-npm-darwin-x64-${VERSION}.tgz"
"dist/npm/codex-npm-darwin-arm64-${VERSION}.tgz"
"dist/npm/codex-npm-win32-x64-${VERSION}.tgz"
"dist/npm/codex-npm-win32-arm64-${VERSION}.tgz"
)
for required_tarball in "${platform_tarballs[@]}" "${root_tarball}"; do
if [[ ! -f "${required_tarball}" ]]; then
echo "Missing npm tarball: ${required_tarball}"
exit 1
fi
done
shopt -s nullglob
other_tarballs=()
for tarball in dist/npm/*-"${VERSION}".tgz; do
if [[ "${tarball}" == "${root_tarball}" || "${tarball}" == "${sdk_tarball}" ]]; then
continue
fi
is_platform_tarball=false
for platform_tarball in "${platform_tarballs[@]}"; do
if [[ "${tarball}" == "${platform_tarball}" ]]; then
is_platform_tarball=true
break
fi
done
if [[ "${is_platform_tarball}" == true ]]; then
continue
fi
other_tarballs+=("${tarball}")
done
# Publish the platform packages before the root CLI wrapper. The root
# wrapper advances @openai/codex@latest, so it should only publish
# after the optional dependency versions it references exist.
tarballs=(
"${platform_tarballs[@]}"
"${other_tarballs[@]}"
"${root_tarball}"
)
if [[ -f "${sdk_tarball}" ]]; then
tarballs+=("${sdk_tarball}")
tarballs=(dist/npm/*-"${VERSION}".tgz)
if [[ ${#tarballs[@]} -eq 0 ]]; then
echo "No npm tarballs found in dist/npm for version ${VERSION}"
exit 1
fi
for tarball in "${tarballs[@]}"; do
@@ -863,48 +643,6 @@ jobs:
exit "${publish_status}"
done
# Publish the platform-specific Python runtime wheels using PyPI trusted publishing.
# PyPI project configuration must trust this workflow and job. Keep this
# non-blocking while the Python runtime publishing path is new; failures still
# need release follow-up, but should not invalidate the Rust release itself.
publish-python-runtime:
# Publish to PyPI for stable releases and alpha pre-releases with numeric suffixes.
if: ${{ needs.release.outputs.should_publish_python_runtime == 'true' }}
name: publish-python-runtime
needs: release
runs-on: ubuntu-latest
continue-on-error: true
environment: pypi
permissions:
id-token: write # Required for PyPI trusted publishing.
contents: read
steps:
- name: Download Python runtime wheels from release
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
RELEASE_TAG: ${{ needs.release.outputs.tag }}
RELEASE_VERSION: ${{ needs.release.outputs.version }}
run: |
set -euo pipefail
python_version="$RELEASE_VERSION"
python_version="${python_version/-alpha./a}"
python_version="${python_version/-beta./b}"
python_version="${python_version/-rc./rc}"
mkdir -p dist/python-runtime
gh release download "$RELEASE_TAG" \
--repo "${GITHUB_REPOSITORY}" \
--pattern "openai_codex_cli_bin-${python_version}-*.whl" \
--dir dist/python-runtime
ls -lh dist/python-runtime
- name: Publish Python runtime wheels to PyPI
uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0
with:
packages-dir: dist/python-runtime
skip-existing: true
winget:
name: winget
needs: release
@@ -919,7 +657,7 @@ jobs:
steps:
- name: Publish to WinGet
uses: vedantmgoyal9/winget-releaser@7bd472be23763def6e16bd06cc8b1cdfab0e2fd5
uses: vedantmgoyal9/winget-releaser@19e706d4c9121098010096f9c495a70a7518b30f
with:
identifier: OpenAI.Codex
version: ${{ needs.release.outputs.version }}
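Two version grammars meet in this file's diff: Rust release tags such as `0.44.0-alpha.3` and PEP 440 wheel versions such as `0.44.0a3`. The `publish-python-runtime` job earlier in this diff maps between them with plain bash substitutions; a worked example of that mapping (the input version is hypothetical):

    version="0.44.0-alpha.3"
    python_version="${version/-alpha./a}"
    python_version="${python_version/-beta./b}"
    python_version="${python_version/-rc./rc}"
    echo "${python_version}"   # -> 0.44.0a3, the infix used in the wheel glob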

View File

@@ -1,313 +0,0 @@
name: rusty-v8-release
on:
push:
tags:
- "rusty-v8-v*.*.*"
concurrency:
group: ${{ github.workflow }}::${{ github.ref_name }}
cancel-in-progress: false
jobs:
metadata:
runs-on: ubuntu-latest
outputs:
release_tag: ${{ steps.release_tag.outputs.release_tag }}
v8_version: ${{ steps.v8_version.outputs.version }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: "3.12"
- name: Resolve exact v8 crate version
id: v8_version
shell: bash
run: |
set -euo pipefail
version="$(python3 .github/scripts/rusty_v8_bazel.py resolved-v8-crate-version)"
echo "version=${version}" >> "$GITHUB_OUTPUT"
- name: Resolve release tag
id: release_tag
env:
GITHUB_REF_NAME: ${{ github.ref_name }}
V8_VERSION: ${{ steps.v8_version.outputs.version }}
shell: bash
run: |
set -euo pipefail
expected_release_tag="rusty-v8-v${V8_VERSION}"
release_tag="${GITHUB_REF_NAME}"
if [[ "${release_tag}" != "${expected_release_tag}" ]]; then
echo "Tag ${release_tag} does not match expected release tag ${expected_release_tag}." >&2
exit 1
fi
echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
build:
name: Build ${{ matrix.variant }} ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
permissions:
contents: read
actions: read
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-24.04
bazel_config: ci-v8
platform: linux_amd64
sandbox: false
target: x86_64-unknown-linux-gnu
variant: release
- runner: ubuntu-24.04
bazel_config: ci-v8
platform: linux_amd64
sandbox: true
target: x86_64-unknown-linux-gnu
variant: ptrcomp-sandbox
- runner: ubuntu-24.04-arm
bazel_config: ci-v8
platform: linux_arm64
sandbox: false
target: aarch64-unknown-linux-gnu
variant: release
- runner: ubuntu-24.04-arm
bazel_config: ci-v8
platform: linux_arm64
sandbox: true
target: aarch64-unknown-linux-gnu
variant: ptrcomp-sandbox
- runner: macos-15-xlarge
bazel_config: ci-macos
platform: macos_amd64
sandbox: false
target: x86_64-apple-darwin
variant: release
- runner: macos-15-xlarge
bazel_config: ci-macos
platform: macos_amd64
sandbox: true
target: x86_64-apple-darwin
variant: ptrcomp-sandbox
- runner: macos-15-xlarge
bazel_config: ci-macos
platform: macos_arm64
sandbox: false
target: aarch64-apple-darwin
variant: release
- runner: macos-15-xlarge
bazel_config: ci-macos
platform: macos_arm64
sandbox: true
target: aarch64-apple-darwin
variant: ptrcomp-sandbox
- runner: ubuntu-24.04
bazel_config: ci-v8
platform: linux_amd64_musl
sandbox: false
target: x86_64-unknown-linux-musl
variant: release
- runner: ubuntu-24.04-arm
bazel_config: ci-v8
platform: linux_arm64_musl
sandbox: false
target: aarch64-unknown-linux-musl
variant: release
- runner: ubuntu-24.04
bazel_config: ci-v8
platform: linux_amd64_musl
sandbox: true
target: x86_64-unknown-linux-musl
variant: ptrcomp-sandbox
- runner: ubuntu-24.04-arm
bazel_config: ci-v8
platform: linux_arm64_musl
sandbox: true
target: aarch64-unknown-linux-musl
variant: ptrcomp-sandbox
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Set up Bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ matrix.target }}
- name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: "3.12"
- name: Set up Rust toolchain for Cargo smoke
uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
with:
toolchain: "1.93.0"
- name: Build Bazel V8 release pair
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
PLATFORM: ${{ matrix.platform }}
SANDBOX: ${{ matrix.sandbox }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
target_suffix="${TARGET//-/_}"
pair_kind="release_pair"
if [[ "${SANDBOX}" == "true" ]]; then
pair_kind="sandbox_release_pair"
fi
pair_target="//third_party/v8:rusty_v8_${pair_kind}_${target_suffix}"
bazel_args=(
build
-c
opt
"--platforms=@llvm//platforms:${PLATFORM}"
--config=rusty-v8-upstream-libcxx
"${pair_target}"
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
)
if [[ "${SANDBOX}" != "true" ]]; then
bazel_args+=(--config=v8-release-compat)
fi
bazel \
--noexperimental_remote_repo_contents_cache \
"${bazel_args[@]}" \
"--config=${{ matrix.bazel_config }}" \
"--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
- name: Stage release pair
env:
BAZEL_CONFIG: ${{ matrix.bazel_config }}
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
PLATFORM: ${{ matrix.platform }}
SANDBOX: ${{ matrix.sandbox }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
stage_args=(
--platform "${PLATFORM}"
--target "${TARGET}"
--compilation-mode opt
--output-dir "dist/${TARGET}"
--bazel-config "${BAZEL_CONFIG}"
)
if [[ "${SANDBOX}" == "true" ]]; then
stage_args+=(--sandbox)
else
stage_args+=(--bazel-config v8-release-compat)
fi
python3 .github/scripts/rusty_v8_bazel.py stage-release-pair "${stage_args[@]}"
- name: Smoke test staged artifact with Cargo
env:
SANDBOX: ${{ matrix.sandbox }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
host_arch="$(uname -m)"
case "${TARGET}:${host_arch}" in
x86_64-apple-darwin:x86_64|aarch64-apple-darwin:arm64|x86_64-unknown-linux-gnu:x86_64|aarch64-unknown-linux-gnu:aarch64)
;;
*)
echo "Skipping non-native Cargo smoke for ${TARGET} on ${host_arch}."
exit 0
;;
esac
archive="$(find "dist/${TARGET}" -maxdepth 1 -type f -name 'librusty_v8_*.a.gz' -print -quit)"
binding="$(find "dist/${TARGET}" -maxdepth 1 -type f -name 'src_binding_*.rs' -print -quit)"
if [[ -z "${archive}" || -z "${binding}" ]]; then
echo "Missing staged archive or binding for ${TARGET}." >&2
exit 1
fi
cargo_args=(test -p codex-v8-poc)
if [[ "${SANDBOX}" == "true" ]]; then
cargo_args+=(--features sandbox)
fi
(
cd codex-rs
CARGO_TARGET_DIR="${RUNNER_TEMP}/rusty-v8-cargo-smoke-${TARGET}-${SANDBOX}" \
RUSTY_V8_ARCHIVE="${GITHUB_WORKSPACE}/${archive}" \
RUSTY_V8_SRC_BINDING_PATH="${GITHUB_WORKSPACE}/${binding}" \
cargo "${cargo_args[@]}"
)
- name: Upload staged artifacts
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: rusty-v8-${{ needs.metadata.outputs.v8_version }}-${{ matrix.variant }}-${{ matrix.target }}
path: dist/${{ matrix.target }}/*
publish-release:
needs:
- metadata
- build
runs-on: ubuntu-latest
permissions:
contents: write
actions: read
steps:
- name: Check whether release already exists
id: release
env:
GH_TOKEN: ${{ github.token }}
RELEASE_TAG: ${{ needs.metadata.outputs.release_tag }}
shell: bash
run: |
set -euo pipefail
if gh release view "${RELEASE_TAG}" --repo "${GITHUB_REPOSITORY}" > /dev/null 2>&1; then
echo "exists=true" >> "${GITHUB_OUTPUT}"
else
echo "exists=false" >> "${GITHUB_OUTPUT}"
fi
- uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
path: dist
- name: Create GitHub Release
if: ${{ steps.release.outputs.exists != 'true' }}
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2.6.1
with:
tag_name: ${{ needs.metadata.outputs.release_tag }}
name: ${{ needs.metadata.outputs.release_tag }}
files: dist/**
# Keep V8 artifact releases out of Codex's normal "latest release" channel.
prerelease: true
- name: Amend existing GitHub Release
if: ${{ steps.release.outputs.exists == 'true' }}
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2.6.1
with:
tag_name: ${{ needs.metadata.outputs.release_tag }}
name: ${{ needs.metadata.outputs.release_tag }}
files: dist/**
overwrite_files: true
# Keep V8 artifact releases out of Codex's normal "latest release" channel.
prerelease: true
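The Cargo smoke test in the deleted workflow above runs only when the build target matches the host architecture; cross-compiled artifacts are skipped rather than emulated. Joining target and host with `:` lets a single `case` pattern list cover both dimensions at once. Isolated for clarity (patterns copied verbatim from the step):

    host_arch="$(uname -m)"
    case "${TARGET}:${host_arch}" in
      x86_64-apple-darwin:x86_64|aarch64-apple-darwin:arm64|x86_64-unknown-linux-gnu:x86_64|aarch64-unknown-linux-gnu:aarch64)
        echo "Native pair: running Cargo smoke for ${TARGET}." ;;
      *)
        echo "Skipping non-native Cargo smoke for ${TARGET} on ${host_arch}." ;;
    esac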

View File

@@ -6,41 +6,6 @@
pull_request: {}
jobs:
python-sdk:
runs-on:
group: codex-runners
labels: codex-linux-x64
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- name: Test Python SDK
shell: bash
run: |
set -euo pipefail
# Run inside Alpine so dependency resolution exercises the pinned
# runtime wheel on the same Linux wheel family that CI installs.
docker run --rm \
--user "$(id -u):$(id -g)" \
-e HOME=/tmp/codex-python-sdk-home \
-e UV_LINK_MODE=copy \
-v "${GITHUB_WORKSPACE}:${GITHUB_WORKSPACE}" \
-w "${GITHUB_WORKSPACE}/sdk/python" \
python:3.12-alpine \
sh -euxc '
python -m venv /tmp/uv
/tmp/uv/bin/python -m pip install uv==0.11.3
/tmp/uv/bin/uv sync --extra dev --frozen
/tmp/uv/bin/uv run --extra dev ruff check --output-format=github .
/tmp/uv/bin/uv run --extra dev ruff format --check .
/tmp/uv/bin/uv run --extra dev pytest
'
sdks:
runs-on:
group: codex-runners
@@ -48,10 +13,7 @@ jobs:
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
uses: actions/checkout@v6
- name: Install Linux bwrap build dependencies
shell: bash
@@ -61,82 +23,21 @@ jobs:
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- name: Setup pnpm
uses: pnpm/action-setup@a8198c4bff370c8506180b035930dea56dbd5288 # v5
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
uses: actions/setup-node@v6
with:
node-version: 22
cache: pnpm
- name: Set up Bazel CI
id: setup_bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: x86_64-unknown-linux-gnu
- uses: dtolnay/rust-toolchain@1.93.0
- name: Build codex with Bazel
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
set -euo pipefail
# Use the shared CI wrapper so fork PRs fall back cleanly when
# BuildBuddy credentials are unavailable. This workflow needs the
# built `codex` binary on disk afterwards, so ask the wrapper to
# override CI's default remote_download_minimal behavior.
./.github/scripts/run-bazel-ci.sh \
--remote-download-toplevel \
-- \
build \
--build_metadata=COMMIT_SHA=${GITHUB_SHA} \
--build_metadata=TAG_job=sdk \
-- \
//codex-rs/cli:codex
# Resolve the exact output file using the same wrapper/config path as
# the build instead of guessing which Bazel convenience symlink is
# available on the runner.
cquery_output="$(
./.github/scripts/run-bazel-ci.sh \
-- \
cquery \
--output=files \
-- \
//codex-rs/cli:codex \
| grep -E '^(/|bazel-out/)' \
| tail -n 1
)"
if [[ "${cquery_output}" = /* ]]; then
codex_bazel_output_path="${cquery_output}"
else
codex_bazel_output_path="${GITHUB_WORKSPACE}/${cquery_output}"
fi
if [[ -z "${codex_bazel_output_path}" ]]; then
echo "Bazel did not report an output path for //codex-rs/cli:codex." >&2
exit 1
fi
if [[ ! -e "${codex_bazel_output_path}" ]]; then
echo "Unable to locate the Bazel-built codex binary at ${codex_bazel_output_path}." >&2
exit 1
fi
# Stage the binary into the workspace and point the SDK tests at that
# stable path. The tests spawn `codex` directly many times, so using a
# normal executable path is more reliable than invoking Bazel for each
# test process.
install_dir="${GITHUB_WORKSPACE}/.tmp/sdk-ci"
mkdir -p "${install_dir}"
install -m 755 "${codex_bazel_output_path}" "${install_dir}/codex"
echo "CODEX_EXEC_PATH=${install_dir}/codex" >> "$GITHUB_ENV"
- name: Warm up Bazel-built codex
shell: bash
run: |
set -euo pipefail
"${CODEX_EXEC_PATH}" --version
- name: build codex
run: cargo build --bin codex
working-directory: codex-rs
- name: Install dependencies
run: pnpm install --frozen-lockfile
@@ -149,12 +50,3 @@ jobs:
- name: Test SDK packages
run: pnpm -r --filter ./sdk/typescript run test
- name: Save bazel repository cache
if: always() && !cancelled() && steps.setup_bazel.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
with:
path: |
~/.cache/bazel-repo-cache
key: bazel-cache-x86_64-unknown-linux-gnu-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}
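The removed python-sdk job runs its Alpine container as the host UID/GID so files written into the bind-mounted workspace keep the runner user's ownership; because that UID has no passwd entry inside the image, `HOME` has to be pointed at a writable path. A minimal reproduction of just that pattern (image and inner command are illustrative):

    docker run --rm \
      --user "$(id -u):$(id -g)" \
      -e HOME=/tmp/home \
      -v "${PWD}:${PWD}" \
      -w "${PWD}" \
      python:3.12-alpine \
      sh -euc 'python -m venv /tmp/venv && /tmp/venv/bin/pip --version'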

48
.github/workflows/shell-tool-mcp-ci.yml vendored Normal file
View File

@@ -0,0 +1,48 @@
name: shell-tool-mcp CI
on:
push:
paths:
- "shell-tool-mcp/**"
- ".github/workflows/shell-tool-mcp-ci.yml"
- "pnpm-lock.yaml"
- "pnpm-workspace.yaml"
pull_request:
paths:
- "shell-tool-mcp/**"
- ".github/workflows/shell-tool-mcp-ci.yml"
- "pnpm-lock.yaml"
- "pnpm-workspace.yaml"
env:
NODE_VERSION: 22
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v6
with:
node-version: ${{ env.NODE_VERSION }}
cache: "pnpm"
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Format check
run: pnpm --filter @openai/codex-shell-tool-mcp run format
- name: Run tests
run: pnpm --filter @openai/codex-shell-tool-mcp test
- name: Build
run: pnpm --filter @openai/codex-shell-tool-mcp run build
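The `paths:` filters above scope this workflow to the MCP package, its own workflow file, and the pnpm lockfiles. A rough local equivalent of that trigger check (the base ref is hypothetical):

    git diff --name-only origin/main...HEAD |
      grep -E '^(shell-tool-mcp/|\.github/workflows/shell-tool-mcp-ci\.yml$|pnpm-(lock|workspace)\.yaml$)' \
      || echo 'no matching paths -> this workflow would not run'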

548
.github/workflows/shell-tool-mcp.yml vendored Normal file
View File

@@ -0,0 +1,548 @@
name: shell-tool-mcp
on:
workflow_call:
inputs:
release-version:
description: Version to publish (x.y.z or x.y.z-alpha.N). Defaults to GITHUB_REF_NAME when it starts with rust-v.
required: false
type: string
release-tag:
description: Tag name to use when downloading release artifacts (defaults to rust-v<version>).
required: false
type: string
publish:
description: Whether to publish to npm when the version is releasable.
required: false
default: true
type: boolean
env:
NODE_VERSION: 22
jobs:
metadata:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.compute.outputs.version }}
release_tag: ${{ steps.compute.outputs.release_tag }}
should_publish: ${{ steps.compute.outputs.should_publish }}
npm_tag: ${{ steps.compute.outputs.npm_tag }}
steps:
- name: Compute version and tags
id: compute
run: |
set -euo pipefail
version="${{ inputs.release-version }}"
release_tag="${{ inputs.release-tag }}"
if [[ -z "$version" ]]; then
if [[ -n "$release_tag" && "$release_tag" =~ ^rust-v.+ ]]; then
version="${release_tag#rust-v}"
elif [[ "${GITHUB_REF_NAME:-}" =~ ^rust-v.+ ]]; then
version="${GITHUB_REF_NAME#rust-v}"
release_tag="${GITHUB_REF_NAME}"
else
echo "release-version is required when GITHUB_REF_NAME is not a rust-v tag."
exit 1
fi
fi
if [[ -z "$release_tag" ]]; then
release_tag="rust-v${version}"
fi
npm_tag=""
should_publish="false"
if [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
should_publish="true"
elif [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
should_publish="true"
npm_tag="alpha"
fi
echo "version=${version}" >> "$GITHUB_OUTPUT"
echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
echo "npm_tag=${npm_tag}" >> "$GITHUB_OUTPUT"
echo "should_publish=${should_publish}" >> "$GITHUB_OUTPUT"
bash-linux:
name: Build Bash (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
container:
image: ${{ matrix.image }}
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: ubuntu-24.04
image: ubuntu:24.04
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: ubuntu-22.04
image: ubuntu:22.04
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: debian-12
image: debian:12
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: debian-11
image: debian:11
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: centos-9
image: quay.io/centos/centos:stream9
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-24.04
image: arm64v8/ubuntu:24.04
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-22.04
image: arm64v8/ubuntu:22.04
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-20.04
image: arm64v8/ubuntu:20.04
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: debian-12
image: arm64v8/debian:12
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: debian-11
image: arm64v8/debian:11
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: centos-9
image: quay.io/centos/centos:stream9
steps:
- name: Install build prerequisites
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get install -y git build-essential bison autoconf gettext libncursesw5-dev
elif command -v dnf >/dev/null 2>&1; then
dnf install -y git gcc gcc-c++ make bison autoconf gettext ncurses-devel
elif command -v yum >/dev/null 2>&1; then
yum install -y git gcc gcc-c++ make bison autoconf gettext ncurses-devel
else
echo "Unsupported package manager in container"
exit 1
fi
- name: Checkout repository
uses: actions/checkout@v6
- name: Build patched Bash
shell: bash
run: |
set -euo pipefail
git clone https://git.savannah.gnu.org/git/bash /tmp/bash
cd /tmp/bash
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
./configure --without-bash-malloc
cores="$(command -v nproc >/dev/null 2>&1 && nproc || getconf _NPROCESSORS_ONLN)"
make -j"${cores}"
dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
mkdir -p "$dest"
cp bash "$dest/bash"
- uses: actions/upload-artifact@v7
with:
name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
path: artifacts/**
if-no-files-found: error
bash-darwin:
name: Build Bash (macOS) - ${{ matrix.variant }} - ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
variant: macos-15
- runner: macos-14
target: aarch64-apple-darwin
variant: macos-14
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Build patched Bash
shell: bash
run: |
set -euo pipefail
git clone https://git.savannah.gnu.org/git/bash /tmp/bash
cd /tmp/bash
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
./configure --without-bash-malloc
cores="$(getconf _NPROCESSORS_ONLN)"
make -j"${cores}"
dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
mkdir -p "$dest"
cp bash "$dest/bash"
- uses: actions/upload-artifact@v7
with:
name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
path: artifacts/**
if-no-files-found: error
zsh-linux:
name: Build zsh (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
container:
image: ${{ matrix.image }}
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: ubuntu-24.04
image: ubuntu:24.04
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: ubuntu-22.04
image: ubuntu:22.04
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: debian-12
image: debian:12
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: debian-11
image: debian:11
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: centos-9
image: quay.io/centos/centos:stream9
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-24.04
image: arm64v8/ubuntu:24.04
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-22.04
image: arm64v8/ubuntu:22.04
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-20.04
image: arm64v8/ubuntu:20.04
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: debian-12
image: arm64v8/debian:12
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: debian-11
image: arm64v8/debian:11
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: centos-9
image: quay.io/centos/centos:stream9
steps:
- name: Install build prerequisites
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get install -y git build-essential bison autoconf gettext libncursesw5-dev
elif command -v dnf >/dev/null 2>&1; then
dnf install -y git gcc gcc-c++ make bison autoconf gettext ncurses-devel
elif command -v yum >/dev/null 2>&1; then
yum install -y git gcc gcc-c++ make bison autoconf gettext ncurses-devel
else
echo "Unsupported package manager in container"
exit 1
fi
- name: Checkout repository
uses: actions/checkout@v6
- name: Build patched zsh
shell: bash
run: |
set -euo pipefail
git clone https://git.code.sf.net/p/zsh/code /tmp/zsh
cd /tmp/zsh
git checkout 77045ef899e53b9598bebc5a41db93a548a40ca6
git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/zsh-exec-wrapper.patch"
./Util/preconfig
./configure
cores="$(command -v nproc >/dev/null 2>&1 && nproc || getconf _NPROCESSORS_ONLN)"
make -j"${cores}"
dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/zsh/${{ matrix.variant }}"
mkdir -p "$dest"
cp Src/zsh "$dest/zsh"
- name: Smoke test zsh exec wrapper
shell: bash
run: |
set -euo pipefail
tmpdir="$(mktemp -d)"
cat > "$tmpdir/exec-wrapper" <<'EOF'
#!/usr/bin/env bash
set -euo pipefail
: "${CODEX_WRAPPER_LOG:?missing CODEX_WRAPPER_LOG}"
printf '%s\n' "$@" > "$CODEX_WRAPPER_LOG"
file="$1"
shift
if [[ "$#" -eq 0 ]]; then
exec "$file"
fi
arg0="$1"
shift
exec -a "$arg0" "$file" "$@"
EOF
chmod +x "$tmpdir/exec-wrapper"
CODEX_WRAPPER_LOG="$tmpdir/wrapper.log" \
EXEC_WRAPPER="$tmpdir/exec-wrapper" \
/tmp/zsh/Src/zsh -fc '/bin/echo smoke-zsh' > "$tmpdir/stdout.txt"
grep -Fx "smoke-zsh" "$tmpdir/stdout.txt"
grep -Fx "/bin/echo" "$tmpdir/wrapper.log"
- uses: actions/upload-artifact@v7
with:
name: shell-tool-mcp-zsh-${{ matrix.target }}-${{ matrix.variant }}
path: artifacts/**
if-no-files-found: error
zsh-darwin:
name: Build zsh (macOS) - ${{ matrix.variant }} - ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
variant: macos-15
- runner: macos-14
target: aarch64-apple-darwin
variant: macos-14
steps:
- name: Install build prerequisites
shell: bash
run: |
set -euo pipefail
if ! command -v autoconf >/dev/null 2>&1; then
brew install autoconf
fi
- name: Checkout repository
uses: actions/checkout@v6
- name: Build patched zsh
shell: bash
run: |
set -euo pipefail
git clone https://git.code.sf.net/p/zsh/code /tmp/zsh
cd /tmp/zsh
git checkout 77045ef899e53b9598bebc5a41db93a548a40ca6
git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/zsh-exec-wrapper.patch"
./Util/preconfig
./configure
cores="$(getconf _NPROCESSORS_ONLN)"
make -j"${cores}"
dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/zsh/${{ matrix.variant }}"
mkdir -p "$dest"
cp Src/zsh "$dest/zsh"
- name: Smoke test zsh exec wrapper
shell: bash
run: |
set -euo pipefail
tmpdir="$(mktemp -d)"
cat > "$tmpdir/exec-wrapper" <<'EOF'
#!/usr/bin/env bash
set -euo pipefail
: "${CODEX_WRAPPER_LOG:?missing CODEX_WRAPPER_LOG}"
printf '%s\n' "$@" > "$CODEX_WRAPPER_LOG"
file="$1"
shift
if [[ "$#" -eq 0 ]]; then
exec "$file"
fi
arg0="$1"
shift
exec -a "$arg0" "$file" "$@"
EOF
chmod +x "$tmpdir/exec-wrapper"
CODEX_WRAPPER_LOG="$tmpdir/wrapper.log" \
EXEC_WRAPPER="$tmpdir/exec-wrapper" \
/tmp/zsh/Src/zsh -fc '/bin/echo smoke-zsh' > "$tmpdir/stdout.txt"
grep -Fx "smoke-zsh" "$tmpdir/stdout.txt"
grep -Fx "/bin/echo" "$tmpdir/wrapper.log"
- uses: actions/upload-artifact@v7
with:
name: shell-tool-mcp-zsh-${{ matrix.target }}-${{ matrix.variant }}
path: artifacts/**
if-no-files-found: error
package:
name: Package npm module
needs:
- metadata
- bash-linux
- bash-darwin
- zsh-linux
- zsh-darwin
runs-on: ubuntu-latest
env:
PACKAGE_VERSION: ${{ needs.metadata.outputs.version }}
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v6
with:
node-version: ${{ env.NODE_VERSION }}
- name: Install JavaScript dependencies
run: pnpm install --frozen-lockfile
- name: Build (shell-tool-mcp)
run: pnpm --filter @openai/codex-shell-tool-mcp run build
- name: Download build artifacts
uses: actions/download-artifact@v8
with:
path: artifacts
- name: Assemble staging directory
id: staging
shell: bash
run: |
set -euo pipefail
staging="${STAGING_DIR}"
mkdir -p "$staging" "$staging/vendor"
cp shell-tool-mcp/README.md "$staging/"
cp shell-tool-mcp/package.json "$staging/"
found_vendor="false"
shopt -s nullglob
for vendor_dir in artifacts/*/vendor; do
rsync -av "$vendor_dir/" "$staging/vendor/"
found_vendor="true"
done
if [[ "$found_vendor" == "false" ]]; then
echo "No vendor payloads were downloaded."
exit 1
fi
node --input-type=module - <<'NODE'
import fs from "node:fs";
import path from "node:path";
const stagingDir = process.env.STAGING_DIR;
const version = process.env.PACKAGE_VERSION;
const pkgPath = path.join(stagingDir, "package.json");
const pkg = JSON.parse(fs.readFileSync(pkgPath, "utf8"));
pkg.version = version;
fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + "\n");
NODE
echo "dir=$staging" >> "$GITHUB_OUTPUT"
env:
STAGING_DIR: ${{ runner.temp }}/shell-tool-mcp
- name: Ensure binaries are executable
run: |
set -euo pipefail
staging="${{ steps.staging.outputs.dir }}"
chmod +x \
"$staging"/vendor/*/bash/*/bash \
"$staging"/vendor/*/zsh/*/zsh
- name: Create npm tarball
shell: bash
run: |
set -euo pipefail
mkdir -p dist/npm
staging="${{ steps.staging.outputs.dir }}"
pack_info=$(cd "$staging" && npm pack --ignore-scripts --json --pack-destination "${GITHUB_WORKSPACE}/dist/npm")
filename=$(PACK_INFO="$pack_info" node -e 'const data = JSON.parse(process.env.PACK_INFO); console.log(data[0].filename);')
mv "dist/npm/${filename}" "dist/npm/codex-shell-tool-mcp-npm-${PACKAGE_VERSION}.tgz"
- uses: actions/upload-artifact@v7
with:
name: codex-shell-tool-mcp-npm
path: dist/npm/codex-shell-tool-mcp-npm-${{ env.PACKAGE_VERSION }}.tgz
if-no-files-found: error
publish:
name: Publish npm package
needs:
- metadata
- package
if: ${{ inputs.publish && needs.metadata.outputs.should_publish == 'true' }}
runs-on: ubuntu-latest
permissions:
id-token: write
contents: read
steps:
- name: Setup Node.js
uses: actions/setup-node@v6
with:
node-version: ${{ env.NODE_VERSION }}
registry-url: https://registry.npmjs.org
scope: "@openai"
# Trusted publishing requires npm CLI version 11.5.1 or later.
- name: Update npm
run: npm install -g npm@latest
- name: Download npm tarball
uses: actions/download-artifact@v8
with:
name: codex-shell-tool-mcp-npm
path: dist/npm
- name: Publish to npm
env:
NPM_TAG: ${{ needs.metadata.outputs.npm_tag }}
VERSION: ${{ needs.metadata.outputs.version }}
shell: bash
run: |
set -euo pipefail
tag_args=()
if [[ -n "${NPM_TAG}" ]]; then
tag_args+=(--tag "${NPM_TAG}")
fi
npm publish "dist/npm/codex-shell-tool-mcp-npm-${VERSION}.tgz" "${tag_args[@]}"
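
The metadata job above encodes a simple publishing gate: a plain x.y.z version publishes to the default npm dist-tag, an x.y.z-alpha.N version publishes under the alpha tag, and anything else is skipped. A minimal Rust sketch of that decision table, using the regex crate (the function name and harness are illustrative, not part of the workflow):

    use regex::Regex;

    /// Some(None): publish to the default tag; Some(Some("alpha")): publish
    /// under the alpha dist-tag; None: the version is not releasable.
    fn classify(version: &str) -> Option<Option<&'static str>> {
        let stable = Regex::new(r"^[0-9]+\.[0-9]+\.[0-9]+$").unwrap();
        let alpha = Regex::new(r"^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$").unwrap();
        if stable.is_match(version) {
            Some(None)
        } else if alpha.is_match(version) {
            Some(Some("alpha"))
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(classify("1.2.3"), Some(None));
        assert_eq!(classify("1.2.3-alpha.4"), Some(Some("alpha")));
        assert_eq!(classify("1.2.3-rc.1"), None);
    }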
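
The zsh smoke tests also pin down the EXEC_WRAPPER protocol used by the patched shells: the wrapper receives the program path first, then the argv to present to it (arg0 included), and must exec the program accordingly. As a hedged illustration only (the workflow itself uses the bash wrapper shown above), an equivalent wrapper in Rust:

    use std::os::unix::process::CommandExt;
    use std::process::Command;

    fn main() {
        // argv[1] is the program to run; argv[2], if present, is its arg0;
        // anything after that becomes its arguments. Unix-only (arg0/exec).
        let mut args = std::env::args().skip(1);
        let file = args.next().expect("wrapper needs the program path");
        let rest: Vec<String> = args.collect();
        let mut cmd = Command::new(&file);
        if let Some((arg0, tail)) = rest.split_first() {
            cmd.arg0(arg0).args(tail);
        }
        // exec() replaces this process and only returns on failure.
        let err = cmd.exec();
        eprintln!("exec {file} failed: {err}");
        std::process::exit(1);
    }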

411
.github/workflows/v8-canary.yml
View File

@@ -1,411 +0,0 @@
name: v8-canary
on:
pull_request:
paths:
- ".bazelrc"
- ".github/actions/setup-bazel-ci/**"
- ".github/scripts/rusty_v8_bazel.py"
- ".github/scripts/rusty_v8_module_bazel.py"
- ".github/workflows/rusty-v8-release.yml"
- ".github/workflows/v8-canary.yml"
- "MODULE.bazel"
- "MODULE.bazel.lock"
- "codex-rs/Cargo.toml"
- "patches/BUILD.bazel"
- "patches/llvm_*.patch"
- "patches/rules_cc_*.patch"
- "patches/v8_*.patch"
- "third_party/v8/**"
push:
branches:
- main
paths:
- ".bazelrc"
- ".github/actions/setup-bazel-ci/**"
- ".github/scripts/rusty_v8_bazel.py"
- ".github/scripts/rusty_v8_module_bazel.py"
- ".github/workflows/rusty-v8-release.yml"
- ".github/workflows/v8-canary.yml"
- "MODULE.bazel"
- "MODULE.bazel.lock"
- "codex-rs/Cargo.toml"
- "patches/BUILD.bazel"
- "patches/llvm_*.patch"
- "patches/rules_cc_*.patch"
- "patches/v8_*.patch"
- "third_party/v8/**"
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}::${{ github.event.pull_request.number > 0 && format('pr-{0}', github.event.pull_request.number) || github.ref_name }}
cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
metadata:
runs-on: ubuntu-latest
outputs:
v8_version: ${{ steps.v8_version.outputs.version }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: "3.12"
- name: Resolve exact v8 crate version
id: v8_version
shell: bash
run: |
set -euo pipefail
version="$(python3 .github/scripts/rusty_v8_bazel.py resolved-v8-crate-version)"
echo "version=${version}" >> "$GITHUB_OUTPUT"
build:
name: Build ${{ matrix.variant }} ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
permissions:
contents: read
actions: read
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-24.04
bazel_config: ci-v8
platform: linux_amd64
sandbox: false
target: x86_64-unknown-linux-gnu
variant: release
- runner: ubuntu-24.04
bazel_config: ci-v8
platform: linux_amd64
sandbox: true
target: x86_64-unknown-linux-gnu
variant: ptrcomp-sandbox
- runner: ubuntu-24.04-arm
bazel_config: ci-v8
platform: linux_arm64
sandbox: false
target: aarch64-unknown-linux-gnu
variant: release
- runner: ubuntu-24.04-arm
bazel_config: ci-v8
platform: linux_arm64
sandbox: true
target: aarch64-unknown-linux-gnu
variant: ptrcomp-sandbox
- runner: macos-15-xlarge
bazel_config: ci-macos
platform: macos_amd64
sandbox: false
target: x86_64-apple-darwin
variant: release
- runner: macos-15-xlarge
bazel_config: ci-macos
platform: macos_amd64
sandbox: true
target: x86_64-apple-darwin
variant: ptrcomp-sandbox
- runner: macos-15-xlarge
bazel_config: ci-macos
platform: macos_arm64
sandbox: false
target: aarch64-apple-darwin
variant: release
- runner: macos-15-xlarge
bazel_config: ci-macos
platform: macos_arm64
sandbox: true
target: aarch64-apple-darwin
variant: ptrcomp-sandbox
- runner: ubuntu-24.04
bazel_config: ci-v8
platform: linux_amd64_musl
sandbox: false
target: x86_64-unknown-linux-musl
variant: release
- runner: ubuntu-24.04
bazel_config: ci-v8
platform: linux_amd64_musl
sandbox: true
target: x86_64-unknown-linux-musl
variant: ptrcomp-sandbox
- runner: ubuntu-24.04-arm
bazel_config: ci-v8
platform: linux_arm64_musl
sandbox: false
target: aarch64-unknown-linux-musl
variant: release
- runner: ubuntu-24.04-arm
bazel_config: ci-v8
platform: linux_arm64_musl
sandbox: true
target: aarch64-unknown-linux-musl
variant: ptrcomp-sandbox
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
persist-credentials: false
- name: Set up Bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ matrix.target }}
- name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: "3.12"
- name: Set up Rust toolchain for Cargo smoke
uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
with:
toolchain: "1.93.0"
- name: Build Bazel V8 release pair
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
PLATFORM: ${{ matrix.platform }}
SANDBOX: ${{ matrix.sandbox }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
target_suffix="${TARGET//-/_}"
pair_kind="release_pair"
if [[ "${SANDBOX}" == "true" ]]; then
pair_kind="sandbox_release_pair"
fi
pair_target="//third_party/v8:rusty_v8_${pair_kind}_${target_suffix}"
bazel_args=(
build
"--platforms=@llvm//platforms:${PLATFORM}"
--config=rusty-v8-upstream-libcxx
"${pair_target}"
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
)
if [[ "${SANDBOX}" != "true" ]]; then
bazel_args+=(--config=v8-release-compat)
fi
bazel \
--noexperimental_remote_repo_contents_cache \
"${bazel_args[@]}" \
"--config=${{ matrix.bazel_config }}" \
"--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
- name: Stage release pair
env:
BAZEL_CONFIG: ${{ matrix.bazel_config }}
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
PLATFORM: ${{ matrix.platform }}
SANDBOX: ${{ matrix.sandbox }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
stage_args=(
--platform "${PLATFORM}"
--target "${TARGET}"
--output-dir "dist/${TARGET}"
--bazel-config "${BAZEL_CONFIG}"
)
if [[ "${SANDBOX}" == "true" ]]; then
stage_args+=(--sandbox)
else
stage_args+=(--bazel-config v8-release-compat)
fi
python3 .github/scripts/rusty_v8_bazel.py stage-release-pair "${stage_args[@]}"
- name: Smoke test staged artifact with Cargo
env:
SANDBOX: ${{ matrix.sandbox }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
host_arch="$(uname -m)"
case "${TARGET}:${host_arch}" in
x86_64-apple-darwin:x86_64|aarch64-apple-darwin:arm64|x86_64-unknown-linux-gnu:x86_64|aarch64-unknown-linux-gnu:aarch64)
;;
*)
echo "Skipping non-native Cargo smoke for ${TARGET} on ${host_arch}."
exit 0
;;
esac
archive="$(find "dist/${TARGET}" -maxdepth 1 -type f -name 'librusty_v8_*.a.gz' -print -quit)"
binding="$(find "dist/${TARGET}" -maxdepth 1 -type f -name 'src_binding_*.rs' -print -quit)"
if [[ -z "${archive}" || -z "${binding}" ]]; then
echo "Missing staged archive or binding for ${TARGET}." >&2
exit 1
fi
cargo_args=(test -p codex-v8-poc)
if [[ "${SANDBOX}" == "true" ]]; then
cargo_args+=(--features sandbox)
fi
(
cd codex-rs
CARGO_TARGET_DIR="${RUNNER_TEMP}/rusty-v8-cargo-smoke-${TARGET}-${SANDBOX}" \
RUSTY_V8_ARCHIVE="${GITHUB_WORKSPACE}/${archive}" \
RUSTY_V8_SRC_BINDING_PATH="${GITHUB_WORKSPACE}/${binding}" \
cargo "${cargo_args[@]}"
)
- name: Upload staged artifacts
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: v8-canary-${{ needs.metadata.outputs.v8_version }}-${{ matrix.variant }}-${{ matrix.target }}
path: dist/${{ matrix.target }}/*
build-windows-source:
name: Build ptrcomp-sandbox ${{ matrix.target }} from source
needs: metadata
runs-on: ${{ matrix.runner }}
permissions:
contents: read
strategy:
fail-fast: false
matrix:
include:
- runner: windows-2022
target: x86_64-pc-windows-msvc
- runner: windows-2022
target: aarch64-pc-windows-msvc
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Configure git for upstream checkout
shell: bash
run: git config --global core.symlinks true
- name: Check out upstream rusty_v8
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
with:
repository: denoland/rusty_v8
ref: v${{ needs.metadata.outputs.v8_version }}
path: upstream-rusty-v8
submodules: recursive
- name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6
with:
python-version: "3.11"
architecture: x64
- name: Set up Codex Rust toolchain for Cargo smoke
uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
with:
toolchain: "1.93.0"
targets: ${{ matrix.target }}
- name: Install rusty_v8 Rust toolchain
env:
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
rustup toolchain install 1.91.0 --profile minimal --no-self-update
rustup target add --toolchain 1.91.0 "${TARGET}"
- name: Write upstream submodule status
shell: bash
working-directory: upstream-rusty-v8
run: git submodule status --recursive > git_submodule_status.txt
- name: Restore upstream source-build cache
uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
with:
path: |
upstream-rusty-v8/target/sccache
upstream-rusty-v8/target/${{ matrix.target }}/release/gn_out
key: rusty-v8-source-${{ matrix.target }}-sandbox-${{ hashFiles('upstream-rusty-v8/Cargo.lock', 'upstream-rusty-v8/build.rs', 'upstream-rusty-v8/git_submodule_status.txt') }}
restore-keys: |
rusty-v8-source-${{ matrix.target }}-sandbox-
- name: Install and start sccache
shell: pwsh
env:
SCCACHE_CACHE_SIZE: 256M
SCCACHE_DIR: ${{ github.workspace }}/upstream-rusty-v8/target/sccache
SCCACHE_IDLE_TIMEOUT: 0
run: |
$version = "v0.8.2"
$platform = "x86_64-pc-windows-msvc"
$basename = "sccache-$version-$platform"
$url = "https://github.com/mozilla/sccache/releases/download/$version/$basename.tar.gz"
cd ~
curl -LO $url
tar -xzvf "$basename.tar.gz"
. $basename/sccache --start-server
echo "$(pwd)/$basename" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install Chromium clang for ARM64 MSVC cross build
if: matrix.target == 'aarch64-pc-windows-msvc'
shell: bash
working-directory: upstream-rusty-v8
run: python3 tools/clang/scripts/update.py
- name: Build upstream rusty_v8 sandbox release pair
env:
SCCACHE_IDLE_TIMEOUT: 0
TARGET: ${{ matrix.target }}
V8_FROM_SOURCE: "1"
shell: bash
working-directory: upstream-rusty-v8
run: cargo +1.91.0 build --locked --release --target "${TARGET}" --features v8_enable_sandbox
- name: Stage upstream sandbox release pair
env:
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
python3 .github/scripts/rusty_v8_bazel.py stage-upstream-release-pair \
--source-root upstream-rusty-v8 \
--target "${TARGET}" \
--output-dir "dist/${TARGET}" \
--sandbox
- name: Smoke link staged artifact with Cargo
env:
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
archive="$(find "dist/${TARGET}" -maxdepth 1 -type f -name 'rusty_v8_*.lib.gz' -print -quit)"
binding="$(find "dist/${TARGET}" -maxdepth 1 -type f -name 'src_binding_*.rs' -print -quit)"
if [[ -z "${archive}" || -z "${binding}" ]]; then
echo "Missing staged archive or binding for ${TARGET}." >&2
exit 1
fi
(
cd codex-rs
RUSTY_V8_ARCHIVE="${GITHUB_WORKSPACE}/${archive}" \
RUSTY_V8_SRC_BINDING_PATH="${GITHUB_WORKSPACE}/${binding}" \
cargo +1.93.0 test -p codex-v8-poc --target "${TARGET}" --features sandbox --no-run
)
- name: Upload staged artifacts
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: v8-canary-${{ needs.metadata.outputs.v8_version }}-ptrcomp-sandbox-${{ matrix.target }}
path: dist/${{ matrix.target }}/*

3
.gitignore vendored
View File

@@ -10,7 +10,6 @@ node_modules
# build
dist/
bazel-*
user.bazelrc
build/
out/
storybook-static/
@@ -52,7 +51,6 @@ yarn-error.log*
# env
.env*
!.env.example
.venv/
# package
*.tgz
@@ -92,3 +90,4 @@ CHANGELOG.ignore.md
# Python bytecode files
__pycache__/
*.pyc

.vscode/extensions.json
View File

@@ -1,7 +1,6 @@
{
"recommendations": [
"rust-lang.rust-analyzer",
"charliermarsh.ruff",
"tamasfe.even-better-toml",
"vadimcn.vscode-lldb",

.vscode/settings.json
View File

@@ -12,14 +12,6 @@
"editor.defaultFormatter": "tamasfe.even-better-toml",
"editor.formatOnSave": true,
},
"[python]": {
"editor.defaultFormatter": "charliermarsh.ruff",
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.fixAll.ruff": "explicit",
"source.organizeImports.ruff": "explicit",
},
},
// Array order for options in ~/.codex/config.toml such as `notify` and the
// `args` for an MCP server is significant, so we disable reordering.
"evenBetterToml.formatter.reorderArrays": false,

AGENTS.md
View File

@@ -15,21 +15,11 @@ In the codex-rs folder where the rust code lives:
- When you cannot make that API change and still need a small positional-literal callsite in Rust, follow the `argument_comment_lint` convention:
- Use an exact `/*param_name*/` comment before opaque literal arguments such as `None`, booleans, and numeric literals when passing them by position.
- Do not add these comments for string or char literals unless the comment adds real clarity; those literals are intentionally exempt from the lint.
- The parameter name in the comment must exactly match the callee signature.
- You can run `just argument-comment-lint` to check the lint locally. It is powered by Bazel, so the first run can be slow if Bazel is not warmed up, though incremental invocations should take <15s. Most of the time it is best to update the PR and let CI take responsibility for this check (or run the lint asynchronously in the background after submitting the PR). Note that CI checks all three platforms, which the local run does not.
- If you add one of these comments, the parameter name must exactly match the callee signature.
- When possible, make `match` statements exhaustive and avoid wildcard arms.
- Newly added traits should include doc comments that explain their role and how implementations are expected to use them.
- Discourage both `#[async_trait]` and `#[allow(async_fn_in_trait)]` in Rust traits.
- Prefer native RPITIT trait methods with explicit `Send` bounds on the returned future, as in `3c7f013f9735` / `#16630`.
- Preferred trait shape:
`fn foo(&self, ...) -> impl std::future::Future<Output = T> + Send;`
- Implementations may still use `async fn foo(&self, ...) -> T` when they satisfy that contract.
- Do not use `#[allow(async_fn_in_trait)]` as a shortcut around spelling the future contract explicitly.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- Do not add general product or user-facing documentation to the `docs/` folder. The official Codex documentation lives elsewhere. The exception is app-server API documentation, which is covered by the app-server guidance below.
- Prefer private modules and explicitly exported public crate API.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
- If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.
- When working with MCP tool calls, prefer using `codex-rs/codex-mcp/src/mcp_connection_manager.rs` to handle mutation of tools and tool calls. Aim to minimize the footprint of changes and leverage existing abstractions rather than plumbing code through multiple levels of function calls.
- If you change Rust dependencies (`Cargo.toml` or `Cargo.lock`), run `just bazel-lock-update` from the
repo root to refresh `MODULE.bazel.lock`, and include that lockfile update in the same change.
- After dependency changes, run `just bazel-lock-check` from the repo root so lockfile drift is caught
@@ -50,9 +40,6 @@ In the codex-rs folder where the rust code lives:
`codex-rs/tui/src/bottom_pane/mod.rs`, and similarly central orchestration modules.
- When extracting code from a large module, move the related tests and module/type docs toward
the new implementation so the invariants stay close to the code that owns them.
- Avoid adding new standalone methods to `codex-rs/tui/src/chatwidget.rs` unless the change is
trivial; prefer new modules/files and keep `chatwidget.rs` focused on orchestration.
- When running Rust commands (e.g. `just fix` or `cargo test`), be patient and never try to kill them by PID. Cargo's build lock can make execution slow; this is expected.
Run `just fmt` (in `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests:
@@ -61,25 +48,14 @@ Run `just fmt` (in `codex-rs` directory) automatically after you have finished m
Before finalizing a large change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace-wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Do not re-run tests after running `fix` or `fmt`.
## The `codex-core` crate
Over time, the `codex-core` crate (defined in `codex-rs/core/`) has become bloated: because it is the largest crate, it is often easier to add something new to `codex-core` than to refactor out the library code you need so that your new code neither takes a dependency on, nor contributes to the size of, `codex-core`.
To that end: **resist adding code to codex-core**!
Particularly when introducing a new concept/feature/API, before adding to `codex-core`, consider whether:
- There is an existing crate other than `codex-core` that is an appropriate place for your new code to live.
- It is time to introduce a new crate to the Cargo workspace for your new functionality. Refactor existing code as necessary to make this happen.
Likewise, when reviewing code, do not hesitate to push back on PRs that would unnecessarily add code to `codex-core`.
## TUI style conventions
See `codex-rs/tui/styles.md`.
## TUI code conventions
- When a change lands in `codex-rs/tui` and `codex-rs/tui_app_server` has a parallel implementation of the same behavior, reflect the change in `codex-rs/tui_app_server` too unless there is a documented reason not to.
- Use concise styling helpers from ratatui's Stylize trait.
- Basic spans: use "text".into()
- Styled spans: use "text".red(), "text".green(), "text".magenta(), "text".dim(), etc.
@@ -130,7 +106,7 @@ When UI or text output changes intentionally, update the snapshots as follows:
If you don't have the tool:
- `cargo install --locked cargo-insta`
- `cargo install cargo-insta`
### Test assertions
@@ -210,7 +186,7 @@ These guidelines apply to app-server protocol work in `codex-rs`, especially:
### Development Workflow
- Update app-server docs/examples when API behavior changes (at minimum `app-server/README.md`).
- Update docs/examples when API behavior changes (at minimum `app-server/README.md`).
- Regenerate schema fixtures when API shapes change:
`just write-app-server-schema`
(and `just write-app-server-schema --experimental` when experimental API fixtures are affected).
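
To make the argument_comment_lint convention above concrete, a small illustrative callsite (the function and values are hypothetical, not from the repo):

    // Hypothetical API used only to illustrate the convention.
    fn send_request(retries: u32, verbose: bool, body: &str) {
        let _ = (retries, verbose, body);
    }

    fn main() {
        // Opaque positional literals get an exact `/*param_name*/` comment;
        // the string literal is intentionally exempt from the lint.
        send_request(/*retries*/ 3, /*verbose*/ false, "ping");
    }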
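
And a compile-only sketch of the preferred RPITIT trait shape described in the AGENTS.md hunk above; the trait and types are hypothetical:

    use std::future::Future;

    /// Hypothetical trait: the returned future carries an explicit `Send`
    /// bound so callers can await it from multi-threaded executors.
    trait KeyValueStore {
        fn get(&self, key: &str) -> impl Future<Output = Option<String>> + Send;
    }

    struct InMemory;

    impl KeyValueStore for InMemory {
        // An `async fn` satisfies the contract because the resulting
        // future is `Send` here.
        async fn get(&self, key: &str) -> Option<String> {
            (key == "hello").then(|| "world".to_string())
        }
    }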

BUILD.bazel
View File

@@ -17,53 +17,12 @@ platform(
platform(
name = "local_windows",
constraint_values = [
# We just need to pick one of the ABIs; use the same one we target.
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
parents = ["@platforms//host"],
)
platform(
name = "local_windows_msvc",
constraint_values = [
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
parents = ["@platforms//host"],
)
platform(
name = "windows_x86_64_gnullvm",
constraint_values = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
)
platform(
name = "windows_x86_64_msvc",
constraint_values = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
)
toolchain(
name = "windows_gnullvm_tests_on_msvc_host_toolchain",
exec_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
target_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
toolchain = "@bazel_tools//tools/test:empty_toolchain",
toolchain_type = "@bazel_tools//tools/test:default_test_toolchain_type",
)
alias(
name = "rbe",
actual = "@rbe_platform",

MODULE.bazel
View File

@@ -1,165 +1,42 @@
module(name = "codex")
bazel_dep(name = "bazel_skylib", version = "1.9.0")
bazel_dep(name = "platforms", version = "1.0.0")
bazel_dep(name = "llvm", version = "0.7.1")
# The upstream LLVM archive contains a few unix-only symlink entries and is
# missing a couple of MinGW compatibility archives that windows-gnullvm needs
# during extraction and linking, so patch it until upstream grows native support.
single_version_override(
module_name = "llvm",
patch_strip = 1,
patches = [
"//patches:llvm_rusty_v8_custom_libcxx.patch",
"//patches:llvm_windows_symlink_extract.patch",
],
)
# Abseil picks a MinGW pthread TLS path that does not match our hermetic
# windows-gnullvm toolchain; force it onto the portable C++11 thread-local path.
single_version_override(
module_name = "abseil-cpp",
patch_strip = 1,
patches = [
"//patches:abseil_windows_gnullvm_thread_identity.patch",
],
)
bazel_dep(name = "llvm", version = "0.6.7")
register_toolchains("@llvm//toolchain:all")
osx = use_extension("@llvm//extensions:osx.bzl", "osx")
osx.from_archive(
sha256 = "1bde70c0b1c2ab89ff454acbebf6741390d7b7eb149ca2a3ca24cc9203a408b7",
strip_prefix = "Payload/Library/Developer/CommandLineTools/SDKs/MacOSX26.4.sdk",
type = "pkg",
urls = [
"https://swcdn.apple.com/content/downloads/32/53/047-96692-A_OAHIHT53YB/ybtshxmrcju8m2qvw3w5elr4rajtg1x3y3/CLTools_macOSNMOS_SDK.pkg",
],
)
osx.frameworks(names = [
"ApplicationServices",
"AppKit",
"ColorSync",
"CoreFoundation",
"CoreGraphics",
"CoreImage",
"CoreMedia",
"CoreMIDI",
"CoreServices",
"CoreText",
"CoreVideo",
"DiskArbitration",
"AudioToolbox",
"AVFoundation",
"AVFAudio",
"AVRouting",
"CFNetwork",
"FontServices",
"AudioUnit",
"CoreAudio",
"CoreAudioTypes",
"Foundation",
"ImageIO",
"IOSurface",
"IOKit",
"Kernel",
"Metal",
"MetalKit",
"OpenGL",
"OSLog",
"QuartzCore",
"ScreenCaptureKit",
"Security",
"SystemConfiguration",
"UniformTypeIdentifiers",
"VideoToolbox",
])
osx.framework(name = "ApplicationServices")
osx.framework(name = "AppKit")
osx.framework(name = "ColorSync")
osx.framework(name = "CoreFoundation")
osx.framework(name = "CoreGraphics")
osx.framework(name = "CoreServices")
osx.framework(name = "CoreText")
osx.framework(name = "AudioToolbox")
osx.framework(name = "CFNetwork")
osx.framework(name = "FontServices")
osx.framework(name = "AudioUnit")
osx.framework(name = "CoreAudio")
osx.framework(name = "CoreAudioTypes")
osx.framework(name = "Foundation")
osx.framework(name = "ImageIO")
osx.framework(name = "IOKit")
osx.framework(name = "Kernel")
osx.framework(name = "OSLog")
osx.framework(name = "Security")
osx.framework(name = "SystemConfiguration")
use_repo(osx, "macos_sdk")
# Needed to disable xcode...
bazel_dep(name = "apple_support", version = "2.1.0")
bazel_dep(name = "rules_cc", version = "0.2.16")
single_version_override(
module_name = "rules_cc",
patch_strip = 1,
patches = [
"//patches:rules_cc_rusty_v8_custom_libcxx.patch",
],
)
bazel_dep(name = "rules_platform", version = "0.1.0")
bazel_dep(name = "rules_rs", version = "0.0.58")
# `rules_rs` still does not model `windows-gnullvm` as a distinct Windows exec
# platform, so patch it until upstream grows that support for both x86_64 and
# aarch64.
single_version_override(
module_name = "rules_rs",
patch_strip = 1,
patches = [
"//patches:rules_rs_windows_gnullvm_exec.patch",
"//patches:rules_rs_windows_exec_linker.patch",
],
version = "0.0.58",
)
bazel_dep(name = "rules_rs", version = "0.0.43")
rules_rust = use_extension("@rules_rs//rs/experimental:rules_rust.bzl", "rules_rust")
# Build-script probe binaries inherit CFLAGS/CXXFLAGS from Bazel's C++
# toolchain. On `windows-gnullvm`, llvm-mingw does not ship
# `libssp_nonshared`, so strip the forwarded stack-protector flags there.
rules_rust.patch(
patches = [
"//patches:rules_rust_windows_gnullvm_build_script.patch",
"//patches:rules_rust_windows_exec_msvc_build_script_env.patch",
"//patches:rules_rust_windows_bootstrap_process_wrapper_linker.patch",
"//patches:rules_rust_windows_build_script_runner_paths.patch",
"//patches:rules_rust_windows_msvc_direct_link_args.patch",
"//patches:rules_rust_windows_process_wrapper_skip_temp_outputs.patch",
"//patches:rules_rust_windows_exec_bin_target.patch",
"//patches:rules_rust_windows_exec_std.patch",
"//patches:rules_rust_windows_exec_rustc_dev_rlib.patch",
],
strip = 1,
)
use_repo(rules_rust, "rules_rust")
nightly_rust = use_extension(
"@rules_rs//rs/experimental:rules_rust_reexported_extensions.bzl",
"rust",
)
nightly_rust.toolchain(
versions = ["nightly/2025-09-18"],
dev_components = True,
edition = "2024",
)
# Keep Windows exec tools on MSVC so Bazel helper binaries link correctly, but
# lint crate targets as `windows-gnullvm` to preserve the repo's actual cfgs.
nightly_rust.repository_set(
name = "rust_windows_x86_64",
dev_components = True,
edition = "2024",
exec_triple = "x86_64-pc-windows-msvc",
exec_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
target_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
target_triple = "x86_64-pc-windows-msvc",
versions = ["nightly/2025-09-18"],
)
nightly_rust.repository_set(
name = "rust_windows_x86_64",
target_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
target_triple = "x86_64-pc-windows-gnullvm",
)
use_repo(nightly_rust, "rust_toolchains")
toolchains = use_extension("@rules_rs//rs/experimental/toolchains:module_extension.bzl", "toolchains")
toolchains.toolchain(
edition = "2024",
@@ -168,7 +45,6 @@ toolchains.toolchain(
use_repo(toolchains, "default_rust_toolchains")
register_toolchains("@default_rust_toolchains//:all")
register_toolchains("@rust_toolchains//:all")
crate = use_extension("@rules_rs//rs:extensions.bzl", "crate")
crate.from_cargo(
@@ -178,33 +54,10 @@ crate.from_cargo(
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"aarch64-apple-darwin",
# Keep both Windows ABIs in the generated Cargo metadata: the V8
# experiment still consumes release assets that only exist under the
# MSVC names while targeting the GNU toolchain.
"aarch64-pc-windows-msvc",
"aarch64-pc-windows-gnullvm",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-apple-darwin",
"x86_64-pc-windows-msvc",
"x86_64-pc-windows-gnullvm",
],
use_experimental_platforms = True,
)
crate.from_cargo(
name = "argument_comment_lint_crates",
cargo_lock = "//tools/argument-comment-lint:Cargo.lock",
cargo_toml = "//tools/argument-comment-lint:Cargo.toml",
platform_triples = [
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"aarch64-apple-darwin",
"aarch64-pc-windows-msvc",
"aarch64-pc-windows-gnullvm",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-apple-darwin",
"x86_64-pc-windows-msvc",
"x86_64-pc-windows-gnullvm",
],
use_experimental_platforms = True,
@@ -214,18 +67,8 @@ bazel_dep(name = "zstd", version = "1.5.7")
crate.annotation(
crate = "zstd-sys",
gen_build_script = "on",
patch_args = ["-p1"],
patches = [
"//patches:zstd-sys_windows_msvc_include_dirs.patch",
],
)
crate.annotation(
crate = "ring",
patch_args = ["-p1"],
patches = [
"//patches:ring_windows_msvc_include_dirs.patch",
],
gen_build_script = "off",
deps = ["@zstd"],
)
crate.annotation(
build_script_env = {
@@ -235,28 +78,12 @@ crate.annotation(
patch_args = ["-p1"],
patches = [
"//patches:aws-lc-sys_memcmp_check.patch",
"//patches:aws-lc-sys_windows_msvc_prebuilt_nasm.patch",
"//patches:aws-lc-sys_windows_msvc_memcmp_probe.patch",
],
)
crate.annotation(
# The build script only validates embedded source/version metadata.
crate = "rustc_apfloat",
gen_build_script = "off",
)
inject_repo(crate, "zstd")
use_repo(crate, "argument_comment_lint_crates")
bazel_dep(name = "bzip2", version = "1.0.8.bcr.3")
single_version_override(
module_name = "bzip2",
patch_strip = 1,
patches = [
"//patches:bzip2_windows_stack_args.patch",
],
)
crate.annotation(
crate = "bzip2-sys",
@@ -270,30 +97,20 @@ bazel_dep(name = "zlib", version = "1.3.1.bcr.8")
crate.annotation(
crate = "libz-sys",
gen_build_script = "on",
gen_build_script = "off",
deps = ["@zlib"],
)
inject_repo(crate, "zlib")
bazel_dep(name = "xz", version = "5.4.5.bcr.8")
single_version_override(
module_name = "xz",
patch_strip = 1,
patches = [
"//patches:xz_windows_stack_args.patch",
],
)
# TODO(zbarsky): Enable annotation after fixing windows arm64 builds.
crate.annotation(
crate = "lzma-sys",
gen_build_script = "off",
deps = ["@xz//:lzma"],
gen_build_script = "on",
)
bazel_dep(name = "openssl", version = "3.5.4.bcr.0")
inject_repo(crate, "xz")
crate.annotation(
build_script_data = [
"@openssl//:gen_dir",
@@ -315,47 +132,6 @@ crate.annotation(
workspace_cargo_toml = "rust/runfiles/Cargo.toml",
)
http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_file = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "new_local_repository")
new_local_repository(
name = "v8_targets",
build_file = "//third_party/v8:BUILD.bazel",
path = "third_party/v8",
)
crate.annotation(
build_script_data = [
"@v8_targets//:rusty_v8_archive_for_target",
"@v8_targets//:rusty_v8_binding_for_target",
],
build_script_env = {
"RUSTY_V8_ARCHIVE": "$(execpath @v8_targets//:rusty_v8_archive_for_target)",
"RUSTY_V8_SRC_BINDING_PATH": "$(execpath @v8_targets//:rusty_v8_binding_for_target)",
},
crate = "v8",
# Keep the Rust feature aligned with the source-built Bazel artifacts.
# Windows MSVC still consumes upstream non-sandboxed prebuilts.
crate_features_select = {
"aarch64-apple-darwin": ["v8_enable_sandbox"],
"aarch64-pc-windows-gnullvm": ["v8_enable_sandbox"],
"aarch64-unknown-linux-gnu": ["v8_enable_sandbox"],
"aarch64-unknown-linux-musl": ["v8_enable_sandbox"],
"x86_64-apple-darwin": ["v8_enable_sandbox"],
"x86_64-pc-windows-gnullvm": ["v8_enable_sandbox"],
"x86_64-unknown-linux-gnu": ["v8_enable_sandbox"],
"x86_64-unknown-linux-musl": ["v8_enable_sandbox"],
},
gen_build_script = "on",
patch_args = ["-p1"],
patches = [
"//patches:rusty_v8_prebuilt_out_dir.patch",
],
)
inject_repo(crate, "v8_targets")
llvm = use_extension("@llvm//extensions:llvm.bzl", "llvm")
use_repo(llvm, "llvm-project")
@@ -379,23 +155,6 @@ crate.annotation(
inject_repo(crate, "llvm", "llvm-project", "macos_sdk")
crate.annotation(
# Provide the hermetic SDK path so the build script doesn't try to invoke an unavailable `xcrun --show-sdk-path`.
build_script_data = [
"@macos_sdk//sysroot",
],
build_script_env = {
"WEBRTC_SYS_DARWIN_SDK_PATH": "$(location @macos_sdk//sysroot)",
"WEBRTC_SYS_LINK_OUT_DIR": "1",
},
crate = "webrtc-sys",
gen_build_script = "on",
patch_args = ["-p1"],
patches = [
"//patches:webrtc-sys_hermetic_darwin_sysroot.patch",
],
)
# Fix readme inclusions
crate.annotation(
crate = "windows-link",
@@ -415,79 +174,6 @@ crate.annotation(
inject_repo(crate, "alsa_lib")
bazel_dep(name = "v8", version = "14.7.173.20")
archive_override(
module_name = "v8",
integrity = "sha256-v/x6I4X38a2wckzUIft3Dh0SUdkuOTokwxyF7lzW8Lc=",
patch_strip = 3,
patches = [
"//patches:v8_module_deps.patch",
"//patches:v8_bazel_rules.patch",
"//patches:v8_source_portability.patch",
],
strip_prefix = "v8-14.7.173.20",
urls = ["https://github.com/v8/v8/archive/refs/tags/14.7.173.20.tar.gz"],
)
http_archive(
name = "v8_crate_146_4_0",
build_file = "//third_party/v8:v8_crate.BUILD.bazel",
sha256 = "d97bcac5cdc5a195a4813f1855a6bc658f240452aac36caa12fd6c6f16026ab1",
strip_prefix = "v8-146.4.0",
type = "tar.gz",
urls = ["https://static.crates.io/crates/v8/v8-146.4.0.crate"],
)
http_archive(
name = "v8_crate_147_4_0",
build_file = "//third_party/v8:v8_crate.BUILD.bazel",
sha256 = "2df8fffd507fb18ed000673a83d937f58e60fb07f3306b2274284125b15137cd",
strip_prefix = "v8-147.4.0",
type = "tar.gz",
urls = ["https://static.crates.io/crates/v8/v8-147.4.0.crate"],
)
git_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
git_repository(
name = "rusty_v8_libcxx",
build_file = "//third_party/v8:libcxx.BUILD.bazel",
commit = "7ab65651aed6802d2599dcb7a73b1f82d5179d05",
remote = "https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx.git",
)
git_repository(
name = "rusty_v8_libcxxabi",
build_file = "//third_party/v8:libcxxabi.BUILD.bazel",
commit = "8f11bb1d4438d0239d0dfc1bd9456a9f31629dda",
remote = "https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git",
)
git_repository(
name = "rusty_v8_llvm_libc",
build_file = "//third_party/v8:llvm_libc.BUILD.bazel",
commit = "b3aa5bb702ff9e890179fd1e7d3ba346e17ecf8e",
remote = "https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libc.git",
)
http_file(
name = "rusty_v8_147_4_0_aarch64_pc_windows_msvc_archive",
downloaded_file_path = "rusty_v8_release_aarch64-pc-windows-msvc.lib.gz",
sha256 = "1fa3f94d9e09cff1f6bcce94c478e5cb072c0755f6a0357abadb9dd3b48d8127",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v147.4.0/rusty_v8_release_aarch64-pc-windows-msvc.lib.gz",
],
)
http_file(
name = "rusty_v8_147_4_0_x86_64_pc_windows_msvc_archive",
downloaded_file_path = "rusty_v8_release_x86_64-pc-windows-msvc.lib.gz",
sha256 = "e2827ff98b1a9d4c0343000fc5124ac30dfab3007bc0129c168c9355fc2fcd7c",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v147.4.0/rusty_v8_release_x86_64-pc-windows-msvc.lib.gz",
],
)
use_repo(crate, "crates")
bazel_dep(name = "libcap", version = "2.27.bcr.1")

413
MODULE.bazel.lock generated

File diff suppressed because one or more lines are too long

3
NOTICE
View File

@@ -4,3 +4,6 @@ Copyright 2025 OpenAI
This project includes code derived from [Ratatui](https://github.com/ratatui/ratatui), licensed under the MIT license.
Copyright (c) 2016-2022 Florian Dehau
Copyright (c) 2023-2025 The Ratatui Developers
This project includes Meriyah parser assets from [meriyah](https://github.com/meriyah/meriyah), licensed under the ISC license.
Copyright (c) 2019 and later, KFlash and others.

README.md
View File

@@ -46,7 +46,7 @@ Each archive contains a single entry with the platform baked into the name (e.g.
### Using Codex with your ChatGPT plan
Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Business, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).
Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).
You can also use Codex with an API key, but this requires [additional setup](https://developers.openai.com/codex/auth#sign-in-with-an-api-key).

View File

@@ -11,7 +11,3 @@ Our security program is managed through Bugcrowd, and we ask that any validated
## Vulnerability Disclosure Program
Our Vulnerability Program Guidelines are defined on our [Bugcrowd program page](https://bugcrowd.com/engagements/openai).
## How to operate CODEX safely
For details on Codex security boundaries, including sandboxing, approvals, and network controls, see [Agent approvals & security](https://developers.openai.com/codex/agent-approvals-security).

View File

@@ -4,14 +4,20 @@
# version_regex matches against the CLI version (env!("CARGO_PKG_VERSION")); omit to apply to all versions.
# target_app specifies which app should display the announcement (cli, vsce, ...).
# Test announcement only for local build versions until 2027-05-10 (exclusive)
[[announcements]]
content = "Welcome to Codex! Check out the new onboarding flow."
from_date = "2024-10-01"
to_date = "2024-10-15"
target_app = "cli"
# Test announcement only for local build versions until 2026-01-10 (exclusive; already past)
[[announcements]]
content = "This is a test announcement"
version_regex = "^0\\.0\\.0$"
to_date = "2027-05-10"
to_date = "2026-05-10"
[[announcements]]
content = "Update Required - This version will no longer be supported starting May 8th. Please upgrade to the latest version (https://github.com/openai/codex/releases/latest) using your preferred package manager."
# Matches 0.x.y versions from 0.0.y through 0.119.y; excludes 0.120.0 and newer.
version_regex = "^0\\.(?:[0-9]|[1-9][0-9]|1[01][0-9])\\."
to_date = "2026-05-08"
content = "**BREAKING NEWS**: `gpt-5.3-codex` is out! Upgrade to `0.98.0` for a faster, smarter, more steerable agent."
from_date = "2026-02-01"
to_date = "2026-02-16"
version_regex = "^0\\.(?:[0-9]|[1-8][0-9]|9[0-7])\\."
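
Since version_regex is matched against the CLI's env!("CARGO_PKG_VERSION"), a gate like the "Update Required" one above can be sanity-checked in isolation. A sketch using the regex crate (the harness is illustrative; the pattern is the one from the announcement):

    use regex::Regex;

    fn main() {
        // Matches 0.0.x through 0.119.x; rejects 0.120.0 and newer.
        let gate = Regex::new(r"^0\.(?:[0-9]|[1-9][0-9]|1[01][0-9])\.").unwrap();
        for v in ["0.9.1", "0.119.0", "0.120.0", "1.0.0"] {
            println!("{v}: {}", gate.is_match(v));
        }
        // Prints: 0.9.1: true, 0.119.0: true, 0.120.0: false, 1.0.0: false
    }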

1
codex-cli/.dockerignore Normal file
View File

@@ -0,0 +1 @@
node_modules/

59
codex-cli/Dockerfile Normal file
View File

@@ -0,0 +1,59 @@
FROM node:24-slim
ARG TZ
ENV TZ="$TZ"
# Install basic development tools, ca-certificates, and iptables/ipset, then clean up apt cache to reduce image size
RUN apt-get update && apt-get install -y --no-install-recommends \
aggregate \
ca-certificates \
curl \
dnsutils \
fzf \
gh \
git \
gnupg2 \
iproute2 \
ipset \
iptables \
jq \
less \
man-db \
procps \
unzip \
ripgrep \
zsh \
&& rm -rf /var/lib/apt/lists/*
# Ensure default node user has access to /usr/local/share
RUN mkdir -p /usr/local/share/npm-global && \
chown -R node:node /usr/local/share
ARG USERNAME=node
# Set up non-root user
USER node
# Install global packages
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
ENV PATH=$PATH:/usr/local/share/npm-global/bin
# Install codex
COPY dist/codex.tgz codex.tgz
RUN npm install -g codex.tgz \
&& npm cache clean --force \
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/node_modules/.cache \
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/tests \
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/docs
# Inside the container we consider the environment already sufficiently locked
# down, so we instruct the Codex CLI to allow running without sandboxing.
ENV CODEX_UNSAFE_ALLOW_NO_SANDBOX=1
# Copy and set up firewall script as root.
USER root
COPY scripts/init_firewall.sh /usr/local/bin/
RUN chmod 500 /usr/local/bin/init_firewall.sh
# Drop back to non-root.
USER node

Some files were not shown because too many files have changed in this diff.