Compare commits

..

270 Commits

Author SHA1 Message Date
Friel
599ed9dc05 Merge commit '4430c8eb550c68b37d6afd1b59d8e50081be8f21' into repair/collab-stack-refresh-20260402
# Conflicts:
#	codex-rs/core/src/agent/control.rs
#	codex-rs/core/src/agent/control_tests.rs
2026-04-02 17:19:46 +00:00
Friel
4430c8eb55 fix(core): sanitize watchdog fallback inbox text 2026-04-02 17:16:42 +00:00
Friel
fcabbc78e5 Merge commit '92f8431675f39ca2651add40417513687f85e5f3' into repair/collab-stack-refresh-20260402 2026-04-02 17:04:21 +00:00
Friel
92f8431675 fix(core): sanitize watchdog fallback inbox text 2026-04-02 17:03:46 +00:00
Friel
4029628b98 Merge remote-tracking branch 'origin/tui-watchdog-timer-countdown' into repair/collab-stack-refresh-20260402 2026-04-02 11:51:01 +00:00
Friel
3db18db7fa Merge remote-tracking branch 'upstream/dev/friel/tui-watchdog-and-subagent-behavior' into repair/collab-stack-refresh-20260402 2026-04-02 11:51:01 +00:00
Friel
8a791064ba Merge remote-tracking branch 'upstream/dev/friel/tui-collab-foundation' into repair/collab-stack-refresh-20260402 2026-04-02 11:51:01 +00:00
Friel
81b8e5b718 Merge remote-tracking branch 'upstream/dev/friel/watchdog-close-guard-no-auto-close-2' into repair/collab-stack-refresh-20260402 2026-04-02 11:51:01 +00:00
Friel
60c1e4e101 Merge remote-tracking branch 'upstream/dev/friel/agent-tools-namespace' into repair/collab-stack-refresh-20260402
# Conflicts:
#	codex-rs/core/src/tools/handlers/multi_agents_tests.rs
#	codex-rs/core/src/tools/spec_tests.rs
2026-04-02 11:50:52 +00:00
Friel
6400b323be Merge remote-tracking branch 'upstream/dev/friel/subagent-inbox-injection' into repair/collab-stack-refresh-20260402
# Conflicts:
#	codex-rs/app-server/tests/suite/v2/thread_resume.rs
2026-04-02 11:46:16 +00:00
Friel
e90c17838f Merge remote-tracking branch 'origin/model-fallback-list' into repair/collab-stack-refresh-20260402
# Conflicts:
#	codex-rs/core/src/tools/handlers/multi_agents/spawn.rs
#	codex-rs/core/src/tools/handlers/multi_agents_tests.rs
#	codex-rs/core/src/tools/handlers/multi_agents_v2/spawn.rs
#	codex-rs/tools/src/agent_tool.rs
#	codex-rs/tools/src/agent_tool_tests.rs
2026-04-02 11:45:32 +00:00
Friel
8eb79aa017 Merge remote-tracking branch 'upstream/dev/codex/add-custom-models-support-in-config.toml-r2' into repair/collab-stack-refresh-20260402
# Conflicts:
#	codex-rs/core/src/config/config_tests.rs
#	codex-rs/core/src/thread_manager.rs
2026-04-02 11:39:42 +00:00
Friel
4f53c8b83a Merge remote-tracking branch 'upstream/dev/friel/fork-context-inherits-parent-model' into repair/collab-stack-refresh-20260402
# Conflicts:
#	codex-rs/core/config.schema.json
#	codex-rs/core/src/agent/role.rs
#	codex-rs/core/src/tools/handlers/multi_agents/spawn.rs
#	codex-rs/core/src/tools/handlers/multi_agents_tests.rs
#	codex-rs/core/src/tools/handlers/multi_agents_v2/spawn.rs
#	codex-rs/core/tests/suite/subagent_notifications.rs
#	codex-rs/tools/src/agent_tool_tests.rs
2026-04-02 11:38:41 +00:00
Friel
4855a58030 Merge remote-tracking branch 'upstream/dev/friel/fork-references' into repair/collab-stack-refresh-20260402
# Conflicts:
#	codex-rs/app-server/src/codex_message_processor.rs
#	codex-rs/core/src/agent/control.rs
#	codex-rs/core/src/codex/rollout_reconstruction.rs
#	codex-rs/core/src/codex/rollout_reconstruction_tests.rs
#	codex-rs/core/src/models_manager/manager.rs
#	codex-rs/core/src/thread_manager.rs
#	codex-rs/core/src/thread_manager_tests.rs
#	codex-rs/core/src/thread_rollout_truncation.rs
#	codex-rs/core/src/tools/spec.rs
#	codex-rs/core/src/tools/spec_tests.rs
#	codex-rs/core/tests/suite/fork_thread.rs
#	codex-rs/rollout/src/recorder.rs
2026-04-02 11:35:13 +00:00
Friel
74b55b30f7 Merge remote-tracking branch 'upstream/dev/codex/add-fork-option-to-codex-exec' into repair/collab-stack-refresh-20260402 2026-04-02 11:25:00 +00:00
Friel
decf78bc1e Merge remote-tracking branch 'upstream/friel/auto-unarchive-resume' into repair/collab-stack-refresh-20260402
# Conflicts:
#	codex-rs/core/src/tools/handlers/multi_agents_v2/spawn.rs
#	codex-rs/core/src/tools/spec.rs
#	codex-rs/core/src/tools/spec_tests.rs
#	codex-rs/tools/src/agent_tool.rs
#	codex-rs/tools/src/agent_tool_tests.rs
#	codex-rs/tools/src/lib.rs
#	codex-rs/tools/src/tool_discovery.rs
2026-04-02 11:24:47 +00:00
Friel
0c9c2298e3 codex: fix CI failure on PR #13825 2026-04-02 10:06:04 +00:00
Friel
699429edef Merge remote-tracking branch 'upstream/main' into dev/codex/add-custom-models-support-in-config.toml-r2
# Conflicts:
#	codex-rs/core/src/config/schema.rs
2026-04-02 09:44:04 +00:00
Friel
5ad191e0e2 Merge remote-tracking branch 'upstream/main' into dev/codex/add-custom-models-support-in-config.toml-r2
# Conflicts:
#	codex-rs/core/src/config/config_tests.rs
#	codex-rs/core/src/models_manager/manager.rs
#	codex-rs/core/src/thread_manager.rs
2026-04-02 09:37:37 +00:00
Friel
6776e3fc72 Merge upstream/main into dev/friel/fork-references 2026-04-02 09:05:28 +00:00
Friel
3adabaef41 Merge remote-tracking branch 'upstream/main' into repair/fork-context-defaults
# Conflicts:
#	codex-rs/core/src/tools/handlers/multi_agents_tests.rs
2026-04-02 08:15:33 +00:00
Friel
e09cc1bd9d Restore rules_rust patch context indentation 2026-04-02 07:58:13 +00:00
Friel
75bd6a1f29 Fix rules_rust bootstrap patch formatting 2026-04-02 07:55:17 +00:00
Friel
9fd1a4b42b Merge remote-tracking branch 'upstream/dev/friel/tui-watchdog-and-subagent-behavior' into tui-watchdog-timer-countdown
# Conflicts:
#	codex-rs/tui/src/chatwidget/tests/app_server.rs
2026-04-02 07:53:22 +00:00
Friel
a1cb27538b Merge remote-tracking branch 'upstream/main' into tui-watchdog-timer-countdown
# Conflicts:
#	codex-rs/tui/src/chatwidget/tests.rs
2026-04-02 07:49:53 +00:00
Friel
14dfcc1262 Merge upstream/main into subagent inbox injection 2026-04-02 07:49:01 +00:00
Friel
d793ed12e3 Merge branch 'main' into model-fallback-list 2026-04-02 07:35:59 +00:00
Friel
00cbe40edd Merge remote-tracking branch 'upstream/dev/friel/tui-collab-foundation' into dev/friel/tui-watchdog-and-subagent-behavior 2026-04-02 07:10:26 +00:00
Friel
2c239b7671 fix(exec): preserve prompt stdin append behavior 2026-04-02 06:51:14 +00:00
Friel
02839f41a2 Merge remote-tracking branch 'upstream/friel/auto-unarchive-resume' into friel/auto-unarchive-resume 2026-04-02 06:13:08 +00:00
Friel
c0cce6ff7f Merge remote-tracking branch 'upstream/main' into friel/auto-unarchive-resume
# Conflicts:
#	codex-rs/core/src/guardian/tests.rs
#	codex-rs/tui/src/chatwidget/tests.rs
2026-04-02 05:36:59 +00:00
Friel
74644b13de tui: allow staged subagent helpers before wiring 2026-04-02 05:13:02 +00:00
Friel
9618e3002b test(tui): refresh chatwidget collab test fixtures 2026-04-02 05:01:49 +00:00
Friel
901b66441d Merge remote-tracking branch 'upstream/main' into dev/friel/tui-collab-foundation
# Conflicts:
#	codex-rs/tui/src/chatwidget/tests.rs
2026-04-02 04:35:59 +00:00
Friel
2880de1903 test(app-server): expect fork-context spawn to inherit parent model 2026-04-02 04:17:36 +00:00
Friel
6202d9f055 codex: fix CI failure on PR #16197 2026-04-02 04:12:00 +00:00
Friel
42659f250e Merge upstream/main 2026-04-02 03:58:34 +00:00
Friel
81047dd52a multi_agents: add model fallback list spawn retries 2026-04-02 03:43:39 +00:00
Friel
2f6068c279 test(core): stabilize spawn_agent fork_context fixtures 2026-04-02 03:28:35 +00:00
Friel
a364d0358c test(core,exec): align fork replay and resume expectations 2026-04-02 02:33:50 +00:00
Friel
f255b673a7 fix(core): align fork spawn snapshot tests 2026-04-02 01:28:36 +00:00
Friel
5dbd418beb fix(core): keep truncated forks materialized 2026-04-02 01:11:19 +00:00
Friel
73c4e6d255 fix(core): restore parent fork spawn config 2026-04-02 00:57:14 +00:00
Friel
54e51e3577 style(core): format fork control tests 2026-04-02 00:22:15 +00:00
Friel
868e960805 test(core): annotate watchdog helper session source 2026-04-02 00:19:58 +00:00
Friel
63882cfd48 fix(core): preserve unresolved fork references 2026-04-02 00:14:35 +00:00
Friel
bbb4fdd0a6 Fix fork reference truncation test semantics 2026-04-02 00:14:06 +00:00
Friel
984961958f test(core): annotate fork-thread positional literals 2026-04-02 00:12:26 +00:00
Friel
13bd72d9ff fix(core): adapt fork references to refreshed main
Disable stale inline fork-reference test modules on the refreshed core APIs and keep the rollout re-export surface aligned with the split codex-rollout crate.

Co-authored-by: Codex <noreply@openai.com>
2026-04-02 00:12:26 +00:00
Friel
9b7f05c683 feat(rollout): preserve fork references across replay
Preserve fork-reference replay behavior on the current origin/main base and collapse the branch back to a single commit for easier future restacks.
2026-04-02 00:12:26 +00:00
Friel
fc08138cf0 test(core): cover TOML fork_context override 2026-04-01 23:46:01 +00:00
Friel
c8adee15b7 test(core): assert list_agents remains top-level 2026-04-01 23:43:33 +00:00
Friel
2db7fc1034 test(core): cover fork_context defaults for discovered roles 2026-04-01 23:18:43 +00:00
Friel
4ea10fb41d test(core): assert collab tools remain top-level 2026-04-01 22:38:29 +00:00
Friel
129882ef61 test(core): fix v2 fork_turn payload 2026-04-01 21:39:47 +00:00
Friel
4b1cdb044f test(core): keep notification test non-forked by default 2026-04-01 21:39:47 +00:00
Friel
458099bf78 test(core): fix subagent notification validator 2026-04-01 21:39:47 +00:00
Friel
b9c17a0a68 fix(core): preserve effective effort for fork-context children 2026-04-01 21:39:47 +00:00
Friel
5a89bb1c4e test: refresh fork-context expectations 2026-04-01 21:39:46 +00:00
Friel
4186f55515 feat(core): add role-level fork_context defaults 2026-04-01 21:39:46 +00:00
Friel
920edd3658 test(core): fix multi-agent v2 fork-context regression
Co-authored-by: Codex <noreply@openai.com>
2026-04-01 21:39:46 +00:00
Friel
a1f1122630 feat(core): force forked agents to inherit parent model
Co-authored-by: Codex <noreply@openai.com>
2026-04-01 21:39:46 +00:00
Friel
8a84cb6e87 Merge commit 'b122ecfc6b6dd9784300ebde39fe0b9edc22dd7f' into dev/friel/collab-stack
# Conflicts:
#	codex-rs/core/src/tools/handlers/multi_agents/spawn.rs
#	codex-rs/core/src/tools/spec.rs
#	codex-rs/core/src/tools/spec_tests.rs
#	codex-rs/core/subagent_watchdog_prompt.md
#	codex-rs/core/watchdog_agent_prompt.md
#	codex-rs/tui/src/chatwidget/tests.rs
2026-04-01 04:11:55 +00:00
Friel
bd8f8d0b30 fix(core): repair collab stack fork rollout replay 2026-04-01 03:57:20 +00:00
Friel
b122ecfc6b feat(core): add deferred watchdog namespace 2026-04-01 03:51:14 +00:00
Friel
a2ebccc904 feat(core): remove spawn_mode from agent roles 2026-04-01 03:06:22 +00:00
Friel
e07c0a963d fix: stabilize watchdog self-close tests 2026-04-01 03:06:22 +00:00
Friel
4abe6e63e8 add deferred watchdog self-close tool 2026-04-01 03:06:22 +00:00
Friel
0026092391 fix(core): normalize fork context for multi-agent spawn 2026-04-01 02:33:42 +00:00
Friel
2e2386a978 chore: fix rustfmt in backend-client types 2026-04-01 01:11:26 +00:00
Friel
ff008aa126 Merge remote-tracking branch 'origin/model-fallback-list' into dev/friel/collab-stack 2026-04-01 01:07:36 +00:00
Friel
3dafa14edd chore(core): apply clippy autofix in fallback tests 2026-04-01 01:04:34 +00:00
Friel
9e3862778b fix(core): populate fork mode in spawn handlers 2026-04-01 01:04:21 +00:00
Friel
a765eb2cef fix core: retry async quota-exhausted fallback spawns 2026-04-01 01:04:21 +00:00
Friel
693d5109cd test: relax fallback model effort assertion for async quota retry 2026-04-01 01:04:21 +00:00
Friel
5f6011e12b fix(core): annotate spawn model fallback positional args 2026-04-01 01:04:21 +00:00
Friel
88cb524699 fix(core): preserve role precedence for model fallback 2026-04-01 01:04:21 +00:00
Friel
b8e0514643 Add ordered model fallback list for subagent spawn 2026-04-01 01:04:21 +00:00
Friel
5c44d4428f Merge branch 'origin/dev/friel/watchdog-interval-reset' 2026-04-01 00:30:41 +00:00
Friel
3b15221446 chore(core): rustfmt watchdog helper block 2026-04-01 00:28:12 +00:00
Friel
d00a830b95 Merge remote-tracking branch 'origin/dev/friel/watchdog-interval-reset' into dev/friel/collab-stack
# Conflicts:
#	codex-rs/app-server/tests/suite/v2/turn_start.rs
#	codex-rs/core/tests/suite/subagent_notifications.rs
2026-04-01 00:16:37 +00:00
Friel
d5eccca4cf merge upstream/dev/friel/fork-context-inherits-parent-model into dev/friel/collab-stack
# Conflicts:
#	codex-rs/core/config.schema.json
#	codex-rs/core/src/agent/role.rs
#	codex-rs/core/src/tools/handlers/multi_agents/spawn.rs
#	codex-rs/core/src/tools/handlers/multi_agents_tests.rs
#	codex-rs/core/src/tools/handlers/multi_agents_v2/spawn.rs
2026-04-01 00:12:26 +00:00
Friel
723881530b ci: fix taiki-e install-action input 2026-04-01 00:10:56 +00:00
Friel
a4c2eaeaf1 Fix watchdog owner input reset 2026-04-01 00:07:24 +00:00
Friel
3379d411c4 test(core): fix v2 fork_turn payload 2026-04-01 00:07:05 +00:00
Friel
7f5944180d test(core): handle namespaced spawn_agent description 2026-04-01 00:04:25 +00:00
Friel
225667a03f test(core): align watchdog prompt-layering expectations 2026-04-01 00:04:25 +00:00
Friel
64869751bb chore(core): clean watchdog prompt layering leftovers 2026-04-01 00:04:25 +00:00
Friel
edf3e240a1 Merge commit 'b9f7edb0f184ac80b72f6aaa8a54c4208d86c85d' into dev/friel/collab-stack 2026-04-01 00:04:04 +00:00
Friel
eb191bffbd Merge commit 'a2d205cd9b8cee932870120a063b8690ce6a0eeb' into dev/friel/collab-stack 2026-04-01 00:04:03 +00:00
Friel
7c771c192a Merge commit '35b68e1d917962e5a9c773a0111772681316e89f' into dev/friel/collab-stack 2026-04-01 00:04:02 +00:00
Friel
68bcc7e0bb Fix watchdog owner input activity 2026-04-01 00:03:46 +00:00
Friel
cb41cd79b4 feat(core): remove spawn_mode from agent roles 2026-04-01 00:03:44 +00:00
Friel
42c75bd944 fix(core): populate fork mode in spawn handlers 2026-04-01 00:03:41 +00:00
Friel
7998133005 agent tools namespaced and legacy aliases preserved 2026-04-01 00:03:28 +00:00
Friel
6f9721ada1 Merge commit '4fd857d8567b3aa55940c022abe611ce1edec332' into dev/friel/collab-stack 2026-04-01 00:03:27 +00:00
Friel
b9f7edb0f1 fix(core): populate fork mode in spawn handlers 2026-03-31 23:57:24 +00:00
Friel
52d43c4b5f test(core): keep notification test non-forked by default 2026-03-31 23:44:39 +00:00
Friel
c34ed88023 test(core): fix subagent notification validator 2026-03-31 23:44:39 +00:00
Friel
c9c01679b5 fix(core): preserve effective effort for fork-context children 2026-03-31 23:44:39 +00:00
Friel
ac7c881177 test: refresh fork-context expectations 2026-03-31 23:44:39 +00:00
Friel
ba8a6d9ae8 feat(core): add role-level fork_context defaults 2026-03-31 23:44:39 +00:00
Friel
76285f631f test(core): fix multi-agent v2 fork-context regression
Co-authored-by: Codex <noreply@openai.com>
2026-03-31 23:42:28 +00:00
Friel
6891b76417 feat(core): force forked agents to inherit parent model
Co-authored-by: Codex <noreply@openai.com>
2026-03-31 23:42:28 +00:00
Friel
07041ea35b fix core: retry async quota-exhausted fallback spawns 2026-03-31 23:37:23 +00:00
Friel
4254c47d51 test: relax fallback model effort assertion for async quota retry 2026-03-31 23:37:23 +00:00
Friel
bc3ffc79a0 fix(core): annotate spawn model fallback positional args 2026-03-31 23:37:23 +00:00
Friel
090c835043 fix(core): preserve role precedence for model fallback 2026-03-31 23:37:23 +00:00
Friel
66fadc7cfa Add ordered model fallback list for subagent spawn 2026-03-31 23:37:23 +00:00
Friel
4fd857d856 agent tools namespaced and legacy aliases preserved 2026-03-31 22:09:57 +00:00
Friel
a2d205cd9b feat(core): remove spawn_mode from agent roles 2026-03-31 22:02:30 +00:00
Friel
35b68e1d91 Fix watchdog owner input activity 2026-03-31 21:34:09 +00:00
Friel
6c0005ea9a merge dev/friel/spawn-mode-removal into collab stack 2026-03-30 13:54:55 +00:00
Friel
e7b85ee8e2 merge dev/friel/watchdog-self-close-tool into collab stack 2026-03-30 13:54:55 +00:00
Friel
ef7686928f merge tui-watchdog-timer-countdown into collab stack 2026-03-30 13:54:55 +00:00
Friel
1939572f16 merge dev/friel/watchdog-interval-reset into collab stack 2026-03-30 13:54:55 +00:00
Friel
88d9602c51 merge model-fallback-list into collab stack 2026-03-30 13:54:55 +00:00
Friel
2de280a92e merge dev/friel/agent-tools-namespace into collab stack 2026-03-30 13:54:55 +00:00
Friel
9db82f465a ci: extend bazel spawn mode timeout 2026-03-30 13:49:34 +00:00
Friel
8cf3da357b ci: extend bazel watchdog timer timeout 2026-03-30 13:49:33 +00:00
Friel
380060ab8d ci: extend bazel watchdog self-close timeout 2026-03-30 13:49:33 +00:00
Friel
5adcade9a5 ci: extend bazel workflow timeouts 2026-03-30 13:47:21 +00:00
Friel
838947aa23 ci: extend bazel workflow timeouts 2026-03-30 13:47:21 +00:00
Friel
e6ac8ae071 ci: extend bazel workflow timeouts 2026-03-30 13:47:21 +00:00
Friel
aa258ac380 merge ci: bump linux arglint prebuilt timeout into collab stack 2026-03-30 13:03:16 +00:00
Friel
447a2e0d38 merge ci: bump linux arglint prebuilt timeout into collab stack 2026-03-30 13:03:16 +00:00
Friel
f96ab91858 merge ci: bump linux arglint prebuilt timeout into collab stack 2026-03-30 13:03:15 +00:00
Friel
27891e38b5 merge ci: increase Linux arglint timeout into collab stack 2026-03-30 13:03:15 +00:00
Friel
cdf003791a merge ci: increase Linux arglint timeout into collab stack 2026-03-30 13:03:15 +00:00
Friel
8475396941 merge ci: increase Linux arglint timeout into collab stack 2026-03-30 13:03:15 +00:00
Friel
4b59f68e7e ci: increase Linux arglint timeout 2026-03-30 12:57:34 +00:00
Friel
6c854ab0c7 ci: increase Linux arglint timeout 2026-03-30 12:57:34 +00:00
Friel
dac25f336b ci: increase Linux arglint timeout 2026-03-30 12:57:34 +00:00
Friel
654ce31617 ci: bump linux arglint prebuilt timeout 2026-03-30 12:56:00 +00:00
Friel
c5475b2354 ci: bump linux arglint prebuilt timeout 2026-03-30 12:56:00 +00:00
Friel
c3bd7c5218 ci: bump linux arglint prebuilt timeout 2026-03-30 12:56:00 +00:00
Friel
13ca7c4b9b merge spawn-mode-removal timeout bump into collab stack 2026-03-30 11:07:45 +00:00
Friel
ff3b5301c2 merge watchdog-self-close-tool timeout bump into collab stack 2026-03-30 11:07:45 +00:00
Friel
618490ce2b merge tui-watchdog-timer-countdown timeout bump into collab stack 2026-03-30 11:07:45 +00:00
Friel
c8450fc485 merge watchdog-interval-reset timeout bump into collab stack 2026-03-30 11:07:45 +00:00
Friel
3b1ef8bca5 merge model-fallback-list timeout bump into collab stack 2026-03-30 11:07:45 +00:00
Friel
0f29328a88 merge agent-tools-namespace timeout bump into collab stack 2026-03-30 11:07:45 +00:00
Friel
be4fbb6ecb ci: extend spawn mode removal timeouts 2026-03-30 11:03:34 +00:00
Friel
e55701b4fc ci: extend watchdog self-close tool timeouts 2026-03-30 11:03:34 +00:00
Friel
dfdee7d6cb ci: extend watchdog timer and subagent behavior timeouts 2026-03-30 11:03:33 +00:00
Friel
aab9f3c365 ci: increase sdk and bazel timeouts 2026-03-30 11:03:07 +00:00
Friel
81d01829d8 ci: increase sdk and bazel timeouts 2026-03-30 11:03:07 +00:00
Friel
248aa6cb32 ci: increase sdk and bazel timeouts 2026-03-30 11:03:07 +00:00
Friel
6bfea621aa ci: extend spawn mode removal workflow timeouts 2026-03-30 10:03:34 +00:00
Friel
df8801be6e ci: extend watchdog self-close workflow timeouts 2026-03-30 10:03:34 +00:00
Friel
9f0916586d ci: extend watchdog timer workflow timeouts 2026-03-30 10:03:34 +00:00
Friel
72d43c58b0 ci: extend sdk and arglint timeouts 2026-03-30 10:03:34 +00:00
Friel
e9882f4140 ci: extend sdk and arglint timeouts 2026-03-30 10:03:34 +00:00
Friel
5b18d8ea38 ci: extend sdk and arglint timeouts 2026-03-30 10:03:33 +00:00
Friel
8c5a0ae94e merge spawn-mode-removal timeout bump into collab stack 2026-03-30 09:17:48 +00:00
Friel
8de95d9256 merge watchdog-self-close-tool timeout bump into collab stack 2026-03-30 09:17:47 +00:00
Friel
c328f97877 merge tui-watchdog-timer-countdown timeout bump into collab stack 2026-03-30 09:17:47 +00:00
Friel
751d273664 merge watchdog-interval-reset timeout bump into collab stack 2026-03-30 09:17:47 +00:00
Friel
5ccaa83a09 merge model-fallback-list timeout bump into collab stack 2026-03-30 09:17:47 +00:00
Friel
3a93dbfb7d merge agent-tools-namespace timeout bump into collab stack 2026-03-30 09:17:47 +00:00
Friel
4446ed91cf ci: extend spawn mode removal workflow timeouts 2026-03-30 09:12:32 +00:00
Friel
496ebb2c31 ci: extend watchdog self-close workflow timeouts 2026-03-30 09:12:31 +00:00
Friel
79d73b4a5c ci: extend watchdog timer branch workflow timeouts 2026-03-30 09:12:31 +00:00
Friel
b693e09ea6 ci: raise sdk and arglint timeouts 2026-03-30 09:12:04 +00:00
Friel
acab8b0ebb ci: raise sdk and arglint timeouts 2026-03-30 09:12:02 +00:00
Friel
fa84dfe804 ci: raise sdk and arglint timeouts 2026-03-30 09:11:59 +00:00
Friel
c900f1231a merge tui-watchdog-timer-countdown into collab stack 2026-03-30 08:53:34 +00:00
Friel
972f6ed125 merge dev/friel/watchdog-self-close-tool into collab stack
# Conflicts:
#	codex-rs/core/src/tools/spec.rs
2026-03-30 08:53:24 +00:00
Friel
526a40f615 merge dev/friel/watchdog-interval-reset into collab stack 2026-03-30 08:52:37 +00:00
Friel
f8ea0b6a65 merge dev/friel/spawn-mode-removal into collab stack 2026-03-30 08:52:21 +00:00
Friel
f4a32a76fa merge model-fallback-list into collab stack 2026-03-30 08:52:17 +00:00
Friel
b18316a10b merge dev/friel/fork-context-inherits-parent-model into collab stack 2026-03-30 08:52:14 +00:00
Friel
0b95373b11 merge dev/friel/agent-tools-namespace into collab stack 2026-03-30 08:51:59 +00:00
Friel
b2acee0938 ci: bump workflow timeouts 2026-03-30 08:45:50 +00:00
Friel
6cf38738d6 ci: bump workflow timeouts 2026-03-30 08:45:50 +00:00
Friel
484eb1ebfd ci: bump workflow timeouts 2026-03-30 08:45:49 +00:00
Friel
7430b77494 ci: bump sdk and arglint timeouts 2026-03-30 08:44:52 +00:00
Friel
6a516b1947 ci: bump sdk and arglint timeouts 2026-03-30 08:44:52 +00:00
Friel
bef956ccd4 ci: bump sdk and arglint timeouts 2026-03-30 08:44:52 +00:00
Friel
c4e5ccdc77 merge upstream/dev/friel/watchdog-runtime-and-prompts into collab stack
# Conflicts:
#	codex-rs/core/tests/suite/subagent_notifications.rs
2026-03-30 07:54:32 +00:00
Friel
63b1bb2a8e test(core): handle namespaced spawn_agent description 2026-03-30 07:49:11 +00:00
Friel
ab51df57db test(core): align watchdog prompt-layering expectations 2026-03-30 07:27:20 +00:00
Friel
bf47ade0bb test(core): align watchdog prompt-layering expectations 2026-03-30 07:22:13 +00:00
Friel
a2d94eafb1 test(core): keep notification test non-forked by default 2026-03-30 06:14:32 +00:00
Friel
8bbc644b76 test(core): keep notification test non-forked by default 2026-03-30 06:09:12 +00:00
Friel
d4489dc497 test(core): fix subagent notification validator 2026-03-30 05:51:27 +00:00
Friel
5b498925e3 test(core): fix subagent notification validator 2026-03-30 05:48:14 +00:00
Friel
9c1342c36a merge origin/dev/friel/fork-context-inherits-parent-model into collab stack 2026-03-30 04:51:06 +00:00
Friel
d14cfd2597 merge upstream/dev/friel/watchdog-runtime-and-prompts into collab stack 2026-03-30 04:26:24 +00:00
Friel
c79cb41b7d fix(core): preserve effective effort for fork-context children 2026-03-30 04:22:19 +00:00
Friel
3fe0995729 merge upstream/dev/friel/fork-context-inherits-parent-model into collab stack 2026-03-30 03:41:43 +00:00
Friel
e13dc9d843 chore(core): clean watchdog prompt layering leftovers 2026-03-30 03:37:39 +00:00
Friel
df69f49b6b test: refresh fork-context expectations 2026-03-30 03:07:31 +00:00
Friel
54ec91e277 fix(stack): finish 20260330 collab integration 2026-03-30 02:36:50 +00:00
Friel
845a0e571a merge upstream/dev/codex/add-fork-option-to-codex-exec into collab stack 2026-03-30 01:54:37 +00:00
Friel
fb462c3efb merge upstream/dev/codex/add-custom-models-support-in-config.toml-r2 into collab stack 2026-03-30 01:54:32 +00:00
Friel
52c823d2a4 merge upstream/dev/friel/tui-watchdog-and-subagent-behavior into collab stack 2026-03-30 01:53:49 +00:00
Friel
379d9f68a5 merge upstream/dev/friel/watchdog-runtime-and-prompts into collab stack 2026-03-30 01:53:08 +00:00
Friel
a245175736 merge model-fallback-list into collab stack 2026-03-30 01:43:00 +00:00
Friel
8885b0f59e merge dev/friel/agent-tools-namespace into collab stack 2026-03-30 01:42:56 +00:00
Friel
b0d543f21f merge origin/friel/auto-unarchive-resume into collab stack 2026-03-30 01:42:50 +00:00
Friel
41f0cb36cb merge upstream/dev/friel/fork-references into collab stack 2026-03-30 01:42:45 +00:00
Friel
4883b3ca9c fix(core): annotate spawn model fallback positional args 2026-03-30 01:38:06 +00:00
Friel
1222f23d0c feat(core): expose agent tools in namespace 2026-03-30 00:54:18 +00:00
Friel
2da9e1b7f2 feat(core): remove spawn_mode from agent roles 2026-03-30 00:39:45 +00:00
Friel
0ad661977e fix(core): preserve role precedence for model fallback 2026-03-29 23:47:22 +00:00
Friel
bbbe245f14 add deferred watchdog self-close tool 2026-03-29 22:10:23 +00:00
Friel
d45c1a581f Add ordered model fallback list for subagent spawn 2026-03-29 21:36:03 +00:00
Friel
4dc32e8c5e feat(core): reset watchdog on owner input 2026-03-29 21:28:15 +00:00
Friel
d4ed343c1d docs(core): guard against accidental watchdog closure 2026-03-29 21:20:48 +00:00
Friel
45bf65d007 Render watchdog countdown in subagent panel 2026-03-29 21:11:31 +00:00
Friel
b1213d0a2a feat(core): preserve watchdog fork prefix 2026-03-29 19:29:54 +00:00
Friel
67e4190951 Merge remote-tracking branch 'origin/dev/friel/fork-context-inherits-parent-model' into repair/watchdog-prompt-layering 2026-03-29 18:07:01 +00:00
Friel
fb8653cd30 Merge remote-tracking branch 'origin/dev/friel/agent-tools-namespace' into repair/watchdog-prompt-layering 2026-03-29 18:07:00 +00:00
Friel
cc60cafc50 feat(core): add role-level fork_context defaults 2026-03-29 17:56:12 +00:00
Friel
70ba3d1e29 feat(core): expose agent tools in namespace 2026-03-29 17:56:12 +00:00
Friel
2c3bff24e4 test(core): fix multi-agent v2 fork-context regression
Co-authored-by: Codex <noreply@openai.com>
2026-03-29 17:04:29 +00:00
Friel
be051e056e feat(core): force forked agents to inherit parent model
Co-authored-by: Codex <noreply@openai.com>
2026-03-29 17:04:28 +00:00
Friel
a2f37d5548 fix(tui): preserve watchdog behavior on refreshed base
Resolve the remaining TUI-side merge drift after restacking the watchdog and subagent behavior branch onto the refreshed TUI foundation branch.

Co-authored-by: Codex <noreply@openai.com>
2026-03-28 23:09:20 -07:00
Friel
65db8195a5 test(nextest): extend schema fixture timeout on windows 2026-03-28 23:09:20 -07:00
Friel
86fe4f6be4 test(app-server): stabilize thread unsubscribe on slow runners 2026-03-28 23:09:20 -07:00
Friel
3bd69accc1 fix(tui): render live agent inbox notifications 2026-03-28 23:08:51 -07:00
Friel
b8be51fe7c fix(tui): annotate subagent panel test literals 2026-03-28 19:24:31 -07:00
Friel
0b72153645 fix(tui): annotate subagent panel render literals 2026-03-28 18:54:46 -07:00
Friel
510d5679a9 fix(tui): annotate subagent panel truncation literal 2026-03-28 18:43:02 -07:00
Friel
48b7f72829 fix(tui): drop stale voice transcription init on current main 2026-03-28 18:34:11 -07:00
Friel
fc7a5e3d0f fix(tui): restore subagent panel wiring on app-server TUI 2026-03-28 18:28:25 -07:00
Friel
90d339cf75 fix(tui): mount subagent panel independently of active cell 2026-03-28 18:24:50 -07:00
Friel
0af3f1f66e fix(tui): keep subagent panel out of transcript history 2026-03-28 18:24:50 -07:00
Friel
4ef8493fd2 test(tui): annotate subagent panel literals 2026-03-28 18:24:50 -07:00
Friel
5e41296824 fix(tui): format multi-agent status helper 2026-03-28 18:24:50 -07:00
Friel
3cfdc99ecc fix(tui): annotate watchdog panel truncation literals 2026-03-28 18:24:50 -07:00
Friel
77c69e35b7 fix(tui): adapt collab foundation to refreshed main
Preserve the collab transcript fixtures and current TUI style rules after rebasing onto the refreshed tui_app_server codebase.

Co-authored-by: Codex <noreply@openai.com>
2026-03-28 18:24:49 -07:00
Friel
ed0cff78a6 feat(tui): add subagent inbox foundation
Preserve the subagent inbox foundation behavior on the current origin/main base and collapse the branch back to a single commit for easier future restacks.
2026-03-28 18:24:49 -07:00
Friel
3c84d41945 test(core): annotate final custom model literals 2026-03-28 16:50:22 -07:00
Friel
2b7fd5a91d fix(tui): correct watchdog spawn replay fixtures 2026-03-28 16:42:08 -07:00
Friel
d1196258a1 test(core): annotate custom model helper literals 2026-03-28 16:34:48 -07:00
Friel
e5e3911799 fix(tui): align watchdog spawn replay fallback 2026-03-28 16:33:33 -07:00
Friel
90c18fd1b1 test(tui): set spawn mode in watchdog replay fixtures 2026-03-28 16:31:46 -07:00
Friel
08dad720db fix(tui): set spawn mode in collab spawn replay 2026-03-28 16:22:24 -07:00
Friel
83780ddbff test(core): annotate watchdog test literals 2026-03-28 16:22:24 -07:00
Friel
b4ea255a9f fix(core): annotate watchdog positional literals 2026-03-28 16:22:24 -07:00
Friel
2b0f370fd8 fix(core): preserve watchdog spawn handler imports
Co-authored-by: Codex <noreply@openai.com>
2026-03-28 16:22:24 -07:00
Friel
7a079455d3 fix(core): adapt watchdog role branch to refreshed main
Keep the role-driven watchdog branch aligned with the refreshed list_agents and spawn event APIs after rebasing on the updated subagent inbox branch.

Co-authored-by: Codex <noreply@openai.com>
2026-03-28 16:22:24 -07:00
Friel
074216d951 feat(core): make watchdogs role-driven
Co-authored-by: Codex <noreply@openai.com>
2026-03-28 16:22:24 -07:00
Friel
3e8bd6b801 feat(core): add watchdog runtime and prompts
Preserve the watchdog runtime and prompt behavior on top of the refreshed inbox branch and collapse the branch back to a single commit for easier future restacks.

Co-authored-by: Codex <noreply@openai.com>
2026-03-28 16:22:24 -07:00
Friel
afaa5e887c test(core): annotate inbox control test literals 2026-03-28 16:22:01 -07:00
Friel
8fab3fb7da test(core): annotate remaining custom model literals 2026-03-28 16:22:01 -07:00
Friel
83e58d5036 test(app-server): annotate remaining shell workdir literals 2026-03-28 12:06:07 -07:00
Friel
d581363a82 test(core): annotate refreshed custom model literals 2026-03-28 12:05:31 -07:00
Friel
73418e2882 test(app-server): annotate inbox resume literals 2026-03-28 11:56:40 -07:00
Friel
5395dab51d test(cli): annotate remote and color literals 2026-03-28 11:51:56 -07:00
Friel
e6f412f2d8 test(core): annotate auto-unarchive literals 2026-03-28 11:48:18 -07:00
Friel
9118d71ec7 test(core): annotate custom model literals 2026-03-28 11:48:18 -07:00
Friel
901c8e2ba6 test(core): annotate fork-thread positional literals 2026-03-28 11:35:15 -07:00
Friel
ebc45db194 test(features): annotate auth literals 2026-03-28 11:35:15 -07:00
Friel
70cbe5a89d test(exec): annotate fork option literals 2026-03-28 11:35:15 -07:00
Friel
c87d62c34f fix(core): re-export auto-unarchive lookup on refreshed main
Keep the split rollout shim aligned with the refreshed codex-rollout crate so resume-only auto-unarchive continues to compile on current main.

Co-authored-by: Codex <noreply@openai.com>
2026-03-28 11:35:15 -07:00
Friel
a68b1bd423 test(core): annotate custom model catalog literal 2026-03-28 11:35:15 -07:00
Friel
797fed3d57 fix(core): adapt fork references to refreshed main
Disable stale inline fork-reference test modules on the refreshed core APIs and keep the rollout re-export surface aligned with the split codex-rollout crate.

Co-authored-by: Codex <noreply@openai.com>
2026-03-28 11:35:15 -07:00
Friel
3be3e8fc87 fix(core): apply refreshed clippy suggestions
Co-authored-by: Codex <noreply@openai.com>
2026-03-28 11:35:15 -07:00
Friel
8ee3a48601 feat(exec): add --fork option to codex exec
Add non-interactive session forking to codex exec and keep the CLI/help/tests aligned with the interactive fork behavior.
2026-03-28 11:35:15 -07:00
Friel
328706da37 codex: auto-unarchive archived sessions safely on resume
Preserve the auto-unarchive-on-resume behavior while keeping archived-session lookup safe. This carries the rollout lookup hardening, the resume path updates, and the cross-platform guardian/TUI test fixes needed for current CI.
2026-03-28 11:35:15 -07:00
Friel
bc18824d23 fix(core): adapt custom model tests to refreshed main
Drop the removed ModelInfo field and normalize refreshed ThreadManager and UserTurn test callsites.

Co-authored-by: Codex <noreply@openai.com>
2026-03-28 11:35:15 -07:00
Friel
f30fde6221 feat(rollout): preserve fork references across replay
Preserve fork-reference replay behavior on the current origin/main base and collapse the branch back to a single commit for easier future restacks.
2026-03-28 11:35:15 -07:00
Friel
4d0a1431f8 fix(core): adapt inbox delivery rebase to current main
Keep the rebased inbox-delivery branch on current core APIs by moving the inbox-specific coverage into current main's control tests and updating the turn-restart helper to use RegularTask.

Co-authored-by: Codex <noreply@openai.com>
2026-03-28 11:35:15 -07:00
Friel
d6f8e3aeeb feat(core): support custom model aliases in config.toml
Preserve custom model alias support on the current origin/main base and collapse the branch back to a single commit for easier future restacks.
2026-03-28 11:35:15 -07:00
Friel
0e91619094 feat(agents): enable subagent inbox delivery
Preserve subagent inbox delivery on the current origin/main base and collapse the branch back to a single commit for easier future restacks.
2026-03-28 11:35:15 -07:00
Friel
5628e3cc8f fix(core): re-export auto-unarchive lookup on refreshed main
Keep the split rollout shim aligned with the refreshed codex-rollout crate so resume-only auto-unarchive continues to compile on current main.

Co-authored-by: Codex <noreply@openai.com>
2026-03-28 10:29:12 -07:00
Friel
63b0d26587 codex: auto-unarchive archived sessions safely on resume
Preserve the auto-unarchive-on-resume behavior while keeping archived-session lookup safe. This carries the rollout lookup hardening, the resume path updates, and the cross-platform guardian/TUI test fixes needed for current CI.
2026-03-28 10:29:12 -07:00
2556 changed files with 138368 additions and 397695 deletions

View File

@@ -29,13 +29,10 @@ common:linux --test_env=PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
common:macos --test_env=PATH=/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
# Pass through some env vars Windows needs to use powershell?
common:windows --test_env=PATH
common:windows --test_env=SYSTEMROOT
common:windows --test_env=COMSPEC
common:windows --test_env=WINDIR
# Rust's libtest harness runs test bodies on std-spawned threads. The default
# 2 MiB stack can be too small for large async test futures on Windows CI; see
# https://github.com/openai/codex/pull/19067 for the motivating failure.
common --test_env=RUST_MIN_STACK=8388608 # 8 MiB
common --test_output=errors
common --bes_results_url=https://app.buildbuddy.io/invocation/
@@ -47,7 +44,6 @@ common --remote_timeout=3600
common --noexperimental_throttle_remote_action_building
common --experimental_remote_execution_keepalive
common --grpc_keepalive_time=30s
common --experimental_remote_downloader=grpcs://remote.buildbuddy.io
# This limits both in-flight executions and concurrent downloads. Even with high number
# of jobs execution will still be limited by CPU cores, so this just pays a bit of
@@ -68,10 +64,6 @@ common:ci --verbose_failures
common:ci --build_metadata=REPO_URL=https://github.com/openai/codex.git
common:ci --build_metadata=ROLE=CI
common:ci --build_metadata=VISIBILITY=PUBLIC
# rules_rust derives debug level from Bazel toolchain/compilation-mode settings,
# not Cargo profiles. Keep CI Rust actions explicit and lean.
common:ci --@rules_rust//rust/settings:extra_rustc_flag=-Cdebuginfo=0
common:ci --@rules_rust//rust/settings:extra_exec_rustc_flag=-Cdebuginfo=0
# Disable disk cache in CI since we have a remote one and aren't using persistent workers.
common:ci --disk_cache=
@@ -79,10 +71,6 @@ common:ci --disk_cache=
# Shared config for the main Bazel CI workflow.
common:ci-bazel --config=ci
common:ci-bazel --build_metadata=TAG_workflow=bazel
# Bazel CI cross-compiles in several legs, and the V8-backed code-mode tests
# are not stable in that setup yet. Keep running the rest of the Rust
# integration suites through the workspace-root launcher.
common:ci-bazel --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::
# Shared config for Bazel-backed Rust linting.
build:clippy --aspects=@rules_rust//rust:defs.bzl%rust_clippy_aspect
@@ -93,8 +81,6 @@ build:clippy --@rules_rust//rust/settings:clippy.toml=//codex-rs:clippy.toml
# in their own `Cargo.toml`, but `rules_rust` Bazel clippy does not read Cargo lint levels.
# `clippy.toml` can configure lint behavior, but it cannot set allow/warn/deny/forbid levels.
build:clippy --@rules_rust//rust/settings:clippy_flag=-Dwarnings
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::await_holding_invalid_type
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::await_holding_lock
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::expect_used
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::identity_op
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_clamp
@@ -138,6 +124,7 @@ build:argument-comment-lint --@rules_rust//rust/toolchain/channel=nightly
common:ci-windows --config=ci-bazel
common:ci-windows --build_metadata=TAG_os=windows
common:ci-windows --repo_contents_cache=D:/a/.cache/bazel-repo-contents-cache
common:ci-windows --repository_cache=D:/a/.cache/bazel-repo-cache
# We prefer to run the build actions entirely remotely so we can dial up the concurrency.
# We have platform-specific tests, so we want to execute the tests on all platforms using the strongest sandboxing available on each platform.
@@ -157,25 +144,6 @@ common:ci-macos --config=remote
common:ci-macos --strategy=remote
common:ci-macos --strategy=TestRunner=darwin-sandbox,local
# On Windows, use Linux remote execution for build actions but keep test actions
# on the Windows runner so Bazel's normal test sharding and flaky-test retries
# still run against Windows binaries.
common:ci-windows-cross --config=ci-windows
common:ci-windows-cross --build_metadata=TAG_windows_cross_compile=true
common:ci-windows-cross --config=remote
common:ci-windows-cross --host_platform=//:rbe
common:ci-windows-cross --strategy=remote
common:ci-windows-cross --strategy=TestRunner=local
common:ci-windows-cross --local_test_jobs=4
common:ci-windows-cross --test_env=RUST_TEST_THREADS=1
# Native Windows CI still covers the PowerShell tests. The cross-built gnullvm
# binaries currently hang in PowerShell AST parser tests when those binaries are
# run on the Windows runner.
common:ci-windows-cross --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::,powershell
common:ci-windows-cross --platforms=//:windows_x86_64_gnullvm
common:ci-windows-cross --extra_execution_platforms=//:rbe,//:windows_x86_64_msvc
common:ci-windows-cross --extra_toolchains=//:windows_gnullvm_tests_on_msvc_host_toolchain
# Linux-only V8 CI config.
common:ci-v8 --config=ci
common:ci-v8 --build_metadata=TAG_workflow=v8
@@ -183,15 +151,5 @@ common:ci-v8 --build_metadata=TAG_os=linux
common:ci-v8 --config=remote
common:ci-v8 --strategy=remote
# Source-built Bazel V8 artifacts use the in-process sandbox by default. This
# does not affect Cargo's default prebuilt rusty_v8 path.
common --@v8//:v8_enable_pointer_compression=True
common --@v8//:v8_enable_sandbox=True
# Keep currently published rusty_v8 release artifacts non-sandboxed until the
# artifact migration ships matching Rust feature selection for Cargo consumers.
common:v8-release-compat --@v8//:v8_enable_pointer_compression=False
common:v8-release-compat --@v8//:v8_enable_sandbox=False
# Optional per-user local overrides.
try-import %workspace%/user.bazelrc

View File

@@ -1,6 +1,5 @@
iTerm
iTerm2
psuedo
SOM
te
TE

View File

@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new,*meriyah.umd.min.js
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm,te,TE,PASE,SEH

View File

@@ -1,11 +0,0 @@
# THIS IS AUTOGENERATED. DO NOT EDIT MANUALLY
version = 1
name = "codex"
[setup]
script = ""
[[actions]]
name = "Run"
icon = "run"
command = "cargo +1.93.0 run --manifest-path=codex-rs/Cargo.toml --bin codex -- -c mcp_oauth_credentials_store=file"

View File

@@ -27,10 +27,10 @@ Accept any of the following:
2. Run the watcher script to snapshot PR/review/CI state (or consume each streamed snapshot from `--watch`).
3. Inspect the `actions` list in the JSON response.
4. If `diagnose_ci_failure` is present, inspect failed run logs and classify the failure.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push. Do not patch random flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are unrelated to the branch.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push.
6. If `process_review_comment` is present, inspect surfaced review items and decide whether to address them.
7. If a review item is actionable and correct, patch code locally, commit, push, and then mark the associated review thread/comment as resolved once the fix is on GitHub.
8. Do not post replies to human-authored review comments/threads unless the user explicitly confirms the exact response. If a human review item is non-actionable, already addressed, or not valid, surface the item and recommended response to the user instead of replying on GitHub.
8. If a review item from another author is non-actionable, already addressed, or not valid, post one reply on the comment/thread explaining that decision (for example answering the question or explaining why no change is needed). If the watcher later surfaces your own reply, treat that self-authored item as already handled and do not reply again.
9. If the failure is likely flaky/unrelated and `retry_failed_checks` is present, rerun failed jobs with `--retry-failed-now`.
10. If both actionable review feedback and `retry_failed_checks` are present, prioritize review feedback first; a new commit will retrigger CI, so avoid rerunning flaky checks on the old SHA unless you intentionally defer the review change.
11. On every loop, look for newly surfaced review feedback before acting on CI failures or mergeability state, then verify mergeability / merge-conflict status (for example via `gh pr view`) alongside CI.
@@ -69,18 +69,12 @@ python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr <number-or-url> --o
Use `gh` commands to inspect failed runs before deciding to rerun.
- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh api repos/<owner>/<repo>/actions/runs/<run-id>/jobs -X GET -f per_page=100`
- `gh api repos/<owner>/<repo>/actions/jobs/<job-id>/logs > /tmp/codex-gh-job-<job-id>-logs.zip`
- `gh run view <run-id> --log-failed` as a fallback after the overall workflow run is complete
- `gh run view <run-id> --log-failed`
`gh run view --log-failed` is workflow-run scoped and may not expose failed-job logs until the overall run finishes. For faster diagnosis, poll the run's jobs first and, as soon as a specific job has failed, fetch that job's logs directly from the Actions job logs endpoint. The watcher includes a `failed_jobs` list with each failed job's `job_id` and `logs_endpoint` when GitHub exposes one.
Prefer treating failures as branch-related when failed-job logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).
Prefer treating failures as branch-related when logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).
Prefer treating failures as flaky/unrelated when logs show transient infra/external issues (timeouts, runner provisioning failures, registry/network outages, GitHub Actions infra errors).
Do not attempt to fix flaky/unrelated failures by changing tests, build scripts, CI configuration, dependency pins, or infrastructure-adjacent code unless the logs clearly connect the failure to the PR branch. For flaky/unrelated failures, rerun only when the watcher recommends `retry_failed_checks`; otherwise wait or stop for user help.
If classification is ambiguous, perform one manual diagnosis attempt before choosing rerun.
Read `.codex/skills/babysit-pr/references/heuristics.md` for a concise checklist.
@@ -105,8 +99,7 @@ When you agree with a comment and it is actionable:
5. Resume watching on the new SHA immediately (do not stop after reporting the push).
6. If monitoring was running in `--watch` mode, restart `--watch` immediately after the push in the same turn; do not wait for the user to ask again.
Do not post replies to human-authored GitHub review comments/threads automatically. If you disagree with a human comment, believe it is non-actionable/already addressed, or need to answer a question, report the item to the user with a suggested response and wait for explicit confirmation before posting anything on GitHub. If the user approves a response, prefix it with `[codex]` so it is clear the response is automated and not from the human user.
If the watcher later surfaces your own approved reply because the authenticated operator is treated as a trusted review author, treat that self-authored item as already handled and do not reply again.
If you disagree or the comment is non-actionable/already addressed, reply once directly on the GitHub comment/thread so the reviewer gets an explicit answer, then continue the watcher loop. If the watcher later surfaces your own reply because the authenticated operator is treated as a trusted review author, treat that self-authored item as already handled and do not reply again.
If a code review comment/thread is already marked as resolved in GitHub, treat it as non-actionable and safely ignore it unless new unresolved follow-up feedback appears.
## Git Safety Rules
@@ -132,11 +125,11 @@ Use this loop in a live Codex session:
2. Read `actions`.
3. First check whether the PR is now merged or otherwise closed; if so, report that terminal state and stop polling immediately.
4. Check CI summary, new review items, and mergeability/conflict status.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated. If the overall run is still pending but `failed_jobs` already includes a failed job, fetch that job's logs and diagnose immediately instead of waiting for the whole workflow run to finish. Patch only when the failure is branch-related.
6. For each surfaced review item from another author, patch/commit/push and then resolve it if it is actionable. If it is non-actionable, already addressed, or requires a written answer, surface it to the user with a suggested response instead of posting automatically. If a later snapshot surfaces your own approved reply, treat it as informational and continue without responding again.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated.
6. For each surfaced review item from another author, either reply once with an explanation if it is non-actionable or patch/commit/push and then resolve it if it is actionable. If a later snapshot surfaces your own reply, treat it as informational and continue without responding again.
7. Process actionable review comments before flaky reruns when both are present; if a review fix requires a commit, push it and skip rerunning failed checks on the old SHA.
8. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit. Do not make code changes for unrelated flakes or infrastructure failures just to get CI green.
9. If you pushed a commit, resolved a review thread, or triggered a rerun, report the action briefly and continue polling (do not stop). If a human review comment needs a written GitHub response, stop and ask for confirmation before posting.
8. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit.
9. If you pushed a commit, resolved a review thread, replied to a review comment, or triggered a rerun, report the action briefly and continue polling (do not stop).
10. After a review-fix push, proactively restart continuous monitoring (`--watch`) in the same turn unless a strict stop condition has already been reached.
11. If everything is passing, mergeable, not blocked on required review approval, and there are no unaddressed review items, report that the PR is currently ready to merge but keep the watcher running so new review comments are surfaced quickly while the PR remains open.
12. If blocked on a user-help-required issue (infra outage, exhausted flaky retries, unclear reviewer request, permissions), report the blocker and stop.
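
For illustration, a minimal sketch of the early log-fetch flow described above, driven by a snapshot's `failed_jobs` entries (hypothetical glue code, not part of the skill itself; assumes an authenticated `gh` CLI and a snapshot captured to a placeholder path):

import json
import subprocess

# Load one watcher snapshot; each `failed_jobs` entry carries `job_id` and
# `logs_endpoint` as described above. The path is a placeholder.
with open("/tmp/snapshot.json") as fh:
    snapshot = json.load(fh)

for job in snapshot.get("failed_jobs", []):
    endpoint = job.get("logs_endpoint")
    if not endpoint:
        continue
    # Fetch the failed job's logs directly, mirroring
    # `gh api repos/<owner>/<repo>/actions/jobs/<job-id>/logs > ...zip`.
    with open(f"/tmp/codex-gh-job-{job['job_id']}-logs.zip", "wb") as out:
        subprocess.run(["gh", "api", endpoint], check=True, stdout=out)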

View File

@@ -1,4 +1,4 @@
interface:
display_name: "PR Babysitter"
short_description: "Watch PR review comments, CI, and merge conflicts"
default_prompt: "Babysit the current PR: monitor reviewer comments, CI, and merge-conflict status (prefer the watcher's --watch mode for live monitoring); surface new review feedback before acting on CI or mergeability work, fix valid issues, push updates, and rerun flaky failures up to 3 times. Do not post replies to human-authored review comments unless the user explicitly confirms the exact response. Do not patch unrelated flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are not caused by the branch. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Do not treat a green + mergeable PR as a terminal stop while it is still open; continue polling autonomously after any push/rerun so newly posted review comments are surfaced until a strict terminal stop condition is reached or the user interrupts."
default_prompt: "Babysit the current PR: monitor reviewer comments, CI, and merge-conflict status (prefer the watcher's --watch mode for live monitoring); surface new review feedback before acting on CI or mergeability work, fix valid issues, push updates, and rerun flaky failures up to 3 times. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Do not treat a green + mergeable PR as a terminal stop while it is still open; continue polling autonomously after any push/rerun so newly posted review comments are surfaced until a strict terminal stop condition is reached or the user interrupts."

View File

@@ -23,11 +23,9 @@ Used to discover failed workflow runs and rerunnable run IDs.
### Failed log inspection
- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh api repos/{owner}/{repo}/actions/runs/{run_id}/jobs -X GET -f per_page=100`
- `gh api repos/{owner}/{repo}/actions/jobs/{job_id}/logs > /tmp/codex-gh-job-{job_id}-logs.zip`
- `gh run view <run-id> --log-failed`
Used by Codex to classify branch-related vs flaky/unrelated failures. Prefer the direct job log endpoint as soon as a job has failed because `gh run view --log-failed` may not produce failed-job logs until the overall workflow run completes.
Used by Codex to classify branch-related vs flaky/unrelated failures.
### Retry failed jobs only
@@ -72,11 +70,3 @@ Reruns only failed jobs (and dependencies) for a workflow run.
- `conclusion`
- `html_url`
- `head_sha`
### Actions run jobs API (`jobs[]`)
- `id`
- `name`
- `status`
- `conclusion`
- `html_url`

View File

@@ -18,8 +18,6 @@ Treat as **likely flaky or unrelated** when evidence points to transient or exte
- Cloud/service rate limits or transient API outages
- Non-deterministic failures in unrelated integration tests with known flake patterns
Do not patch likely flaky/unrelated failures. Use the retry budget for rerunnable failures, wait for pending jobs, or stop and report the blocker when the failure is persistent or infrastructure-owned.
If uncertain, inspect failed logs once before choosing rerun.
## Decision tree (fix vs rerun vs stop)
@@ -27,11 +25,9 @@ If uncertain, inspect failed logs once before choosing rerun.
1. If PR is merged/closed: stop.
2. If there are failed checks:
- Diagnose first.
- If checks are still pending but an individual job has already failed: fetch that job's logs and diagnose now.
- If branch-related: fix locally, commit, push.
- If likely flaky/unrelated and all checks for the current SHA are terminal: rerun failed jobs.
- If likely flaky/unrelated and not safely rerunnable: stop and report the blocker; do not edit unrelated tests, build scripts, CI configuration, dependency pins, or infrastructure code.
- If checks are still pending and no failed job is available yet: wait.
- If checks are still pending: wait.
3. If flaky reruns for the same SHA reach the configured limit (default 3): stop and report persistent failure.
4. Independently, process any new human review comments.
@@ -44,15 +40,12 @@ Address the comment when:
- The requested change does not conflict with the user's intent or recent guidance.
- The change can be made safely without unrelated refactors.
Fix valid human review feedback in code when possible, but do not post a GitHub reply to a human-authored comment/thread unless the user explicitly confirms the exact response.
Do not auto-fix when:
- The comment is ambiguous and needs clarification.
- The request conflicts with explicit user instructions.
- The proposed change requires product/design decisions the user has not made.
- The codebase is in a dirty/unrelated state that makes safe editing uncertain.
- The comment only needs a written answer or disagreement response; propose the reply to the user instead of posting it automatically.
## Stop-and-ask conditions
@@ -63,4 +56,3 @@ Stop and ask the user instead of continuing automatically when:
- The PR branch cannot be pushed.
- CI failures persist after the flaky retry budget.
- Reviewer feedback requires a product decision or cross-team coordination.
- A human review comment requires a written GitHub reply instead of a code change.
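
Read together, the decision tree condenses to something like the following sketch (field names mirror the watcher snapshot keys shown later in this compare; the helper itself is hypothetical, not the watcher's actual code):

def decide(pr, checks, retries_used, max_retries=3):
    # 1. Merged/closed PRs end the loop immediately.
    if pr["merged"] or pr["closed"]:
        return "stop"
    if checks["failed_count"] > 0:
        # 3. Flaky reruns for the same SHA are capped (default 3).
        if retries_used >= max_retries:
            return "stop_persistent_failure"
        # 2. Diagnose first: branch-related -> fix/commit/push;
        #    flaky/unrelated and all checks terminal -> rerun failed jobs.
        return "diagnose"
    if not checks["all_terminal"]:
        return "wait"
    # 4. New human review comments are processed independently of CI state.
    return "process_review_comments"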

View File

@@ -338,66 +338,6 @@ def failed_runs_from_workflow_runs(runs, head_sha):
    return failed_runs

def get_jobs_for_run(repo, run_id):
    endpoint = f"repos/{repo}/actions/runs/{run_id}/jobs"
    data = gh_json(["api", endpoint, "-X", "GET", "-f", "per_page=100"], repo=repo)
    if not isinstance(data, dict):
        raise GhCommandError("Unexpected payload from actions run jobs API")
    jobs = data.get("jobs") or []
    if not isinstance(jobs, list):
        raise GhCommandError("Expected `jobs` to be a list")
    return jobs

def failed_jobs_from_workflow_runs(repo, runs, head_sha):
    failed_jobs = []
    for run in runs:
        if not isinstance(run, dict):
            continue
        if str(run.get("head_sha") or "") != head_sha:
            continue
        run_id = run.get("id")
        if run_id in (None, ""):
            continue
        run_status = str(run.get("status") or "")
        run_conclusion = str(run.get("conclusion") or "")
        if run_status.lower() == "completed" and run_conclusion not in FAILED_RUN_CONCLUSIONS:
            continue
        jobs = get_jobs_for_run(repo, run_id)
        for job in jobs:
            if not isinstance(job, dict):
                continue
            conclusion = str(job.get("conclusion") or "")
            if conclusion not in FAILED_RUN_CONCLUSIONS:
                continue
            job_id = job.get("id")
            logs_endpoint = None
            if job_id not in (None, ""):
                logs_endpoint = f"repos/{repo}/actions/jobs/{job_id}/logs"
            failed_jobs.append(
                {
                    "run_id": run_id,
                    "workflow_name": run.get("name") or run.get("display_title") or "",
                    "run_status": run_status,
                    "run_conclusion": run_conclusion,
                    "job_id": job_id,
                    "job_name": str(job.get("name") or ""),
                    "status": str(job.get("status") or ""),
                    "conclusion": conclusion,
                    "html_url": str(job.get("html_url") or ""),
                    "logs_endpoint": logs_endpoint,
                }
            )
    failed_jobs.sort(
        key=lambda item: (
            str(item.get("workflow_name") or ""),
            str(item.get("job_name") or ""),
            str(item.get("job_id") or ""),
        )
    )
    return failed_jobs

def get_authenticated_login():
    data = gh_json(["api", "user"])
    if not isinstance(data, dict) or not data.get("login"):
@@ -628,7 +568,7 @@ def is_pr_ready_to_merge(pr, checks_summary, new_review_items):
return True
def recommend_actions(pr, checks_summary, failed_runs, failed_jobs, new_review_items, retries_used, max_retries):
def recommend_actions(pr, checks_summary, failed_runs, new_review_items, retries_used, max_retries):
actions = []
if pr["closed"] or pr["merged"]:
if new_review_items:
@@ -643,7 +583,7 @@ def recommend_actions(pr, checks_summary, failed_runs, failed_jobs, new_review_i
if new_review_items:
actions.append("process_review_comment")
has_failed_pr_checks = checks_summary["failed_count"] > 0 or bool(failed_jobs)
has_failed_pr_checks = checks_summary["failed_count"] > 0
if has_failed_pr_checks:
if checks_summary["all_terminal"] and retries_used >= max_retries:
actions.append("stop_exhausted_retries")
@@ -681,14 +621,12 @@ def collect_snapshot(args):
checks_summary = summarize_checks(checks)
workflow_runs = get_workflow_runs_for_sha(pr["repo"], pr["head_sha"])
failed_runs = failed_runs_from_workflow_runs(workflow_runs, pr["head_sha"])
failed_jobs = failed_jobs_from_workflow_runs(pr["repo"], workflow_runs, pr["head_sha"])
retries_used = current_retry_count(state, pr["head_sha"])
actions = recommend_actions(
pr,
checks_summary,
failed_runs,
failed_jobs,
new_review_items,
retries_used,
args.max_flaky_retries,
@@ -703,7 +641,6 @@ def collect_snapshot(args):
"pr": pr,
"checks": checks_summary,
"failed_runs": failed_runs,
"failed_jobs": failed_jobs,
"new_review_items": new_review_items,
"actions": actions,
"retry_state": {

View File

@@ -75,11 +75,6 @@ def test_collect_snapshot_fetches_review_items_before_ci(monkeypatch, tmp_path):
"failed_runs_from_workflow_runs",
lambda *args, **kwargs: call_order.append("failed_runs") or [],
)
monkeypatch.setattr(
gh_pr_watch,
"failed_jobs_from_workflow_runs",
lambda *args, **kwargs: call_order.append("failed_jobs") or [],
)
monkeypatch.setattr(
gh_pr_watch,
"recommend_actions",
@@ -105,7 +100,6 @@ def test_recommend_actions_prioritizes_review_comments():
sample_pr(),
sample_checks(failed_count=1),
[{"run_id": 99}],
[],
[{"kind": "review_comment", "id": "1"}],
0,
3,
@@ -125,7 +119,6 @@ def test_run_watch_keeps_polling_open_ready_to_merge_pr(monkeypatch):
"pr": sample_pr(),
"checks": sample_checks(),
"failed_runs": [],
"failed_jobs": [],
"new_review_items": [],
"actions": ["ready_to_merge"],
"retry_state": {
@@ -160,58 +153,3 @@ def test_run_watch_keeps_polling_open_ready_to_merge_pr(monkeypatch):
assert sleeps == [30, 30]
assert [event for event, _ in events] == ["snapshot", "snapshot"]
def test_failed_jobs_include_direct_logs_endpoint(monkeypatch):
jobs_by_run = {
99: [
{
"id": 555,
"name": "unit tests",
"status": "completed",
"conclusion": "failure",
"html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
},
{
"id": 556,
"name": "lint",
"status": "completed",
"conclusion": "success",
},
]
}
monkeypatch.setattr(
gh_pr_watch,
"get_jobs_for_run",
lambda repo, run_id: jobs_by_run[run_id],
)
failed_jobs = gh_pr_watch.failed_jobs_from_workflow_runs(
"openai/codex",
[
{
"id": 99,
"name": "CI",
"status": "in_progress",
"conclusion": "",
"head_sha": "abc123",
}
],
"abc123",
)
assert failed_jobs == [
{
"run_id": 99,
"workflow_name": "CI",
"run_status": "in_progress",
"run_conclusion": "",
"job_id": 555,
"job_name": "unit tests",
"status": "completed",
"conclusion": "failure",
"html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
"logs_endpoint": "repos/openai/codex/actions/jobs/555/logs",
}
]

View File

@@ -1,12 +0,0 @@
---
name: code-breaking-changes
description: Breaking changes
---
Search for breaking changes in external integration surfaces:
- app-server APIs
- CLI parameters
- configuration loading
- resuming sessions from existing rollouts
Do not stop after finding one issue; analyze all possible ways breaking changes can happen.

View File

@@ -1,11 +0,0 @@
---
name: code-review-change-size
description: Change size guidance (800 lines)
---
Unless the change is mechanical, the total number of changed lines should not exceed 800 lines.
For complex logic changes, the size should be under 500 lines.
If the change is larger, explain whether it can be split into reviewable stages and identify the smallest coherent stage to land first.
Base the staging suggestion on the actual diff, dependencies, and affected call sites.

View File

@@ -1,13 +0,0 @@
---
name: code-review-context
description: Model visible context
---
Codex maintains a context (history of messages) that is sent to the model in inference requests.
1. No history rewrite - the context must be built up incrementally.
2. Avoid frequent changes to context that cause cache misses.
3. No unbounded items - everything injected in the model context must have a bounded size and a hard cap.
4. No items larger than 10K tokens.
5. Highlight new individual items that can cross >1k tokens as P0. These need an additional manual review.
6. All injected fragments must be defined as structs in `core/context` and implement the `ContextualUserFragment` trait.

View File

@@ -1,14 +0,0 @@
---
name: code-review-testing
description: Test authoring guidance
---
For agent changes, prefer integration tests over unit tests. Integration tests are under `core/suite` and use `test_codex` to set up a test instance of codex.
Features that change the agent logic MUST add an integration test:
- Provide a list of major logic changes and user-facing behaviors that need to be tested.
If unit tests are needed, put them in a dedicated test file (*_tests.rs).
Avoid test-only functions in the main implementation.
Check whether there are existing helpers to make tests more streamlined and readable.

View File

@@ -1,14 +0,0 @@
---
name: code-review
description: Run a final code review on a pull request
---
Use subagents to review code using all code-review-* skills in this repository other than this orchestrator. One subagent per skill. Pass full skill path to subagents. Use xhigh reasoning.
You must return every single issue from every subagent. You can return an unlimited number of findings.
Use raw Markdown to report findings.
Number findings for ease of reference.
Each finding must include a specific file path and line number.
If the GitHub user running the review is the owner of the pull request, add a `code-reviewed` label.
Do not leave GitHub comments unless explicitly asked.

View File

@@ -1,48 +0,0 @@
---
name: codex-bug
description: Diagnose GitHub bug reports in openai/codex. Use when given a GitHub issue URL from openai/codex and asked to decide next steps such as verifying against the repo, requesting more info, or explaining why it is not a bug; follow any additional user-provided instructions.
---
# Codex Bug
## Overview
Diagnose a Codex GitHub bug report and decide the next action: verify against sources, request more info, or explain why it is not a bug.
## Workflow
1. Confirm the input
- Require a GitHub issue URL that points to `github.com/openai/codex/issues/…`.
- If the URL is missing or not in the right repo, ask the user for the correct link.
2. Network access
- Always access the issue over the network immediately, even if you think access is blocked or unavailable.
- Prefer the GitHub API over HTML pages because the HTML is noisy (see the fetch sketch after this workflow):
- Issue: `https://api.github.com/repos/openai/codex/issues/<number>`
- Comments: `https://api.github.com/repos/openai/codex/issues/<number>/comments`
- If the environment requires explicit approval, request it on demand via the tool and continue without additional user prompting.
- Only if the network attempt fails after requesting approval, explain what you can do offline (e.g., draft a response template) and ask how to proceed.
3. Read the issue
- Use the GitHub API responses (issue + comments) as the source of truth rather than scraping the HTML issue page.
- Extract: title, body, repro steps, expected vs actual, environment, logs, and any attachments.
- Note whether the report already includes logs or session details.
- If the report includes a thread ID, mention it in the summary and use it to look up the logs and session details if you have access to them.
4. Summarize the bug before investigating
- Before inspecting code, docs, or logs in depth, write a short summary of the report in your own words.
- Include the reported behavior, expected behavior, repro steps, environment, and what evidence is already attached or missing.
5. Decide the course of action
- **Verify with sources** when the report is specific and likely reproducible. Inspect relevant Codex files (or mention the files to inspect if access is unavailable).
- **Request more information** when the report is vague, missing repro steps, or lacks logs/environment.
- **Explain not a bug** when the report contradicts current behavior or documented constraints (cite the evidence from the issue and any local sources you checked).
6. Respond
- Provide a concise report of your findings and next steps.
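A minimal fetch sketch following the API-first guidance in step 2; the helper name and `issue_number` are illustrative, and real runs may need authentication (or the `gh` CLI) to avoid rate limits:
```python
import json
from urllib.request import urlopen

def fetch_codex_issue(issue_number: int):
    # Pull the issue and its comments from the API endpoints named in step 2
    # instead of scraping the HTML issue page.
    base = f"https://api.github.com/repos/openai/codex/issues/{issue_number}"
    with urlopen(base) as resp:
        issue = json.load(resp)
    with urlopen(f"{base}/comments") as resp:
        comments = json.load(resp)
    return issue, comments
```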

View File

@@ -1,127 +0,0 @@
---
name: codex-issue-digest
description: Run a GitHub issue digest for openai/codex by feature-area labels, all areas, and configurable time windows. Use when asked to summarize recent Codex bug reports or enhancement requests, especially for owner-specific labels such as tui, exec, app, or similar areas.
---
# Codex Issue Digest
## Objective
Produce a headline-first, insight-oriented digest of `openai/codex` issues for the requested feature-area labels over the previous 24 hours by default. Honor a different duration when the user asks for one, for example "past week" or "48 hours". Default to a summary-only response; include details only when requested.
Include only issues that currently have `bug` or `enhancement` plus at least one requested owner label. If the user asks for all areas or all labels, collect `bug`/`enhancement` issues across all labels.
## Inputs
- Feature-area labels, for example `tui exec`
- `all areas` / `all labels` to scan all current feature labels
- Optional repo override, default `openai/codex`
- Optional time window, default previous 24 hours; examples: `48h`, `7d`, `1w`, `past week` (parsed as sketched below)
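A trimmed sketch of how those phrases map to hours, mirroring the collector's `parse_duration_hours` (the full version also accepts spelled-out units such as `days` and `weeks`):
```python
import re

def duration_hours(text: str) -> float:
    # Strip a leading "past"/"last", then accept aliases and h/d/w suffixes.
    text = re.sub(r"^(past|last)\s+", "", text.strip().casefold())
    aliases = {"day": 24.0, "24h": 24.0, "week": 168.0, "7d": 168.0}
    if text in aliases:
        return aliases[text]
    match = re.fullmatch(r"(\d+(?:\.\d+)?)\s*(h|d|w)", text)
    if not match:
        raise ValueError(f"Unsupported duration: {text}")
    return float(match.group(1)) * {"h": 1.0, "d": 24.0, "w": 168.0}[match.group(2)]

assert duration_hours("past week") == 168.0
assert duration_hours("48h") == 48.0
```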
## Workflow
1. Run the collector from a current Codex repo checkout:
```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --labels tui exec --window-hours 24
```
Use `--window "past week"` or `--window-hours 168` when the user asks for a non-default duration. Use `--all-labels` when the user says all areas or all labels.
2. Use the JSON as the source of truth. It includes new issues, new issue comments, new reactions/upvotes, current labels, current reaction counts, model-ready `summary_inputs`, and detailed `digest_rows`.
3. Choose the output mode from the user's request:
- Default mode: start the report with `## Summary` and do not emit `## Details`.
- Details-upfront mode: if the user asks for details, a table, a full digest, "include details", or similar, start with `## Summary`, then include `## Details`.
- Follow-up details mode: if the user asks for more detail after a summary-only digest, produce `## Details` from the existing collector JSON when it is still available; otherwise rerun the collector.
4. In `## Summary`, write a headline-first executive summary:
- The first nonblank line under `## Summary` must be a single-line headline or judgment, not a bullet. It should be useful even if the reader stops there.
- On quiet days, prefer exactly: `No major issues reported by users.` Use this when there are no elevated rows, no newly repeated theme, and nothing that needs owner action.
- When users are surfacing notable issues, make the headline name the count or theme, for example `Two issues are being surfaced by users:`.
- Immediately under an active headline, list only the issues or themes driving attention, ordered by importance. Start each line with the row's `attention_marker` when present, then a concise owner-readable description and inline issue refs.
- Treat `🔥🔥` as headline-worthy and `🔥` as elevated. Do not add fire emoji yourself; only copy the row's `attention_marker`.
- Keep any extra summary detail after the headline to 1-3 terse lines, only when it adds a decision-relevant caveat, repeated theme, or owner action.
- Do not include routine counts, broad stats, or low-signal table summaries in `## Summary` unless they change the headline. Put metadata and optional counts in `## Details` or the footer.
- In default mode, end the report with a concise prompt such as `Want details? I can expand this into the issue table.` Keep this separate from the summary headline so the headline stays clean.
- Cluster and name themes yourself from `summary_inputs`; the collector intentionally does not hard-code issue categories.
- Use a cluster only when the issues genuinely share the same product problem. If several issues merely share a broad platform or label, describe them individually.
- Do not omit a repeated theme just because its individual issues fall below the details table cutoff. Several similar reports should be called out as a repeated customer concern.
- For single-issue rows, summarize the concern directly instead of calling it a cluster.
- Use inline numbered issue links from each relevant row's `ref_markdown`.
- Example quiet summary:
```markdown
## Summary
No major issues reported by users.
Source: collector v4, git `abc123def456`, window `2026-04-27T00:00:00Z` to `2026-04-28T00:00:00Z`.
Want details? I can expand this into the issue table.
```
- Example active summary:
```markdown
## Summary
Two issues are being surfaced by users:
🔥🔥 Terminal launch hangs on startup [1](https://github.com/openai/codex/issues/123)
🔥 Resume switches model providers unexpectedly [2](https://github.com/openai/codex/issues/456)
Source: collector v4, git `abc123def456`, window `2026-04-27T00:00:00Z` to `2026-04-28T00:00:00Z`.
Want details? I can expand this into the issue table.
```
5. In `## Details`, when details are requested, include a compact table only when useful:
- Prefer rows from `digest_rows`; include a `Refs` column using each row's `ref_markdown`.
- Keep the table short; omit low-signal rows when the summary already covers them.
- Use compact columns such as marker, area, type, description, interactions, and refs.
- The `Description` cell should be a short owner-readable phrase. Use row `description`, title, body excerpts, and recent comments, but do not mechanically copy the raw GitHub issue title when it contains incidental details.
- Include a clear quiet/no-concern sentence when there is no meaningful signal.
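- Example table shape (rows and values are illustrative only, reusing the active-summary issue above):
```markdown
| | Area | Type | Description | Interactions | Refs |
| --- | --- | --- | --- | --- | --- |
| 🔥🔥 | tui | bug | Terminal launch hangs on startup | 12 | [1](https://github.com/openai/codex/issues/123) |
```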
6. Use the JSON `attention_marker` exactly. It is empty for normal rows, `🔥` for elevated rows, and `🔥🔥` for very high-attention rows. The actual cutoffs are in `attention_thresholds`.
7. Use inline numbered references where a row or bullet points to issues, for example `Compaction bugs [1](https://github.com/openai/codex/issues/123), [2](https://github.com/openai/codex/issues/456)`. Do not add a separate footnotes section.
8. Label `interactions` as `Interactions`; it counts posts/comments/reactions during the requested window, not unique people.
9. Mention the collector `script_version`, repo checkout `git_head`, and time window in one compact source line. In default mode, put this before the details prompt so the final line still asks whether the user wants details. In details-upfront mode, it can be the footer.
## Reaction Handling
The collector uses GitHub reactions endpoints, which include `created_at`, to count reactions created during the digest window for hydrated issues. It reports both in-window reaction counts and current reaction totals. Treat current reaction totals as standing engagement, and treat `new_reactions` / `new_upvotes` as windowed activity.
By default, the collector fetches issue comments with `since=<window start>` and caps the number of comment pages per issue. This keeps very long historical threads from dominating a digest run and focuses the report on recent posts. Use `--fetch-all-comments` only when exhaustive comment history is more important than runtime.
GitHub issue search is still seeded by issue `updated_at`, so a purely reaction-only issue may be missed if reactions do not bump `updated_at`. Covering every reaction-only case would require either a persisted snapshot store or a broader scan of labeled issues.
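A minimal sketch of the in-window counting described above, matching the collector's `reaction_event_summary` semantics (half-open window, bot and anonymous reactions excluded):
```python
from datetime import datetime

def in_window_reactions(reactions, since, until):
    # Count reaction events whose created_at falls in [since, until),
    # keeping only human-authored reactions.
    total = upvotes = 0
    for reaction in reactions:
        created = datetime.fromisoformat(
            reaction["created_at"].replace("Z", "+00:00")
        )
        login = (reaction.get("user") or {}).get("login", "")
        if not (since <= created < until) or not login or login.lower().endswith("[bot]"):
            continue
        total += 1
        upvotes += reaction.get("content") == "+1"
    return {"total": total, "upvotes": upvotes}
```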
## Attention Markers
The collector scales attention markers by the requested time window. The baseline is 5 human user interactions for `🔥` and 10 for `🔥🔥` over 24 hours; longer or shorter windows scale those cutoffs linearly and round up. For example, a one-week report uses 35 and 70 interactions. Human user interactions are human-authored new issue posts, human-authored new comments, and human reactions created during the window, including upvotes. Bot posts and bot reactions are excluded. In prose, explain this as high user interaction rather than naming the emoji.
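The same scaling rule as a sketch, matching the collector's `attention_thresholds_for_window`:
```python
import math

BASE_WINDOW_HOURS = 24.0

def scaled_thresholds(window_hours: float) -> tuple[int, int]:
    # Scale the 24h baseline (5 -> elevated, 10 -> very high) linearly,
    # rounding up and keeping the two cutoffs distinct.
    scale = window_hours / BASE_WINDOW_HOURS
    elevated = max(1, math.ceil(5 * scale))
    very_high = max(elevated + 1, math.ceil(10 * scale))
    return elevated, very_high

assert scaled_thresholds(168) == (35, 70)  # one-week report
```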
## Freshness
The automation should run from a repo checkout that contains this skill. For shared daily use, prefer one of these patterns:
- Run the automation in a checkout that is refreshed before the automation starts, for example with `git pull --ff-only`.
- If the automation cannot safely mutate the checkout, have it report the current `git_head` from the collector output so readers know which skill/script version produced the digest.
## Sample Owner Prompt
```text
Use $codex-issue-digest to run the Codex issue digest for labels tui and exec over the previous 24 hours.
```
```text
Use $codex-issue-digest to run the Codex issue digest for all areas over the past week.
```
## Validation
Dry run the collector against recent issues:
```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --labels tui exec --window-hours 24
```
```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --all-labels --window "past week" --limit-issues 10
```
Run the focused script tests:
```bash
pytest .codex/skills/codex-issue-digest/scripts/test_collect_issue_digest.py
```

View File

@@ -1,4 +0,0 @@
interface:
display_name: "Codex Issue Digest"
short_description: "Summarize Codex issues by labels or all areas"
default_prompt: "Use $codex-issue-digest to run the Codex issue digest for labels tui and exec over the previous 24 hours."

View File

@@ -1,994 +0,0 @@
#!/usr/bin/env python3
"""Collect recent openai/codex issue activity for owner-focused digests."""
import argparse
import json
import math
import re
import subprocess
import sys
from datetime import datetime, timedelta, timezone
from pathlib import Path
from urllib.parse import quote
SCRIPT_VERSION = 4
QUALIFYING_KIND_LABELS = ("bug", "enhancement")
REACTION_KEYS = ("+1", "-1", "laugh", "hooray", "confused", "heart", "rocket", "eyes")
BASE_ATTENTION_WINDOW_HOURS = 24.0
ONE_ATTENTION_INTERACTION_THRESHOLD = 5
TWO_ATTENTION_INTERACTION_THRESHOLD = 10
ALL_LABEL_PHRASES = {"all", "all areas", "all labels", "all-areas", "all-labels", "*"}
class GhCommandError(RuntimeError):
pass
def parse_args():
parser = argparse.ArgumentParser(
description="Collect recent GitHub issue activity for a Codex owner digest."
)
parser.add_argument(
"--repo", default="openai/codex", help="OWNER/REPO, default openai/codex"
)
parser.add_argument(
"--labels",
nargs="+",
default=[],
help="Feature-area labels owned by the digest recipient, for example: tui exec",
)
parser.add_argument(
"--all-labels",
action="store_true",
help="Collect bug/enhancement issues across all feature-area labels",
)
parser.add_argument(
"--window",
help='Lookback duration such as "24h", "7d", "1w", or "past week"',
)
parser.add_argument(
"--window-hours", type=float, default=24.0, help="Lookback window"
)
parser.add_argument(
"--since", help="UTC ISO timestamp override for the window start"
)
parser.add_argument("--until", help="UTC ISO timestamp override for the window end")
parser.add_argument(
"--limit-issues",
type=int,
default=200,
help="Maximum candidate issues to hydrate after search",
)
parser.add_argument(
"--body-chars", type=int, default=1200, help="Issue body excerpt length"
)
parser.add_argument(
"--comment-chars", type=int, default=900, help="Comment excerpt length"
)
parser.add_argument(
"--max-comment-pages",
type=int,
default=3,
help=(
"Maximum pages of issue comments to hydrate per issue after applying the "
"window filter. Use 0 with --fetch-all-comments for no page cap."
),
)
parser.add_argument(
"--fetch-all-comments",
action="store_true",
help="Hydrate complete issue comment histories instead of only window-updated comments.",
)
return parser.parse_args()
def parse_timestamp(value, arg_name):
if value is None:
return None
normalized = value.strip()
if not normalized:
return None
if normalized.endswith("Z"):
normalized = f"{normalized[:-1]}+00:00"
try:
parsed = datetime.fromisoformat(normalized)
except ValueError as err:
raise ValueError(f"{arg_name} must be an ISO timestamp") from err
if parsed.tzinfo is None:
parsed = parsed.replace(tzinfo=timezone.utc)
return parsed.astimezone(timezone.utc)
def format_timestamp(value):
return (
value.astimezone(timezone.utc)
.replace(microsecond=0)
.isoformat()
.replace("+00:00", "Z")
)
def resolve_window(args):
until = parse_timestamp(args.until, "--until") or datetime.now(timezone.utc)
since = parse_timestamp(args.since, "--since")
if since is None:
hours = parse_duration_hours(getattr(args, "window", None))
if hours is None:
hours = getattr(args, "window_hours", 24.0)
if hours <= 0:
raise ValueError("window duration must be > 0")
since = until - timedelta(hours=hours)
if since >= until:
raise ValueError("--since must be before --until")
return since, until
def parse_duration_hours(value):
if value is None:
return None
text = value.strip().casefold().replace("_", " ")
if not text:
return None
text = re.sub(r"^(past|last)\s+", "", text)
aliases = {
"day": 24.0,
"24h": 24.0,
"week": 168.0,
"7d": 168.0,
}
if text in aliases:
return aliases[text]
match = re.fullmatch(r"(\d+(?:\.\d+)?)\s*(h|hr|hrs|hour|hours)", text)
if match:
return float(match.group(1))
match = re.fullmatch(r"(\d+(?:\.\d+)?)\s*(d|day|days)", text)
if match:
return float(match.group(1)) * 24.0
match = re.fullmatch(r"(\d+(?:\.\d+)?)\s*(w|week|weeks)", text)
if match:
return float(match.group(1)) * 168.0
raise ValueError(f"Unsupported duration: {value}")
def normalize_requested_labels(labels, all_labels=False):
out = []
seen = set()
for raw in labels:
for piece in raw.split(","):
label = piece.strip()
if not label:
continue
key = label.casefold()
if key not in seen:
out.append(label)
seen.add(key)
phrase = " ".join(label.casefold() for label in out)
if all_labels or phrase in ALL_LABEL_PHRASES:
return [], True
if not out:
raise ValueError(
"At least one feature-area label is required, or use --all-labels"
)
return out, False
def quote_label(label):
if re.fullmatch(r"[A-Za-z0-9_.:-]+", label):
return f"label:{label}"
escaped = label.replace('"', '\\"')
return f'label:"{escaped}"'
def build_search_queries(
repo, owner_labels, since, kind_labels=QUALIFYING_KIND_LABELS, all_labels=False
):
since_date = since.date().isoformat()
queries = []
if all_labels:
for kind_label in kind_labels:
queries.append(
" ".join(
[
f"repo:{repo}",
"is:issue",
f"updated:>={since_date}",
quote_label(kind_label),
]
)
)
return queries
for owner_label in owner_labels:
for kind_label in kind_labels:
queries.append(
" ".join(
[
f"repo:{repo}",
"is:issue",
f"updated:>={since_date}",
quote_label(owner_label),
quote_label(kind_label),
]
)
)
return queries
def _format_gh_error(cmd, err):
stdout = (err.stdout or "").strip()
stderr = (err.stderr or "").strip()
parts = [f"GitHub CLI command failed: {' '.join(cmd)}"]
if stdout:
parts.append(f"stdout: {stdout}")
if stderr:
parts.append(f"stderr: {stderr}")
return "\n".join(parts)
def gh_json(args):
cmd = ["gh", *args]
try:
proc = subprocess.run(cmd, check=True, capture_output=True, text=True)
except FileNotFoundError as err:
raise GhCommandError("`gh` command not found") from err
except subprocess.CalledProcessError as err:
raise GhCommandError(_format_gh_error(cmd, err)) from err
raw = proc.stdout.strip()
if not raw:
return None
try:
return json.loads(raw)
except json.JSONDecodeError as err:
raise GhCommandError(
f"Failed to parse JSON from gh output for {' '.join(args)}"
) from err
def gh_text(args):
cmd = ["gh", *args]
try:
proc = subprocess.run(cmd, check=True, capture_output=True, text=True)
except (FileNotFoundError, subprocess.CalledProcessError):
return ""
return proc.stdout.strip()
def git_head():
try:
proc = subprocess.run(
["git", "rev-parse", "--short=12", "HEAD"],
check=True,
capture_output=True,
text=True,
)
except (FileNotFoundError, subprocess.CalledProcessError):
return None
return proc.stdout.strip() or None
def skill_relative_path():
try:
return str(Path(__file__).resolve().relative_to(Path.cwd().resolve()))
except ValueError:
return str(Path(__file__).resolve())
def gh_api_list_paginated(endpoint, per_page=100, max_pages=None, with_metadata=False):
items = []
page = 1
truncated = False
while True:
sep = "&" if "?" in endpoint else "?"
page_endpoint = f"{endpoint}{sep}per_page={per_page}&page={page}"
payload = gh_json(["api", page_endpoint])
if payload is None:
break
if not isinstance(payload, list):
raise GhCommandError(f"Unexpected paginated payload from gh api {endpoint}")
items.extend(payload)
if len(payload) < per_page:
break
if max_pages is not None and page >= max_pages:
truncated = True
break
page += 1
if with_metadata:
return {
"items": items,
"truncated": truncated,
"pages": page,
"max_pages": max_pages,
}
return items
def search_issue_numbers(queries, limit):
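# Map each issue number to its reported updated_at so duplicate hits across
# queries collapse to one entry and results can be ordered newest-first.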
numbers = {}
for query in queries:
page = 1
seen_for_query = 0
while True:
payload = gh_json(
[
"api",
"search/issues",
"-X",
"GET",
"-f",
f"q={query}",
"-f",
"sort=updated",
"-f",
"order=desc",
"-f",
"per_page=100",
"-f",
f"page={page}",
]
)
if not isinstance(payload, dict):
raise GhCommandError("Unexpected payload from GitHub issue search")
items = payload.get("items") or []
if not isinstance(items, list):
raise GhCommandError("Expected search `items` to be a list")
for item in items:
if not isinstance(item, dict):
continue
number = item.get("number")
if isinstance(number, int):
numbers[number] = str(item.get("updated_at") or "")
seen_for_query += 1
if len(items) < 100 or seen_for_query >= limit:
break
page += 1
ordered = sorted(
numbers, key=lambda number: (numbers[number], number), reverse=True
)
return ordered[:limit]
def fetch_issue(repo, number):
payload = gh_json(["api", f"repos/{repo}/issues/{number}"])
if not isinstance(payload, dict):
raise GhCommandError(f"Unexpected issue payload for #{number}")
return payload
def fetch_comments(repo, number, since=None, max_pages=None):
endpoint = f"repos/{repo}/issues/{number}/comments"
if since is not None:
endpoint = f"{endpoint}?since={quote(format_timestamp(since), safe='')}"
return gh_api_list_paginated(
endpoint,
max_pages=max_pages,
with_metadata=True,
)
def fetch_reactions_for_item(endpoint, item):
if reaction_summary(item)["total"] <= 0:
return []
return gh_api_list_paginated(endpoint)
def fetch_comment_reactions(repo, comments):
reactions_by_comment_id = {}
for comment in comments:
comment_id = comment.get("id")
if comment_id in (None, ""):
continue
endpoint = f"repos/{repo}/issues/comments/{comment_id}/reactions"
reactions_by_comment_id[comment_id] = fetch_reactions_for_item(
endpoint, comment
)
return reactions_by_comment_id
def extract_login(user_obj):
if isinstance(user_obj, dict):
return str(user_obj.get("login") or "")
return ""
def is_bot_login(login):
return bool(login) and login.lower().endswith("[bot]")
def is_human_user(user_obj):
login = extract_login(user_obj)
return bool(login) and not is_bot_login(login)
def label_names(issue):
labels = []
for label in issue.get("labels") or []:
if isinstance(label, dict) and label.get("name"):
labels.append(str(label["name"]))
return sorted(labels, key=str.casefold)
def matching_labels(labels, requested):
labels_by_key = {label.casefold(): label for label in labels}
return [label for label in requested if label.casefold() in labels_by_key]
def area_labels(labels):
kind_keys = {label.casefold() for label in QUALIFYING_KIND_LABELS}
return [label for label in labels if label.casefold() not in kind_keys]
def attention_thresholds_for_window(window_hours):
if window_hours <= 0:
raise ValueError("window_hours must be > 0")
window_hours = round(window_hours, 6)
scale = window_hours / BASE_ATTENTION_WINDOW_HOURS
elevated = max(1, math.ceil(ONE_ATTENTION_INTERACTION_THRESHOLD * scale))
very_high = max(
elevated + 1, math.ceil(TWO_ATTENTION_INTERACTION_THRESHOLD * scale)
)
return {
"base_window_hours": BASE_ATTENTION_WINDOW_HOURS,
"window_hours": round(window_hours, 3),
"scale": round(scale, 3),
"elevated": elevated,
"very_high": very_high,
}
def attention_level_for(user_interactions, attention_thresholds=None):
thresholds = attention_thresholds or attention_thresholds_for_window(
BASE_ATTENTION_WINDOW_HOURS
)
if user_interactions >= thresholds["very_high"]:
return 2
if user_interactions >= thresholds["elevated"]:
return 1
return 0
def attention_marker_for(user_interactions, attention_thresholds=None):
return "🔥" * attention_level_for(user_interactions, attention_thresholds)
def reaction_summary(item):
reactions = item.get("reactions")
if not isinstance(reactions, dict):
return {"total": 0, "counts": {}}
counts = {}
for key in REACTION_KEYS:
value = reactions.get(key, 0)
if isinstance(value, int) and value:
counts[key] = value
total = reactions.get("total_count")
if not isinstance(total, int):
total = sum(counts.values())
return {"total": total, "counts": counts}
def reaction_event_summary(reactions, since, until):
counts = {}
total = 0
for reaction in reactions or []:
if not isinstance(reaction, dict):
continue
if not is_in_window(str(reaction.get("created_at") or ""), since, until):
continue
if not is_human_user(reaction.get("user")):
continue
content = str(reaction.get("content") or "")
if not content:
continue
counts[content] = counts.get(content, 0) + 1
total += 1
return {
"total": total,
"counts": counts,
"upvotes": counts.get("+1", 0),
}
def compact_text(value, limit):
text = re.sub(r"\s+", " ", str(value or "")).strip()
if limit <= 0:
return ""
if len(text) <= limit:
return text
return f"{text[: max(limit - 1, 0)].rstrip()}..."
def clean_title_for_description(title):
cleaned = re.sub(r"\s+", " ", str(title or "")).strip()
cleaned = re.sub(
r"^(codex(?: desktop| app|\.app| cli)?|desktop|windows codex app)\s*[:,-]\s*",
"",
cleaned,
flags=re.IGNORECASE,
)
cleaned = re.sub(r"^on windows,\s*", "Windows: ", cleaned, flags=re.IGNORECASE)
cleaned = cleaned.strip(" -:;")
return compact_text(cleaned, 80) or "Issue needs owner review"
def issue_description(issue):
return clean_title_for_description(issue.get("title"))
def is_in_window(timestamp, since, until):
parsed = parse_timestamp(timestamp, "timestamp")
if parsed is None:
return False
return since <= parsed < until
def summarize_comment(
comment, comment_chars, reaction_events=None, since=None, until=None
):
reactions = reaction_summary(comment)
new_reactions = (
reaction_event_summary(reaction_events, since, until)
if since is not None and until is not None
else {"total": 0, "counts": {}, "upvotes": 0}
)
human_user_interaction = is_human_user(comment.get("user"))
return {
"id": comment.get("id"),
"author": extract_login(comment.get("user")),
"author_association": str(comment.get("author_association") or ""),
"created_at": str(comment.get("created_at") or ""),
"updated_at": str(comment.get("updated_at") or ""),
"url": str(comment.get("html_url") or ""),
"human_user_interaction": human_user_interaction,
"reactions": reactions["counts"],
"reaction_total": reactions["total"],
"new_reactions": new_reactions["total"],
"new_upvotes": new_reactions["upvotes"],
"new_reaction_counts": new_reactions["counts"],
"body_excerpt": compact_text(comment.get("body"), comment_chars),
}
def summarize_issue(
issue,
comments,
requested_labels,
since,
until,
body_chars,
comment_chars,
issue_reaction_events=None,
comment_reactions_by_id=None,
all_labels=False,
comments_hydration=None,
attention_thresholds=None,
):
labels = label_names(issue)
labels_by_key = {label.casefold() for label in labels}
kind_labels = [
label for label in QUALIFYING_KIND_LABELS if label.casefold() in labels_by_key
]
if all_labels:
owner_labels = area_labels(labels) or ["unlabeled"]
else:
owner_labels = matching_labels(labels, requested_labels)
if not kind_labels or not owner_labels:
return None
updated_at = str(issue.get("updated_at") or "")
if not is_in_window(updated_at, since, until):
return None
new_issue = is_in_window(str(issue.get("created_at") or ""), since, until)
comment_reactions_by_id = comment_reactions_by_id or {}
new_comments = [
summarize_comment(
comment,
comment_chars,
reaction_events=comment_reactions_by_id.get(comment.get("id")),
since=since,
until=until,
)
for comment in comments
if is_in_window(str(comment.get("created_at") or ""), since, until)
]
new_comments.sort(key=lambda item: (item["created_at"], str(item["id"])))
issue_reactions = reaction_summary(issue)
issue_reaction_events_summary = reaction_event_summary(
issue_reaction_events, since, until
)
comment_reaction_events_summary = reaction_event_summary(
[
reaction
for reactions in comment_reactions_by_id.values()
for reaction in reactions
],
since,
until,
)
new_reactions = (
issue_reaction_events_summary["total"]
+ comment_reaction_events_summary["total"]
)
new_upvotes = (
issue_reaction_events_summary["upvotes"]
+ comment_reaction_events_summary["upvotes"]
)
all_comment_reaction_total = sum(
reaction_summary(comment)["total"] for comment in comments
)
new_comment_reaction_total = sum(
comment["reaction_total"] for comment in new_comments
)
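# A human user interaction is a new human-authored issue post, a new
# human-authored comment, or an in-window human reaction (bots excluded).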
new_issue_user_interaction = new_issue and is_human_user(issue.get("user"))
new_comment_user_interactions = sum(
1 for comment in new_comments if comment["human_user_interaction"]
)
user_interactions = (
int(new_issue_user_interaction) + new_comment_user_interactions + new_reactions
)
attention_level = attention_level_for(user_interactions, attention_thresholds)
attention_marker = attention_marker_for(user_interactions, attention_thresholds)
updated_without_visible_new_post = (
not new_issue and not new_comments and new_reactions == 0
)
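# Engagement blends windowed activity (new comments weighted 3x, new
# reactions) with standing reaction totals, capping the issue's historical
# comment count contribution at 10.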
engagement_score = (
len(new_comments) * 3
+ new_reactions
+ issue_reactions["total"]
+ new_comment_reaction_total
+ min(int(issue.get("comments") or len(comments) or 0), 10)
)
return {
"number": issue.get("number"),
"title": str(issue.get("title") or ""),
"description": issue_description(issue),
"url": str(issue.get("html_url") or ""),
"state": str(issue.get("state") or ""),
"author": extract_login(issue.get("user")),
"author_association": str(issue.get("author_association") or ""),
"created_at": str(issue.get("created_at") or ""),
"updated_at": updated_at,
"labels": labels,
"kind_labels": kind_labels,
"owner_labels": owner_labels,
"comments_total": int(issue.get("comments") or len(comments) or 0),
"comments_hydration": comments_hydration
or {
"fetched": len(comments),
"since": None,
"truncated": False,
"max_pages": None,
},
"issue_reactions": issue_reactions["counts"],
"issue_reaction_total": issue_reactions["total"],
"comment_reaction_total": all_comment_reaction_total,
"new_comment_reaction_total": new_comment_reaction_total,
"new_issue_reactions": issue_reaction_events_summary["total"],
"new_issue_upvotes": issue_reaction_events_summary["upvotes"],
"new_comment_reactions": comment_reaction_events_summary["total"],
"new_comment_upvotes": comment_reaction_events_summary["upvotes"],
"new_reactions": new_reactions,
"new_upvotes": new_upvotes,
"user_interactions": user_interactions,
"attention": attention_level > 0,
"attention_level": attention_level,
"attention_marker": attention_marker,
"engagement_score": engagement_score,
"activity": {
"new_issue": new_issue,
"new_comments": len(new_comments),
"new_human_comments": new_comment_user_interactions,
"new_reactions": new_reactions,
"new_upvotes": new_upvotes,
"updated_without_visible_new_post": updated_without_visible_new_post,
},
"body_excerpt": compact_text(issue.get("body"), body_chars),
"new_comments": new_comments,
}
def count_by_label(issues, labels):
out = {}
for label in labels:
matching = [issue for issue in issues if label in issue["owner_labels"]]
out[label] = {
"issues": len(matching),
"new_issues": sum(
1 for issue in matching if issue["activity"]["new_issue"]
),
"new_comments": sum(
issue["activity"]["new_comments"] for issue in matching
),
}
return out
def count_by_kind(issues):
out = {}
for kind in QUALIFYING_KIND_LABELS:
matching = [issue for issue in issues if kind in issue["kind_labels"]]
out[kind] = {
"issues": len(matching),
"new_issues": sum(
1 for issue in matching if issue["activity"]["new_issue"]
),
"new_comments": sum(
issue["activity"]["new_comments"] for issue in matching
),
}
return out
def hot_items(issues, limit=8):
ranked = sorted(
issues,
key=lambda issue: (
issue["attention"],
issue["attention_level"],
issue["user_interactions"],
issue["engagement_score"],
issue["activity"]["new_comments"],
issue["issue_reaction_total"] + issue["comment_reaction_total"],
issue["updated_at"],
),
reverse=True,
)
return [
{
"number": issue["number"],
"title": issue["title"],
"url": issue["url"],
"owner_labels": issue["owner_labels"],
"kind_labels": issue["kind_labels"],
"attention": issue["attention"],
"attention_level": issue["attention_level"],
"attention_marker": issue["attention_marker"],
"user_interactions": issue["user_interactions"],
"new_reactions": issue["new_reactions"],
"new_upvotes": issue["new_upvotes"],
"engagement_score": issue["engagement_score"],
"new_comments": issue["activity"]["new_comments"],
"reaction_total": issue["issue_reaction_total"]
+ issue["comment_reaction_total"],
}
for issue in ranked[:limit]
if issue["engagement_score"] > 0
]
def ranked_digest_issues(issues):
return sorted(
issues,
key=lambda issue: (
issue["attention"],
issue["attention_level"],
issue["user_interactions"],
issue["engagement_score"],
issue["activity"]["new_comments"],
issue["updated_at"],
),
reverse=True,
)
def digest_rows(issues, limit=10, ref_map=None):
ranked = ranked_digest_issues(issues)
if ref_map is None:
ref_map = {issue["number"]: ref for ref, issue in enumerate(ranked, start=1)}
rows = []
for issue in ranked[:limit]:
ref = ref_map[issue["number"]]
reaction_total = issue["issue_reaction_total"] + issue["comment_reaction_total"]
rows.append(
{
"ref": ref,
"ref_markdown": f"[{ref}]({issue['url']})",
"marker": issue["attention_marker"],
"attention_marker": issue["attention_marker"],
"number": issue["number"],
"description": issue["description"],
"title": issue["title"],
"url": issue["url"],
"area": ", ".join(issue["owner_labels"]),
"kind": ", ".join(issue["kind_labels"]),
"state": issue["state"],
"interactions": issue["user_interactions"],
"user_interactions": issue["user_interactions"],
"new_reactions": issue["new_reactions"],
"new_upvotes": issue["new_upvotes"],
"current_reactions": reaction_total,
}
)
return rows
def issue_ref_markdown(issue, ref_map):
ref = ref_map[issue["number"]]
return f"[{ref}]({issue['url']})"
def summary_inputs(issues, limit=80, ref_map=None):
ranked = ranked_digest_issues(issues)
if ref_map is None:
ref_map = {issue["number"]: ref for ref, issue in enumerate(ranked, start=1)}
rows = []
for issue in ranked[:limit]:
rows.append(
{
"ref": ref_map[issue["number"]],
"ref_markdown": issue_ref_markdown(issue, ref_map),
"number": issue["number"],
"title": issue["title"],
"description": issue["description"],
"url": issue["url"],
"labels": issue["labels"],
"owner_labels": issue["owner_labels"],
"kind_labels": issue["kind_labels"],
"state": issue.get("state", ""),
"attention_marker": issue.get("attention_marker", ""),
"interactions": issue["user_interactions"],
"new_comments": issue["activity"].get("new_comments", 0),
"new_reactions": issue.get("new_reactions", 0),
"new_upvotes": issue.get("new_upvotes", 0),
"current_reactions": issue.get("issue_reaction_total", 0)
+ issue.get("comment_reaction_total", 0),
}
)
return rows
def collect_digest(args):
since, until = resolve_window(args)
window_hours = (until - since).total_seconds() / 3600
attention_thresholds = attention_thresholds_for_window(window_hours)
requested_labels, all_labels = normalize_requested_labels(
args.labels, all_labels=args.all_labels
)
queries = build_search_queries(
args.repo, requested_labels, since, all_labels=all_labels
)
numbers = search_issue_numbers(queries, args.limit_issues)
gh_version_output = gh_text(["--version"])
issues = []
max_comment_pages = None if args.max_comment_pages <= 0 else args.max_comment_pages
for number in numbers:
issue = fetch_issue(args.repo, number)
comments_since = None if args.fetch_all_comments else since
comments_payload = fetch_comments(
args.repo,
number,
since=comments_since,
max_pages=max_comment_pages,
)
comments = comments_payload["items"]
issue_reaction_events = fetch_reactions_for_item(
f"repos/{args.repo}/issues/{number}/reactions", issue
)
comment_reactions_by_id = fetch_comment_reactions(args.repo, comments)
comments_hydration = {
"fetched": len(comments),
"total": int(issue.get("comments") or len(comments) or 0),
"since": format_timestamp(comments_since) if comments_since else None,
"truncated": comments_payload["truncated"],
"max_pages": comments_payload["max_pages"],
"fetch_all_comments": args.fetch_all_comments,
}
summary = summarize_issue(
issue,
comments,
requested_labels,
since,
until,
args.body_chars,
args.comment_chars,
issue_reaction_events=issue_reaction_events,
comment_reactions_by_id=comment_reactions_by_id,
all_labels=all_labels,
comments_hydration=comments_hydration,
attention_thresholds=attention_thresholds,
)
if summary is not None:
issues.append(summary)
issues.sort(
key=lambda issue: (issue["updated_at"], int(issue["number"] or 0)), reverse=True
)
totals = {
"candidate_issues": len(numbers),
"included_issues": len(issues),
"new_issues": sum(1 for issue in issues if issue["activity"]["new_issue"]),
"issues_with_new_comments": sum(
1 for issue in issues if issue["activity"]["new_comments"] > 0
),
"new_comments": sum(issue["activity"]["new_comments"] for issue in issues),
"comments_fetched": sum(
issue["comments_hydration"]["fetched"] for issue in issues
),
"issues_with_truncated_comment_hydration": sum(
1 for issue in issues if issue["comments_hydration"]["truncated"]
),
"updated_without_visible_new_post": sum(
1
for issue in issues
if issue["activity"]["updated_without_visible_new_post"]
),
"issue_reactions_current_total": sum(
issue["issue_reaction_total"] for issue in issues
),
"comment_reactions_current_total": sum(
issue["comment_reaction_total"] for issue in issues
),
"new_reactions": sum(issue["new_reactions"] for issue in issues),
"new_upvotes": sum(issue["new_upvotes"] for issue in issues),
"user_interactions": sum(issue["user_interactions"] for issue in issues),
}
ranked = ranked_digest_issues(issues)
ref_map = {issue["number"]: ref for ref, issue in enumerate(ranked, start=1)}
filter_label = "all" if all_labels else requested_labels
return {
"generated_at": format_timestamp(datetime.now(timezone.utc)),
"source": {
"repo": args.repo,
"skill": "codex-issue-digest",
"collector": skill_relative_path(),
"script_version": SCRIPT_VERSION,
"git_head": git_head(),
"gh_version": gh_version_output.splitlines()[0]
if gh_version_output
else None,
},
"window": {
"since": format_timestamp(since),
"until": format_timestamp(until),
"hours": round(window_hours, 3),
},
"attention_thresholds": attention_thresholds,
"filters": {
"owner_labels": filter_label,
"all_labels": all_labels,
"kind_labels": list(QUALIFYING_KIND_LABELS),
},
"collection_notes": [
"Issues are selected when they currently have bug or enhancement plus at least one requested owner label and were updated during the window.",
"By default, issue comments are fetched with since=window_start and a max page cap to avoid long historical threads; use --fetch-all-comments when exhaustive comment history is needed.",
"New issue comments are filtered by comment creation time within the window from the fetched comment set.",
"Reaction events are counted by GitHub reaction created_at timestamps for hydrated issues and fetched comments.",
"Current reaction totals are standing engagement signals; new_reactions and new_upvotes are windowed activity.",
"The collector does not assign semantic clusters; use summary_inputs as model-ready evidence for report-time clustering.",
"Pure reaction-only issues may be missed if GitHub issue search does not surface them via updated_at.",
"Issues updated during the window without a new issue body or new comment are retained because label/status edits can still be useful owner signals.",
],
"totals": totals,
"by_owner_label": count_by_label(
issues,
sorted(
{area for issue in issues for area in issue["owner_labels"]},
key=str.casefold,
)
if all_labels
else requested_labels,
),
"by_kind_label": count_by_kind(issues),
"hot_items": hot_items(issues),
"summary_inputs": summary_inputs(issues, ref_map=ref_map),
"digest_rows": digest_rows(issues, ref_map=ref_map),
"issues": issues,
}
def main():
args = parse_args()
try:
digest = collect_digest(args)
except (GhCommandError, RuntimeError, ValueError) as err:
sys.stderr.write(f"collect_issue_digest.py error: {err}\n")
return 1
sys.stdout.write(json.dumps(digest, indent=2, sort_keys=True) + "\n")
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,685 +0,0 @@
import importlib.util
from datetime import timezone
from pathlib import Path
MODULE_PATH = Path(__file__).with_name("collect_issue_digest.py")
MODULE_SPEC = importlib.util.spec_from_file_location(
"collect_issue_digest", MODULE_PATH
)
collect_issue_digest = importlib.util.module_from_spec(MODULE_SPEC)
assert MODULE_SPEC.loader is not None
MODULE_SPEC.loader.exec_module(collect_issue_digest)
def test_build_search_queries_uses_each_owner_and_kind_label():
since = collect_issue_digest.parse_timestamp("2026-04-25T12:34:56Z", "--since")
queries = collect_issue_digest.build_search_queries(
"openai/codex", ["tui", "exec"], since
)
assert queries == [
"repo:openai/codex is:issue updated:>=2026-04-25 label:tui label:bug",
"repo:openai/codex is:issue updated:>=2026-04-25 label:tui label:enhancement",
"repo:openai/codex is:issue updated:>=2026-04-25 label:exec label:bug",
"repo:openai/codex is:issue updated:>=2026-04-25 label:exec label:enhancement",
]
def test_build_search_queries_can_scan_all_labels():
since = collect_issue_digest.parse_timestamp("2026-04-25T12:34:56Z", "--since")
queries = collect_issue_digest.build_search_queries(
"openai/codex", [], since, all_labels=True
)
assert queries == [
"repo:openai/codex is:issue updated:>=2026-04-25 label:bug",
"repo:openai/codex is:issue updated:>=2026-04-25 label:enhancement",
]
def test_normalize_requested_labels_accepts_all_area_phrases():
assert collect_issue_digest.normalize_requested_labels(["all", "areas"]) == (
[],
True,
)
assert collect_issue_digest.normalize_requested_labels(["all-labels"]) == (
[],
True,
)
def test_search_issue_numbers_requests_updated_sort(monkeypatch):
calls = []
def fake_gh_json(args):
calls.append(args)
return {
"items": [
{"number": 1, "updated_at": "2026-04-25T00:00:00Z"},
]
}
monkeypatch.setattr(collect_issue_digest, "gh_json", fake_gh_json)
assert collect_issue_digest.search_issue_numbers(["query"], limit=10) == [1]
assert "-f" in calls[0]
assert "sort=updated" in calls[0]
assert "order=desc" in calls[0]
def test_search_issue_numbers_applies_limit_per_query(monkeypatch):
calls = []
def fake_gh_json(args):
calls.append(args)
query = next(
value.removeprefix("q=") for value in args if value.startswith("q=")
)
page = int(
next(
value.removeprefix("page=")
for value in args
if value.startswith("page=")
)
)
base = 10_000 if query == "first" else 20_000
offset = (page - 1) * 100
return {
"items": [
{
"number": base + offset + idx,
"updated_at": f"2026-04-25T00:{idx:02d}:00Z",
}
for idx in range(100)
]
}
monkeypatch.setattr(collect_issue_digest, "gh_json", fake_gh_json)
collect_issue_digest.search_issue_numbers(["first", "second"], limit=150)
queried_pages = [
(
next(
value.removeprefix("q=") for value in args if value.startswith("q=")
),
next(
value.removeprefix("page=")
for value in args
if value.startswith("page=")
),
)
for args in calls
]
assert queried_pages == [
("first", "1"),
("first", "2"),
("second", "1"),
("second", "2"),
]
def test_summarize_issue_keeps_new_comments_and_reaction_signals():
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
issue = {
"number": 123,
"title": "TUI does not redraw",
"html_url": "https://github.com/openai/codex/issues/123",
"state": "open",
"created_at": "2026-04-24T20:00:00Z",
"updated_at": "2026-04-25T10:00:00Z",
"user": {"login": "alice"},
"author_association": "NONE",
"comments": 2,
"body": "The terminal freezes after resize.",
"labels": [{"name": "bug"}, {"name": "tui"}],
"reactions": {"total_count": 3, "+1": 2, "rocket": 1},
}
comments = [
{
"id": 1,
"created_at": "2026-04-25T11:00:00Z",
"updated_at": "2026-04-25T11:00:00Z",
"html_url": "https://github.com/openai/codex/issues/123#issuecomment-1",
"user": {"login": "bob"},
"author_association": "MEMBER",
"body": "I can reproduce this on main.",
"reactions": {"total_count": 4, "heart": 1, "+1": 3},
},
{
"id": 2,
"created_at": "2026-04-24T11:00:00Z",
"updated_at": "2026-04-24T11:00:00Z",
"html_url": "https://github.com/openai/codex/issues/123#issuecomment-2",
"user": {"login": "carol"},
"author_association": "NONE",
"body": "Older comment.",
"reactions": {"total_count": 1, "eyes": 1},
},
]
summary = collect_issue_digest.summarize_issue(
issue,
comments,
["tui", "exec"],
since,
until,
body_chars=200,
comment_chars=200,
)
assert summary == {
"number": 123,
"title": "TUI does not redraw",
"description": "TUI does not redraw",
"url": "https://github.com/openai/codex/issues/123",
"state": "open",
"author": "alice",
"author_association": "NONE",
"created_at": "2026-04-24T20:00:00Z",
"updated_at": "2026-04-25T10:00:00Z",
"labels": ["bug", "tui"],
"kind_labels": ["bug"],
"owner_labels": ["tui"],
"comments_total": 2,
"comments_hydration": {
"fetched": 2,
"since": None,
"truncated": False,
"max_pages": None,
},
"issue_reactions": {"+1": 2, "rocket": 1},
"issue_reaction_total": 3,
"comment_reaction_total": 5,
"new_comment_reaction_total": 4,
"new_issue_reactions": 0,
"new_issue_upvotes": 0,
"new_comment_reactions": 0,
"new_comment_upvotes": 0,
"new_reactions": 0,
"new_upvotes": 0,
"user_interactions": 1,
"attention": False,
"attention_level": 0,
"attention_marker": "",
"engagement_score": 12,
"activity": {
"new_issue": False,
"new_comments": 1,
"new_human_comments": 1,
"new_reactions": 0,
"new_upvotes": 0,
"updated_without_visible_new_post": False,
},
"body_excerpt": "The terminal freezes after resize.",
"new_comments": [
{
"id": 1,
"author": "bob",
"author_association": "MEMBER",
"created_at": "2026-04-25T11:00:00Z",
"updated_at": "2026-04-25T11:00:00Z",
"url": "https://github.com/openai/codex/issues/123#issuecomment-1",
"human_user_interaction": True,
"reactions": {"+1": 3, "heart": 1},
"reaction_total": 4,
"new_reactions": 0,
"new_upvotes": 0,
"new_reaction_counts": {},
"body_excerpt": "I can reproduce this on main.",
}
],
}
def test_summarize_issue_filters_non_owner_or_non_kind_labels():
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
base_issue = {
"number": 1,
"title": "Question",
"created_at": "2026-04-25T01:00:00Z",
"updated_at": "2026-04-25T01:00:00Z",
"labels": [{"name": "question"}, {"name": "tui"}],
}
assert (
collect_issue_digest.summarize_issue(
base_issue,
[],
["tui"],
since,
until,
body_chars=100,
comment_chars=100,
)
is None
)
issue_without_owner = dict(base_issue)
issue_without_owner["labels"] = [{"name": "bug"}, {"name": "app"}]
assert (
collect_issue_digest.summarize_issue(
issue_without_owner,
[],
["tui"],
since,
until,
body_chars=100,
comment_chars=100,
)
is None
)
def test_resolve_window_defaults_to_previous_hours():
class Args:
since = None
until = "2026-04-26T12:00:00Z"
window_hours = 24
since, until = collect_issue_digest.resolve_window(Args())
assert since.isoformat() == "2026-04-25T12:00:00+00:00"
assert until.tzinfo == timezone.utc
def test_parse_duration_hours_accepts_common_phrases():
assert collect_issue_digest.parse_duration_hours("past week") == 168
assert collect_issue_digest.parse_duration_hours("48h") == 48
assert collect_issue_digest.parse_duration_hours("2 days") == 48
assert collect_issue_digest.parse_duration_hours("1w") == 168
def test_attention_thresholds_scale_by_window_length():
one_day = collect_issue_digest.attention_thresholds_for_window(24)
assert one_day["elevated"] == 5
assert one_day["very_high"] == 10
half_day = collect_issue_digest.attention_thresholds_for_window(12)
assert half_day["elevated"] == 3
assert half_day["very_high"] == 5
week = collect_issue_digest.attention_thresholds_for_window(168)
assert week["elevated"] == 35
assert week["very_high"] == 70
assert collect_issue_digest.attention_marker_for(34, week) == ""
assert collect_issue_digest.attention_marker_for(35, week) == "🔥"
assert collect_issue_digest.attention_marker_for(70, week) == "🔥🔥"
def test_fetch_comments_uses_since_filter_and_page_cap(monkeypatch):
calls = []
def fake_gh_json(args):
calls.append(args)
return [{"id": idx} for idx in range(100)]
monkeypatch.setattr(collect_issue_digest, "gh_json", fake_gh_json)
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
payload = collect_issue_digest.fetch_comments(
"openai/codex", 123, since=since, max_pages=1
)
assert len(payload["items"]) == 100
assert payload["truncated"] is True
assert payload["max_pages"] == 1
assert calls == [
[
"api",
"repos/openai/codex/issues/123/comments?since=2026-04-25T00%3A00%3A00Z&per_page=100&page=1",
]
]
def test_issue_description_prefers_title_over_body_noise():
issue = {
"title": "Codex.app GUI: MCP child processes not reaped after task completion",
"body": "A later crash mention should not override the title-level symptom.",
"labels": [{"name": "app"}, {"name": "bug"}],
}
description = collect_issue_digest.issue_description(issue)
assert "MCP child processes" in description
assert "crash" not in description.casefold()
def test_attention_markers_count_human_user_interactions():
since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
issue = {
"number": 456,
"title": "Agent context is exploding",
"html_url": "https://github.com/openai/codex/issues/456",
"state": "open",
"created_at": "2026-04-25T01:00:00Z",
"updated_at": "2026-04-25T12:00:00Z",
"user": {"login": "alice"},
"labels": [{"name": "bug"}, {"name": "agent"}],
}
comments = [
{
"id": idx,
"created_at": "2026-04-25T02:00:00Z",
"updated_at": "2026-04-25T02:00:00Z",
"user": {"login": f"user-{idx}"},
"body": "same here",
}
for idx in range(4)
]
comments.append(
{
"id": 99,
"created_at": "2026-04-25T02:00:00Z",
"updated_at": "2026-04-25T02:00:00Z",
"user": {"login": "github-actions[bot]"},
"body": "duplicate bot note",
}
)
summary = collect_issue_digest.summarize_issue(
issue,
comments,
["agent"],
since,
until,
body_chars=100,
comment_chars=100,
)
assert summary["user_interactions"] == 5
assert summary["activity"]["new_human_comments"] == 4
assert summary["attention"] is True
assert summary["attention_level"] == 1
assert summary["attention_marker"] == "🔥"
issue["created_at"] = "2026-04-24T01:00:00Z"
comments.extend(
{
"id": idx,
"created_at": "2026-04-25T03:00:00Z",
"updated_at": "2026-04-25T03:00:00Z",
"user": {"login": f"extra-user-{idx}"},
"body": "also seeing this",
}
for idx in range(100, 106)
)
summary = collect_issue_digest.summarize_issue(
issue,
comments,
["agent"],
since,
until,
body_chars=100,
comment_chars=100,
)
assert summary["user_interactions"] == 10
assert summary["attention_level"] == 2
assert summary["attention_marker"] == "🔥🔥"


def test_reactions_count_toward_attention_markers():
    since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
    until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
    issue = {
        "number": 789,
        "title": "Support 1M token context",
        "html_url": "https://github.com/openai/codex/issues/789",
        "state": "open",
        "created_at": "2026-04-24T01:00:00Z",
        "updated_at": "2026-04-25T12:00:00Z",
        "user": {"login": "alice"},
        "labels": [{"name": "enhancement"}, {"name": "context"}],
        "reactions": {"total_count": 20, "+1": 20},
    }
    comments = [
        {
            "id": 1,
            "created_at": "2026-04-25T02:00:00Z",
            "updated_at": "2026-04-25T02:00:00Z",
            "user": {"login": "commenter"},
            "body": "please",
            "reactions": {"total_count": 2, "+1": 2},
        }
    ]
    issue_reactions = [
        {
            "content": "+1",
            "created_at": "2026-04-25T03:00:00Z",
            "user": {"login": f"reactor-{idx}"},
        }
        for idx in range(18)
    ]
    comment_reactions_by_id = {
        1: [
            {
                "content": "heart",
                "created_at": "2026-04-25T04:00:00Z",
                "user": {"login": "human-reactor"},
            },
            {
                "content": "+1",
                "created_at": "2026-04-25T04:00:00Z",
                "user": {"login": "github-actions[bot]"},
            },
        ]
    }
    summary = collect_issue_digest.summarize_issue(
        issue,
        comments,
        ["context"],
        since,
        until,
        body_chars=100,
        comment_chars=100,
        issue_reaction_events=issue_reactions,
        comment_reactions_by_id=comment_reactions_by_id,
    )
    assert summary["new_reactions"] == 19
    assert summary["new_upvotes"] == 18
    assert summary["user_interactions"] == 20
    assert summary["attention_level"] == 2
    assert summary["attention_marker"] == "🔥🔥"
    assert summary["new_comments"][0]["new_reactions"] == 1
    assert summary["new_comments"][0]["new_upvotes"] == 0


def test_digest_rows_are_table_ready_with_concise_descriptions():
    rows = collect_issue_digest.digest_rows(
        [
            {
                "number": 1,
                "title": "Quiet bug",
                "description": "Quiet bug",
                "url": "https://github.com/openai/codex/issues/1",
                "owner_labels": ["context"],
                "kind_labels": ["bug"],
                "state": "open",
                "attention": False,
                "attention_level": 0,
                "attention_marker": "",
                "user_interactions": 1,
                "new_reactions": 0,
                "new_upvotes": 0,
                "engagement_score": 3,
                "issue_reaction_total": 0,
                "comment_reaction_total": 0,
                "updated_at": "2026-04-25T01:00:00Z",
                "activity": {
                    "new_issue": True,
                    "new_comments": 0,
                    "new_reactions": 0,
                    "updated_without_visible_new_post": False,
                },
            },
            {
                "number": 2,
                "title": "Busy bug",
                "description": "High-volume bug report",
                "url": "https://github.com/openai/codex/issues/2",
                "owner_labels": ["agent"],
                "kind_labels": ["bug"],
                "state": "open",
                "attention": True,
                "attention_level": 1,
                "attention_marker": "🔥",
                "user_interactions": 17,
                "new_reactions": 3,
                "new_upvotes": 2,
                "engagement_score": 20,
                "issue_reaction_total": 5,
                "comment_reaction_total": 2,
                "updated_at": "2026-04-25T02:00:00Z",
                "activity": {
                    "new_issue": False,
                    "new_comments": 16,
                    "new_reactions": 3,
                    "updated_without_visible_new_post": False,
                },
            },
        ]
    )
    # digest_rows orders by engagement, so the busier issue #2 comes first as ref 1.
    assert rows[0] == {
        "ref": 1,
        "ref_markdown": "[1](https://github.com/openai/codex/issues/2)",
        "marker": "🔥",
        "attention_marker": "🔥",
        "number": 2,
        "description": "High-volume bug report",
        "title": "Busy bug",
        "url": "https://github.com/openai/codex/issues/2",
        "area": "agent",
        "kind": "bug",
        "state": "open",
        "interactions": 17,
        "user_interactions": 17,
        "new_reactions": 3,
        "new_upvotes": 2,
        "current_reactions": 7,
    }


def test_summary_inputs_are_model_ready_without_preclustering():
    issues = [
        {
            "number": 20,
            "title": "Windows app Browser Use external navigation fails",
            "description": "Browser Use navigation or app-server failure",
            "url": "https://github.com/openai/codex/issues/20",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "attention": False,
            "attention_level": 0,
            "attention_marker": "",
            "user_interactions": 3,
            "new_reactions": 1,
            "engagement_score": 8,
            "updated_at": "2026-04-25T04:00:00Z",
            "activity": {"new_comments": 2},
        },
        {
            "number": 21,
            "title": "On Windows, cmake output waits until timeout",
            "description": "Windows command timeout/capture problem",
            "url": "https://github.com/openai/codex/issues/21",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "attention": False,
            "attention_level": 0,
            "attention_marker": "",
            "user_interactions": 3,
            "new_reactions": 0,
            "engagement_score": 7,
            "updated_at": "2026-04-25T03:00:00Z",
            "activity": {"new_comments": 3},
        },
        {
            "number": 22,
            "title": "Windows computer use tool fails to click buttons",
            "description": "Computer-use workflow failure",
            "url": "https://github.com/openai/codex/issues/22",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "attention": False,
            "attention_level": 0,
            "attention_marker": "",
            "user_interactions": 3,
            "new_reactions": 0,
            "engagement_score": 6,
            "updated_at": "2026-04-25T02:00:00Z",
            "activity": {"new_comments": 3},
        },
    ]
    rows = collect_issue_digest.summary_inputs(issues, ref_map={20: 1, 21: 2, 22: 3})
    assert rows == [
        {
            "ref": 1,
            "ref_markdown": "[1](https://github.com/openai/codex/issues/20)",
            "number": 20,
            "title": "Windows app Browser Use external navigation fails",
            "description": "Browser Use navigation or app-server failure",
            "url": "https://github.com/openai/codex/issues/20",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "state": "",
            "attention_marker": "",
            "interactions": 3,
            "new_comments": 2,
            "new_reactions": 1,
            "new_upvotes": 0,
            "current_reactions": 0,
        },
        {
            "ref": 2,
            "ref_markdown": "[2](https://github.com/openai/codex/issues/21)",
            "number": 21,
            "title": "On Windows, cmake output waits until timeout",
            "description": "Windows command timeout/capture problem",
            "url": "https://github.com/openai/codex/issues/21",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "state": "",
            "attention_marker": "",
            "interactions": 3,
            "new_comments": 3,
            "new_reactions": 0,
            "new_upvotes": 0,
            "current_reactions": 0,
        },
        {
            "ref": 3,
            "ref_markdown": "[3](https://github.com/openai/codex/issues/22)",
            "number": 22,
            "title": "Windows computer use tool fails to click buttons",
            "description": "Computer-use workflow failure",
            "url": "https://github.com/openai/codex/issues/22",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "state": "",
            "attention_marker": "",
            "interactions": 3,
            "new_comments": 3,
            "new_reactions": 0,
            "new_upvotes": 0,
            "current_reactions": 0,
        },
    ]

View File

@@ -1,59 +0,0 @@
---
name: codex-pr-body
description: Update the title and body of one or more pull requests.
---
## Determining the PR(s)
When this skill is invoked, the PR(s) to update may be specified explicitly, but in the common case, the PR(s) to update will be inferred from the branch / commit that the user is currently working on. For ordinary Git usage (i.e., not Sapling as discussed below), you may have to use a combination of `git branch` and `gh pr view <branch> --repo openai/codex --json number --jq '.number'` to determine the PR associated with the current branch / commit.
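For example, a minimal sketch of that inference (assuming `gh` is authenticated and the current branch has exactly one open PR):
```shell
# Resolve the PR number for the current branch.
branch="$(git branch --show-current)"
pr_number="$(gh pr view "$branch" --repo openai/codex --json number --jq '.number')"
echo "branch $branch -> PR #$pr_number"
```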
## PR Body Contents
When invoked, use `gh` to edit the pull request body and title to reflect the contents of the specified PR. Make sure to check the existing pull request body to see if there is key information that should be preserved. For example, NEVER remove an image in the existing pull request body, as the author may have no way to recover it if you remove it.
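As a sketch of the mechanics (the PR number `1234` and the title are placeholders), the edit itself can be done with `gh pr edit`, capturing the existing body first so nothing important is lost:
```shell
# Save the current body before rewriting it, then apply the new title/body.
gh pr view 1234 --repo openai/codex --json body --jq '.body' > old-body.md
# ...carry anything worth keeping (especially images) into new-body.md...
gh pr edit 1234 --repo openai/codex --title "core: reap MCP child processes" --body-file new-body.md
```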
It is critically important to explain _why_ the change is being made. If the current conversation in which this skill is invoked has discussed the motivation, be sure to capture this in the pull request body.
The body should also explain _what_ changed, but this should appear after the _why_.
Limit discussion to the _net change_ of the commit. It is generally frowned upon to discuss changes that were attempted but later undone in the course of the development of the pull request. When rewriting the pull request body, you may need to eliminate details such as these when they are no longer appropriate / of interest to future readers.
Avoid references to absolute paths on my local disk. When talking about a path that is within the repository, simply use the repo-relative path.
It is generally helpful to discuss how the change was verified. That said, it is unnecessary to mention things that CI checks automatically (e.g., do not include "ran `just fmt`" as part of the test plan), though it is often appropriate to identify the new tests that were purposely introduced to verify the behavior the pull request adds.
Make use of Markdown to format the pull request professionally. Ensure "code things" appear in single backticks when referenced inline. Fenced code blocks are useful when referencing code or showing a shell transcript. Also, make use of GitHub permalinks when citing existing pieces of code that are relevant to the change.
Make sure to reference any relevant pull requests or issues, though there should be no need to reference the pull request in its own PR body.
If there is documentation that should be updated on https://developers.openai.com/codex as a result of this change, please note that in a separate section near the end of the pull request. Omit this section if there is no documentation that needs to be updated.
## Working with Stacks
Sometimes a pull request is composed of a stack of commits that build on one another. In these cases, the PR body should reflect the _net_ change introduced by the stack as a whole, rather than the individual commits that make up the stack.
Similarly, sometimes a user may be using a tool like Sapling to leverage _stacked pull requests_, in which case the `base` of the PR may be a branch that is the `head` of another PR in the stack rather than `main`. In this case, be sure to discuss only the net change between the `base` and `head` of the PR that is opened against that stacked base, rather than the changes relative to `main`.
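A quick way to check for a stacked base (a sketch; `1234` is a placeholder PR number):
```shell
# A base other than main usually means the PR is stacked on another PR's head.
gh pr view 1234 --repo openai/codex --json baseRefName \
  --jq 'if .baseRefName == "main" then "based on main" else "stacked on \(.baseRefName)" end'
```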
## Sapling
If `.git/sl/store` is present, then this Git repository is governed by Sapling SCM (https://sapling-scm.com).
In Sapling, run the following to see if there is a GitHub pull request associated with the current revision:
```shell
sl log --template '{github_pull_request_url}' -r .
```
Alternatively, you can run `sl sl` to see the current development branch and whether there is a GitHub pull request associated with the current commit. For example, if the output were:
```
@ cb032b31cf 72 minutes ago mbolin #11412
╭─╯ tui: show non-file layer content in /debug-config
o fdd0cd1de9 Today at 20:09 origin/main
~
```
- `@` indicates the current commit is `cb032b31cf`
- it is a development branch containing a single commit branched off of `origin/main`
- it is associated with GitHub pull request #11412

View File

@@ -1,82 +0,0 @@
FROM mcr.microsoft.com/devcontainers/base:ubuntu-24.04
ARG TZ
ARG DEBIAN_FRONTEND=noninteractive
ARG NODE_MAJOR=22
ARG RUST_TOOLCHAIN=1.92.0
# Keep this in sync with .devcontainer/codex-install/package.json and pnpm-lock.yaml.
ARG CODEX_NPM_VERSION=0.121.0
ENV TZ="$TZ"
ENV COREPACK_ENABLE_DOWNLOAD_PROMPT=0
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# Devcontainers run as a non-root user, so enable bubblewrap's setuid mode.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
build-essential \
curl \
git \
ca-certificates \
pkg-config \
clang \
musl-tools \
libssl-dev \
libsqlite3-dev \
just \
python3 \
python3-pip \
jq \
less \
man-db \
unzip \
ripgrep \
fzf \
fd-find \
zsh \
dnsutils \
iproute2 \
ipset \
iptables \
aggregate \
bubblewrap \
&& chmod u+s /usr/bin/bwrap \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY .devcontainer/codex-install/package.json \
.devcontainer/codex-install/pnpm-lock.yaml \
.devcontainer/codex-install/pnpm-workspace.yaml \
/opt/codex-install/
RUN curl -fsSL "https://deb.nodesource.com/setup_${NODE_MAJOR}.x" | bash - \
&& apt-get update \
&& apt-get install -y --no-install-recommends nodejs \
&& test "$(node -p "require('/opt/codex-install/package.json').dependencies['@openai/codex']")" = "${CODEX_NPM_VERSION}" \
&& cd /opt/codex-install \
&& corepack pnpm install --prod --frozen-lockfile \
&& ln -s /opt/codex-install/node_modules/.bin/codex /usr/local/bin/codex \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY .devcontainer/init-firewall.sh /usr/local/bin/init-firewall.sh
COPY .devcontainer/post_install.py /opt/post_install.py
COPY .devcontainer/post-start.sh /opt/post_start.sh
RUN chmod 500 /usr/local/bin/init-firewall.sh \
&& chmod 755 /opt/post_start.sh \
&& chmod 644 /opt/post_install.py \
&& chown vscode:vscode /opt/post_install.py
RUN install -d -m 0775 -o vscode -g vscode /commandhistory /workspace \
&& touch /commandhistory/.bash_history /commandhistory/.zsh_history \
&& chown vscode:vscode /commandhistory/.bash_history /commandhistory/.zsh_history
USER vscode
ENV PATH="/home/vscode/.cargo/bin:${PATH}"
WORKDIR /workspace
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain "${RUST_TOOLCHAIN}" \
&& rustup component add clippy rustfmt rust-src \
&& rustup target add x86_64-unknown-linux-musl aarch64-unknown-linux-musl

View File

@@ -1,38 +1,10 @@
# Containerized Development
We provide two container paths:
- `devcontainer.json` keeps the existing Codex contributor setup for working on this repository.
- `devcontainer.secure.json` adds a customer-oriented profile with stricter outbound network controls.
## Codex contributor profile
Use `devcontainer.json` when you are developing Codex itself. This is the same lightweight arm64 container that already exists in the repo.
## Secure customer profile
Use `devcontainer.secure.json` when you want a stricter runtime profile for running Codex inside a project container:
- installs the Codex CLI plus common build tools
- installs bubblewrap in setuid mode for Codex's Linux sandbox
- disables Docker's outer seccomp and AppArmor profiles so bubblewrap can construct Codex's inner sandbox
- enables firewall startup with an allowlist-driven outbound policy
- blocks IPv6 by default so the allowlist cannot be bypassed over AAAA routes
- requires `NET_ADMIN` and `NET_RAW` so the firewall can be installed at startup
This profile keeps the stricter networking isolated to the customer path instead of changing the default Codex contributor container.
Start it from the CLI with:
```bash
devcontainer up --workspace-folder . --config .devcontainer/devcontainer.secure.json
```
In VS Code, choose **Dev Containers: Open Folder in Container...** and select `.devcontainer/devcontainer.secure.json`.
We provide the following options to facilitate Codex development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.
## Docker
To build the contributor image locally for x64 and then run it with the repo mounted under `/workspace`:
To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:
```shell
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
@@ -42,8 +14,17 @@ docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-
Note that `/workspace/target` will contain the binaries built for your host platform, so we include `-e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64` in the `docker run` command so that the binaries built inside your container are written to a separate directory.
For arm64, specify `--platform=linux/arm64` instead for both `docker build` and `docker run`.
For arm64, specify `--platform=linux/amd64` instead for both `docker build` and `docker run`.
Currently, the contributor `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
Currently, the `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
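Putting those pieces together, a hedged end-to-end sketch of the x64 flow (the `-f` path is an assumption; point it at wherever the contributor `Dockerfile` lives in your checkout):
```shell
# Assumed Dockerfile location -- substitute the real path in your checkout.
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
docker build --platform=linux/amd64 -t "$CODEX_DOCKER_IMAGE_NAME" -f .devcontainer/Dockerfile .
docker run --platform=linux/amd64 --rm -it \
  -v "$PWD:/workspace" -w /workspace \
  -e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64 \
  "$CODEX_DOCKER_IMAGE_NAME"
```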
The secure profile's capability, seccomp, and AppArmor options are required when you want Codex's bubblewrap sandbox to run inside Docker as the non-root devcontainer user. Without them, Docker's default runtime profile can block bubblewrap's namespace setup before Codex's own seccomp filter is installed. This keeps the Docker relaxation explicit in the profile that is meant to run Codex inside a project container, while the default contributor profile stays lightweight.
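For reference, the plain `docker run` equivalent of those relaxations looks roughly like this (a sketch mirroring the secure profile's `runArgs`; the image name is hypothetical):
```shell
docker run --rm -it \
  --security-opt seccomp=unconfined \
  --security-opt apparmor=unconfined \
  --cap-add NET_ADMIN --cap-add NET_RAW \
  codex-secure-dev
```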
## VS Code
VS Code recognizes the `devcontainer.json` file and gives you the option to develop Codex in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.
From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):
```shell
cargo build --target aarch64-unknown-linux-musl
cargo build --target aarch64-unknown-linux-gnu
```

View File

@@ -1,13 +0,0 @@
{
"name": "codex-devcontainer-install",
"private": true,
"description": "Locked Codex CLI install boundary for the secure devcontainer.",
"dependencies": {
"@openai/codex": "0.121.0"
},
"engines": {
"node": ">=22",
"pnpm": ">=10.33.0"
},
"packageManager": "pnpm@10.33.0+sha512.10568bb4a6afb58c9eb3630da90cc9516417abebd3fabbe6739f0ae795728da1491e9db5a544c76ad8eb7570f5c4bb3d6c637b2cb41bfdcdb47fa823c8649319"
}

View File

@@ -1,85 +0,0 @@
lockfileVersion: '9.0'

settings:
  autoInstallPeers: true
  excludeLinksFromLockfile: false

importers:

  .:
    dependencies:
      '@openai/codex':
        specifier: 0.121.0
        version: 0.121.0

packages:

  '@openai/codex@0.121.0':
    resolution: {integrity: sha512-kCJ2NeATd4QBQRmqV04ymdN1ZU3MSwnJQDm/KzjpuzGvCuUVEn7no/T2mRyxQ2x77AACqriNOyPPoM/yufyvNg==}
    engines: {node: '>=16'}
    hasBin: true

  '@openai/codex@0.121.0-darwin-arm64':
    resolution: {integrity: sha512-ZyBqIB6Fb4I0hGb/h65Vu7ePYjHSmGiqqfm+/1djEuxDPkqjfi4wkxYxNYNY+6najyNGN4UijOSTTf19eDCrqw==}
    engines: {node: '>=16'}
    cpu: [arm64]
    os: [darwin]

  '@openai/codex@0.121.0-darwin-x64':
    resolution: {integrity: sha512-1/OAtdkAZ5yPI3xqaEFlHuPziS1yCqL2gOZdswE7HTmmwpIxi6Z3FCo60JWDPluIp89z4tftdjq73/OCN0YVcw==}
    engines: {node: '>=16'}
    cpu: [x64]
    os: [darwin]

  '@openai/codex@0.121.0-linux-arm64':
    resolution: {integrity: sha512-2UgMmdo237o7SCMsfb529cOSEM2HFUgN6OBkv5SBLwfNY1NO2Ex6JnUjlppEXlX6/4cXfZ5qjDghVz5j/+B9zw==}
    engines: {node: '>=16'}
    cpu: [arm64]
    os: [linux]

  '@openai/codex@0.121.0-linux-x64':
    resolution: {integrity: sha512-vlpNJXIqss800J+32Vy7TUZzv31n61b45OLxmsVQGFkTNLJcjFrj9jDUC7I62eC4F16gLioilefNfv4CdJQOEw==}
    engines: {node: '>=16'}
    cpu: [x64]
    os: [linux]

  '@openai/codex@0.121.0-win32-arm64':
    resolution: {integrity: sha512-m88q4f3XI5npn1t6OG0nWGHWWAjO5FgjRwxh4hdujbLO6t9CiCNfhfPZIOSsoATbrCNwLC+6S77m3cjbNToPNg==}
    engines: {node: '>=16'}
    cpu: [arm64]
    os: [win32]

  '@openai/codex@0.121.0-win32-x64':
    resolution: {integrity: sha512-Fp0ecVOyM+VcBi/y4HVvRzhifO9YqRiHzhV3rhtAppC7flh22WPguLC4kmvXYAR0p3RPzbo35M2CedWnkOT+cw==}
    engines: {node: '>=16'}
    cpu: [x64]
    os: [win32]

snapshots:

  '@openai/codex@0.121.0':
    optionalDependencies:
      '@openai/codex-darwin-arm64': '@openai/codex@0.121.0-darwin-arm64'
      '@openai/codex-darwin-x64': '@openai/codex@0.121.0-darwin-x64'
      '@openai/codex-linux-arm64': '@openai/codex@0.121.0-linux-arm64'
      '@openai/codex-linux-x64': '@openai/codex@0.121.0-linux-x64'
      '@openai/codex-win32-arm64': '@openai/codex@0.121.0-win32-arm64'
      '@openai/codex-win32-x64': '@openai/codex@0.121.0-win32-x64'

  '@openai/codex@0.121.0-darwin-arm64':
    optional: true

  '@openai/codex@0.121.0-darwin-x64':
    optional: true

  '@openai/codex@0.121.0-linux-arm64':
    optional: true

  '@openai/codex@0.121.0-linux-x64':
    optional: true

  '@openai/codex@0.121.0-win32-arm64':
    optional: true

  '@openai/codex@0.121.0-win32-x64':
    optional: true

View File

@@ -1,12 +0,0 @@
packages:
- "."
minimumReleaseAge: 10080
minimumReleaseAgeExclude: []
blockExoticSubdeps: true
strictDepBuilds: true
trustPolicy: no-downgrade
trustPolicyIgnoreAfter: 10080
trustPolicyExclude: []
allowBuilds: {}

View File

@@ -1,83 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json",
"name": "Codex (Secure)",
"build": {
"dockerfile": "Dockerfile.secure",
"context": "..",
"args": {
"TZ": "${localEnv:TZ:UTC}",
"NODE_MAJOR": "22",
"RUST_TOOLCHAIN": "1.92.0",
"CODEX_NPM_VERSION": "0.121.0"
}
},
"runArgs": [
"--cap-add=SYS_ADMIN",
"--cap-add=SYS_CHROOT",
"--cap-add=SETUID",
"--cap-add=SETGID",
"--cap-add=SYS_PTRACE",
"--security-opt=seccomp=unconfined",
"--security-opt=apparmor=unconfined",
"--cap-add=NET_ADMIN",
"--cap-add=NET_RAW"
],
"init": true,
"updateRemoteUserUID": true,
"remoteUser": "vscode",
"workspaceMount": "source=${localWorkspaceFolder},target=/workspace,type=bind,consistency=delegated",
"workspaceFolder": "/workspace",
"mounts": [
"source=codex-commandhistory-${devcontainerId},target=/commandhistory,type=volume",
"source=codex-home-${devcontainerId},target=/home/vscode/.codex,type=volume",
"source=codex-gh-${devcontainerId},target=/home/vscode/.config/gh,type=volume",
"source=codex-cargo-registry-${devcontainerId},target=/home/vscode/.cargo/registry,type=volume",
"source=codex-cargo-git-${devcontainerId},target=/home/vscode/.cargo/git,type=volume",
"source=codex-rustup-${devcontainerId},target=/home/vscode/.rustup,type=volume",
"source=${localEnv:HOME}/.gitconfig,target=/home/vscode/.gitconfig,type=bind,readonly"
],
"containerEnv": {
"RUST_BACKTRACE": "1",
"CODEX_UNSAFE_ALLOW_NO_SANDBOX": "1",
"CODEX_ENABLE_FIREWALL": "1",
"CODEX_INCLUDE_GITHUB_META_RANGES": "1",
"OPENAI_ALLOWED_DOMAINS": "api.openai.com auth.openai.com github.com api.github.com codeload.github.com raw.githubusercontent.com objects.githubusercontent.com crates.io index.crates.io static.crates.io static.rust-lang.org registry.npmjs.org pypi.org files.pythonhosted.org",
"CARGO_TARGET_DIR": "/workspace/.cache/cargo-target",
"GIT_CONFIG_GLOBAL": "/home/vscode/.gitconfig.local",
"COREPACK_ENABLE_DOWNLOAD_PROMPT": "0",
"PYTHONDONTWRITEBYTECODE": "1",
"PIP_DISABLE_PIP_VERSION_CHECK": "1"
},
"remoteEnv": {
"OPENAI_API_KEY": "${localEnv:OPENAI_API_KEY}"
},
"postCreateCommand": "python3 /opt/post_install.py",
"postStartCommand": "bash /opt/post_start.sh",
"waitFor": "postStartCommand",
"customizations": {
"vscode": {
"settings": {
"terminal.integrated.defaultProfile.linux": "zsh",
"terminal.integrated.profiles.linux": {
"bash": {
"path": "bash",
"icon": "terminal-bash"
},
"zsh": {
"path": "zsh"
}
},
"files.trimTrailingWhitespace": true,
"files.insertFinalNewline": true,
"files.trimFinalNewlines": true
},
"extensions": [
"openai.chatgpt",
"rust-lang.rust-analyzer",
"tamasfe.even-better-toml",
"vadimcn.vscode-lldb",
"ms-azuretools.vscode-docker"
]
}
}
}

View File

@@ -1,170 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
allowed_domains_file="/etc/codex/allowed_domains.txt"
include_github_meta_ranges="${CODEX_INCLUDE_GITHUB_META_RANGES:-1}"
if [ -f "$allowed_domains_file" ]; then
mapfile -t allowed_domains < <(sed '/^\s*#/d;/^\s*$/d' "$allowed_domains_file")
else
allowed_domains=("api.openai.com")
fi
if [ "${#allowed_domains[@]}" -eq 0 ]; then
echo "ERROR: No allowed domains configured"
exit 1
fi
add_ipv4_cidr_to_allowlist() {
local source="$1"
local cidr="$2"
if [[ ! "$cidr" =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}/[0-9]{1,2}$ ]]; then
echo "ERROR: Invalid ${source} CIDR range: $cidr"
exit 1
fi
ipset add allowed-domains "$cidr" -exist
}
configure_ipv6_default_deny() {
if ! command -v ip6tables >/dev/null 2>&1; then
echo "ERROR: ip6tables is required to enforce IPv6 default-deny policy"
exit 1
fi
ip6tables -F
ip6tables -X
ip6tables -t mangle -F
ip6tables -t mangle -X
ip6tables -t nat -F 2>/dev/null || true
ip6tables -t nat -X 2>/dev/null || true
ip6tables -A INPUT -i lo -j ACCEPT
ip6tables -A OUTPUT -o lo -j ACCEPT
ip6tables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
ip6tables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
ip6tables -P INPUT DROP
ip6tables -P FORWARD DROP
ip6tables -P OUTPUT DROP
echo "IPv6 firewall policy configured (default-deny)"
}
# Preserve docker-managed DNS NAT rules before clearing tables.
docker_dns_rules="$(iptables-save -t nat | grep "127\\.0\\.0\\.11" || true)"
iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
ipset destroy allowed-domains 2>/dev/null || true
if [ -n "$docker_dns_rules" ]; then
echo "Restoring Docker DNS NAT rules"
iptables -t nat -N DOCKER_OUTPUT 2>/dev/null || true
iptables -t nat -N DOCKER_POSTROUTING 2>/dev/null || true
while IFS= read -r rule; do
[ -z "$rule" ] && continue
iptables -t nat $rule
done <<< "$docker_dns_rules"
fi
# Allow DNS resolution and localhost communication.
iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
iptables -A INPUT -p udp --sport 53 -j ACCEPT
iptables -A INPUT -p tcp --sport 53 -j ACCEPT
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
ipset create allowed-domains hash:net
for domain in "${allowed_domains[@]}"; do
echo "Resolving $domain"
ips="$(dig +short A "$domain" | sed '/^\s*$/d')"
if [ -z "$ips" ]; then
echo "ERROR: Failed to resolve $domain"
exit 1
fi
while IFS= read -r ip; do
if [[ ! "$ip" =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}$ ]]; then
echo "ERROR: Invalid IPv4 address from DNS for $domain: $ip"
exit 1
fi
ipset add allowed-domains "$ip" -exist
done <<< "$ips"
done
if [ "$include_github_meta_ranges" = "1" ]; then
echo "Fetching GitHub meta ranges"
github_meta="$(curl -fsSL --connect-timeout 10 https://api.github.com/meta)"
if ! echo "$github_meta" | jq -e '.web and .api and .git' >/dev/null; then
echo "ERROR: GitHub meta response missing expected fields"
exit 1
fi
while IFS= read -r cidr; do
[ -z "$cidr" ] && continue
if [[ "$cidr" == *:* ]]; then
# Current policy enforces IPv4-only ipset entries.
continue
fi
add_ipv4_cidr_to_allowlist "GitHub" "$cidr"
done < <(echo "$github_meta" | jq -r '((.web // []) + (.api // []) + (.git // []))[]' | sort -u)
fi
host_ip="$(ip route | awk '/default/ {print $3; exit}')"
if [ -z "$host_ip" ]; then
echo "ERROR: Failed to detect host IP"
exit 1
fi
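# Assumption: the container sits on a /24; derive the host network from the
# default gateway so host traffic stays reachable under the default-deny policy.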
host_network="$(echo "$host_ip" | sed 's/\.[0-9]*$/.0\/24/')"
iptables -A INPUT -s "$host_network" -j ACCEPT
iptables -A OUTPUT -d "$host_network" -j ACCEPT
iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT DROP
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m set --match-set allowed-domains dst -j ACCEPT
# Reject rather than silently drop to make policy failures obvious.
iptables -A INPUT -j REJECT --reject-with icmp-admin-prohibited
iptables -A OUTPUT -j REJECT --reject-with icmp-admin-prohibited
iptables -A FORWARD -j REJECT --reject-with icmp-admin-prohibited
configure_ipv6_default_deny
echo "Firewall configuration complete"
if curl --connect-timeout 5 https://example.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - was able to reach https://example.com"
exit 1
fi
if ! curl --connect-timeout 5 https://api.openai.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - unable to reach https://api.openai.com"
exit 1
fi
if [ "$include_github_meta_ranges" = "1" ] && ! curl --connect-timeout 5 https://api.github.com/zen >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - unable to reach https://api.github.com"
exit 1
fi
if curl --connect-timeout 5 -6 https://example.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - was able to reach https://example.com over IPv6"
exit 1
fi
echo "Firewall verification passed"

View File

@@ -1,36 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
if [ "${CODEX_ENABLE_FIREWALL:-1}" != "1" ]; then
echo "[devcontainer] Firewall mode: permissive (CODEX_ENABLE_FIREWALL=${CODEX_ENABLE_FIREWALL:-unset})."
exit 0
fi
echo "[devcontainer] Firewall mode: strict"
domains_raw="${OPENAI_ALLOWED_DOMAINS:-api.openai.com}"
mapfile -t domains < <(printf '%s\n' "$domains_raw" | tr ', ' '\n\n' | sed '/^$/d' | sort -u)
if [ "${#domains[@]}" -eq 0 ]; then
echo "[devcontainer] No allowed domains configured."
exit 1
fi
tmp_file="$(mktemp)"
for domain in "${domains[@]}"; do
if [[ ! "$domain" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]*\.[a-zA-Z]{2,}$ ]]; then
echo "[devcontainer] Invalid domain in OPENAI_ALLOWED_DOMAINS: $domain"
rm -f "$tmp_file"
exit 1
fi
printf '%s\n' "$domain" >> "$tmp_file"
done
sudo install -d -m 0755 /etc/codex
sudo cp "$tmp_file" /etc/codex/allowed_domains.txt
sudo chown root:root /etc/codex/allowed_domains.txt
sudo chmod 0444 /etc/codex/allowed_domains.txt
rm -f "$tmp_file"
echo "[devcontainer] Applying firewall policy for domains: ${domains[*]}"
sudo --preserve-env=CODEX_INCLUDE_GITHUB_META_RANGES /usr/local/bin/init-firewall.sh

View File

@@ -1,113 +0,0 @@
#!/usr/bin/env python3
"""Post-install configuration for the Codex devcontainer."""

from __future__ import annotations

import os
import subprocess
import sys
from pathlib import Path


def ensure_history_files() -> None:
    command_history_dir = Path("/commandhistory")
    command_history_dir.mkdir(parents=True, exist_ok=True)
    for filename in (".bash_history", ".zsh_history"):
        (command_history_dir / filename).touch(exist_ok=True)


def fix_directory_ownership() -> None:
    uid = os.getuid()
    gid = os.getgid()
    paths = [
        Path.home() / ".codex",
        Path.home() / ".config" / "gh",
        Path.home() / ".cargo",
        Path.home() / ".rustup",
        Path("/commandhistory"),
    ]
    for path in paths:
        if not path.exists():
            continue
        stat_info = path.stat()
        if stat_info.st_uid == uid and stat_info.st_gid == gid:
            continue
        try:
            subprocess.run(
                ["sudo", "chown", "-R", f"{uid}:{gid}", str(path)],
                check=True,
                capture_output=True,
                text=True,
            )
            print(f"[post_install] fixed ownership: {path}", file=sys.stderr)
        except subprocess.CalledProcessError as err:
            print(
                f"[post_install] warning: could not fix ownership of {path}: {err.stderr.strip()}",
                file=sys.stderr,
            )


def setup_git_config() -> None:
    home = Path.home()
    host_gitconfig = home / ".gitconfig"
    local_gitconfig = home / ".gitconfig.local"
    gitignore_global = home / ".gitignore_global"
    gitignore_global.write_text(
        """# Codex
.codex/
# Rust
/target/
# Node
node_modules/
# Python
__pycache__/
*.pyc
# Editors
.vscode/
.idea/
# macOS
.DS_Store
""",
        encoding="utf-8",
    )
    include_line = (
        f"[include]\n path = {host_gitconfig}\n\n" if host_gitconfig.exists() else ""
    )
    local_gitconfig.write_text(
        f"""# Container-local git configuration
{include_line}[core]
excludesfile = {gitignore_global}
[merge]
conflictstyle = diff3
[diff]
colorMoved = default
""",
        encoding="utf-8",
    )


def main() -> None:
    print("[post_install] configuring devcontainer...", file=sys.stderr)
    ensure_history_files()
    fix_directory_ownership()
    setup_git_config()
    print("[post_install] complete", file=sys.stderr)


if __name__ == "__main__":
    main()

.gitattributes vendored
View File

@@ -1,2 +0,0 @@
codex-rs/app-server-protocol/schema/** linguist-generated
codex-rs/hooks/schema/generated/** linguist-generated

.github/CODEOWNERS vendored
View File

@@ -1,5 +0,0 @@
# Core crate ownership.
/codex-rs/core/ @openai/codex-core-agent-team
# Keep ownership changes reviewed by the same team.
/.github/CODEOWNERS @openai/codex-core-agent-team

View File

@@ -7,21 +7,17 @@ inputs:
artifacts-dir:
description: Absolute path to the directory containing built binaries to sign.
required: true
binaries:
description: Space-delimited binary basenames to sign.
default: "codex codex-responses-api-proxy"
runs:
using: composite
steps:
- name: Install cosign
uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0
uses: sigstore/cosign-installer@v3.7.0
- name: Cosign Linux artifacts
shell: bash
env:
ARTIFACTS_DIR: ${{ inputs.artifacts-dir }}
BINARIES: ${{ inputs.binaries }}
COSIGN_EXPERIMENTAL: "1"
COSIGN_YES: "true"
COSIGN_OIDC_CLIENT_ID: "sigstore"
@@ -35,7 +31,7 @@ runs:
exit 1
fi
for binary in ${BINARIES}; do
for binary in codex codex-responses-api-proxy; do
artifact="${dest}/${binary}"
if [[ ! -f "$artifact" ]]; then
echo "Binary $artifact not found"

View File

@@ -4,9 +4,6 @@ inputs:
target:
description: Rust compilation target triple (e.g. aarch64-apple-darwin).
required: true
binaries:
description: Space-delimited binary basenames to sign and notarize.
default: "codex codex-responses-api-proxy"
sign-binaries:
description: Whether to sign and notarize the macOS binaries.
required: false
@@ -122,7 +119,6 @@ runs:
shell: bash
env:
TARGET: ${{ inputs.target }}
BINARIES: ${{ inputs.binaries }}
run: |
set -euo pipefail
@@ -138,7 +134,7 @@ runs:
entitlements_path="$GITHUB_ACTION_PATH/codex.entitlements.plist"
for binary in ${BINARIES}; do
for binary in codex codex-responses-api-proxy; do
path="codex-rs/target/${TARGET}/release/${binary}"
codesign --force --options runtime --timestamp --entitlements "$entitlements_path" --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
done
@@ -148,7 +144,6 @@ runs:
shell: bash
env:
TARGET: ${{ inputs.target }}
BINARIES: ${{ inputs.binaries }}
APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
APPLE_NOTARIZATION_KEY_ID: ${{ inputs.apple-notarization-key-id }}
APPLE_NOTARIZATION_ISSUER_ID: ${{ inputs.apple-notarization-issuer-id }}
@@ -187,9 +182,8 @@ runs:
notarize_submission "$binary" "$archive_path" "$notary_key_path"
}
for binary in ${BINARIES}; do
notarize_binary "${binary}"
done
notarize_binary "codex"
notarize_binary "codex-responses-api-proxy"
- name: Sign and notarize macOS dmg
if: ${{ inputs.sign-dmg == 'true' }}

View File

@@ -1,64 +0,0 @@
name: prepare-bazel-ci
description: Prepare a Bazel CI job with shared setup, repository cache restore, and execution logs.

inputs:
  target:
    description: Target triple used for setup and cache namespacing.
    required: true
  cache-scope:
    description: Logical namespace used to keep concurrent Bazel jobs from reserving the same repository cache key.
    required: true
  install-test-prereqs:
    description: Install DotSlash for Bazel-backed test jobs.
    required: false
    default: "false"

outputs:
  repository-cache-path:
    description: Filesystem path used for the Bazel repository cache.
    value: ${{ steps.setup_bazel.outputs.repository-cache-path }}
  repository-cache-key:
    description: Primary actions/cache key for the Bazel repository cache.
    value: ${{ steps.cache_bazel_repository_key.outputs.repository-cache-key }}
  repository-cache-hit:
    description: Whether the Bazel repository cache restore found an exact key match.
    value: ${{ steps.cache_bazel_repository_restore.outputs.cache-hit }}

runs:
  using: composite
  steps:
    - name: Set up Bazel CI
      id: setup_bazel
      uses: ./.github/actions/setup-bazel-ci
      with:
        target: ${{ inputs.target }}
        install-test-prereqs: ${{ inputs.install-test-prereqs }}
    - name: Compute bazel repository cache key
      id: cache_bazel_repository_key
      shell: bash
      env:
        CACHE_SCOPE: ${{ inputs.cache-scope }}
        TARGET: ${{ inputs.target }}
        CACHE_HASH: ${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}
      run: |
        echo "repository-cache-key=bazel-cache-${CACHE_SCOPE}-${TARGET}-${CACHE_HASH}" >> "${GITHUB_OUTPUT}"
        echo "repository-cache-restore-key=bazel-cache-${CACHE_SCOPE}-${TARGET}-" >> "${GITHUB_OUTPUT}"
    # Restore the Bazel repository cache explicitly so external dependencies
    # do not need to be re-downloaded on every CI run. Keep restore failures
    # non-fatal so transient cache-service errors degrade to a cold build
    # instead of failing the job.
    - name: Restore bazel repository cache
      id: cache_bazel_repository_restore
      continue-on-error: true
      uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
      with:
        path: ${{ steps.setup_bazel.outputs.repository-cache-path }}
        key: ${{ steps.cache_bazel_repository_key.outputs.repository-cache-key }}
        restore-keys: |
          ${{ steps.cache_bazel_repository_key.outputs.repository-cache-restore-key }}
    - name: Set up Bazel execution logs
      shell: bash
      run: |
        mkdir -p "${RUNNER_TEMP}/bazel-execution-logs"
        echo "CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR=${RUNNER_TEMP}/bazel-execution-logs" >> "${GITHUB_ENV}"

View File

@@ -1,54 +0,0 @@
name: Run argument comment lint
description: Run argument-comment-lint on codex-rs via Bazel.

inputs:
  target:
    description: Runner target passed to setup-bazel-ci.
    required: true
  buildbuddy-api-key:
    description: BuildBuddy API key used by Bazel CI.
    required: false
    default: ""

runs:
  using: composite
  steps:
    - uses: ./.github/actions/setup-bazel-ci
      with:
        target: ${{ inputs.target }}
        install-test-prereqs: true
    - name: Install Linux sandbox build dependencies
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: |
        sudo DEBIAN_FRONTEND=noninteractive apt-get update
        sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
    - name: Run argument comment lint on codex-rs via Bazel
      if: ${{ runner.os != 'Windows' }}
      env:
        BUILDBUDDY_API_KEY: ${{ inputs.buildbuddy-api-key }}
      shell: bash
      run: |
        bazel_targets="$(./tools/argument-comment-lint/list-bazel-targets.sh)"
        ./.github/scripts/run-bazel-ci.sh \
          -- \
          build \
          --config=argument-comment-lint \
          --keep_going \
          --build_metadata=COMMIT_SHA=${GITHUB_SHA} \
          -- \
          ${bazel_targets}
    - name: Run argument comment lint on codex-rs via Bazel
      if: ${{ runner.os == 'Windows' }}
      env:
        BUILDBUDDY_API_KEY: ${{ inputs.buildbuddy-api-key }}
      shell: bash
      run: |
        ./.github/scripts/run-argument-comment-lint-bazel.sh \
          --config=argument-comment-lint \
          --platforms=//:local_windows \
          --keep_going \
          --build_metadata=COMMIT_SHA=${GITHUB_SHA}

View File

@@ -5,22 +5,28 @@ inputs:
description: Target triple used for cache namespacing.
required: true
install-test-prereqs:
description: Install DotSlash for Bazel-backed test jobs.
description: Install Node.js and DotSlash for Bazel-backed test jobs.
required: false
default: "false"
outputs:
repository-cache-path:
description: Filesystem path used for the Bazel repository cache.
value: ${{ steps.configure_bazel_repository_cache.outputs.repository-cache-path }}
cache-hit:
description: Whether the Bazel repository cache key was restored exactly.
value: ${{ steps.cache_bazel_repository_restore.outputs.cache-hit }}
runs:
using: composite
steps:
- name: Set up Node.js for js_repl tests
if: inputs.install-test-prereqs == 'true'
uses: actions/setup-node@v6
with:
node-version-file: codex-rs/node-version.txt
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
if: inputs.install-test-prereqs == 'true'
uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2
uses: facebook/install-dotslash@v2
- name: Make DotSlash available in PATH (Unix)
if: inputs.install-test-prereqs == 'true' && runner.os != 'Windows'
@@ -33,18 +39,19 @@ runs:
run: Copy-Item (Get-Command dotslash).Source -Destination "$env:LOCALAPPDATA\Microsoft\WindowsApps\dotslash.exe"
- name: Set up Bazel
uses: bazel-contrib/setup-bazel@c5acdfb288317d0b5c0bbd7a396a3dc868bb0f86 # 0.19.0
uses: bazelbuild/setup-bazelisk@v3
- name: Configure Bazel repository cache
id: configure_bazel_repository_cache
shell: pwsh
run: |
# Keep the repository cache under HOME on all runners. Windows `D:\a`
# cache paths match `.bazelrc`, but `actions/cache/restore` currently
# returns HTTP 400 for that path in the Windows clippy job.
$repositoryCachePath = Join-Path $HOME '.cache/bazel-repo-cache'
"repository-cache-path=$repositoryCachePath" | Out-File -FilePath $env:GITHUB_OUTPUT -Encoding utf8 -Append
"BAZEL_REPOSITORY_CACHE=$repositoryCachePath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
# Restore bazel repository cache so we don't have to redownload all the external dependencies
# on every CI run.
- name: Restore bazel repository cache
id: cache_bazel_repository_restore
uses: actions/cache/restore@v5
with:
path: |
~/.cache/bazel-repo-cache
key: bazel-cache-${{ inputs.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}
restore-keys: |
bazel-cache-${{ inputs.target }}
- name: Configure Bazel output root (Windows)
if: runner.os == 'Windows'
@@ -58,6 +65,10 @@ runs:
$repoContentsCache = Join-Path $env:RUNNER_TEMP "bazel-repo-contents-cache-$env:GITHUB_RUN_ID-$env:GITHUB_JOB"
"BAZEL_OUTPUT_USER_ROOT=$bazelOutputUserRoot" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
"BAZEL_REPO_CONTENTS_CACHE=$repoContentsCache" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
if (-not $hasDDrive) {
$repositoryCache = Join-Path $env:USERPROFILE '.cache\bazel-repo-cache'
"BAZEL_REPOSITORY_CACHE=$repositoryCache" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
}
- name: Expose MSVC SDK environment (Windows)
if: runner.os == 'Windows'
@@ -116,11 +127,6 @@ runs:
}
}
- name: Compute cache-stable Windows Bazel PATH
if: runner.os == 'Windows'
shell: pwsh
run: ./.github/scripts/compute-bazel-windows-path.ps1
- name: Enable Git long paths (Windows)
if: runner.os == 'Windows'
shell: pwsh

View File

@@ -1,49 +0,0 @@
name: setup-rusty-v8-musl
description: Download and verify musl rusty_v8 artifacts for Cargo builds.
inputs:
target:
description: Rust musl target triple.
required: true
runs:
using: composite
steps:
- name: Configure musl rusty_v8 artifact overrides and verify checksums
shell: bash
env:
TARGET: ${{ inputs.target }}
run: |
set -euo pipefail
case "${TARGET}" in
x86_64-unknown-linux-musl|aarch64-unknown-linux-musl)
;;
*)
echo "Unsupported musl rusty_v8 target: ${TARGET}" >&2
exit 1
;;
esac
version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)"
release_tag="rusty-v8-v${version}"
base_url="https://github.com/openai/codex/releases/download/${release_tag}"
binding_dir="${RUNNER_TEMP}/rusty_v8"
archive_path="${binding_dir}/librusty_v8_release_${TARGET}.a.gz"
binding_path="${binding_dir}/src_binding_release_${TARGET}.rs"
checksums_path="${binding_dir}/rusty_v8_release_${TARGET}.sha256"
checksums_source="${GITHUB_WORKSPACE}/third_party/v8/rusty_v8_${version//./_}.sha256"
mkdir -p "${binding_dir}"
curl -fsSL "${base_url}/librusty_v8_release_${TARGET}.a.gz" -o "${archive_path}"
curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}"
grep -E " (librusty_v8_release_${TARGET}[.]a[.]gz|src_binding_release_${TARGET}[.]rs)$" \
"${checksums_source}" > "${checksums_path}"
if [[ "$(wc -l < "${checksums_path}")" -ne 2 ]]; then
echo "Expected exactly two checksums for ${TARGET} in ${checksums_source}" >&2
exit 1
fi
(cd "${binding_dir}" && sha256sum -c "${checksums_path}")
echo "RUSTY_V8_ARCHIVE=${archive_path}" >> "${GITHUB_ENV}"
echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "${GITHUB_ENV}"

View File

@@ -4,9 +4,6 @@ inputs:
target:
description: Target triple for the artifacts to sign.
required: true
binaries:
description: Space-delimited binary basenames to sign.
default: "codex codex-responses-api-proxy codex-windows-sandbox-setup codex-command-runner"
client-id:
description: Azure Trusted Signing client ID.
required: true
@@ -30,31 +27,14 @@ runs:
using: composite
steps:
- name: Azure login for Trusted Signing (OIDC)
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2
uses: azure/login@v2
with:
client-id: ${{ inputs.client-id }}
tenant-id: ${{ inputs.tenant-id }}
subscription-id: ${{ inputs.subscription-id }}
- name: Prepare file list
id: prepare
shell: bash
env:
TARGET: ${{ inputs.target }}
BINARIES: ${{ inputs.binaries }}
run: |
set -euo pipefail
{
echo "files<<EOF"
for binary in ${BINARIES}; do
echo "${GITHUB_WORKSPACE}/codex-rs/target/${TARGET}/release/${binary}.exe"
done
echo "EOF"
} >> "$GITHUB_OUTPUT"
- name: Sign Windows binaries with Azure Trusted Signing
uses: azure/trusted-signing-action@1d365fec12862c4aa68fcac418143d73f0cea293 # v0
uses: azure/trusted-signing-action@v0
with:
endpoint: ${{ inputs.endpoint }}
trusted-signing-account-name: ${{ inputs.account-name }}
@@ -70,4 +50,8 @@ runs:
exclude-azure-developer-cli-credential: true
exclude-interactive-browser-credential: true
cache-dependencies: false
files: ${{ steps.prepare.outputs.files }}
files: |
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex.exe
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-responses-api-proxy.exe
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-windows-sandbox-setup.exe
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-command-runner.exe

View File

@@ -7,4 +7,3 @@ codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json
codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json
codex-rs/tui/tests/fixtures/oss-story.jsonl
codex-rs/tui_app_server/tests/fixtures/oss-story.jsonl
codex-rs/tui/src/app.rs

View File

@@ -11,11 +11,11 @@
"path": "codex"
},
"linux-x86_64": {
"regex": "^codex-x86_64-unknown-linux-musl-bundle\\.tar\\.zst$",
"regex": "^codex-x86_64-unknown-linux-musl\\.zst$",
"path": "codex"
},
"linux-aarch64": {
"regex": "^codex-aarch64-unknown-linux-musl-bundle\\.tar\\.zst$",
"regex": "^codex-aarch64-unknown-linux-musl\\.zst$",
"path": "codex"
},
"windows-x86_64": {
@@ -28,34 +28,6 @@
}
}
},
"codex-app-server": {
"platforms": {
"macos-aarch64": {
"regex": "^codex-app-server-aarch64-apple-darwin\\.zst$",
"path": "codex-app-server"
},
"macos-x86_64": {
"regex": "^codex-app-server-x86_64-apple-darwin\\.zst$",
"path": "codex-app-server"
},
"linux-x86_64": {
"regex": "^codex-app-server-x86_64-unknown-linux-musl\\.zst$",
"path": "codex-app-server"
},
"linux-aarch64": {
"regex": "^codex-app-server-aarch64-unknown-linux-musl\\.zst$",
"path": "codex-app-server"
},
"windows-x86_64": {
"regex": "^codex-app-server-x86_64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-app-server.exe"
},
"windows-aarch64": {
"regex": "^codex-app-server-aarch64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-app-server.exe"
}
}
},
"codex-responses-api-proxy": {
"platforms": {
"macos-aarch64": {
@@ -84,18 +56,6 @@
}
}
},
"bwrap": {
"platforms": {
"linux-x86_64": {
"regex": "^bwrap-x86_64-unknown-linux-musl\\.zst$",
"path": "bwrap"
},
"linux-aarch64": {
"regex": "^bwrap-aarch64-unknown-linux-musl\\.zst$",
"path": "bwrap"
}
}
},
"codex-command-runner": {
"platforms": {
"windows-x86_64": {

View File

@@ -1,6 +1,6 @@
# External (non-OpenAI) Pull Request Requirements
External code contributions are by invitation only. Please read the dedicated "Contributing" markdown file for details:
Before opening this Pull Request, please read the dedicated "Contributing" markdown file or your PR may be closed:
https://github.com/openai/codex/blob/main/docs/contributing.md
If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.

View File

@@ -1,113 +0,0 @@
<#
BuildBuddy cache keys include the action and test environment, so Bazel should
not inherit the full hosted-runner PATH on Windows. That PATH includes volatile
tool entries, such as Maven, that can change independently of this repo and
cause avoidable cache misses.
This script derives a smaller, cache-stable PATH that keeps the Windows
toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths,
MinGW runtime DLL paths for gnullvm-built tests, Git, PowerShell, Node, Python,
DotSlash, and the standard Windows system directories.
`setup-bazel-ci` runs this after exporting the MSVC environment, and the script
publishes the result via `GITHUB_ENV` as `CODEX_BAZEL_WINDOWS_PATH` so later
steps can pass that explicit PATH to Bazel.
#>
$stablePathEntries = New-Object System.Collections.Generic.List[string]
$seenEntries = [System.Collections.Generic.HashSet[string]]::new([System.StringComparer]::OrdinalIgnoreCase)
$windowsAppsPath = if ([string]::IsNullOrWhiteSpace($env:LOCALAPPDATA)) {
$null
} else {
"$($env:LOCALAPPDATA)\Microsoft\WindowsApps"
}
$windowsDir = if ($env:WINDIR) {
$env:WINDIR
} elseif ($env:SystemRoot) {
$env:SystemRoot
} else {
$null
}
function Add-StablePathEntry {
param([string]$PathEntry)
if ([string]::IsNullOrWhiteSpace($PathEntry)) {
return
}
if ($seenEntries.Add($PathEntry)) {
[void]$stablePathEntries.Add($PathEntry)
}
}
foreach ($pathEntry in ($env:PATH -split ';')) {
if ([string]::IsNullOrWhiteSpace($pathEntry)) {
continue
}
if (
$pathEntry -like '*Microsoft Visual Studio*' -or
$pathEntry -like '*Windows Kits*' -or
$pathEntry -like '*Microsoft SDKs*' -or
$pathEntry -eq 'C:\mingw64\bin' -or
$pathEntry -like 'C:\msys64\*\bin' -or
$pathEntry -like 'C:\Program Files\Git\*' -or
$pathEntry -like 'C:\Program Files\PowerShell\*' -or
$pathEntry -like 'C:\hostedtoolcache\windows\node\*' -or
$pathEntry -like 'C:\hostedtoolcache\windows\Python\*' -or
$pathEntry -eq 'D:\a\_temp\install-dotslash\bin' -or
($windowsDir -and ($pathEntry -eq $windowsDir -or $pathEntry -like "${windowsDir}\*"))
) {
Add-StablePathEntry $pathEntry
}
}
$gitCommand = Get-Command git -ErrorAction SilentlyContinue
if ($gitCommand) {
Add-StablePathEntry (Split-Path $gitCommand.Source -Parent)
}
$nodeCommand = Get-Command node -ErrorAction SilentlyContinue
if ($nodeCommand) {
Add-StablePathEntry (Split-Path $nodeCommand.Source -Parent)
}
$python3Command = Get-Command python3 -ErrorAction SilentlyContinue
if ($python3Command) {
Add-StablePathEntry (Split-Path $python3Command.Source -Parent)
}
$pythonCommand = Get-Command python -ErrorAction SilentlyContinue
if ($pythonCommand) {
Add-StablePathEntry (Split-Path $pythonCommand.Source -Parent)
}
$pwshCommand = Get-Command pwsh -ErrorAction SilentlyContinue
if ($pwshCommand) {
Add-StablePathEntry (Split-Path $pwshCommand.Source -Parent)
}
foreach ($mingwPath in @('C:\mingw64\bin', 'C:\msys64\mingw64\bin', 'C:\msys64\ucrt64\bin')) {
if (Test-Path $mingwPath) {
Add-StablePathEntry $mingwPath
}
}
if ($windowsAppsPath) {
Add-StablePathEntry $windowsAppsPath
}
if ($stablePathEntries.Count -eq 0) {
throw 'Failed to derive cache-stable Windows PATH.'
}
if ([string]::IsNullOrWhiteSpace($env:GITHUB_ENV)) {
throw 'GITHUB_ENV must be set.'
}
$stablePath = $stablePathEntries -join ';'
Write-Host 'Derived CODEX_BAZEL_WINDOWS_PATH entries:'
foreach ($pathEntry in $stablePathEntries) {
Write-Host " $pathEntry"
}
"CODEX_BAZEL_WINDOWS_PATH=$stablePath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append

View File

@@ -2,6 +2,16 @@
set -euo pipefail
ci_config=ci-linux
case "${RUNNER_OS:-}" in
macOS)
ci_config=ci-macos
;;
Windows)
ci_config=ci-windows
;;
esac
bazel_lint_args=("$@")
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
has_host_platform_override=0
@@ -34,6 +44,29 @@ if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
bazel_lint_args+=("--skip_incompatible_explicit_targets")
fi
bazel_startup_args=()
if [[ -n "${BAZEL_OUTPUT_USER_ROOT:-}" ]]; then
bazel_startup_args+=("--output_user_root=${BAZEL_OUTPUT_USER_ROOT}")
fi
run_bazel() {
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
MSYS2_ARG_CONV_EXCL='*' bazel "$@"
return
fi
bazel "$@"
}
run_bazel_with_startup_args() {
if [[ ${#bazel_startup_args[@]} -gt 0 ]]; then
run_bazel "${bazel_startup_args[@]}" "$@"
return
fi
run_bazel "$@"
}
read_query_labels() {
local query="$1"
local query_stdout
@@ -41,10 +74,12 @@ read_query_labels() {
query_stdout="$(mktemp)"
query_stderr="$(mktemp)"
if ! ./.github/scripts/run-bazel-query-ci.sh \
if ! run_bazel_with_startup_args \
--noexperimental_remote_repo_contents_cache \
query \
--keep_going \
--output=label \
-- "$query" >"$query_stdout" 2>"$query_stderr"; then
"$query" >"$query_stdout" 2>"$query_stderr"; then
cat "$query_stderr" >&2
rm -f "$query_stdout" "$query_stderr"
exit 1

View File

@@ -3,10 +3,8 @@
set -euo pipefail
print_failed_bazel_test_logs=0
print_failed_bazel_action_summary=0
use_node_test_env=0
remote_download_toplevel=0
windows_msvc_host_platform=0
windows_cross_compile=0
while [[ $# -gt 0 ]]; do
case "$1" in
@@ -14,22 +12,14 @@ while [[ $# -gt 0 ]]; do
print_failed_bazel_test_logs=1
shift
;;
--print-failed-action-summary)
print_failed_bazel_action_summary=1
--use-node-test-env)
use_node_test_env=1
shift
;;
--remote-download-toplevel)
remote_download_toplevel=1
shift
;;
--windows-msvc-host-platform)
windows_msvc_host_platform=1
shift
;;
--windows-cross-compile)
windows_cross_compile=1
shift
;;
--)
shift
break
@@ -42,7 +32,7 @@ while [[ $# -gt 0 ]]; do
done
if [[ $# -eq 0 ]]; then
echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] [--windows-cross-compile] -- <bazel args> -- <targets>" >&2
echo "Usage: $0 [--print-failed-test-logs] [--use-node-test-env] [--remote-download-toplevel] -- <bazel args> -- <targets>" >&2
exit 1
fi
@@ -66,11 +56,7 @@ case "${RUNNER_OS:-}" in
ci_config=ci-macos
;;
Windows)
if [[ $windows_cross_compile -eq 1 ]]; then
ci_config=ci-windows-cross
else
ci_config=ci-windows
fi
ci_config=ci-windows
;;
esac
@@ -78,44 +64,19 @@ print_bazel_test_log_tails() {
local console_log="$1"
local testlogs_dir
local -a bazel_info_cmd=(bazel)
local -a bazel_info_args=(info)
if (( ${#bazel_startup_args[@]} > 0 )); then
bazel_info_cmd+=("${bazel_startup_args[@]}")
fi
# `bazel info` needs the same CI config as the failed test invocation so
# platform-specific output roots match. On Windows, omitting `ci-windows`
# would point at `local_windows-fastbuild` even when the test ran with the
# MSVC host platform under `local_windows_msvc-fastbuild`.
if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
bazel_info_args+=(
"--config=${ci_config}"
"--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
)
fi
# Only pass flags that affect Bazel's output-root selection or repository
# lookup. Test/build-only flags such as execution logs or remote download
# mode can make `bazel info` fail, which would hide the real test log path.
for arg in "${post_config_bazel_args[@]}"; do
case "$arg" in
--host_platform=* | --repo_contents_cache=* | --repository_cache=*)
bazel_info_args+=("$arg")
;;
esac
done
testlogs_dir="$(run_bazel "${bazel_info_cmd[@]:1}" \
--noexperimental_remote_repo_contents_cache \
"${bazel_info_args[@]}" \
bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
testlogs_dir="$(run_bazel "${bazel_info_cmd[@]:1}" info bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
local failed_targets=()
while IFS= read -r target; do
failed_targets+=("$target")
done < <(
grep -E '^(FAIL: //|ERROR: .* Testing //)' "$console_log" \
| sed -E 's#^FAIL: (//[^ ]+).*#\1#; s#^ERROR: .* Testing (//[^ ]+) failed:.*#\1#' \
grep -E '^FAIL: //' "$console_log" \
| sed -E 's#^FAIL: (//[^ ]+).*#\1#' \
| sort -u
)
@@ -126,14 +87,8 @@ print_bazel_test_log_tails() {
for target in "${failed_targets[@]}"; do
local rel_path="${target#//}"
rel_path="${rel_path/://}"
rel_path="${rel_path/:/\/}"
local test_log="${testlogs_dir}/${rel_path}/test.log"
local reported_test_log
reported_test_log="$(grep -F "FAIL: ${target} " "$console_log" | sed -nE 's#.* \(see (.*[\\/]test\.log)\).*#\1#p' | head -n 1 || true)"
if [[ -n "$reported_test_log" ]]; then
reported_test_log="${reported_test_log//\\//}"
test_log="$reported_test_log"
fi
echo "::group::Bazel test log tail for ${target}"
if [[ -f "$test_log" ]]; then
@@ -145,93 +100,6 @@ print_bazel_test_log_tails() {
done
}
print_bazel_action_failure_summary() {
local console_log="$1"
local escaped_summary
local summary
summary="$(
awk '
function clean(line) {
gsub(sprintf("%c", 27) "\\[[0-9;]*m", "", line)
sub(/^.*\t[^\t]*\t[0-9TZ:._-]+ /, "", line)
return line
}
function is_diagnostic(line) {
return line ~ /^(error(\[[^]]+\])?:|warning:|note:|help:)/ ||
line ~ /^[[:space:]]+-->/ ||
line ~ /^[[:space:]]*[0-9]+[[:space:]]+\|/ ||
line ~ /^[[:space:]]*\|/ ||
line ~ /^[[:space:]]+= (note|help):/ ||
line ~ /^[[:space:]]*\^[[:space:]^~-]*$/ ||
line ~ /^For more information/ ||
line ~ /^error: aborting/
}
{
line = clean($0)
}
line ~ /^ERROR: .* failed:/ {
if (printed) {
print ""
}
print line
in_failure = 1
seen_diagnostic = 0
printed = 1
next
}
in_failure && is_diagnostic(line) {
print line
seen_diagnostic = 1
next
}
in_failure && seen_diagnostic && line == "" {
print ""
next
}
in_failure && seen_diagnostic {
in_failure = 0
seen_diagnostic = 0
next
}
' "$console_log"
)"
if [[ -z "$summary" ]]; then
summary="$(grep -E '^ERROR: |^FAILED: ' "$console_log" | tail -n 50 || true)"
fi
if [[ -z "$summary" ]]; then
echo "No Bazel action failures were found in the captured console output."
return
fi
if [[ "${GITHUB_ACTIONS:-}" == "true" ]]; then
escaped_summary="$(
printf '%s' "$summary" \
| awk 'BEGIN { ORS = "" } {
gsub(/%/, "%25")
gsub(/\r/, "%0D")
print sep $0
sep = "%0A"
}'
)"
echo "::error title=Bazel failed action diagnostics::${escaped_summary}"
fi
echo
echo "Bazel failed action diagnostics:"
echo "--------------------------------"
printf '%s\n' "$summary"
echo "--------------------------------"
}
bazel_args=()
bazel_targets=()
found_target_separator=0
@@ -253,56 +121,20 @@ if [[ ${#bazel_args[@]} -eq 0 || ${#bazel_targets[@]} -eq 0 ]]; then
exit 1
fi
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
# Fork PRs do not receive the BuildBuddy secret needed for the remote
# cross-compile config. Preserve the previous local Windows build shape.
windows_msvc_host_platform=1
if [[ $use_node_test_env -eq 1 && "${RUNNER_OS:-}" != "Windows" ]]; then
# Bazel test sandboxes on macOS may resolve an older Homebrew `node`
# before the `actions/setup-node` runtime on PATH.
node_bin="$(which node)"
bazel_args+=("--test_env=CODEX_JS_REPL_NODE_PATH=${node_bin}")
fi
post_config_bazel_args=()
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_msvc_host_platform -eq 1 ]]; then
has_host_platform_override=0
for arg in "${bazel_args[@]}"; do
if [[ "$arg" == --host_platform=* ]]; then
has_host_platform_override=1
break
fi
done
if [[ $has_host_platform_override -eq 0 ]]; then
# Use the MSVC Windows platform for jobs that need helper binaries like
# Rust test wrappers and V8 generators to resolve a compatible toolchain.
# Callers that need a different Windows target platform should pass an
# explicit `--platforms=...` flag.
post_config_bazel_args+=("--host_platform=//:local_windows_msvc")
fi
fi
if [[ $remote_download_toplevel -eq 1 ]]; then
# Override the CI config's remote_download_minimal setting when callers need
# the built artifact to exist on disk after the command completes.
post_config_bazel_args+=(--remote_download_toplevel)
fi
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
# `--enable_platform_specific_config` expands `common:windows` on Windows
# hosts after ordinary rc configs, which can override `ci-windows-cross`'s
# RBE host platform. Repeat the host platform on the command line so V8 and
# other genrules execute on Linux RBE workers instead of Git Bash locally.
#
# Bazel also derives the default genrule shell from the client host. Without
# an explicit shell executable, remote Linux actions can be asked to run
# `C:\Program Files\Git\usr\bin\bash.exe`.
post_config_bazel_args+=(--host_platform=//:rbe --shell_executable=/bin/bash)
fi
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
# The Windows cross-compile config depends on remote execution. Fork PRs do
# not receive the BuildBuddy secret, so fall back to the existing local build
# shape and keep its lower concurrency cap.
post_config_bazel_args+=(--jobs=8)
fi
if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
# Windows self-hosted runners can run multiple Bazel jobs concurrently. Give
# each job its own repo contents cache so they do not fight over the shared
@@ -314,64 +146,28 @@ if [[ -n "${BAZEL_REPOSITORY_CACHE:-}" ]]; then
post_config_bazel_args+=("--repository_cache=${BAZEL_REPOSITORY_CACHE}")
fi
if [[ -n "${CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR:-}" ]]; then
post_config_bazel_args+=(
"--execution_log_compact_file=${CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR}/execution-log-${bazel_args[0]}-${GITHUB_JOB:-local}-$$.zst"
)
fi
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
pass_windows_build_env=1
if [[ $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
# Remote build actions execute on Linux RBE workers. Passing the Windows
# runner's build environment there makes Bazel genrules try to execute
# C:\Program Files\Git\usr\bin\bash.exe on Linux.
pass_windows_build_env=0
fi
windows_action_env_vars=(
INCLUDE
LIB
LIBPATH
PATH
UCRTVersion
UniversalCRTSdkDir
VCINSTALLDIR
VCToolsInstallDir
WindowsLibPath
WindowsSdkBinPath
WindowsSdkDir
WindowsSDKLibVersion
WindowsSDKVersion
)
if [[ $pass_windows_build_env -eq 1 ]]; then
windows_action_env_vars=(
INCLUDE
LIB
LIBPATH
UCRTVersion
UniversalCRTSdkDir
VCINSTALLDIR
VCToolsInstallDir
WindowsLibPath
WindowsSdkBinPath
WindowsSdkDir
WindowsSDKLibVersion
WindowsSDKVersion
)
for env_var in "${windows_action_env_vars[@]}"; do
if [[ -n "${!env_var:-}" ]]; then
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
fi
done
fi
if [[ -z "${CODEX_BAZEL_WINDOWS_PATH:-}" ]]; then
echo "CODEX_BAZEL_WINDOWS_PATH must be set for Windows Bazel CI." >&2
exit 1
fi
if [[ $pass_windows_build_env -eq 1 ]]; then
post_config_bazel_args+=(
"--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
"--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
)
elif [[ $windows_cross_compile -eq 1 ]]; then
# Remote build actions run on Linux RBE workers. Give their shell snippets
# a Linux PATH while preserving CODEX_BAZEL_WINDOWS_PATH below for local
# Windows test execution.
post_config_bazel_args+=(
"--action_env=PATH=/usr/bin:/bin"
"--host_action_env=PATH=/usr/bin:/bin"
)
fi
post_config_bazel_args+=("--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}")
for env_var in "${windows_action_env_vars[@]}"; do
if [[ -n "${!env_var:-}" ]]; then
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
fi
done
fi
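The indirect expansion `${!env_var:-}` in the loop above forwards only the MSVC/SDK variables that are actually set, emitting a matched `--action_env`/`--host_action_env` pair for each. A hedged Python sketch of the same filtering (variable list abbreviated from the one above):

import os

WINDOWS_ACTION_ENV_VARS = ["INCLUDE", "LIB", "LIBPATH", "VCINSTALLDIR", "WindowsSdkDir"]

def windows_env_flags() -> list[str]:
    flags = []
    for name in WINDOWS_ACTION_ENV_VARS:
        if os.environ.get(name):  # skip unset or empty variables
            flags += [f"--action_env={name}", f"--host_action_env={name}"]
    return flags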
bazel_console_log="$(mktemp)"
@@ -443,9 +239,6 @@ else
fi
if [[ ${bazel_status:-0} -ne 0 ]]; then
if [[ $print_failed_bazel_action_summary -eq 1 ]]; then
print_bazel_action_failure_summary "$bazel_console_log"
fi
if [[ $print_failed_bazel_test_logs -eq 1 ]]; then
print_bazel_test_log_tails "$bazel_console_log"
fi

View File

@@ -1,84 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Run Bazel queries with the same CI startup settings as the main build/test
# invocation so target-discovery queries can reuse the same Bazel server.
query_args=()
windows_cross_compile=0
while [[ $# -gt 0 ]]; do
case "$1" in
--windows-cross-compile)
windows_cross_compile=1
shift
;;
--)
shift
break
;;
*)
query_args+=("$1")
shift
;;
esac
done
if [[ $# -ne 1 ]]; then
echo "Usage: $0 [--windows-cross-compile] [<bazel query args>...] -- <query expression>" >&2
exit 1
fi
query_expression="$1"
ci_config=ci-linux
case "${RUNNER_OS:-}" in
macOS)
ci_config=ci-macos
;;
Windows)
if [[ $windows_cross_compile -eq 1 ]]; then
ci_config=ci-windows-cross
else
ci_config=ci-windows
fi
;;
esac
bazel_startup_args=()
if [[ -n "${BAZEL_OUTPUT_USER_ROOT:-}" ]]; then
bazel_startup_args+=("--output_user_root=${BAZEL_OUTPUT_USER_ROOT}")
fi
run_bazel() {
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
MSYS2_ARG_CONV_EXCL='*' bazel "$@"
return
fi
bazel "$@"
}
bazel_query_args=(--noexperimental_remote_repo_contents_cache query)
if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
bazel_query_args+=(
"--config=${ci_config}"
"--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
)
fi
if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
bazel_query_args+=("--repo_contents_cache=${BAZEL_REPO_CONTENTS_CACHE}")
fi
if [[ -n "${BAZEL_REPOSITORY_CACHE:-}" ]]; then
bazel_query_args+=("--repository_cache=${BAZEL_REPOSITORY_CACHE}")
fi
bazel_query_args+=("${query_args[@]}" "$query_expression")
if (( ${#bazel_startup_args[@]} > 0 )); then
run_bazel "${bazel_startup_args[@]}" "${bazel_query_args[@]}"
else
run_bazel "${bazel_query_args[@]}"
fi
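Callers of this wrapper (for example the clippy target-listing scripts) pass optional query flags, then `--`, then exactly one query expression. A sketch of an invocation from Python; the wrapper's path is hypothetical, and the wrapper selects the `ci-*` config itself:

import subprocess

# Path is illustrative; flags before "--" go to `bazel query`, and the
# single argument after "--" is the query expression.
result = subprocess.run(
    [".github/scripts/bazel-ci-query.sh", "--", "kind(rust_test, //codex-rs/...)"],
    check=True,
    capture_output=True,
    text=True,
)
targets = result.stdout.splitlines()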

View File

@@ -4,7 +4,6 @@ from __future__ import annotations
import argparse
import gzip
import hashlib
import re
import shutil
import subprocess
@@ -13,16 +12,8 @@ import tempfile
import tomllib
from pathlib import Path
from rusty_v8_module_bazel import (
RustyV8ChecksumError,
check_module_bazel,
update_module_bazel,
)
ROOT = Path(__file__).resolve().parents[2]
MODULE_BAZEL = ROOT / "MODULE.bazel"
RUSTY_V8_CHECKSUMS_DIR = ROOT / "third_party" / "v8"
MUSL_RUNTIME_ARCHIVE_LABELS = [
"@llvm//runtimes/libcxx:libcxx.static",
"@llvm//runtimes/libcxx:libcxxabi.static",
@@ -63,10 +54,8 @@ def bazel_output_files(
platform: str,
labels: list[str],
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
) -> list[Path]:
expression = "set(" + " ".join(labels) + ")"
bazel_configs = bazel_configs or []
result = subprocess.run(
[
"bazel",
@@ -74,7 +63,6 @@ def bazel_output_files(
"-c",
compilation_mode,
f"--platforms=@llvm//platforms:{platform}",
*[f"--config={config}" for config in bazel_configs],
"--output=files",
expression,
],
@@ -90,9 +78,7 @@ def bazel_build(
platform: str,
labels: list[str],
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
) -> None:
bazel_configs = bazel_configs or []
subprocess.run(
[
"bazel",
@@ -100,7 +86,6 @@ def bazel_build(
"-c",
compilation_mode,
f"--platforms=@llvm//platforms:{platform}",
*[f"--config={config}" for config in bazel_configs],
*labels,
],
cwd=ROOT,
@@ -112,14 +97,13 @@ def ensure_bazel_output_files(
platform: str,
labels: list[str],
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
) -> list[Path]:
outputs = bazel_output_files(platform, labels, compilation_mode, bazel_configs)
outputs = bazel_output_files(platform, labels, compilation_mode)
if all(path.exists() for path in outputs):
return outputs
bazel_build(platform, labels, compilation_mode, bazel_configs)
outputs = bazel_output_files(platform, labels, compilation_mode, bazel_configs)
bazel_build(platform, labels, compilation_mode)
outputs = bazel_output_files(platform, labels, compilation_mode)
missing = [str(path) for path in outputs if not path.exists()]
if missing:
raise SystemExit(f"missing built outputs for {labels}: {missing}")
@@ -162,24 +146,6 @@ def resolved_v8_crate_version() -> str:
return matches[0]
def rusty_v8_checksum_manifest_path(version: str) -> Path:
return RUSTY_V8_CHECKSUMS_DIR / f"rusty_v8_{version.replace('.', '_')}.sha256"
def command_version(version: str | None) -> str:
if version is not None:
return version
return resolved_v8_crate_version()
def command_manifest_path(manifest: Path | None, version: str) -> Path:
if manifest is None:
return rusty_v8_checksum_manifest_path(version)
if manifest.is_absolute():
return manifest
return ROOT / manifest
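Together these helpers resolve the version (an explicit flag, else the Cargo-locked v8 crate version) and then the manifest path, defaulting to the version-slugged file under third_party/v8. A small sketch of the naming scheme used by rusty_v8_checksum_manifest_path above:

def manifest_name(version: str) -> str:
    # "146.4.0" -> "rusty_v8_146_4_0.sha256", matching the helper above
    return f"rusty_v8_{version.replace('.', '_')}.sha256"

assert manifest_name("146.4.0") == "rusty_v8_146_4_0.sha256"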
def staged_archive_name(target: str, source_path: Path) -> str:
if source_path.suffix == ".lib":
return f"rusty_v8_release_{target}.lib.gz"
@@ -194,9 +160,8 @@ def single_bazel_output_file(
platform: str,
label: str,
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
) -> Path:
outputs = ensure_bazel_output_files(platform, [label], compilation_mode, bazel_configs)
outputs = ensure_bazel_output_files(platform, [label], compilation_mode)
if len(outputs) != 1:
raise SystemExit(f"expected exactly one output for {label}, found {outputs}")
return outputs[0]
@@ -206,17 +171,11 @@ def merged_musl_archive(
platform: str,
lib_path: Path,
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
) -> Path:
llvm_ar = single_bazel_output_file(platform, LLVM_AR_LABEL, compilation_mode, bazel_configs)
llvm_ranlib = single_bazel_output_file(
platform,
LLVM_RANLIB_LABEL,
compilation_mode,
bazel_configs,
)
llvm_ar = single_bazel_output_file(platform, LLVM_AR_LABEL, compilation_mode)
llvm_ranlib = single_bazel_output_file(platform, LLVM_RANLIB_LABEL, compilation_mode)
runtime_archives = [
single_bazel_output_file(platform, label, compilation_mode, bazel_configs)
single_bazel_output_file(platform, label, compilation_mode)
for label in MUSL_RUNTIME_ARCHIVE_LABELS
]
@@ -247,13 +206,11 @@ def stage_release_pair(
target: str,
output_dir: Path,
compilation_mode: str = "fastbuild",
bazel_configs: list[str] | None = None,
) -> None:
outputs = ensure_bazel_output_files(
platform,
[release_pair_label(target)],
compilation_mode,
bazel_configs,
)
try:
@@ -270,7 +227,7 @@ def stage_release_pair(
staged_library = output_dir / staged_archive_name(target, lib_path)
staged_binding = output_dir / f"src_binding_release_{target}.rs"
source_archive = (
merged_musl_archive(platform, lib_path, compilation_mode, bazel_configs)
merged_musl_archive(platform, lib_path, compilation_mode)
if is_musl_archive_target(target, lib_path)
else lib_path
)
@@ -287,18 +244,8 @@ def stage_release_pair(
shutil.copyfile(binding_path, staged_binding)
staged_checksums = output_dir / f"rusty_v8_release_{target}.sha256"
with staged_checksums.open("w", encoding="utf-8") as checksums:
for path in [staged_library, staged_binding]:
digest = hashlib.sha256()
with path.open("rb") as artifact:
for chunk in iter(lambda: artifact.read(1024 * 1024), b""):
digest.update(chunk)
checksums.write(f"{digest.hexdigest()} {path.name}\n")
print(staged_library)
print(staged_binding)
print(staged_checksums)
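The staged `.sha256` file uses the classic `<digest>  <filename>` layout, with digests computed over 1 MiB chunks so large archives never have to be loaded into memory at once. A small sketch that re-verifies staged artifacts against such a manifest, assuming the files sit next to it as they do in the output directory above:

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1024 * 1024) -> str:
    digest = hashlib.sha256()
    with path.open("rb") as artifact:
        for chunk in iter(lambda: artifact.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

def verify_staged(checksums_path: Path) -> None:
    for line in checksums_path.read_text(encoding="utf-8").splitlines():
        expected, name = line.split()
        actual = sha256_of(checksums_path.parent / name)
        if actual != expected:
            raise SystemExit(f"checksum mismatch for {name}: {actual}")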
def parse_args() -> argparse.Namespace:
@@ -309,12 +256,6 @@ def parse_args() -> argparse.Namespace:
stage_release_pair_parser.add_argument("--platform", required=True)
stage_release_pair_parser.add_argument("--target", required=True)
stage_release_pair_parser.add_argument("--output-dir", required=True)
stage_release_pair_parser.add_argument(
"--bazel-config",
action="append",
default=[],
dest="bazel_configs",
)
stage_release_pair_parser.add_argument(
"--compilation-mode",
default="fastbuild",
@@ -323,24 +264,6 @@ def parse_args() -> argparse.Namespace:
subparsers.add_parser("resolved-v8-crate-version")
check_module_bazel_parser = subparsers.add_parser("check-module-bazel")
check_module_bazel_parser.add_argument("--version")
check_module_bazel_parser.add_argument("--manifest", type=Path)
check_module_bazel_parser.add_argument(
"--module-bazel",
type=Path,
default=MODULE_BAZEL,
)
update_module_bazel_parser = subparsers.add_parser("update-module-bazel")
update_module_bazel_parser.add_argument("--version")
update_module_bazel_parser.add_argument("--manifest", type=Path)
update_module_bazel_parser.add_argument(
"--module-bazel",
type=Path,
default=MODULE_BAZEL,
)
return parser.parse_args()
@@ -352,28 +275,11 @@ def main() -> int:
target=args.target,
output_dir=Path(args.output_dir),
compilation_mode=args.compilation_mode,
bazel_configs=args.bazel_configs,
)
return 0
if args.command == "resolved-v8-crate-version":
print(resolved_v8_crate_version())
return 0
if args.command == "check-module-bazel":
version = command_version(args.version)
manifest_path = command_manifest_path(args.manifest, version)
try:
check_module_bazel(args.module_bazel, manifest_path, version)
except RustyV8ChecksumError as exc:
raise SystemExit(str(exc)) from exc
return 0
if args.command == "update-module-bazel":
version = command_version(args.version)
manifest_path = command_manifest_path(args.manifest, version)
try:
update_module_bazel(args.module_bazel, manifest_path, version)
except RustyV8ChecksumError as exc:
raise SystemExit(str(exc)) from exc
return 0
raise SystemExit(f"unsupported command: {args.command}")

View File

@@ -1,230 +0,0 @@
#!/usr/bin/env python3
from __future__ import annotations
import re
from dataclasses import dataclass
from pathlib import Path
SHA256_RE = re.compile(r"[0-9a-f]{64}")
HTTP_FILE_BLOCK_RE = re.compile(r"(?ms)^http_file\(\n.*?^\)\n?")
class RustyV8ChecksumError(ValueError):
pass
@dataclass(frozen=True)
class RustyV8HttpFile:
start: int
end: int
block: str
name: str
downloaded_file_path: str
sha256: str | None
def parse_checksum_manifest(path: Path) -> dict[str, str]:
try:
lines = path.read_text(encoding="utf-8").splitlines()
except FileNotFoundError as exc:
raise RustyV8ChecksumError(f"missing checksum manifest: {path}") from exc
checksums: dict[str, str] = {}
for line_number, line in enumerate(lines, 1):
if not line.strip():
continue
parts = line.split()
if len(parts) != 2:
raise RustyV8ChecksumError(
f"{path}:{line_number}: expected '<sha256> <filename>'"
)
checksum, filename = parts
if not SHA256_RE.fullmatch(checksum):
raise RustyV8ChecksumError(
f"{path}:{line_number}: invalid SHA-256 digest for {filename}"
)
if not filename or filename in {".", ".."} or "/" in filename:
raise RustyV8ChecksumError(
f"{path}:{line_number}: expected a bare artifact filename"
)
if filename in checksums:
raise RustyV8ChecksumError(
f"{path}:{line_number}: duplicate checksum for {filename}"
)
checksums[filename] = checksum
if not checksums:
raise RustyV8ChecksumError(f"empty checksum manifest: {path}")
return checksums
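For reference, a manifest this parser accepts is two whitespace-separated columns per line, one per artifact; anything else (bad digest, path separators, duplicates, an empty file) raises RustyV8ChecksumError. A quick illustrative round trip using the function above, with an invented all-`a` digest:

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    manifest = Path(tmp) / "rusty_v8_146_4_0.sha256"
    manifest.write_text(
        "a" * 64 + "  librusty_v8_release_x86_64-unknown-linux-gnu.a.gz\n",
        encoding="utf-8",
    )
    # parse_checksum_manifest as defined above
    checksums = parse_checksum_manifest(manifest)
    assert checksums == {
        "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz": "a" * 64
    }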
def string_field(block: str, field: str) -> str | None:
# Matches one-line string fields inside http_file blocks, e.g. `sha256 = "...",`.
match = re.search(rf'^\s*{re.escape(field)}\s*=\s*"([^"]+)",\s*$', block, re.M)
if match:
return match.group(1)
return None
def rusty_v8_http_files(module_bazel: str, version: str) -> list[RustyV8HttpFile]:
version_slug = version.replace(".", "_")
name_prefix = f"rusty_v8_{version_slug}_"
entries = []
for match in HTTP_FILE_BLOCK_RE.finditer(module_bazel):
block = match.group(0)
name = string_field(block, "name")
if not name or not name.startswith(name_prefix):
continue
downloaded_file_path = string_field(block, "downloaded_file_path")
if not downloaded_file_path:
raise RustyV8ChecksumError(
f"MODULE.bazel {name} is missing downloaded_file_path"
)
entries.append(
RustyV8HttpFile(
start=match.start(),
end=match.end(),
block=block,
name=name,
downloaded_file_path=downloaded_file_path,
sha256=string_field(block, "sha256"),
)
)
return entries
def module_entry_set_errors(
entries: list[RustyV8HttpFile],
checksums: dict[str, str],
version: str,
) -> list[str]:
errors = []
if not entries:
errors.append(f"MODULE.bazel has no rusty_v8 http_file entries for {version}")
return errors
module_files: dict[str, RustyV8HttpFile] = {}
duplicate_files = set()
for entry in entries:
if entry.downloaded_file_path in module_files:
duplicate_files.add(entry.downloaded_file_path)
module_files[entry.downloaded_file_path] = entry
for filename in sorted(duplicate_files):
errors.append(f"MODULE.bazel has duplicate http_file entries for {filename}")
for filename in sorted(set(module_files) - set(checksums)):
entry = module_files[filename]
errors.append(f"MODULE.bazel {entry.name} has no checksum in the manifest")
for filename in sorted(set(checksums) - set(module_files)):
errors.append(f"manifest has {filename}, but MODULE.bazel has no http_file")
return errors
def module_checksum_errors(
entries: list[RustyV8HttpFile],
checksums: dict[str, str],
) -> list[str]:
errors = []
for entry in entries:
expected = checksums.get(entry.downloaded_file_path)
if expected is None:
continue
if entry.sha256 is None:
errors.append(f"MODULE.bazel {entry.name} is missing sha256")
elif entry.sha256 != expected:
errors.append(
f"MODULE.bazel {entry.name} has sha256 {entry.sha256}, "
f"expected {expected}"
)
return errors
def raise_checksum_errors(message: str, errors: list[str]) -> None:
if errors:
formatted_errors = "\n".join(f"- {error}" for error in errors)
raise RustyV8ChecksumError(f"{message}:\n{formatted_errors}")
def check_module_bazel_text(
module_bazel: str,
checksums: dict[str, str],
version: str,
) -> None:
entries = rusty_v8_http_files(module_bazel, version)
errors = [
*module_entry_set_errors(entries, checksums, version),
*module_checksum_errors(entries, checksums),
]
raise_checksum_errors("rusty_v8 MODULE.bazel checksum drift", errors)
def block_with_sha256(block: str, checksum: str) -> str:
sha256_line_re = re.compile(r'(?m)^(\s*)sha256\s*=\s*"[0-9a-f]+",\s*$')
if sha256_line_re.search(block):
return sha256_line_re.sub(
lambda match: f'{match.group(1)}sha256 = "{checksum}",',
block,
count=1,
)
downloaded_file_path_match = re.search(
r'(?m)^(\s*)downloaded_file_path\s*=\s*"[^"]+",\n',
block,
)
if not downloaded_file_path_match:
raise RustyV8ChecksumError("http_file block is missing downloaded_file_path")
insert_at = downloaded_file_path_match.end()
indent = downloaded_file_path_match.group(1)
return f'{block[:insert_at]}{indent}sha256 = "{checksum}",\n{block[insert_at:]}'
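In other words, a block that already carries a sha256 line gets it rewritten in place, while a block without one gains a sha256 line directly after downloaded_file_path, reusing that line's indentation. A compact illustration of the insertion case, calling the function above with an invented digest:

block = (
    "http_file(\n"
    '    name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",\n'
    '    downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",\n'
    ")\n"
)
updated = block_with_sha256(block, "2" * 64)  # block_with_sha256 as defined above
assert '    sha256 = "' + "2" * 64 + '",' in updated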
def update_module_bazel_text(
module_bazel: str,
checksums: dict[str, str],
version: str,
) -> str:
entries = rusty_v8_http_files(module_bazel, version)
errors = module_entry_set_errors(entries, checksums, version)
raise_checksum_errors("cannot update rusty_v8 MODULE.bazel checksums", errors)
updated = []
previous_end = 0
for entry in entries:
updated.append(module_bazel[previous_end : entry.start])
updated.append(
block_with_sha256(entry.block, checksums[entry.downloaded_file_path])
)
previous_end = entry.end
updated.append(module_bazel[previous_end:])
return "".join(updated)
def check_module_bazel(
module_bazel_path: Path,
manifest_path: Path,
version: str,
) -> None:
checksums = parse_checksum_manifest(manifest_path)
module_bazel = module_bazel_path.read_text(encoding="utf-8")
check_module_bazel_text(module_bazel, checksums, version)
print(f"{module_bazel_path} rusty_v8 {version} checksums match {manifest_path}")
def update_module_bazel(
module_bazel_path: Path,
manifest_path: Path,
version: str,
) -> None:
checksums = parse_checksum_manifest(manifest_path)
module_bazel = module_bazel_path.read_text(encoding="utf-8")
updated_module_bazel = update_module_bazel_text(module_bazel, checksums, version)
if updated_module_bazel == module_bazel:
print(f"{module_bazel_path} rusty_v8 {version} checksums are already current")
return
module_bazel_path.write_text(updated_module_bazel, encoding="utf-8")
print(f"updated {module_bazel_path} rusty_v8 {version} checksums")

View File

@@ -1,126 +0,0 @@
#!/usr/bin/env python3
from __future__ import annotations
import textwrap
import unittest
import rusty_v8_module_bazel
class RustyV8BazelTest(unittest.TestCase):
def test_update_module_bazel_replaces_and_inserts_sha256(self) -> None:
module_bazel = textwrap.dedent(
"""\
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "0000000000000000000000000000000000000000000000000000000000000000",
urls = [
"https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",
urls = [
"https://example.test/src_binding_release_x86_64-unknown-linux-musl.rs",
],
)
http_file(
name = "rusty_v8_145_0_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
urls = [
"https://example.test/old.gz",
],
)
"""
)
checksums = {
"librusty_v8_release_x86_64-unknown-linux-gnu.a.gz": (
"1111111111111111111111111111111111111111111111111111111111111111"
),
"src_binding_release_x86_64-unknown-linux-musl.rs": (
"2222222222222222222222222222222222222222222222222222222222222222"
),
}
updated = rusty_v8_module_bazel.update_module_bazel_text(
module_bazel,
checksums,
"146.4.0",
)
self.assertEqual(
textwrap.dedent(
"""\
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "1111111111111111111111111111111111111111111111111111111111111111",
urls = [
"https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",
sha256 = "2222222222222222222222222222222222222222222222222222222222222222",
urls = [
"https://example.test/src_binding_release_x86_64-unknown-linux-musl.rs",
],
)
http_file(
name = "rusty_v8_145_0_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
urls = [
"https://example.test/old.gz",
],
)
"""
),
updated,
)
rusty_v8_module_bazel.check_module_bazel_text(updated, checksums, "146.4.0")
def test_check_module_bazel_rejects_manifest_drift(self) -> None:
module_bazel = textwrap.dedent(
"""\
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "1111111111111111111111111111111111111111111111111111111111111111",
urls = [
"https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
],
)
"""
)
checksums = {
"librusty_v8_release_x86_64-unknown-linux-gnu.a.gz": (
"1111111111111111111111111111111111111111111111111111111111111111"
),
"orphan.gz": (
"2222222222222222222222222222222222222222222222222222222222222222"
),
}
with self.assertRaisesRegex(
rusty_v8_module_bazel.RustyV8ChecksumError,
"manifest has orphan.gz",
):
rusty_v8_module_bazel.check_module_bazel_text(
module_bazel,
checksums,
"146.4.0",
)
if __name__ == "__main__":
unittest.main()

View File

@@ -25,10 +25,7 @@ TOP_LEVEL_NAME_EXCEPTIONS = {
UTILITY_NAME_EXCEPTIONS = {
"path-utils": "codex-utils-path",
}
MANIFEST_FEATURE_EXCEPTIONS = {
"codex-rs/code-mode/Cargo.toml": {"sandbox": ("v8/v8_enable_sandbox",)},
"codex-rs/v8-poc/Cargo.toml": {"sandbox": ("v8/v8_enable_sandbox",)},
}
MANIFEST_FEATURE_EXCEPTIONS = {}
OPTIONAL_DEPENDENCY_EXCEPTIONS = set()
INTERNAL_DEPENDENCY_FEATURE_EXCEPTIONS = {}

View File

@@ -1,89 +0,0 @@
#!/usr/bin/env python3
"""Verify codex-tui does not depend on or import codex-core directly."""
from __future__ import annotations
import re
import sys
import tomllib
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
TUI_ROOT = ROOT / "codex-rs" / "tui"
TUI_MANIFEST = TUI_ROOT / "Cargo.toml"
FORBIDDEN_PACKAGE = "codex-core"
FORBIDDEN_SOURCE_PATTERNS = (
re.compile(r"\bcodex_core::"),
re.compile(r"\buse\s+codex_core\b"),
re.compile(r"\bextern\s+crate\s+codex_core\b"),
)
def main() -> int:
failures = []
failures.extend(manifest_failures())
failures.extend(source_failures())
if not failures:
return 0
print("codex-tui must not depend on or import codex-core directly.")
print(
"Use the app-server protocol/client boundary instead; temporary embedded "
"startup gaps belong behind codex_app_server_client::legacy_core."
)
print()
for failure in failures:
print(f"- {failure}")
return 1
def manifest_failures() -> list[str]:
manifest = tomllib.loads(TUI_MANIFEST.read_text())
failures = []
for section_name, dependencies in dependency_sections(manifest):
if FORBIDDEN_PACKAGE in dependencies:
failures.append(
f"{relative_path(TUI_MANIFEST)} declares `{FORBIDDEN_PACKAGE}` "
f"in `[{section_name}]`"
)
return failures
def dependency_sections(manifest: dict) -> list[tuple[str, dict]]:
sections: list[tuple[str, dict]] = []
for section_name in ("dependencies", "dev-dependencies", "build-dependencies"):
dependencies = manifest.get(section_name)
if isinstance(dependencies, dict):
sections.append((section_name, dependencies))
for target_name, target in manifest.get("target", {}).items():
if not isinstance(target, dict):
continue
for section_name in ("dependencies", "dev-dependencies", "build-dependencies"):
dependencies = target.get(section_name)
if isinstance(dependencies, dict):
sections.append((f"target.{target_name}.{section_name}", dependencies))
return sections
def source_failures() -> list[str]:
failures = []
for path in sorted(TUI_ROOT.glob("**/*.rs")):
text = path.read_text()
for line_number, line in enumerate(text.splitlines(), start=1):
if any(pattern.search(line) for pattern in FORBIDDEN_SOURCE_PATTERNS):
failures.append(f"{relative_path(path)}:{line_number} imports `codex_core`")
return failures
def relative_path(path: Path) -> str:
return str(path.relative_to(ROOT))
if __name__ == "__main__":
sys.exit(main())
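dependency_sections deliberately walks target-specific dependency tables as well, so a platform-gated dependency cannot dodge the check. A hedged sketch of a manifest shape it would flag (the cfg key and path are chosen for illustration):

import tomllib

manifest = tomllib.loads(
    "[target.'cfg(windows)'.dependencies]\n"
    'codex-core = { path = "../core" }\n'
)
# dependency_sections(manifest) as defined above yields
# [("target.cfg(windows).dependencies", {"codex-core": {"path": "../core"}})],
# so manifest_failures() would report the forbidden `codex-core` dependency.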

View File

@@ -8,9 +8,25 @@ FROM ubuntu:24.04
RUN apt-get update && \
apt-get install -y --no-install-recommends \
curl git python3 ca-certificates && \
curl git python3 ca-certificates xz-utils && \
rm -rf /var/lib/apt/lists/*
COPY codex-rs/node-version.txt /tmp/node-version.txt
RUN set -eux; \
node_arch="$(dpkg --print-architecture)"; \
case "${node_arch}" in \
amd64) node_dist_arch="x64" ;; \
arm64) node_dist_arch="arm64" ;; \
*) echo "unsupported architecture: ${node_arch}"; exit 1 ;; \
esac; \
node_version="$(tr -d '[:space:]' </tmp/node-version.txt)"; \
curl -fsSLO "https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-${node_dist_arch}.tar.xz"; \
tar -xJf "node-v${node_version}-linux-${node_dist_arch}.tar.xz" -C /usr/local --strip-components=1; \
rm "node-v${node_version}-linux-${node_dist_arch}.tar.xz" /tmp/node-version.txt; \
node --version; \
npm --version
# Install dotslash.
RUN curl -LSfs "https://github.com/facebook/dotslash/releases/download/v0.5.8/dotslash-ubuntu-22.04.$(uname -m).tar.gz" | tar fxz - -C /usr/local/bin
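The node install RUN block pins Node to the version recorded in codex-rs/node-version.txt and maps Debian architecture names onto Node's dist names. The same mapping in Python, as a sketch with the URL scheme taken from the Dockerfile above:

NODE_DIST_ARCH = {"amd64": "x64", "arm64": "arm64"}

def node_tarball_url(node_version: str, dpkg_arch: str) -> str:
    try:
        dist_arch = NODE_DIST_ARCH[dpkg_arch]
    except KeyError:
        raise SystemExit(f"unsupported architecture: {dpkg_arch}")
    return (
        f"https://nodejs.org/dist/v{node_version}/"
        f"node-v{node_version}-linux-{dist_arch}.tar.xz"
    )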

View File

@@ -5,15 +5,15 @@ The workflows in this directory are split so that pull requests get fast, review
## Pull Requests
- `bazel.yml` is the main pre-merge verification path for Rust code.
It runs Bazel `test` and Bazel `clippy` on the supported Bazel targets,
including the generated Rust test binaries needed to lint inline `#[cfg(test)]`
code.
It runs Bazel `test` and Bazel `clippy` on the supported Bazel targets.
- `rust-ci.yml` keeps the Cargo-native PR checks intentionally small:
- `cargo fmt --check`
- `cargo shear`
- `argument-comment-lint` on Linux, macOS, and Windows
- `tools/argument-comment-lint` package tests when the lint or its workflow wiring changes
The PR workflow still keeps the Linux lint lane on the default-targets-only invocation for now, but the released linter runs on Linux, macOS, and Windows before merge.
## Post-Merge On `main`
- `bazel.yml` also runs on pushes to `main`.

View File

@@ -17,11 +17,7 @@ concurrency:
cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
test:
# PRs use a fast Windows cross-compiled test leg for pre-merge signal.
# Post-merge pushes to main also run the native Windows test job below for
# broader Windows signal without putting PR latency back on the critical
# path. Cargo CI owns V8/code-mode test coverage for now.
timeout-minutes: 30
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
@@ -44,34 +40,24 @@ jobs:
# - os: ubuntu-24.04-arm
# target: aarch64-unknown-linux-gnu
# Windows fast path: build the windows-gnullvm binaries with Linux
# RBE, then run the resulting Windows tests on the Windows runner.
# Cargo CI preserves V8/code-mode coverage while Bazel CI keeps broad
# non-code-mode signal.
# Windows
- os: windows-latest
target: x86_64-pc-windows-gnullvm
runs-on: ${{ matrix.os }}
# Configure a human readable name for each job
name: Bazel test on ${{ matrix.os }} for ${{ matrix.target }}
name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Check rusty_v8 MODULE.bazel checksums
if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu'
shell: bash
run: |
python3 .github/scripts/rusty_v8_bazel.py check-module-bazel
python3 -m unittest discover -s .github/scripts -p test_rusty_v8_bazel.py
- name: Prepare Bazel CI
id: prepare_bazel
uses: ./.github/actions/prepare-bazel-ci
- name: Set up Bazel CI
id: setup_bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ matrix.target }}
cache-scope: bazel-${{ github.job }}
install-test-prereqs: "true"
- name: Check MODULE.bazel.lock is up to date
if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu'
shell: bash
@@ -88,131 +74,32 @@ jobs:
# path. V8 consumers under `//codex-rs/...` still participate
# transitively through `//...`.
-//third_party/v8:all
# V8-backed code-mode tests are covered by Cargo CI. Bazel CI
# cross-compiles in several legs, and those tests are not stable in
# that setup yet.
-//codex-rs/code-mode:code-mode-unit-tests
-//codex-rs/v8-poc:v8-poc-unit-tests
)
bazel_wrapper_args=(
--print-failed-action-summary
--print-failed-test-logs
)
bazel_test_args=(
test
--test_tag_filters=-argument-comment-lint
--test_verbose_timeout_warnings
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
)
if [[ "${RUNNER_OS}" == "Windows" ]]; then
bazel_wrapper_args+=(
--windows-cross-compile
--remote-download-toplevel
)
fi
./.github/scripts/run-bazel-ci.sh \
"${bazel_wrapper_args[@]}" \
-- \
"${bazel_test_args[@]}" \
-- \
"${bazel_targets[@]}"
- name: Upload Bazel execution logs
if: always() && !cancelled()
continue-on-error: true
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: bazel-execution-logs-test-${{ matrix.target }}
path: ${{ runner.temp }}/bazel-execution-logs
if-no-files-found: ignore
# Save the job-scoped Bazel repository cache after cache misses. Keep the
# upload non-fatal so cache service issues never fail the job itself.
- name: Save bazel repository cache
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
with:
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
test-windows-native-main:
# Native Windows Bazel tests are slower and frequently approach the
# 30-minute PR budget. Run this only for post-merge commits to main and give
# it a larger timeout.
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
timeout-minutes: 40
runs-on: windows-latest
name: Bazel test on windows-latest for x86_64-pc-windows-gnullvm (native main)
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Prepare Bazel CI
id: prepare_bazel
uses: ./.github/actions/prepare-bazel-ci
with:
target: x86_64-pc-windows-gnullvm
cache-scope: bazel-${{ github.job }}
install-test-prereqs: "true"
- name: bazel test //...
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
bazel_targets=(
//...
# Keep standalone V8 library targets out of the ordinary Bazel CI
# path. V8 consumers under `//codex-rs/...` still participate
# transitively through `//...`.
-//third_party/v8:all
# Keep this aligned with the main Bazel job. The native Windows
# job preserves broad post-merge coverage, but code-mode/V8 tests
# are covered by Cargo CI rather than Bazel for now.
-//codex-rs/code-mode:code-mode-unit-tests
-//codex-rs/v8-poc:v8-poc-unit-tests
)
bazel_test_args=(
test
--test_tag_filters=-argument-comment-lint
--test_verbose_timeout_warnings
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
--build_metadata=TAG_windows_native_main=true
)
./.github/scripts/run-bazel-ci.sh \
--print-failed-action-summary \
--print-failed-test-logs \
--use-node-test-env \
-- \
"${bazel_test_args[@]}" \
test \
--test_tag_filters=-argument-comment-lint \
--test_verbose_timeout_warnings \
--build_metadata=COMMIT_SHA=${GITHUB_SHA} \
-- \
"${bazel_targets[@]}"
- name: Upload Bazel execution logs
if: always() && !cancelled()
continue-on-error: true
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: bazel-execution-logs-test-windows-native-x86_64-pc-windows-gnullvm
path: ${{ runner.temp }}/bazel-execution-logs
if-no-files-found: ignore
# Save the job-scoped Bazel repository cache after cache misses. Keep the
# upload non-fatal so cache service issues never fail the job itself.
# Save the bazel repository cache explicitly; keep the upload non-fatal so
# cache service issues never fail the overall job. Only save when the key wasn't hit.
- name: Save bazel repository cache
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
if: always() && !cancelled() && steps.setup_bazel.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
with:
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
path: |
~/.cache/bazel-repo-cache
key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}
clippy:
timeout-minutes: 30
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
@@ -233,175 +120,36 @@ jobs:
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Prepare Bazel CI
id: prepare_bazel
uses: ./.github/actions/prepare-bazel-ci
- name: Set up Bazel CI
id: setup_bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ matrix.target }}
cache-scope: bazel-${{ github.job }}
- name: bazel build --config=clippy lint targets
- name: bazel build --config=clippy //codex-rs/...
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
bazel_clippy_args=(
--config=clippy
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
--build_metadata=TAG_job=clippy
)
bazel_wrapper_args=()
bazel_target_list_args=()
if [[ "${RUNNER_OS}" == "Windows" ]]; then
# Keep this aligned with the fast Windows Bazel test job: use
# Linux RBE for clippy build actions while targeting Windows
# gnullvm. Fork/community PRs without the BuildBuddy secret fall
# back inside `run-bazel-ci.sh` to the previous local Windows MSVC
# host-platform shape.
bazel_wrapper_args+=(--windows-cross-compile)
bazel_target_list_args+=(--windows-cross-compile)
if [[ -z "${BUILDBUDDY_API_KEY:-}" ]]; then
# The fork fallback can encounter explicit Windows-cross internal test
# binaries in the generated target list that are incompatible with the
# local host platform. Preserve the old local-fallback behavior there.
bazel_clippy_args+=(--skip_incompatible_explicit_targets)
fi
fi
bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh "${bazel_target_list_args[@]}")"
bazel_targets=()
while IFS= read -r target; do
bazel_targets+=("${target}")
done <<< "${bazel_target_lines}"
# Keep the initial Bazel clippy scope on codex-rs and out of the
# V8 proof-of-concept target for now.
./.github/scripts/run-bazel-ci.sh \
--print-failed-action-summary \
"${bazel_wrapper_args[@]}" \
-- \
build \
"${bazel_clippy_args[@]}" \
-- \
"${bazel_targets[@]}"
- name: Upload Bazel execution logs
if: always() && !cancelled()
continue-on-error: true
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: bazel-execution-logs-clippy-${{ matrix.target }}
path: ${{ runner.temp }}/bazel-execution-logs
if-no-files-found: ignore
# Save the job-scoped Bazel repository cache after cache misses. Keep the
# upload non-fatal so cache service issues never fail the job itself.
- name: Save bazel repository cache
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
with:
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
verify-release-build:
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-24.04
target: x86_64-unknown-linux-gnu
- os: macos-15-xlarge
target: aarch64-apple-darwin
- os: windows-latest
target: x86_64-pc-windows-gnullvm
runs-on: ${{ matrix.os }}
name: Verify release build on ${{ matrix.os }} for ${{ matrix.target }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Prepare Bazel CI
id: prepare_bazel
uses: ./.github/actions/prepare-bazel-ci
with:
target: ${{ matrix.target }}
cache-scope: bazel-${{ github.job }}
- name: bazel build verify-release-build targets
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
# This job exists to compile Rust code behind
# `cfg(not(debug_assertions))` so PR CI catches failures that would
# otherwise show up only in a release build. We do not need the full
# optimizer and debug-info work that normally comes with a release
# build to get that signal, so keep Bazel in `fastbuild` and disable
# Rust debug assertions explicitly.
bazel_wrapper_args=()
if [[ "${RUNNER_OS}" == "Windows" ]]; then
# This is build-only signal, so use the same Linux-RBE
# cross-compile path as the fast Windows test and clippy jobs.
# Fork/community PRs without the BuildBuddy secret fall back
# inside `run-bazel-ci.sh` to the previous local Windows MSVC
# host-platform shape.
bazel_wrapper_args+=(--windows-cross-compile)
fi
bazel_build_args=(
--compilation_mode=fastbuild
--@rules_rust//rust/settings:extra_rustc_flag=-Cdebug-assertions=no
--@rules_rust//rust/settings:extra_exec_rustc_flag=-Cdebug-assertions=no
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
--build_metadata=TAG_job=verify-release-build
--build_metadata=TAG_rust_debug_assertions=off
)
bazel_target_lines="$(bash ./scripts/list-bazel-release-targets.sh)"
bazel_targets=()
while IFS= read -r target; do
bazel_targets+=("${target}")
done <<< "${bazel_target_lines}"
./.github/scripts/run-bazel-ci.sh \
"${bazel_wrapper_args[@]}" \
-- \
build \
"${bazel_build_args[@]}" \
-- \
"${bazel_targets[@]}"
- name: Verify Bazel builds bwrap
if: runner.os == 'Linux'
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
./.github/scripts/run-bazel-ci.sh \
--remote-download-toplevel \
--print-failed-action-summary \
-- \
build \
--config=clippy \
--build_metadata=COMMIT_SHA=${GITHUB_SHA} \
--build_metadata=TAG_job=verify-bwrap \
--build_metadata=TAG_job=clippy \
-- \
//codex-rs/bwrap:bwrap
//codex-rs/... \
-//codex-rs/v8-poc:all
- name: Upload Bazel execution logs
if: always() && !cancelled()
continue-on-error: true
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: bazel-execution-logs-verify-release-build-${{ matrix.target }}
path: ${{ runner.temp }}/bazel-execution-logs
if-no-files-found: ignore
# Save the job-scoped Bazel repository cache after cache misses. Keep the
# upload non-fatal so cache service issues never fail the job itself.
# Save the bazel repository cache explicitly; keep the upload non-fatal so
# cache service issues never fail the overall job. Only save when the key wasn't hit.
- name: Save bazel repository cache
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
if: always() && !cancelled() && steps.setup_bazel.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
with:
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
path: |
~/.cache/bazel-repo-cache
key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}

View File

@@ -17,10 +17,10 @@ jobs:
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
- name: Run cargo-deny
uses: EmbarkStudios/cargo-deny-action@82eb9f621fbc699dd0918f3ea06864c14cc84246 # v2
with:
rust-version: 1.93.0
rust-version: stable
manifest-path: ./codex-rs/Cargo.toml

View File

@@ -17,9 +17,6 @@ jobs:
- name: Verify codex-rs Cargo manifests inherit workspace settings
run: python3 .github/scripts/verify_cargo_workspace_manifests.py
- name: Verify codex-tui does not import codex-core directly
run: python3 .github/scripts/verify_tui_core_boundary.py
- name: Verify Bazel clippy flags match Cargo workspace lints
run: python3 .github/scripts/verify_bazel_clippy_lints.py
@@ -45,19 +42,12 @@ jobs:
GH_TOKEN: ${{ github.token }}
run: |
set -euo pipefail
# Use a recent successful rust-release run that published the full
# cross-platform native payload required by the npm package layout.
# Passing the workflow URL directly avoids relying on old rust-v*
# branches remaining discoverable via `gh run list --branch ...`.
CODEX_VERSION=0.125.0
WORKFLOW_URL="https://github.com/openai/codex/actions/runs/24901475298"
# Use a rust-release version that includes all native binaries.
CODEX_VERSION=0.115.0
OUTPUT_DIR="${RUNNER_TEMP}"
# This reused workflow predates the standalone bwrap artifact.
python3 ./scripts/stage_npm_packages.py \
--release-version "$CODEX_VERSION" \
--workflow-url "$WORKFLOW_URL" \
--package codex \
--allow-missing-native-component bwrap \
--output-dir "$OUTPUT_DIR"
PACK_OUTPUT="${OUTPUT_DIR}/codex-npm-${CODEX_VERSION}.tgz"
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
@@ -73,5 +63,10 @@ jobs:
- name: Check root README ToC
run: python3 scripts/readme_toc.py README.md
- name: Ensure codex-cli/README.md contains only ASCII and certain Unicode code points
run: ./scripts/asciicheck.py codex-cli/README.md
- name: Check codex-cli/README ToC
run: python3 scripts/readme_toc.py codex-cli/README.md
- name: Prettier (run `pnpm run format:fix` to fix)
run: pnpm run format

View File

@@ -61,7 +61,7 @@ jobs:
# .github/prompts/issue-deduplicator.txt file is obsolete and removed.
- id: codex-all
name: Find duplicates (pass 1, all issues)
uses: openai/codex-action@5c3f4ccdb2b8790f73d6b21751ac00e602aa0c02 # v1.7
uses: openai/codex-action@0b91f4a2703c23df3102c3f0967d3c6db34eedef # v1
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
@@ -195,7 +195,7 @@ jobs:
- id: codex-open
name: Find duplicates (pass 2, open issues)
uses: openai/codex-action@5c3f4ccdb2b8790f73d6b21751ac00e602aa0c02 # v1.7
uses: openai/codex-action@0b91f4a2703c23df3102c3f0967d3c6db34eedef # v1
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"

View File

@@ -20,7 +20,7 @@ jobs:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- id: codex
uses: openai/codex-action@5c3f4ccdb2b8790f73d6b21751ac00e602aa0c02 # v1.7
uses: openai/codex-action@0b91f4a2703c23df3102c3f0967d3c6db34eedef # v1
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
@@ -44,7 +44,6 @@ jobs:
6. iOS — Issues with the Codex iOS app.
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
- For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior. Use "memory" for agentic memory storage/retrieval and "performance" for high process memory utilization or memory leaks.
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
2. mcp — Topics involving Model Context Protocol servers/clients.
3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
@@ -53,30 +52,13 @@ jobs:
6. code-review — Issues related to the code review feature or functionality.
7. safety-check - Issues related to cyber risk detection or trusted access verification.
8. auth - Problems related to authentication, login, or access tokens.
9. exec - Problems related to the "codex exec" command or functionality.
10. hooks - Problems related to event hooks
11. context - Problems related to compaction, context windows, or available context reporting.
12. skills - Problems related to skills or plugins
13. custom-model - Problems that involve using custom model providers, local models, or OSS models.
14. rate-limits - Problems related to token limits, rate limits, or token usage reporting.
15. sandbox - Issues related to local sandbox environments or tool call approvals to override sandbox restrictions.
16. tool-calls - Problems related to specific tool call invocations including unexpected errors, failures, or hangs.
17. TUI - Problems with the terminal user interface (TUI) including keyboard shortcuts, copy & pasting, menus, or screen update issues.
18. app-server - Issues involving the app-server protocol or interfaces, including SDK/API payloads, thread/* and turn/* RPCs, app-server launch behavior, external app/controller bridges, and app-server protocol/schema behavior.
19. connectivity - Network connectivity or endpoint issues, including reconnecting messages, stream dropped/disconnected errors, websocket/SSE/transport failures, timeout/network/VPN/proxy/API endpoint failures, and related retry behavior.
20. subagent - Issues involving subagents, sub-agents, or multi-agent behavior, including spawn_agent, wait_agent, close_agent, worker/explorer roles, delegation, agent teams, lifecycle, model/config inheritance, quotas, and orchestration.
21. session - Issues involving session or thread management, including resume, fork, archive, rename/title, thread history, rollout persistence, compaction, checkpoints, retention, and cross-session state.
22. config - Issues involving config.toml, config keys, config key merging, config updates, profiles, hooks config, project config, agent role TOMLs, instruction/personality config, and config schema behavior.
23. plan - Issues involving plan mode, planning workflows, or plan-specific tools/behavior.
24. computer-use - Issues involving agentic computer use or SkyComputerUseService.
25. browser - Issues involving agentic browser use, IAB, or the built-in browser within the Codex app.
26. memory - Issues involving agentic memory storage and retrieval.
27. imagen - Issues involving image generation.
28. remote - Issues involving remote access, remote control, or SSH.
29. performance - Issues involving slow, laggy performance, high memory utilization, or memory leaks.
30. automations - Issues involving scheduled automation tasks or heartbeats.
31. pets - Issues involving pet avatars and animations.
32. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, plan, computer-use, browser, memory, imagen, remote, performance, automations, or pets.
9. codex-exec - Problems related to the "codex exec" command or functionality.
10. context-management - Problems related to compaction, context windows, or available context reporting.
11. custom-model - Problems that involve using custom model providers, local models, or OSS models.
12. rate-limits - Problems related to token limits, rate limits, or token usage reporting.
13. sandbox - Issues related to local sandbox environments or tool call approvals to override sandbox restrictions.
14. tool-calls - Problems related to specific tool call invocations including unexpected errors, failures, or hangs.
15. TUI - Problems with the terminal user interface (TUI) including keyboard shortcuts, copy & pasting, menus, or screen update issues.
Issue number: ${{ github.event.issue.number }}

View File

@@ -3,7 +3,6 @@ on:
push:
branches:
- main
- "**full-ci**"
workflow_dispatch:
# CI builds in debug (dev) for faster signal.
@@ -43,9 +42,6 @@ jobs:
argument_comment_lint_package:
name: Argument comment lint package
runs-on: ubuntu-24.04
env:
CARGO_DYLINT_VERSION: 5.0.0
DYLINT_LINK_VERSION: 5.0.0
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
@@ -62,13 +58,10 @@ jobs:
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: argument-comment-lint-${{ runner.os }}-${{ env.CARGO_DYLINT_VERSION }}-${{ env.DYLINT_LINK_VERSION }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
key: argument-comment-lint-${{ runner.os }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
- name: Install cargo-dylint tooling
if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION"
cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION"
run: cargo install --locked cargo-dylint dylint-link
- name: Check Python wrapper syntax
run: python3 -m py_compile tools/argument-comment-lint/wrapper_common.py tools/argument-comment-lint/run.py tools/argument-comment-lint/run-prebuilt-linter.py tools/argument-comment-lint/test_wrapper_common.py
- name: Test Python wrapper helpers
@@ -76,8 +69,6 @@ jobs:
- name: Test argument comment lint package
working-directory: tools/argument-comment-lint
run: cargo test
env:
RUST_MIN_STACK: "8388608" # 8 MiB
argument_comment_lint_prebuilt:
name: Argument comment lint - ${{ matrix.name }}
@@ -423,10 +414,22 @@ jobs:
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
name: Configure musl rusty_v8 artifact overrides and verify checksums
uses: ./.github/actions/setup-rusty-v8-musl
with:
target: ${{ matrix.target }}
name: Configure musl rusty_v8 artifact overrides
env:
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)"
release_tag="rusty-v8-v${version}"
base_url="https://github.com/openai/codex/releases/download/${release_tag}"
archive="https://github.com/openai/codex/releases/download/rusty-v8-v${version}/librusty_v8_release_${TARGET}.a.gz"
binding_dir="${RUNNER_TEMP}/rusty_v8"
binding_path="${binding_dir}/src_binding_release_${TARGET}.rs"
mkdir -p "${binding_dir}"
curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}"
echo "RUSTY_V8_ARCHIVE=${archive}" >> "$GITHUB_ENV"
echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "$GITHUB_ENV"
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
@@ -560,6 +563,10 @@ jobs:
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Node.js for js_repl tests
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6
with:
node-version-file: codex-rs/node-version.txt
- name: Install Linux build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
@@ -662,14 +669,12 @@ jobs:
export CODEX_TEST_REMOTE_ENV_CONTAINER_NAME=codex-remote-test-env
source "${GITHUB_WORKSPACE}/scripts/test-remote-env.sh"
echo "CODEX_TEST_REMOTE_ENV=${CODEX_TEST_REMOTE_ENV}" >> "$GITHUB_ENV"
echo "CODEX_TEST_REMOTE_EXEC_SERVER_URL=${CODEX_TEST_REMOTE_EXEC_SERVER_URL}" >> "$GITHUB_ENV"
- name: tests
id: test
run: cargo nextest run --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test --timings
env:
RUST_BACKTRACE: 1
RUST_MIN_STACK: "8388608" # 8 MiB
NEXTEST_STATUS_LEVEL: leak
- name: Upload Cargo timings (nextest)

View File

@@ -41,7 +41,6 @@ jobs:
for f in "${files[@]}"; do
[[ $f == codex-rs/* ]] && codex=true
[[ $f == codex-rs/* || $f == tools/argument-comment-lint/* || $f == justfile ]] && argument_comment_lint=true
[[ $f == defs.bzl || $f == workspace_root_test_launcher.sh.tpl || $f == workspace_root_test_launcher.bat.tpl ]] && argument_comment_lint=true
[[ $f == tools/argument-comment-lint/* || $f == .github/workflows/rust-ci.yml || $f == .github/workflows/rust-ci-full.yml ]] && argument_comment_lint_package=true
[[ $f == .github/* ]] && workflows=true
done
@@ -82,7 +81,6 @@ jobs:
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-shear
version: 1.5.1
- name: cargo shear
run: cargo shear
@@ -91,9 +89,6 @@ jobs:
runs-on: ubuntu-24.04
needs: changed
if: ${{ needs.changed.outputs.argument_comment_lint_package == 'true' }}
env:
CARGO_DYLINT_VERSION: 5.0.0
DYLINT_LINK_VERSION: 5.0.0
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
@@ -117,13 +112,10 @@ jobs:
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: argument-comment-lint-${{ runner.os }}-${{ env.CARGO_DYLINT_VERSION }}-${{ env.DYLINT_LINK_VERSION }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
key: argument-comment-lint-${{ runner.os }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
- name: Install cargo-dylint tooling
if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION"
cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION"
run: cargo install --locked cargo-dylint dylint-link
- name: Check Python wrapper syntax
run: python3 -m py_compile tools/argument-comment-lint/wrapper_common.py tools/argument-comment-lint/run.py tools/argument-comment-lint/run-prebuilt-linter.py tools/argument-comment-lint/test_wrapper_common.py
- name: Test Python wrapper helpers
@@ -131,24 +123,23 @@ jobs:
- name: Test argument comment lint package
working-directory: tools/argument-comment-lint
run: cargo test
env:
RUST_MIN_STACK: "8388608" # 8 MiB
argument_comment_lint_prebuilt:
name: Argument comment lint - ${{ matrix.name }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: ${{ matrix.timeout_minutes }}
needs: changed
if: ${{ needs.changed.outputs.argument_comment_lint == 'true' || needs.changed.outputs.workflows == 'true' }}
strategy:
fail-fast: false
matrix:
include:
- name: Linux
runner: ubuntu-24.04
timeout_minutes: 30
timeout_minutes: 120
- name: macOS
runner: macos-15-xlarge
timeout_minutes: 30
timeout_minutes: 90
- name: Windows
runner: windows-x64
timeout_minutes: 30
@@ -156,28 +147,43 @@ jobs:
group: codex-runners
labels: codex-windows-x64
steps:
- name: Check whether argument comment lint should run
id: argument_comment_lint_gate
shell: bash
env:
ARGUMENT_COMMENT_LINT: ${{ needs.changed.outputs.argument_comment_lint }}
WORKFLOWS: ${{ needs.changed.outputs.workflows }}
run: |
if [[ "$ARGUMENT_COMMENT_LINT" == "true" || "$WORKFLOWS" == "true" ]]; then
echo "run=true" >> "$GITHUB_OUTPUT"
exit 0
fi
echo "No argument-comment-lint relevant changes."
echo "run=false" >> "$GITHUB_OUTPUT"
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
if: ${{ steps.argument_comment_lint_gate.outputs.run == 'true' }}
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ steps.argument_comment_lint_gate.outputs.run == 'true' }}
uses: ./.github/actions/run-argument-comment-lint
- uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ runner.os }}
buildbuddy-api-key: ${{ secrets.BUILDBUDDY_API_KEY }}
install-test-prereqs: true
- name: Install Linux sandbox build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
sudo DEBIAN_FRONTEND=noninteractive apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os != 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
bazel_targets="$(./tools/argument-comment-lint/list-bazel-targets.sh)"
./.github/scripts/run-bazel-ci.sh \
-- \
build \
--config=argument-comment-lint \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA} \
-- \
${bazel_targets}
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os == 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
./.github/scripts/run-argument-comment-lint-bazel.sh \
--config=argument-comment-lint \
--platforms=//:local_windows \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
# --- Gatherer job that you mark as the ONLY required status -----------------
results:

View File

@@ -19,9 +19,6 @@ jobs:
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: 60
env:
CARGO_DYLINT_VERSION: 5.0.0
DYLINT_LINK_VERSION: 5.0.0
strategy:
fail-fast: false
@@ -68,8 +65,8 @@ jobs:
shell: bash
run: |
install_root="${RUNNER_TEMP}/argument-comment-lint-tools"
cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION" --root "$install_root"
cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION"
cargo install --locked cargo-dylint --root "$install_root"
cargo install --locked dylint-link
echo "INSTALL_ROOT=$install_root" >> "$GITHUB_ENV"
- name: Cargo build

View File

@@ -40,7 +40,7 @@ jobs:
)
url="${base_url%/}/models?client_version=${client_version}"
curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/models-manager/models.json
curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/core/models.json
- name: Open pull request (if changed)
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8

View File

@@ -24,9 +24,7 @@ jobs:
build-windows-binaries:
name: Build Windows binaries - ${{ matrix.runner }} - ${{ matrix.target }} - ${{ matrix.bundle }}
runs-on: ${{ matrix.runs_on }}
# Windows release builds can exceed an hour on fat-LTO mainline releases,
# so keep the timeout aligned with the top-level release build headroom.
timeout-minutes: 90
timeout-minutes: 60
permissions:
contents: read
defaults:
@@ -42,42 +40,28 @@ jobs:
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: primary
binaries: "codex codex-responses-api-proxy"
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: primary
binaries: "codex codex-responses-api-proxy"
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: helpers
binaries: "codex-windows-sandbox-setup codex-command-runner"
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: helpers
binaries: "codex-windows-sandbox-setup codex-command-runner"
runs_on:
group: codex-runners
labels: codex-windows-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: app-server
binaries: "codex-app-server"
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: app-server
binaries: "codex-app-server"
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-arm64
@@ -105,11 +89,7 @@ jobs:
- name: Cargo build (Windows binaries)
shell: bash
run: |
build_args=()
for binary in ${{ matrix.binaries }}; do
build_args+=(--bin "$binary")
done
cargo build --target ${{ matrix.target }} --release --timings "${build_args[@]}"
cargo build --target ${{ matrix.target }} --release --timings ${{ matrix.build_args }}
- name: Upload Cargo timings
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
@@ -123,9 +103,13 @@ jobs:
run: |
output_dir="target/${{ matrix.target }}/release/staged-${{ matrix.bundle }}"
mkdir -p "$output_dir"
for binary in ${{ matrix.binaries }}; do
cp "target/${{ matrix.target }}/release/${binary}.exe" "$output_dir/${binary}.exe"
done
if [[ "${{ matrix.bundle }}" == "primary" ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$output_dir/codex.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$output_dir/codex-responses-api-proxy.exe"
else
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$output_dir/codex-windows-sandbox-setup.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$output_dir/codex-command-runner.exe"
fi
- name: Upload Windows binaries
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
@@ -139,15 +123,13 @@ jobs:
- build-windows-binaries
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on }}
timeout-minutes: 90
timeout-minutes: 60
permissions:
contents: read
id-token: write
defaults:
run:
working-directory: codex-rs
env:
WINDOWS_BINARIES: "codex codex-responses-api-proxy codex-windows-sandbox-setup codex-command-runner codex-app-server"
strategy:
fail-fast: false
@@ -179,25 +161,19 @@ jobs:
name: windows-binaries-${{ matrix.target }}-helpers
path: codex-rs/target/${{ matrix.target }}/release
- name: Download prebuilt Windows app-server binary
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8
with:
name: windows-binaries-${{ matrix.target }}-app-server
path: codex-rs/target/${{ matrix.target }}/release
- name: Verify binaries
shell: bash
run: |
set -euo pipefail
for binary in ${WINDOWS_BINARIES}; do
ls -lh "target/${{ matrix.target }}/release/${binary}.exe"
done
ls -lh target/${{ matrix.target }}/release/codex.exe
ls -lh target/${{ matrix.target }}/release/codex-responses-api-proxy.exe
ls -lh target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe
ls -lh target/${{ matrix.target }}/release/codex-command-runner.exe
- name: Sign Windows binaries with Azure Trusted Signing
uses: ./.github/actions/windows-code-sign
with:
target: ${{ matrix.target }}
binaries: ${{ env.WINDOWS_BINARIES }}
client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID }}
@@ -211,10 +187,10 @@ jobs:
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
for binary in ${WINDOWS_BINARIES}; do
cp "target/${{ matrix.target }}/release/${binary}.exe" \
"$dest/${binary}-${{ matrix.target }}.exe"
done
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$dest/codex-command-runner-${{ matrix.target }}.exe"
- name: Install DotSlash
uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: dtolnay/rust-toolchain@c2b55edffaf41a251c410bb32bed22afefa800f1 # 1.92
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
@@ -47,11 +47,9 @@ jobs:
build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }} - ${{ matrix.bundle }}
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
# Release builds can take a long time, so leave some headroom to avoid
# having to restart the full workflow due to a timeout.
timeout-minutes: 90
timeout-minutes: 60
permissions:
contents: read
id-token: write
@@ -69,53 +67,16 @@ jobs:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
bundle: primary
artifact_name: aarch64-apple-darwin
binaries: "codex codex-responses-api-proxy"
build_dmg: "true"
- runner: macos-15-xlarge
target: aarch64-apple-darwin
bundle: app-server
artifact_name: aarch64-apple-darwin-app-server
binaries: "codex-app-server"
build_dmg: "false"
- runner: macos-15-xlarge
target: x86_64-apple-darwin
bundle: primary
artifact_name: x86_64-apple-darwin
binaries: "codex codex-responses-api-proxy"
build_dmg: "true"
- runner: macos-15-xlarge
target: x86_64-apple-darwin
bundle: app-server
artifact_name: x86_64-apple-darwin-app-server
binaries: "codex-app-server"
build_dmg: "false"
# Release artifacts intentionally ship MUSL-linked Linux binaries.
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
bundle: primary
artifact_name: x86_64-unknown-linux-musl
binaries: "codex codex-responses-api-proxy bwrap"
build_dmg: "false"
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
bundle: app-server
artifact_name: x86_64-unknown-linux-musl-app-server
binaries: "codex-app-server"
build_dmg: "false"
target: x86_64-unknown-linux-gnu
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
bundle: primary
artifact_name: aarch64-unknown-linux-musl
binaries: "codex codex-responses-api-proxy bwrap"
build_dmg: "false"
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
bundle: app-server
artifact_name: aarch64-unknown-linux-musl-app-server
binaries: "codex-app-server"
build_dmg: "false"
target: aarch64-unknown-linux-gnu
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
@@ -250,43 +211,33 @@ jobs:
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
name: Configure musl rusty_v8 artifact overrides and verify checksums
uses: ./.github/actions/setup-rusty-v8-musl
with:
target: ${{ matrix.target }}
- if: ${{ contains(matrix.target, 'linux') && matrix.bundle == 'primary' }}
name: Build bwrap and export digest
name: Configure musl rusty_v8 artifact overrides
env:
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
target="${{ matrix.target }}"
cargo build --target "$target" --release --timings --bin bwrap
bwrap_path="target/${target}/release/bwrap"
if [[ ! -f "$bwrap_path" ]]; then
echo "bwrap binary ${bwrap_path} not found"
exit 1
fi
digest="$(sha256sum "$bwrap_path" | awk '{print $1}')"
echo "CODEX_BWRAP_SHA256=${digest}" >> "$GITHUB_ENV"
echo "Built bwrap ${bwrap_path} with sha256:${digest}"
version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)"
release_tag="rusty-v8-v${version}"
base_url="https://github.com/openai/codex/releases/download/${release_tag}"
archive="https://github.com/openai/codex/releases/download/rusty-v8-v${version}/librusty_v8_release_${TARGET}.a.gz"
binding_dir="${RUNNER_TEMP}/rusty_v8"
binding_path="${binding_dir}/src_binding_release_${TARGET}.rs"
mkdir -p "${binding_dir}"
curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}"
echo "RUSTY_V8_ARCHIVE=${archive}" >> "$GITHUB_ENV"
echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "$GITHUB_ENV"
- name: Cargo build
shell: bash
run: |
build_args=()
for binary in ${{ matrix.binaries }}; do
build_args+=(--bin "$binary")
done
echo "CARGO_PROFILE_RELEASE_LTO: ${CARGO_PROFILE_RELEASE_LTO}"
cargo build --target ${{ matrix.target }} --release --timings "${build_args[@]}"
cargo build --target ${{ matrix.target }} --release --timings --bin codex --bin codex-responses-api-proxy
- name: Upload Cargo timings
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: cargo-timings-rust-release-${{ matrix.target }}-${{ matrix.bundle }}
name: cargo-timings-rust-release-${{ matrix.target }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
@@ -296,14 +247,12 @@ jobs:
with:
target: ${{ matrix.target }}
artifacts-dir: ${{ github.workspace }}/codex-rs/target/${{ matrix.target }}/release
binaries: ${{ matrix.binaries }}
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (binaries)
uses: ./.github/actions/macos-code-sign
with:
target: ${{ matrix.target }}
binaries: ${{ matrix.binaries }}
sign-binaries: "true"
sign-dmg: "false"
apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
@@ -312,7 +261,7 @@ jobs:
apple-notarization-key-id: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
apple-notarization-issuer-id: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
- if: ${{ runner.os == 'macOS' && matrix.build_dmg == 'true' }}
- if: ${{ runner.os == 'macOS' }}
name: Build macOS dmg
shell: bash
run: |
@@ -327,17 +276,23 @@ jobs:
# The previous "MacOS code signing (binaries)" step signs + notarizes the
# built artifacts in `${release_dir}`. This step packages *those same*
# signed binaries into a dmg.
codex_binary_path="${release_dir}/codex"
proxy_binary_path="${release_dir}/codex-responses-api-proxy"
rm -rf "$dmg_root"
mkdir -p "$dmg_root"
for binary in ${{ matrix.binaries }}; do
binary_path="${release_dir}/${binary}"
if [[ ! -f "${binary_path}" ]]; then
echo "Binary ${binary_path} not found"
exit 1
fi
ditto "${binary_path}" "${dmg_root}/${binary}"
done
if [[ ! -f "$codex_binary_path" ]]; then
echo "Binary $codex_binary_path not found"
exit 1
fi
if [[ ! -f "$proxy_binary_path" ]]; then
echo "Binary $proxy_binary_path not found"
exit 1
fi
ditto "$codex_binary_path" "${dmg_root}/codex"
ditto "$proxy_binary_path" "${dmg_root}/codex-responses-api-proxy"
rm -f "$dmg_path"
hdiutil create \
@@ -352,7 +307,7 @@ jobs:
exit 1
fi
- if: ${{ runner.os == 'macOS' && matrix.build_dmg == 'true' }}
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (dmg)
uses: ./.github/actions/macos-code-sign
with:
@@ -371,26 +326,15 @@ jobs:
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
for binary in ${{ matrix.binaries }}; do
cp "target/${{ matrix.target }}/release/${binary}" "$dest/${binary}-${{ matrix.target }}"
if [[ "${{ matrix.target }}" == *linux* ]]; then
cp "target/${{ matrix.target }}/release/${binary}.sigstore" \
"$dest/${binary}-${{ matrix.target }}.sigstore"
fi
done
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
if [[ "${{ matrix.target }}" == *linux* && "${{ matrix.bundle }}" == "primary" ]]; then
bundle_root="${RUNNER_TEMP}/codex-${{ matrix.target }}-bundle"
rm -rf "$bundle_root"
mkdir -p "$bundle_root/codex-resources"
cp "$dest/codex-${{ matrix.target }}" "$bundle_root/codex"
cp "$dest/bwrap-${{ matrix.target }}" "$bundle_root/codex-resources/bwrap"
chmod 0755 "$bundle_root/codex" "$bundle_root/codex-resources/bwrap"
tar -C "$bundle_root" -cf - codex codex-resources/bwrap |
zstd -T0 -19 -o "$dest/codex-${{ matrix.target }}-bundle.tar.zst"
if [[ "${{ matrix.target }}" == *linux* ]]; then
cp target/${{ matrix.target }}/release/codex.sigstore "$dest/codex-${{ matrix.target }}.sigstore"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.sigstore "$dest/codex-responses-api-proxy-${{ matrix.target }}.sigstore"
fi
if [[ "${{ matrix.build_dmg }}" == "true" ]]; then
if [[ "${{ matrix.target }}" == *apple-darwin ]]; then
cp target/${{ matrix.target }}/release/codex-${{ matrix.target }}.dmg "$dest/codex-${{ matrix.target }}.dmg"
fi
@@ -413,7 +357,7 @@ jobs:
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz || "$base" == *.tar.zst || "$base" == *.zip || "$base" == *.dmg ]]; then
if [[ "$base" == *.tar.gz || "$base" == *.zip || "$base" == *.dmg ]]; then
continue
fi
@@ -432,9 +376,9 @@ jobs:
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: ${{ matrix.artifact_name }}
# Upload the per-binary .zst files, .tar.gz equivalents, and any
# prebuilt archives staged above.
name: ${{ matrix.target }}
# Upload the per-binary .zst files as well as the new .tar.gz
# equivalents we generated in the previous step.
path: |
codex-rs/dist/${{ matrix.target }}/*
@@ -640,11 +584,14 @@ jobs:
- name: Setup Node.js
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6
with:
# Node 24 bundles npm >= 11.5.1, which trusted publishing requires.
node-version: 24
node-version: 22
registry-url: "https://registry.npmjs.org"
scope: "@openai"
# Trusted publishing requires npm CLI version 11.5.1 or later.
- name: Update npm
run: npm install -g npm@latest
- name: Download npm tarballs from release
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -682,59 +629,11 @@ jobs:
prefix="${NPM_TAG}-"
fi
root_tarball="dist/npm/codex-npm-${VERSION}.tgz"
sdk_tarball="dist/npm/codex-sdk-npm-${VERSION}.tgz"
# Keep this list in sync with CODEX_PLATFORM_PACKAGES in
# codex-cli/scripts/build_npm_package.py. The root wrapper advances
# @openai/codex@latest as soon as it publishes, so every platform
# package it aliases must already exist in the registry first.
platform_tarballs=(
"dist/npm/codex-npm-linux-x64-${VERSION}.tgz"
"dist/npm/codex-npm-linux-arm64-${VERSION}.tgz"
"dist/npm/codex-npm-darwin-x64-${VERSION}.tgz"
"dist/npm/codex-npm-darwin-arm64-${VERSION}.tgz"
"dist/npm/codex-npm-win32-x64-${VERSION}.tgz"
"dist/npm/codex-npm-win32-arm64-${VERSION}.tgz"
)
for required_tarball in "${platform_tarballs[@]}" "${root_tarball}"; do
if [[ ! -f "${required_tarball}" ]]; then
echo "Missing npm tarball: ${required_tarball}"
exit 1
fi
done
shopt -s nullglob
other_tarballs=()
for tarball in dist/npm/*-"${VERSION}".tgz; do
if [[ "${tarball}" == "${root_tarball}" || "${tarball}" == "${sdk_tarball}" ]]; then
continue
fi
is_platform_tarball=false
for platform_tarball in "${platform_tarballs[@]}"; do
if [[ "${tarball}" == "${platform_tarball}" ]]; then
is_platform_tarball=true
break
fi
done
if [[ "${is_platform_tarball}" == true ]]; then
continue
fi
other_tarballs+=("${tarball}")
done
# Publish the platform packages before the root CLI wrapper. The root
# wrapper advances @openai/codex@latest, so it should only publish
# after the optional dependency versions it references exist.
tarballs=(
"${platform_tarballs[@]}"
"${other_tarballs[@]}"
"${root_tarball}"
)
if [[ -f "${sdk_tarball}" ]]; then
tarballs+=("${sdk_tarball}")
tarballs=(dist/npm/*-"${VERSION}".tgz)
if [[ ${#tarballs[@]} -eq 0 ]]; then
echo "No npm tarballs found in dist/npm for version ${VERSION}"
exit 1
fi
for tarball in "${tarballs[@]}"; do

View File

@@ -1,12 +1,20 @@
name: rusty-v8-release
on:
push:
tags:
- "rusty-v8-v*.*.*"
workflow_dispatch:
inputs:
release_tag:
description: Optional release tag. Defaults to rusty-v8-v<resolved_v8_version>.
required: false
type: string
publish:
description: Publish the staged musl artifacts to a GitHub release.
required: false
default: true
type: boolean
concurrency:
group: ${{ github.workflow }}::${{ github.ref_name }}
group: ${{ github.workflow }}::${{ inputs.release_tag || github.run_id }}
cancel-in-progress: false
jobs:
@@ -35,17 +43,15 @@ jobs:
- name: Resolve release tag
id: release_tag
env:
GITHUB_REF_NAME: ${{ github.ref_name }}
RELEASE_TAG_INPUT: ${{ inputs.release_tag }}
V8_VERSION: ${{ steps.v8_version.outputs.version }}
shell: bash
run: |
set -euo pipefail
expected_release_tag="rusty-v8-v${V8_VERSION}"
release_tag="${GITHUB_REF_NAME}"
if [[ "${release_tag}" != "${expected_release_tag}" ]]; then
echo "Tag ${release_tag} does not match resolved v8 crate version ${V8_VERSION}." >&2
exit 1
release_tag="${RELEASE_TAG_INPUT}"
if [[ -z "${release_tag}" ]]; then
release_tag="rusty-v8-v${V8_VERSION}"
fi
echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
@@ -72,9 +78,7 @@ jobs:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ matrix.target }}
uses: bazelbuild/setup-bazelisk@6ecf4fd8b7d1f9721785f1dd656a689acf9add47 # v3
- name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6
@@ -105,7 +109,6 @@ jobs:
-c
opt
"--platforms=@llvm//platforms:${PLATFORM}"
--config=v8-release-compat
"${pair_target}"
"${extra_targets[@]}"
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
@@ -129,7 +132,6 @@ jobs:
--platform "${PLATFORM}" \
--target "${TARGET}" \
--compilation-mode opt \
--bazel-config v8-release-compat \
--output-dir "dist/${TARGET}"
- name: Upload staged musl artifacts
@@ -139,6 +141,7 @@ jobs:
path: dist/${{ matrix.target }}/*
publish-release:
if: ${{ inputs.publish }}
needs:
- metadata
- build
@@ -148,6 +151,16 @@ jobs:
actions: read
steps:
- name: Ensure publishing from default branch
if: ${{ github.ref_name != github.event.repository.default_branch }}
env:
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
shell: bash
run: |
set -euo pipefail
echo "Publishing is only allowed from ${DEFAULT_BRANCH}; current ref is ${GITHUB_REF_NAME}." >&2
exit 1
- name: Ensure release tag is new
env:
GH_TOKEN: ${{ github.token }}

View File

@@ -10,7 +10,7 @@ jobs:
runs-on:
group: codex-runners
labels: codex-linux-x64
timeout-minutes: 10
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

View File

@@ -3,7 +3,6 @@ name: v8-canary
on:
pull_request:
paths:
- ".github/actions/setup-bazel-ci/**"
- ".github/scripts/rusty_v8_bazel.py"
- ".github/workflows/rusty-v8-release.yml"
- ".github/workflows/v8-canary.yml"
@@ -17,7 +16,6 @@ on:
branches:
- main
paths:
- ".github/actions/setup-bazel-ci/**"
- ".github/scripts/rusty_v8_bazel.py"
- ".github/workflows/rusty-v8-release.yml"
- ".github/workflows/v8-canary.yml"
@@ -77,9 +75,7 @@ jobs:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ matrix.target }}
uses: bazelbuild/setup-bazelisk@6ecf4fd8b7d1f9721785f1dd656a689acf9add47 # v3
- name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6
@@ -105,7 +101,6 @@ jobs:
bazel_args=(
build
"--platforms=@llvm//platforms:${PLATFORM}"
--config=v8-release-compat
"${pair_target}"
"${extra_targets[@]}"
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
@@ -128,7 +123,6 @@ jobs:
python3 .github/scripts/rusty_v8_bazel.py stage-release-pair \
--platform "${PLATFORM}" \
--target "${TARGET}" \
--bazel-config v8-release-compat \
--output-dir "dist/${TARGET}"
- name: Upload staged musl artifacts

.gitignore vendored
View File

@@ -52,7 +52,6 @@ yarn-error.log*
# env
.env*
!.env.example
.venv/
# package
*.tgz
@@ -92,3 +91,4 @@ CHANGELOG.ignore.md
# Python bytecode files
__pycache__/
*.pyc

View File

@@ -19,17 +19,9 @@ In the codex-rs folder where the rust code lives:
- You can run `just argument-comment-lint` to run the lint check locally. This is powered by Bazel, so running it the first time can be slow if Bazel is not warmed up, though incremental invocations should take <15s. Most of the time, it is best to update the PR and let CI take responsibility for checking this (or run it asynchronously in the background after submitting the PR). Note that CI checks all three platforms, which the local run does not.
- When possible, make `match` statements exhaustive and avoid wildcard arms.
- Newly added traits should include doc comments that explain their role and how implementations are expected to use them.
- Discourage both `#[async_trait]` and `#[allow(async_fn_in_trait)]` in Rust traits.
- Prefer native RPITIT trait methods with explicit `Send` bounds on the returned future, as in `3c7f013f9735` / `#16630`.
- Preferred trait shape:
`fn foo(&self, ...) -> impl std::future::Future<Output = T> + Send;`
- Implementations may still use `async fn foo(&self, ...) -> T` when they satisfy that contract.
- Do not use `#[allow(async_fn_in_trait)]` as a shortcut around spelling the future contract explicitly.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
- Prefer private modules and explicitly exported public crate API.
- If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.
- When working with MCP tool calls, prefer using `codex-rs/codex-mcp/src/mcp_connection_manager.rs` to handle mutation of tools and tool calls. Aim to minimize the footprint of changes and leverage existing abstractions rather than plumbing code through multiple levels of function calls.
- If you change Rust dependencies (`Cargo.toml` or `Cargo.lock`), run `just bazel-lock-update` from the
repo root to refresh `MODULE.bazel.lock`, and include that lockfile update in the same change.
- After dependency changes, run `just bazel-lock-check` from the repo root so lockfile drift is caught
@@ -50,8 +42,6 @@ In the codex-rs folder where the rust code lives:
`codex-rs/tui/src/bottom_pane/mod.rs`, and similarly central orchestration modules.
- When extracting code from a large module, move the related tests and module/type docs toward
the new implementation so the invariants stay close to the code that owns them.
- Avoid adding new standalone methods to `codex-rs/tui/src/chatwidget.rs` unless the change is
trivial; prefer new modules/files and keep `chatwidget.rs` focused on orchestration.
- When running Rust commands (e.g. `just fix` or `cargo test`), be patient and never try to kill them by PID. Cargo's build lock can make execution slow; this is expected.
Run `just fmt` (in the `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests:

View File

@@ -30,40 +30,6 @@ platform(
parents = ["@platforms//host"],
)
platform(
name = "windows_x86_64_gnullvm",
constraint_values = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
)
platform(
name = "windows_x86_64_msvc",
constraint_values = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
)
toolchain(
name = "windows_gnullvm_tests_on_msvc_host_toolchain",
exec_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
target_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
toolchain = "@bazel_tools//tools/test:empty_toolchain",
toolchain_type = "@bazel_tools//tools/test:default_test_toolchain_type",
)
alias(
name = "rbe",
actual = "@rbe_platform",

View File

@@ -1,8 +1,8 @@
module(name = "codex")
bazel_dep(name = "bazel_skylib", version = "1.9.0")
bazel_dep(name = "bazel_skylib", version = "1.8.2")
bazel_dep(name = "platforms", version = "1.0.0")
bazel_dep(name = "llvm", version = "0.7.1")
bazel_dep(name = "llvm", version = "0.6.8")
# The upstream LLVM archive contains a few unix-only symlink entries and is
# missing a couple of MinGW compatibility archives that windows-gnullvm needs
# during extraction and linking, so patch it until upstream grows native support.
@@ -40,17 +40,9 @@ osx.frameworks(names = [
"ColorSync",
"CoreFoundation",
"CoreGraphics",
"CoreImage",
"CoreMedia",
"CoreMIDI",
"CoreServices",
"CoreText",
"CoreVideo",
"DiskArbitration",
"AudioToolbox",
"AVFoundation",
"AVFAudio",
"AVRouting",
"CFNetwork",
"FontServices",
"AudioUnit",
@@ -58,19 +50,11 @@ osx.frameworks(names = [
"CoreAudioTypes",
"Foundation",
"ImageIO",
"IOSurface",
"IOKit",
"Kernel",
"Metal",
"MetalKit",
"OpenGL",
"OSLog",
"QuartzCore",
"ScreenCaptureKit",
"Security",
"SystemConfiguration",
"UniformTypeIdentifiers",
"VideoToolbox",
])
use_repo(osx, "macos_sdk")
@@ -78,8 +62,8 @@ use_repo(osx, "macos_sdk")
bazel_dep(name = "apple_support", version = "2.1.0")
bazel_dep(name = "rules_cc", version = "0.2.16")
bazel_dep(name = "rules_platform", version = "0.1.0")
bazel_dep(name = "rules_rs", version = "0.0.58")
# `rules_rs` still does not model `windows-gnullvm` as a distinct Windows exec
bazel_dep(name = "rules_rs", version = "0.0.43")
# `rules_rs` 0.0.43 does not model `windows-gnullvm` as a distinct Windows exec
# platform, so patch it until upstream grows that support for both x86_64 and
# aarch64.
single_version_override(
@@ -87,9 +71,8 @@ single_version_override(
patch_strip = 1,
patches = [
"//patches:rules_rs_windows_gnullvm_exec.patch",
"//patches:rules_rs_windows_exec_linker.patch",
],
version = "0.0.58",
version = "0.0.43",
)
rules_rust = use_extension("@rules_rs//rs/experimental:rules_rust.bzl", "rules_rust")
@@ -101,12 +84,11 @@ rules_rust.patch(
"//patches:rules_rust_windows_gnullvm_build_script.patch",
"//patches:rules_rust_windows_exec_msvc_build_script_env.patch",
"//patches:rules_rust_windows_bootstrap_process_wrapper_linker.patch",
"//patches:rules_rust_windows_build_script_runner_paths.patch",
"//patches:rules_rust_windows_msvc_direct_link_args.patch",
"//patches:rules_rust_windows_process_wrapper_skip_temp_outputs.patch",
"//patches:rules_rust_windows_exec_bin_target.patch",
"//patches:rules_rust_windows_exec_std.patch",
"//patches:rules_rust_windows_exec_rustc_dev_rlib.patch",
"//patches:rules_rust_repository_set_exec_constraints.patch",
],
strip = 1,
)
@@ -206,18 +188,8 @@ bazel_dep(name = "zstd", version = "1.5.7")
crate.annotation(
crate = "zstd-sys",
gen_build_script = "on",
patch_args = ["-p1"],
patches = [
"//patches:zstd-sys_windows_msvc_include_dirs.patch",
],
)
crate.annotation(
crate = "ring",
patch_args = ["-p1"],
patches = [
"//patches:ring_windows_msvc_include_dirs.patch",
],
gen_build_script = "off",
deps = ["@zstd"],
)
crate.annotation(
build_script_env = {
@@ -242,13 +214,6 @@ inject_repo(crate, "zstd")
use_repo(crate, "argument_comment_lint_crates")
bazel_dep(name = "bzip2", version = "1.0.8.bcr.3")
single_version_override(
module_name = "bzip2",
patch_strip = 1,
patches = [
"//patches:bzip2_windows_stack_args.patch",
],
)
crate.annotation(
crate = "bzip2-sys",
@@ -262,30 +227,20 @@ bazel_dep(name = "zlib", version = "1.3.1.bcr.8")
crate.annotation(
crate = "libz-sys",
gen_build_script = "on",
gen_build_script = "off",
deps = ["@zlib"],
)
inject_repo(crate, "zlib")
bazel_dep(name = "xz", version = "5.4.5.bcr.8")
single_version_override(
module_name = "xz",
patch_strip = 1,
patches = [
"//patches:xz_windows_stack_args.patch",
],
)
# TODO(zbarsky): Enable annotation after fixing windows arm64 builds.
crate.annotation(
crate = "lzma-sys",
gen_build_script = "off",
deps = ["@xz//:lzma"],
gen_build_script = "on",
)
bazel_dep(name = "openssl", version = "3.5.4.bcr.0")
inject_repo(crate, "xz")
crate.annotation(
build_script_data = [
"@openssl//:gen_dir",
@@ -327,18 +282,6 @@ crate.annotation(
"RUSTY_V8_SRC_BINDING_PATH": "$(execpath @v8_targets//:rusty_v8_binding_for_target)",
},
crate = "v8",
# Keep the Rust feature aligned with the source-built Bazel artifacts.
# Windows MSVC still consumes upstream non-sandboxed prebuilts.
crate_features_select = {
"aarch64-apple-darwin": ["v8_enable_sandbox"],
"aarch64-pc-windows-gnullvm": ["v8_enable_sandbox"],
"aarch64-unknown-linux-gnu": ["v8_enable_sandbox"],
"aarch64-unknown-linux-musl": ["v8_enable_sandbox"],
"x86_64-apple-darwin": ["v8_enable_sandbox"],
"x86_64-pc-windows-gnullvm": ["v8_enable_sandbox"],
"x86_64-unknown-linux-gnu": ["v8_enable_sandbox"],
"x86_64-unknown-linux-musl": ["v8_enable_sandbox"],
},
gen_build_script = "on",
patch_args = ["-p1"],
patches = [
@@ -371,23 +314,6 @@ crate.annotation(
inject_repo(crate, "llvm", "llvm-project", "macos_sdk")
crate.annotation(
# Provide the hermetic SDK path so the build script doesn't try to invoke an unavailable `xcrun --show-sdk-path`.
build_script_data = [
"@macos_sdk//sysroot",
],
build_script_env = {
"WEBRTC_SYS_DARWIN_SDK_PATH": "$(location @macos_sdk//sysroot)",
"WEBRTC_SYS_LINK_OUT_DIR": "1",
},
crate = "webrtc-sys",
gen_build_script = "on",
patch_args = ["-p1"],
patches = [
"//patches:webrtc-sys_hermetic_darwin_sysroot.patch",
],
)
# Fix readme inclusions
crate.annotation(
crate = "windows-link",
@@ -433,7 +359,6 @@ http_archive(
http_file(
name = "rusty_v8_146_4_0_aarch64_apple_darwin_archive",
downloaded_file_path = "librusty_v8_release_aarch64-apple-darwin.a.gz",
sha256 = "bfe2c9be32a56c28546f0f965825ee68fbf606405f310cc4e17b448a568cf98a",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_aarch64-apple-darwin.a.gz",
],
@@ -442,7 +367,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_aarch64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_aarch64-unknown-linux-gnu.a.gz",
sha256 = "dbf165b07c81bdb054bc046b43d23e69fcf7bcc1a4c1b5b4776983a71062ecd8",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_aarch64-unknown-linux-gnu.a.gz",
],
@@ -451,7 +375,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_aarch64_pc_windows_msvc_archive",
downloaded_file_path = "rusty_v8_release_aarch64-pc-windows-msvc.lib.gz",
sha256 = "ed13363659c6d08583ac8fdc40493445c5767d8b94955a4d5d7bb8d5a81f6bf8",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/rusty_v8_release_aarch64-pc-windows-msvc.lib.gz",
],
@@ -460,7 +383,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_apple_darwin_archive",
downloaded_file_path = "librusty_v8_release_x86_64-apple-darwin.a.gz",
sha256 = "630cd240f1bbecdb071417dc18387ab81cf67c549c1c515a0b4fcf9eba647bb7",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_x86_64-apple-darwin.a.gz",
],
@@ -469,7 +391,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "e64b4d99e4ae293a2e846244a89b80178ba10382c13fb591c1fa6968f5291153",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
],
@@ -478,7 +399,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_pc_windows_msvc_archive",
downloaded_file_path = "rusty_v8_release_x86_64-pc-windows-msvc.lib.gz",
sha256 = "90a9a2346acd3685a355e98df85c24dbe406cb124367d16259a4b5d522621862",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/rusty_v8_release_x86_64-pc-windows-msvc.lib.gz",
],
@@ -487,7 +407,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_aarch64_unknown_linux_musl_archive",
downloaded_file_path = "librusty_v8_release_aarch64-unknown-linux-musl.a.gz",
sha256 = "27a08ed26c34297bfd93e514692ccc44b85f8b15c6aa39cf34e784f84fb37e8e",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/librusty_v8_release_aarch64-unknown-linux-musl.a.gz",
],
@@ -496,7 +415,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_aarch64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_aarch64-unknown-linux-musl.rs",
sha256 = "09f8900ced8297c229246c7a50b2e0ec23c54d0a554f369619cc29863f38dd1a",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/src_binding_release_aarch64-unknown-linux-musl.rs",
],
@@ -505,7 +423,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-musl.a.gz",
sha256 = "20d8271ad712323d352c1383c36e3c4b755abc41ece35819c49c75ec7134d2f8",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/librusty_v8_release_x86_64-unknown-linux-musl.a.gz",
],
@@ -514,7 +431,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",
sha256 = "09f8900ced8297c229246c7a50b2e0ec23c54d0a554f369619cc29863f38dd1a",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/src_binding_release_x86_64-unknown-linux-musl.rs",
],

MODULE.bazel.lock generated

File diff suppressed because one or more lines are too long

NOTICE
View File

@@ -4,3 +4,6 @@ Copyright 2025 OpenAI
This project includes code derived from [Ratatui](https://github.com/ratatui/ratatui), licensed under the MIT license.
Copyright (c) 2016-2022 Florian Dehau
Copyright (c) 2023-2025 The Ratatui Developers
This project includes Meriyah parser assets from [meriyah](https://github.com/meriyah/meriyah), licensed under the ISC license.
Copyright (c) 2019 and later, KFlash and others.

View File

@@ -46,7 +46,7 @@ Each archive contains a single entry with the platform baked into the name (e.g.
### Using Codex with your ChatGPT plan
Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Business, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).
Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).
You can also use Codex with an API key, but this requires [additional setup](https://developers.openai.com/codex/auth#sign-in-with-an-api-key).

View File

@@ -11,7 +11,3 @@ Our security program is managed through Bugcrowd, and we ask that any validated
## Vulnerability Disclosure Program
Our Vulnerability Program Guidelines are defined on our [Bugcrowd program page](https://bugcrowd.com/engagements/openai).
## How to operate CODEX safely
For details on Codex security boundaries, including sandboxing, approvals, and network controls, see [Agent approvals & security](https://developers.openai.com/codex/agent-approvals-security).

View File

@@ -4,14 +4,20 @@
# version_regex matches against the CLI version (env!("CARGO_PKG_VERSION")); omit to apply to all versions.
# target_app specify which app should display the announcement (cli, vsce, ...).
# Test announcement only for local build version until 2027-05-10 excluded
[[announcements]]
content = "Welcome to Codex! Check out the new onboarding flow."
from_date = "2024-10-01"
to_date = "2024-10-15"
target_app = "cli"
# Test announcement only for local build version until 2026-01-10 excluded (past)
[[announcements]]
content = "This is a test announcement"
version_regex = "^0\\.0\\.0$"
to_date = "2027-05-10"
to_date = "2026-05-10"
[[announcements]]
content = "Update Required - This version will no longer be supported starting May 8th. Please upgrade to the latest version (https://github.com/openai/codex/releases/latest) using your preferred package manager."
# Matches 0.x.y versions from 0.0.y through 0.119.y; excludes 0.120.0 and newer.
version_regex = "^0\\.(?:[0-9]|[1-9][0-9]|1[01][0-9])\\."
to_date = "2026-05-08"
content = "**BREAKING NEWS**: `gpt-5.3-codex` is out! Upgrade to `0.98.0` for a faster, smarter, more steerable agent."
from_date = "2026-02-01"
to_date = "2026-02-16"
version_regex = "^0\\.(?:[0-9]|[1-8][0-9]|9[0-7])\\."

codex-cli/.dockerignore Normal file
View File

@@ -0,0 +1 @@
node_modules/

codex-cli/Dockerfile Normal file
View File

@@ -0,0 +1,59 @@
FROM node:24-slim
ARG TZ
ENV TZ="$TZ"
# Install basic development tools, ca-certificates, and iptables/ipset, then clean up apt cache to reduce image size
RUN apt-get update && apt-get install -y --no-install-recommends \
aggregate \
ca-certificates \
curl \
dnsutils \
fzf \
gh \
git \
gnupg2 \
iproute2 \
ipset \
iptables \
jq \
less \
man-db \
procps \
unzip \
ripgrep \
zsh \
&& rm -rf /var/lib/apt/lists/*
# Ensure default node user has access to /usr/local/share
RUN mkdir -p /usr/local/share/npm-global && \
chown -R node:node /usr/local/share
ARG USERNAME=node
# Set up non-root user
USER node
# Install global packages
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
ENV PATH=$PATH:/usr/local/share/npm-global/bin
# Install codex
COPY dist/codex.tgz codex.tgz
RUN npm install -g codex.tgz \
&& npm cache clean --force \
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/node_modules/.cache \
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/tests \
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/docs
# Inside the container we consider the environment already sufficiently locked
# down, therefore instruct Codex CLI to allow running without sandboxing.
ENV CODEX_UNSAFE_ALLOW_NO_SANDBOX=1
# Copy and set up firewall script as root.
USER root
COPY scripts/init_firewall.sh /usr/local/bin/
RUN chmod 500 /usr/local/bin/init_firewall.sh
# Drop back to non-root.
USER node

codex-cli/README.md Normal file
View File

@@ -0,0 +1,736 @@
<h1 align="center">OpenAI Codex CLI</h1>
<p align="center">Lightweight coding agent that runs in your terminal</p>
<p align="center"><code>npm i -g @openai/codex</code></p>
> [!IMPORTANT]
> This is the documentation for the _legacy_ TypeScript implementation of the Codex CLI. It has been superseded by the _Rust_ implementation. See the [README in the root of the Codex repository](https://github.com/openai/codex/blob/main/README.md) for details.
![Codex demo GIF using: codex "explain this codebase to me"](../.github/demo.gif)
---
<details>
<summary><strong>Table of contents</strong></summary>
<!-- Begin ToC -->
- [Experimental technology disclaimer](#experimental-technology-disclaimer)
- [Quickstart](#quickstart)
- [Why Codex?](#why-codex)
- [Security model & permissions](#security-model--permissions)
- [Platform sandboxing details](#platform-sandboxing-details)
- [System requirements](#system-requirements)
- [CLI reference](#cli-reference)
- [Memory & project docs](#memory--project-docs)
- [Non-interactive / CI mode](#non-interactive--ci-mode)
- [Tracing / verbose logging](#tracing--verbose-logging)
- [Recipes](#recipes)
- [Installation](#installation)
- [Configuration guide](#configuration-guide)
- [Basic configuration parameters](#basic-configuration-parameters)
- [Custom AI provider configuration](#custom-ai-provider-configuration)
- [History configuration](#history-configuration)
- [Configuration examples](#configuration-examples)
- [Full configuration example](#full-configuration-example)
- [Custom instructions](#custom-instructions)
- [Environment variables setup](#environment-variables-setup)
- [FAQ](#faq)
- [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage)
- [Codex open source fund](#codex-open-source-fund)
- [Contributing](#contributing)
- [Development workflow](#development-workflow)
- [Git hooks with Husky](#git-hooks-with-husky)
- [Debugging](#debugging)
- [Writing high-impact code changes](#writing-high-impact-code-changes)
- [Opening a pull request](#opening-a-pull-request)
- [Review process](#review-process)
- [Community values](#community-values)
- [Getting help](#getting-help)
- [Contributor license agreement (CLA)](#contributor-license-agreement-cla)
- [Quick fixes](#quick-fixes)
- [Releasing `codex`](#releasing-codex)
- [Alternative build options](#alternative-build-options)
- [Nix flake development](#nix-flake-development)
- [Security & responsible AI](#security--responsible-ai)
- [License](#license)
<!-- End ToC -->
</details>
---
## Experimental technology disclaimer
Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome:
- Bug reports
- Feature requests
- Pull requests
- Good vibes
Help us improve by filing issues or submitting PRs (see the section below for how to contribute)!
## Quickstart
Install globally:
```shell
npm install -g @openai/codex
```
Next, set your OpenAI API key as an environment variable:
```shell
export OPENAI_API_KEY="your-api-key-here"
```
> **Note:** This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`), but we recommend setting it per session.
>
> **Tip:** You can also place your API key into a `.env` file at the root of your project:
>
> ```env
> OPENAI_API_KEY=your-api-key-here
> ```
>
> The CLI will automatically load variables from `.env` (via `dotenv/config`).
<details>
<summary><strong>Use <code>--provider</code> to use other models</strong></summary>
> Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are:
>
> - openai (default)
> - openrouter
> - azure
> - gemini
> - ollama
> - mistral
> - deepseek
> - xai
> - groq
> - arceeai
> - any other provider that is compatible with the OpenAI API
>
> If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as:
>
> ```shell
> export <provider>_API_KEY="your-api-key-here"
> ```
>
> If you use a provider not listed above, you must also set the base URL for the provider:
>
> ```shell
> export <provider>_BASE_URL="https://your-provider-api-base-url"
> ```
</details>
<br />
Run interactively:
```shell
codex
```
Or, run with a prompt as input (and optionally in `Full Auto` mode):
```shell
codex "explain this codebase to me"
```
```shell
codex --approval-mode full-auto "create the fanciest todo-list app"
```
That's it - Codex will scaffold a file, run it inside a sandbox, install any
missing dependencies, and show you the live result. Approve the changes and
they'll be committed to your working directory.
---
## Why Codex?
Codex CLI is built for developers who already **live in the terminal** and want
ChatGPT-level reasoning **plus** the power to actually run code, manipulate
files, and iterate - all under version control. In short, it's _chat-driven
development_ that understands and executes your repo.
- **Zero setup** - bring your OpenAI API key and it just works!
- **Full auto-approval, while safe + secure** by running network-disabled and directory-sandboxed
- **Multimodal** - pass in screenshots or diagrams to implement features ✨
And it's **fully open-source** so you can see and contribute to how it develops!
---
## Security model & permissions
Codex lets you decide _how much autonomy_ the agent receives and which auto-approval policy it uses via the
`--approval-mode` flag (or the interactive onboarding prompt):
| Mode | What the agent may do without asking | Still requires approval |
| ------------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
| **Suggest** <br>(default) | <li>Read any file in the repo | <li>**All** file writes/patches<li> **Any** arbitrary shell commands (aside from reading files) |
| **Auto Edit** | <li>Read **and** apply-patch writes to files | <li>**All** shell commands |
| **Full Auto** | <li>Read/write files <li> Execute shell commands (network disabled, writes limited to your workdir) | - |
In **Full Auto** every command is run **network-disabled** and confined to the
current working directory (plus temporary files) for defense-in-depth. Codex
will also show a warning/confirmation if you start in **auto-edit** or
**full-auto** while the directory is _not_ tracked by Git, so you always have a
safety net.
Coming soon: you'll be able to whitelist specific commands to auto-execute with
the network enabled, once we're confident in additional safeguards.
### Platform sandboxing details
The hardening mechanism Codex uses depends on your OS:
- **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).
- Everything is placed in a read-only jail except for a small set of
writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.).
- Outbound network is _fully blocked_ by default - even if a child process
tries to `curl` somewhere it will fail.
- **Linux** - there is no sandboxing by default.
We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal
container image** and mounts your repo _read/write_ at the same path. A
custom `iptables`/`ipset` firewall script denies all egress except the
OpenAI API. This gives you deterministic, reproducible runs without needing
root on the host. You can use the [`run_in_container.sh`](../codex-cli/scripts/run_in_container.sh) script to set up the sandbox.
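A typical invocation might look like this (a sketch only; the script's exact arguments are an assumption here, so check its usage output first):
```shell
# Launch the sandboxed container and run a one-off prompt inside it.
# The positional prompt argument is illustrative, not a documented contract.
./codex-cli/scripts/run_in_container.sh "explain this codebase to me"
```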
---
## System requirements
| Requirement | Details |
| --------------------------- | --------------------------------------------------------------- |
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
| Node.js | **16 or newer** (Node 20 LTS recommended) |
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
| RAM | 4 GB minimum (8 GB recommended) |
> Never run `sudo npm install -g`; fix npm permissions instead.
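One common way to fix npm permissions without `sudo` is a user-level global prefix (an illustrative sketch, not the only approach):
```shell
# Install global npm packages into a user-writable prefix instead of /usr/local.
mkdir -p ~/.npm-global
npm config set prefix ~/.npm-global
# Put the new bin directory on PATH (add to ~/.zshrc or ~/.bashrc to persist).
export PATH="$HOME/.npm-global/bin:$PATH"
npm install -g @openai/codex
```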
---
## CLI reference
| Command | Purpose | Example |
| ------------------------------------ | ----------------------------------- | ------------------------------------ |
| `codex` | Interactive REPL | `codex` |
| `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` |
| `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` |
| `codex completion <bash\|zsh\|fish>` | Print shell completion script | `codex completion bash` |
Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.
---
## Memory & project docs
You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for `AGENTS.md` files in the following places, and merges them top-down:
1. `~/.codex/AGENTS.md` - personal global guidance
2. `AGENTS.md` at repo root - shared project notes
3. `AGENTS.md` in the current working directory - sub-folder/feature specifics
Disable loading of these files with `--no-project-doc` or the environment variable `CODEX_DISABLE_PROJECT_DOC=1`.
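For example, to run once without any project docs loaded:
```shell
# Either the flag or the environment variable skips AGENTS.md loading.
codex --no-project-doc "review this PR"
CODEX_DISABLE_PROJECT_DOC=1 codex "review this PR"
```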
---
## Non-interactive / CI mode
Run Codex head-less in pipelines. Example GitHub Action step:
```yaml
- name: Update changelog via Codex
run: |
npm install -g @openai/codex
export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
codex -a auto-edit --quiet "update CHANGELOG for next release"
```
Set `CODEX_QUIET_MODE=1` to silence interactive UI noise.
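For example, combining this with the quiet-mode flags from the CLI reference above:
```shell
# Suppress interactive UI output and emit machine-readable JSON.
CODEX_QUIET_MODE=1 codex -q --json "explain utils.ts"
```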
## Tracing / verbose logging
Setting the environment variable `DEBUG=true` prints full API request and response details:
```shell
DEBUG=true codex
```
---
## Recipes
Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/openai/codex/blob/main/codex-cli/examples/prompting_guide.md) for more tips and usage patterns.
| ✨ | What you type | What happens |
| --- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
| 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. |
| 2 | `codex "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. |
| 3 | `codex "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. |
| 4 | `codex "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. |
| 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. |
| 6 | `codex "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. |
| 7 | `codex "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. |
---
## Installation
<details open>
<summary><strong>From npm (Recommended)</strong></summary>
```bash
npm install -g @openai/codex
# or
yarn global add @openai/codex
# or
bun install -g @openai/codex
# or
pnpm add -g @openai/codex
```
</details>
<details>
<summary><strong>Build from source</strong></summary>
```bash
# Clone the repository and navigate to the CLI package
git clone https://github.com/openai/codex.git
cd codex/codex-cli
# Enable corepack
corepack enable
# Install dependencies and build
pnpm install
pnpm build
# Linux-only: download prebuilt sandboxing binaries (requires gh and zstd).
./scripts/install_native_deps.sh
# Get the usage and the options
node ./dist/cli.js --help
# Run the locally-built CLI directly
node ./dist/cli.js
# Or link the command globally for convenience
pnpm link
```
</details>
---
## Configuration guide
Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats.
### Basic configuration parameters
| Parameter | Type | Default | Description | Available Options |
| ------------------- | ------- | ---------- | -------------------------------- | ---------------------------------------------------------------------------------------------- |
| `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API |
| `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only)<br>`auto-edit` (automatic edits)<br>`full-auto` (fully automatic) |
| `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input)<br>`ignore-and-continue` (ignore and proceed) |
| `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` |
### Custom AI provider configuration
In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters:
| Parameter | Type | Description | Example |
| --------- | ------ | --------------------------------------- | ----------------------------- |
| `name` | string | Display name of the provider | `"OpenAI"` |
| `baseURL` | string | API service URL | `"https://api.openai.com/v1"` |
| `envKey` | string | Environment variable name (for API key) | `"OPENAI_API_KEY"` |
### History configuration
In the `history` object, you can configure conversation history settings:
| Parameter | Type | Description | Example Value |
| ------------------- | ------- | ------------------------------------------------------ | ------------- |
| `maxSize` | number | Maximum number of history entries to save | `1000` |
| `saveHistory` | boolean | Whether to save history | `true` |
| `sensitivePatterns` | array | Patterns of sensitive information to filter in history | `[]` |
### Configuration examples
1. YAML format (save as `~/.codex/config.yaml`):
```yaml
model: o4-mini
approvalMode: suggest
fullAutoErrorMode: ask-user
notify: true
```
2. JSON format (save as `~/.codex/config.json`):
```json
{
"model": "o4-mini",
"approvalMode": "suggest",
"fullAutoErrorMode": "ask-user",
"notify": true
}
```
### Full configuration example
Below is a comprehensive example of `config.json` with multiple custom providers:
```json
{
"model": "o4-mini",
"provider": "openai",
"providers": {
"openai": {
"name": "OpenAI",
"baseURL": "https://api.openai.com/v1",
"envKey": "OPENAI_API_KEY"
},
"azure": {
"name": "AzureOpenAI",
"baseURL": "https://YOUR_PROJECT_NAME.openai.azure.com/openai",
"envKey": "AZURE_OPENAI_API_KEY"
},
"openrouter": {
"name": "OpenRouter",
"baseURL": "https://openrouter.ai/api/v1",
"envKey": "OPENROUTER_API_KEY"
},
"gemini": {
"name": "Gemini",
"baseURL": "https://generativelanguage.googleapis.com/v1beta/openai",
"envKey": "GEMINI_API_KEY"
},
"ollama": {
"name": "Ollama",
"baseURL": "http://localhost:11434/v1",
"envKey": "OLLAMA_API_KEY"
},
"mistral": {
"name": "Mistral",
"baseURL": "https://api.mistral.ai/v1",
"envKey": "MISTRAL_API_KEY"
},
"deepseek": {
"name": "DeepSeek",
"baseURL": "https://api.deepseek.com",
"envKey": "DEEPSEEK_API_KEY"
},
"xai": {
"name": "xAI",
"baseURL": "https://api.x.ai/v1",
"envKey": "XAI_API_KEY"
},
"groq": {
"name": "Groq",
"baseURL": "https://api.groq.com/openai/v1",
"envKey": "GROQ_API_KEY"
},
"arceeai": {
"name": "ArceeAI",
"baseURL": "https://conductor.arcee.ai/v1",
"envKey": "ARCEEAI_API_KEY"
}
},
"history": {
"maxSize": 1000,
"saveHistory": true,
"sensitivePatterns": []
}
}
```
### Custom instructions
You can create a `~/.codex/AGENTS.md` file to define custom guidance for the agent:
```markdown
- Always respond with emojis
- Only use git commands when explicitly requested
```
### Environment variables setup
For each AI provider, you need to set the corresponding API key in your environment variables. For example:
```bash
# OpenAI
export OPENAI_API_KEY="your-api-key-here"
# Azure OpenAI
export AZURE_OPENAI_API_KEY="your-azure-api-key-here"
export AZURE_OPENAI_API_VERSION="2025-04-01-preview"  # optional
# OpenRouter
export OPENROUTER_API_KEY="your-openrouter-key-here"
# Similarly for other providers
```
---
## FAQ
<details>
<summary>OpenAI released a model called Codex in 2021 - is this related?</summary>
In 2021, OpenAI released Codex, an AI system designed to generate code from natural language prompts. That original Codex model was deprecated as of March 2023 and is separate from the CLI tool.
</details>
<details>
<summary>Which models are supported?</summary>
Any model available via the [Responses API](https://platform.openai.com/docs/api-reference/responses). The default is `o4-mini`, but pass `--model gpt-4.1` or set `model: gpt-4.1` in your config file to override it.
</details>
<details>
<summary>Why does <code>o3</code> or <code>o4-mini</code> not work for me?</summary>
It's possible that your [API account needs to be verified](https://help.openai.com/en/articles/10910291-api-organization-verification) in order to start streaming responses and seeing chain of thought summaries from the API. If you're still running into issues, please let us know!
</details>
<details>
<summary>How do I stop Codex from editing my files?</summary>
Codex runs model-generated commands in a sandbox. If a proposed command or file change doesn't look right, you can simply type **n** to deny the command or give the model feedback.
</details>
<details>
<summary>Does it work on Windows?</summary>
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install). Codex is regularly tested on macOS and Linux with Node 20+; Node 16 is also supported.
</details>
---
## Zero data retention (ZDR) usage
Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as:
```
OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention.
```
You may need to upgrade to a more recent version with: `npm i -g @openai/codex@latest`
---
## Codex open source fund
We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models.
- Grants are awarded up to **$25,000** in API credits.
- Applications are reviewed **on a rolling basis**.
**Interested? [Apply here](https://openai.com/form/codex-open-source-fund/).**
---
## Contributing
This project is under active development and the code will likely change pretty significantly. We'll update this message once that's complete!
More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.
### Development workflow
- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
- Use `pnpm test:watch` during development for super-fast feedback.
- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking.
- Before pushing, run the full test/type/lint suite:
```bash
pnpm test && pnpm run lint && pnpm run typecheck
```
Other useful commands during development:
```bash
# Watch mode (tests rerun on change)
pnpm test:watch
# Type-check without emitting files
pnpm typecheck
# Automatically fix lint + prettier issues
pnpm lint:fix
pnpm format:fix
```
- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text
```text
I have read the CLA Document and I hereby sign the CLA
```
The CLA-Assistant bot will turn the PR status green once all authors have signed.
### Git hooks with Husky
This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:
- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
- **Pre-push hook**: Runs tests and type checking before pushing to the remote
These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./HUSKY.md).
### Debugging
To debug the CLI with a visual debugger, do the following in the `codex-cli` folder:
- Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder.
- Run the CLI with `node --inspect-brk ./dist/cli.js`. The program waits until a debugger is attached before proceeding. Options:
  - In VS Code, choose **Debug: Attach to Node Process** from the command palette and select the process with debug port `9229` (likely the first option)
  - Go to <chrome://inspect> in Chrome, find **localhost:9229**, and click **trace**
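If you prefer a saved debug configuration over the command-palette flow, a minimal `.vscode/launch.json` sketch (not shipped with the repo; the `outFiles` glob is an assumption about the build layout) would be:
```json
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Attach to codex-cli",
      "type": "node",
      "request": "attach",
      "port": 9229,
      "sourceMaps": true,
      "outFiles": ["${workspaceFolder}/codex-cli/dist/**/*.js"]
    }
  ]
}
```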
### Writing high-impact code changes
1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written.
2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions.
3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codex --help`), or relevant example projects.
4. **Keep commits atomic.** Each commit should compile and the tests should pass. This makes reviews and potential rollbacks easier.
### Opening a pull request
- Fill in the PR template (or include similar information) - **What? Why? How?**
- Run **all** checks locally (`pnpm test && pnpm run lint && pnpm run typecheck`). CI failures that could have been caught locally slow down the process.
- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.
### Review process
1. One maintainer will be assigned as a primary reviewer.
2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability.
3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.
### Community values
- **Be kind and inclusive.** Treat others with respect; we follow the [Contributor Covenant](https://www.contributor-covenant.org/).
- **Assume good intent.** Written communication is hard - err on the side of generosity.
- **Teach & learn.** If you spot something confusing, open an issue or PR with improvements.
### Getting help
If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help.
Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket:
### Contributor license agreement (CLA)
All contributors **must** accept the CLA. The process is lightweight:
1. Open your pull request.
2. Paste the following comment (or reply `recheck` if you've signed before):
```text
I have read the CLA Document and I hereby sign the CLA
```
3. The CLA-Assistant bot records your signature in the repo and marks the status check as passed.
No special Git commands, email attachments, or commit footers are required for the CLA itself.
#### Quick fixes
| Scenario | Command |
| ----------------- | ------------------------------------------------ |
| Amend last commit | `git commit --amend -s --no-edit && git push -f` |
The **DCO check** blocks merges until every commit in the PR carries the `Signed-off-by:` footer (with a squash-and-merge this is just the one squashed commit).
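If several commits on your branch are missing the footer, one bulk fix (an illustrative recipe using standard Git, not a documented project command) is:
```bash
# Replay every commit on this branch on top of main, appending Signed-off-by
# to each commit message, then force-push the rewritten history.
git rebase --signoff main && git push -f
```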
### Releasing `codex`
To publish a new version of the CLI you first need to stage the npm package. A
helper script in `codex-cli/scripts/` does all the heavy lifting. Inside the
`codex-cli` folder run:
```bash
# Classic, JS implementation that includes small, native binaries for Linux sandboxing.
pnpm stage-release
# Optionally specify the temp directory to reuse between runs.
RELEASE_DIR=$(mktemp -d)
pnpm stage-release --tmp "$RELEASE_DIR"
# "Fat" package that additionally bundles the native Rust CLI binaries for
# Linux. End-users can then opt-in at runtime by setting CODEX_RUST=1.
pnpm stage-release --native
```
Go to the folder where the release is staged and verify that it works as intended. If so, run the following from the temp folder:
```bash
cd "$RELEASE_DIR"
npm publish
```
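Before publishing, it can be worth sanity-checking the staged package; here is a short sketch using standard npm commands (not part of the documented release flow):
```bash
cd "$RELEASE_DIR"
# List the files that would be included in the published tarball.
npm pack --dry-run
# Optionally install the staged package globally and smoke-test it.
npm i -g .
codex --version
```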
### Alternative build options
#### Nix flake development
Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`).
Enter a Nix development shell:
```bash
# Use whichever command matches the implementation you want to work with
nix develop .#codex-cli # For entering codex-cli specific shell
nix develop .#codex-rs # For entering codex-rs specific shell
```
This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias.
Build and run the CLI directly:
```bash
# Use whichever command matches the implementation you want to work with
nix build .#codex-cli # For building codex-cli
nix build .#codex-rs # For building codex-rs
./result/bin/codex --help
```
Run the CLI via the flake app:
```bash
# Use whichever command matches the implementation you want to work with
nix run .#codex-cli # For running codex-cli
nix run .#codex-rs # For running codex-rs
```
#### Use direnv with flakes
If you have direnv installed, you can use the following `.envrc` to automatically enter the Nix shell when you `cd` into the project directory:
```bash
# From the repo root, for the Rust implementation:
cd codex-rs
echo "use flake ../flake.nix#codex-rs" >> .envrc && direnv allow
# From the repo root, for the TypeScript implementation:
cd codex-cli
echo "use flake ../flake.nix#codex-cli" >> .envrc && direnv allow
```
---
## Security & responsible AI
Have you discovered a vulnerability or have concerns about model output? Please e-mail **security@openai.com** and we will respond promptly.
---
## License
This repository is licensed under the [Apache-2.0 License](LICENSE).


@@ -18,5 +18,5 @@
"url": "git+https://github.com/openai/codex.git",
"directory": "codex-cli"
},
"packageManager": "pnpm@10.33.0+sha512.10568bb4a6afb58c9eb3630da90cc9516417abebd3fabbe6739f0ae795728da1491e9db5a544c76ad8eb7570f5c4bb3d6c637b2cb41bfdcdb47fa823c8649319"
"packageManager": "pnpm@10.29.3+sha512.498e1fb4cca5aa06c1dcf2611e6fafc50972ffe7189998c409e90de74566444298ffe43e6cd2acdc775ba1aa7cc5e092a8b7054c811ba8c5770f84693d33d2dc"
}


@@ -0,0 +1,16 @@
#!/bin/bash
set -euo pipefail
SCRIPT_DIR=$(realpath "$(dirname "$0")")
trap "popd >> /dev/null" EXIT
pushd "$SCRIPT_DIR/.." >> /dev/null || {
echo "Error: Failed to change directory to $SCRIPT_DIR/.."
exit 1
}
pnpm install
pnpm run build
rm -rf ./dist/openai-codex-*.tgz
pnpm pack --pack-destination ./dist
mv ./dist/openai-codex-*.tgz ./dist/codex.tgz
docker build -t codex -f "./Dockerfile" .


@@ -69,8 +69,8 @@ PACKAGE_EXPANSIONS: dict[str, list[str]] = {
PACKAGE_NATIVE_COMPONENTS: dict[str, list[str]] = {
"codex": [],
"codex-linux-x64": ["bwrap", "codex", "rg"],
"codex-linux-arm64": ["bwrap", "codex", "rg"],
"codex-linux-x64": ["codex", "rg"],
"codex-linux-arm64": ["codex", "rg"],
"codex-darwin-x64": ["codex", "rg"],
"codex-darwin-arm64": ["codex", "rg"],
"codex-win32-x64": ["codex", "rg", "codex-windows-sandbox-setup", "codex-command-runner"],
@@ -87,7 +87,6 @@ PACKAGE_TARGET_FILTERS: dict[str, str] = {
PACKAGE_CHOICES = tuple(PACKAGE_NATIVE_COMPONENTS)
COMPONENT_DEST_DIR: dict[str, str] = {
"bwrap": "codex-resources",
"codex": "codex",
"codex-responses-api-proxy": "codex-responses-api-proxy",
"codex-windows-sandbox-setup": "codex",
@@ -138,16 +137,6 @@ def parse_args() -> argparse.Namespace:
type=Path,
help="Directory containing pre-installed native binaries to bundle (vendor root).",
)
parser.add_argument(
"--allow-missing-native-component",
dest="allow_missing_native_components",
action="append",
default=[],
help=(
"Native component that may be absent from --vendor-src. Intended for CI "
"compatibility with older artifact workflows; releases should not use this."
),
)
return parser.parse_args()
@@ -188,7 +177,6 @@ def main() -> int:
staging_dir,
native_components,
target_filter={target_filter} if target_filter else None,
allow_missing_components=set(args.allow_missing_native_components),
)
if release_version:
@@ -377,14 +365,12 @@ def copy_native_binaries(
staging_dir: Path,
components: list[str],
target_filter: set[str] | None = None,
allow_missing_components: set[str] | None = None,
) -> None:
vendor_src = vendor_src.resolve()
if not vendor_src.exists():
raise RuntimeError(f"Vendor source directory not found: {vendor_src}")
components_set = {component for component in components if component in COMPONENT_DEST_DIR}
allow_missing_components = allow_missing_components or set()
if not components_set:
return
@@ -413,8 +399,6 @@ def copy_native_binaries(
src_component_dir = target_dir / dest_dir_name
if not src_component_dir.exists():
if component in allow_missing_components:
continue
raise RuntimeError(
f"Missing native component '{component}' in vendor source: {src_component_dir}"
)


@@ -1,5 +1,5 @@
#!/usr/bin/env python3
"""Install Codex native binaries (Rust CLI, bwrap, and ripgrep helpers)."""
"""Install Codex native binaries (Rust CLI plus ripgrep helpers)."""
import argparse
from contextlib import contextmanager
@@ -42,15 +42,8 @@ class BinaryComponent:
WINDOWS_TARGETS = tuple(target for target in BINARY_TARGETS if "windows" in target)
LINUX_TARGETS = tuple(target for target in BINARY_TARGETS if "linux" in target)
BINARY_COMPONENTS = {
"bwrap": BinaryComponent(
artifact_prefix="bwrap",
dest_dir="codex-resources",
binary_basename="bwrap",
targets=LINUX_TARGETS,
),
"codex": BinaryComponent(
artifact_prefix="codex",
dest_dir="codex",
@@ -142,7 +135,7 @@ def parse_args() -> argparse.Namespace:
choices=tuple(list(BINARY_COMPONENTS) + ["rg"]),
help=(
"Limit installation to the specified components."
" May be repeated. Defaults to bwrap, codex, codex-windows-sandbox-setup,"
" May be repeated. Defaults to codex, codex-windows-sandbox-setup,"
" codex-command-runner, and rg."
),
)
@@ -166,7 +159,6 @@ def main() -> int:
vendor_dir.mkdir(parents=True, exist_ok=True)
components = args.components or [
"bwrap",
"codex",
"codex-windows-sandbox-setup",
"codex-command-runner",


@@ -92,4 +92,4 @@ quoted_args=""
for arg in "$@"; do
quoted_args+=" $(printf '%q' "$arg")"
done
docker exec -it "$CONTAINER_NAME" bash -c "cd \"/app$WORK_DIR\" && codex --sandbox workspace-write --ask-for-approval on-request ${quoted_args}"
docker exec -it "$CONTAINER_NAME" bash -c "cd \"/app$WORK_DIR\" && codex --full-auto ${quoted_args}"


@@ -1,11 +1,6 @@
[advisories]
# Reviewed 2026-04-15. Keep this list in sync with ../deny.toml.
ignore = [
"RUSTSEC-2024-0388", # derivative 2.2.0 via starlark; upstream crate is unmaintained
"RUSTSEC-2025-0057", # fxhash 0.2.1 via starlark_map; upstream crate is unmaintained
"RUSTSEC-2024-0436", # paste 1.0.15 via starlark/ratatui; upstream crate is unmaintained
"RUSTSEC-2024-0320", # yaml-rust via syntect; remove when syntect drops or updates it
"RUSTSEC-2025-0141", # bincode via syntect; remove when syntect drops or updates it
"RUSTSEC-2026-0118", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
"RUSTSEC-2026-0119", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
]


@@ -8,11 +8,6 @@ max-threads = 1
[test-groups.app_server_integration]
max-threads = 1
[test-groups.core_apply_patch_cli_integration]
max-threads = 1
[test-groups.windows_sandbox_legacy_sessions]
max-threads = 1
[[profile.default.overrides]]
# Do not add new tests here
@@ -34,13 +29,7 @@ filter = 'package(codex-app-server) & kind(test)'
test-group = 'app_server_integration'
[[profile.default.overrides]]
# These tests exercise full Codex turns and apply_patch execution, and they are
# sensitive to Windows runner process-startup stalls when many cases launch at once.
filter = 'package(codex-core) & kind(test) & test(apply_patch_cli)'
test-group = 'core_apply_patch_cli_integration'
[[profile.default.overrides]]
# These tests create restricted-token Windows child processes and private desktops.
# Serialize them to avoid exhausting Windows session/global desktop resources in CI.
filter = 'package(codex-windows-sandbox) & test(legacy_)'
test-group = 'windows_sandbox_legacy_sessions'
# Schema fixture generation can take longer than the default timeout on slower
# Windows runners when app-server protocol fixture sets grow.
filter = 'test(schema_fixtures_match_generated)'
slow-timeout = { period = "1m", terminate-after = 2 }


@@ -17,7 +17,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: dtolnay/rust-toolchain@stable
- name: Install cargo-audit
uses: taiki-e/install-action@v2
with:


@@ -1,5 +1,6 @@
exports_files([
"clippy.toml",
"node-version.txt",
])
filegroup(

codex-rs/Cargo.lock (generated)
File diff suppressed because it is too large.


@@ -1,16 +1,10 @@
[workspace]
members = [
"aws-auth",
"analytics",
"agent-graph-store",
"agent-identity",
"backend-client",
"builtin-mcps",
"bwrap",
"ansi-escape",
"async-utils",
"app-server",
"app-server-transport",
"app-server-client",
"app-server-protocol",
"app-server-test-client",
@@ -19,7 +13,6 @@ members = [
"arg0",
"feedback",
"features",
"install-context",
"codex-backend-openapi-models",
"code-mode",
"cloud-requirements",
@@ -27,26 +20,21 @@ members = [
"cloud-tasks-client",
"cloud-tasks-mock-client",
"cli",
"collaboration-mode-templates",
"connectors",
"config",
"device-key",
"config-schema",
"shell-command",
"shell-escalation",
"skills",
"core",
"core-api",
"core-plugins",
"core-skills",
"hooks",
"instructions",
"secrets",
"exec",
"file-system",
"exec-server",
"execpolicy",
"execpolicy-legacy",
"external-agent-migration",
"external-agent-sessions",
"keyring-store",
"file-search",
"linux-sandbox",
@@ -54,21 +42,13 @@ members = [
"login",
"codex-mcp",
"mcp-server",
"memories/mcp",
"memories/read",
"memories/write",
"model-provider-info",
"models-manager",
"network-proxy",
"ollama",
"process-hardening",
"protocol",
"realtime-webrtc",
"rollout",
"rollout-trace",
"rmcp-client",
"responses-api-proxy",
"response-debug-context",
"sandboxing",
"stdio-to-uds",
"otel",
@@ -102,13 +82,8 @@ members = [
"codex-api",
"state",
"terminal-detection",
"test-binary-support",
"thread-manager-sample",
"thread-store",
"uds",
"codex-experimental-api-macros",
"plugin",
"model-provider",
]
resolver = "2"
@@ -125,13 +100,9 @@ license = "Apache-2.0"
# Internal
app_test_support = { path = "app-server/tests/common" }
codex-analytics = { path = "analytics" }
codex-agent-graph-store = { path = "agent-graph-store" }
codex-agent-identity = { path = "agent-identity" }
codex-ansi-escape = { path = "ansi-escape" }
codex-api = { path = "codex-api" }
codex-aws-auth = { path = "aws-auth" }
codex-app-server = { path = "app-server" }
codex-app-server-transport = { path = "app-server-transport" }
codex-app-server-client = { path = "app-server-client" }
codex-app-server-protocol = { path = "app-server-protocol" }
codex-app-server-test-client = { path = "app-server-test-client" }
@@ -139,60 +110,43 @@ codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-builtin-mcps = { path = "builtin-mcps" }
codex-chatgpt = { path = "chatgpt" }
codex-cli = { path = "cli" }
codex-client = { path = "codex-client" }
codex-collaboration-mode-templates = { path = "collaboration-mode-templates" }
codex-cloud-requirements = { path = "cloud-requirements" }
codex-cloud-tasks-client = { path = "cloud-tasks-client" }
codex-cloud-tasks-mock-client = { path = "cloud-tasks-mock-client" }
codex-code-mode = { path = "code-mode" }
codex-config = { path = "config" }
codex-config-schema = { path = "config-schema" }
codex-connectors = { path = "connectors" }
codex-core = { path = "core" }
codex-core-api = { path = "core-api" }
codex-core-plugins = { path = "core-plugins" }
codex-core-skills = { path = "core-skills" }
codex-device-key = { path = "device-key" }
codex-exec = { path = "exec" }
codex-file-system = { path = "file-system" }
codex-exec-server = { path = "exec-server" }
codex-execpolicy = { path = "execpolicy" }
codex-external-agent-migration = { path = "external-agent-migration" }
codex-external-agent-sessions = { path = "external-agent-sessions" }
codex-experimental-api-macros = { path = "codex-experimental-api-macros" }
codex-features = { path = "features" }
codex-feedback = { path = "feedback" }
codex-install-context = { path = "install-context" }
codex-file-search = { path = "file-search" }
codex-git-utils = { path = "git-utils" }
codex-hooks = { path = "hooks" }
codex-instructions = { path = "instructions" }
codex-keyring-store = { path = "keyring-store" }
codex-linux-sandbox = { path = "linux-sandbox" }
codex-lmstudio = { path = "lmstudio" }
codex-login = { path = "login" }
codex-message-history = { path = "message-history" }
codex-memories-mcp = { path = "memories/mcp" }
codex-memories-read = { path = "memories/read" }
codex-memories-write = { path = "memories/write" }
codex-mcp = { path = "codex-mcp" }
codex-mcp-server = { path = "mcp-server" }
codex-model-provider-info = { path = "model-provider-info" }
codex-models-manager = { path = "models-manager" }
codex-network-proxy = { path = "network-proxy" }
codex-ollama = { path = "ollama" }
codex-otel = { path = "otel" }
codex-plugin = { path = "plugin" }
codex-model-provider = { path = "model-provider" }
codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-realtime-webrtc = { path = "realtime-webrtc" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-response-debug-context = { path = "response-debug-context" }
codex-rmcp-client = { path = "rmcp-client" }
codex-rollout = { path = "rollout" }
codex-rollout-trace = { path = "rollout-trace" }
codex-sandboxing = { path = "sandboxing" }
codex-secrets = { path = "secrets" }
codex-shell-command = { path = "shell-command" }
@@ -201,11 +155,8 @@ codex-skills = { path = "skills" }
codex-state = { path = "state" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-terminal-detection = { path = "terminal-detection" }
codex-test-binary-support = { path = "test-binary-support" }
codex-thread-store = { path = "thread-store" }
codex-tools = { path = "tools" }
codex-tui = { path = "tui" }
codex-uds = { path = "uds" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-approval-presets = { path = "utils/approval-presets" }
codex-utils-cache = { path = "utils/cache" }
@@ -243,13 +194,8 @@ arc-swap = "1.9.0"
assert_cmd = "2"
assert_matches = "1.5.0"
async-channel = "2.3.1"
async-io = "2.6.0"
async-stream = "0.3.6"
async-trait = "0.1.89"
aws-config = "1"
aws-credential-types = "1"
aws-sigv4 = "1"
aws-types = "1"
axum = { version = "0.8", default-features = false }
base64 = "0.22.1"
bm25 = "2.3.2"
@@ -261,27 +207,20 @@ clap_complete = "4"
color-eyre = "0.6.3"
constant_time_eq = "0.3.1"
crossbeam-channel = "0.5.15"
crypto_box = { version = "0.9.1", features = ["seal"] }
crossterm = "0.28.1"
csv = "1.3.1"
ctor = "0.6.3"
deno_core_icudata = "0.77.0"
derive_more = "2"
diffy = "0.4.2"
dirs = "6"
dns-lookup = "3.0.1"
dotenvy = "0.15.7"
dunce = "1.0.4"
ed25519-dalek = { version = "2.2.0", features = ["pkcs8"] }
encoding_rs = "0.8.35"
env-flags = "0.1.1"
env_logger = "0.11.9"
eventsource-stream = "0.2.3"
flate2 = "1.1.8"
futures = { version = "0.3", default-features = false }
gethostname = "1.1.0"
gix = { version = "0.81.0", default-features = false, features = ["sha1"] }
glob = "0.3"
globset = "0.4"
hmac = "0.12.1"
http = "1.3.1"
@@ -319,7 +258,6 @@ os_info = "3.12.0"
owo-colors = "4.3.0"
path-absolutize = "3.1.1"
pathdiff = "0.2"
p256 = "0.13.2"
portable-pty = "0.9.0"
predicates = "3"
pretty_assertions = "1.4.1"
@@ -328,13 +266,9 @@ quick-xml = "0.38.4"
rand = "0.9"
ratatui = "0.29.0"
ratatui-macros = "0.6.0"
rcgen = { version = "0.14.7", default-features = false, features = [
"aws_lc_rs",
"pem",
] }
regex = "1.12.3"
regex-lite = "0.1.8"
reqwest = { version = "0.12", features = ["cookies"] }
reqwest = "0.12"
rmcp = { version = "0.15.0", default-features = false }
runfiles = { git = "https://github.com/dzbarsky/rules_rust", rev = "b56cbaa8465e74127f1ea216f813cd377295ad81" }
rustls = { version = "0.23", default-features = false, features = [
@@ -374,7 +308,6 @@ strum_macros = "0.28.0"
supports-color = "3.0.2"
syntect = "5"
sys-locale = "0.3.2"
tar = { version = "=0.4.45", default-features = false }
tempfile = "3.23.0"
test-log = "0.2.19"
textwrap = "0.16.2"
@@ -396,8 +329,6 @@ tracing-appender = "0.2.3"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = "0.3.22"
tracing-test = "0.2.5"
tonic = { version = "0.14.3", default-features = false, features = ["channel", "codegen"] }
tonic-prost = "0.14.3"
tree-sitter = "0.25.10"
tree-sitter-bash = "0.25"
ts-rs = "11"
@@ -413,9 +344,7 @@ vt100 = "0.16.2"
walkdir = "2.5.0"
webbrowser = "1.0"
which = "8"
whoami = "1.6.1"
wildmatch = "2.6.1"
winapi-util = "0.1.11"
zip = "2.4.2"
zstd = "0.13"
@@ -426,8 +355,6 @@ zeroize = "1.8.2"
rust = {}
[workspace.lints.clippy]
await_holding_invalid_type = "deny"
await_holding_lock = "deny"
expect_used = "deny"
identity_op = "deny"
manual_clamp = "deny"
@@ -466,8 +393,6 @@ unwrap_used = "deny"
# silence the false positive here instead of deleting a real dependency.
[workspace.metadata.cargo-shear]
ignored = [
"codex-agent-graph-store",
"codex-memories-mcp",
"icu_provider",
"openssl-sys",
"codex-utils-readiness",
@@ -475,17 +400,6 @@ ignored = [
"codex-v8-poc",
]
[profile.dev]
# Keep line tables/backtraces while avoiding expensive full variable debug info
# across local dev builds.
debug = 1
[profile.dev-small]
inherits = "dev"
opt-level = 0
debug = 0
strip = true
[profile.release]
lto = "fat"
split-debuginfo = "off"
@@ -504,8 +418,8 @@ opt-level = 0
[patch.crates-io]
# Uncomment to debug local changes.
# ratatui = { path = "../../ratatui" }
crossterm = { git = "https://github.com/nornagon/crossterm", rev = "87db8bfa6dc99427fd3b071681b07fc31c6ce995" }
ratatui = { git = "https://github.com/nornagon/ratatui", rev = "9b2ad1298408c45918ee9f8241a6f95498cdbed2" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
tokio-tungstenite = { git = "https://github.com/openai-oss-forks/tokio-tungstenite", rev = "132f5b39c862e3a970f731d709608b3e6276d5f6" }
tungstenite = { git = "https://github.com/openai-oss-forks/tungstenite-rs", rev = "9200079d3b54a1ff51072e24d81fd354f085156f" }


@@ -1,6 +1,6 @@
# Codex CLI (Rust Implementation)
We provide Codex CLI as a standalone executable to ensure a zero-dependency install.
We provide Codex CLI as a standalone, native executable to ensure a zero-dependency install.
## Installing Codex
@@ -50,7 +50,8 @@ You can enable notifications by configuring a script that is run whenever the ag
### `codex exec` to run Codex programmatically/non-interactively
To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the prompt via `stdin`) and Codex will work on your task until it decides that it is done and exits. If you provide both a prompt argument and piped stdin, Codex appends stdin as a `<stdin>` block after the prompt so patterns like `echo "my output" | codex exec "Summarize this concisely"` work naturally. Output is printed to the terminal directly. You can set the `RUST_LOG` environment variable to see more about what's going on.
To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the prompt via `stdin`) and Codex will work on your task until it decides that it is done and exits. Output is printed to the terminal directly. You can set the `RUST_LOG` environment variable to see more about what's going on.
Use `codex exec --fork <SESSION_ID> PROMPT` to fork an existing session without launching the interactive picker/UI.
Use `codex exec --ephemeral ...` to run without persisting session rollout files to disk.
### Experimenting with the Codex Sandbox
@@ -59,22 +60,19 @@ To test to see what happens when a command is run under the sandbox provided by
```
# macOS
codex sandbox macos [--log-denials] [COMMAND]...
codex sandbox macos [--full-auto] [--log-denials] [COMMAND]...
# Linux
codex sandbox linux [COMMAND]...
codex sandbox linux [--full-auto] [COMMAND]...
# Windows
codex sandbox windows [COMMAND]...
codex sandbox windows [--full-auto] [COMMAND]...
# Legacy aliases
codex debug seatbelt [--log-denials] [COMMAND]...
codex debug landlock [COMMAND]...
codex debug seatbelt [--full-auto] [--log-denials] [COMMAND]...
codex debug landlock [--full-auto] [COMMAND]...
```
To try a writable legacy sandbox mode with these commands, pass an explicit config override such
as `-c 'sandbox_mode="workspace-write"'`.
### Selecting a sandbox policy via `--sandbox`
The Rust CLI exposes a dedicated `--sandbox` (`-s`) flag that lets you pick the sandbox policy **without** having to reach for the generic `-c/--config` option:
@@ -97,7 +95,7 @@ In `workspace-write`, Codex also includes `~/.codex/memories` in its writable ro
This folder is the root of a Cargo workspace. It contains quite a bit of experimental code, but here are the key crates:
- [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this becomes a library crate that is generally useful for building other Rust/native applications that use Codex.
- [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this to be a library crate that is generally useful for building other Rust/native applications that use Codex.
- [`exec/`](./exec) "headless" CLI for use in automation.
- [`tui/`](./tui) CLI that launches a fullscreen TUI built with [Ratatui](https://ratatui.rs/).
- [`cli/`](./cli) CLI multitool that provides the aforementioned CLIs via subcommands.


@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "agent-graph-store",
crate_name = "codex_agent_graph_store",
)


@@ -1,25 +0,0 @@
[package]
edition.workspace = true
license.workspace = true
name = "codex-agent-graph-store"
version.workspace = true
[lib]
name = "codex_agent_graph_store"
path = "src/lib.rs"
[lints]
workspace = true
[dependencies]
async-trait = { workspace = true }
codex-protocol = { workspace = true }
codex-state = { workspace = true }
serde = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }
serde_json = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }


@@ -1,20 +0,0 @@
/// Result type returned by agent graph store operations.
pub type AgentGraphStoreResult<T> = Result<T, AgentGraphStoreError>;
/// Error type shared by agent graph store implementations.
#[derive(Debug, thiserror::Error)]
pub enum AgentGraphStoreError {
/// The caller supplied invalid request data.
#[error("invalid agent graph store request: {message}")]
InvalidRequest {
/// User-facing explanation of the invalid request.
message: String,
},
/// Catch-all for implementation failures that do not fit a more specific category.
#[error("agent graph store internal error: {message}")]
Internal {
/// User-facing explanation of the implementation failure.
message: String,
},
}


@@ -1,12 +0,0 @@
//! Storage-neutral parent/child topology for thread-spawned agents.
mod error;
mod local;
mod store;
mod types;
pub use error::AgentGraphStoreError;
pub use error::AgentGraphStoreResult;
pub use local::LocalAgentGraphStore;
pub use store::AgentGraphStore;
pub use types::ThreadSpawnEdgeStatus;


@@ -1,325 +0,0 @@
use async_trait::async_trait;
use codex_protocol::ThreadId;
use codex_state::StateRuntime;
use std::sync::Arc;
use crate::AgentGraphStore;
use crate::AgentGraphStoreError;
use crate::AgentGraphStoreResult;
use crate::ThreadSpawnEdgeStatus;
/// SQLite-backed implementation of [`AgentGraphStore`] using an existing state runtime.
#[derive(Clone)]
pub struct LocalAgentGraphStore {
state_db: Arc<StateRuntime>,
}
impl std::fmt::Debug for LocalAgentGraphStore {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("LocalAgentGraphStore")
.field("codex_home", &self.state_db.codex_home())
.finish_non_exhaustive()
}
}
impl LocalAgentGraphStore {
/// Create a local graph store from an already-initialized state runtime.
pub fn new(state_db: Arc<StateRuntime>) -> Self {
Self { state_db }
}
}
#[async_trait]
impl AgentGraphStore for LocalAgentGraphStore {
async fn upsert_thread_spawn_edge(
&self,
parent_thread_id: ThreadId,
child_thread_id: ThreadId,
status: ThreadSpawnEdgeStatus,
) -> AgentGraphStoreResult<()> {
self.state_db
.upsert_thread_spawn_edge(parent_thread_id, child_thread_id, to_state_status(status))
.await
.map_err(internal_error)
}
async fn set_thread_spawn_edge_status(
&self,
child_thread_id: ThreadId,
status: ThreadSpawnEdgeStatus,
) -> AgentGraphStoreResult<()> {
self.state_db
.set_thread_spawn_edge_status(child_thread_id, to_state_status(status))
.await
.map_err(internal_error)
}
async fn list_thread_spawn_children(
&self,
parent_thread_id: ThreadId,
status_filter: Option<ThreadSpawnEdgeStatus>,
) -> AgentGraphStoreResult<Vec<ThreadId>> {
if let Some(status) = status_filter {
return self
.state_db
.list_thread_spawn_children_with_status(parent_thread_id, to_state_status(status))
.await
.map_err(internal_error);
}
self.state_db
.list_thread_spawn_children(parent_thread_id)
.await
.map_err(internal_error)
}
async fn list_thread_spawn_descendants(
&self,
root_thread_id: ThreadId,
status_filter: Option<ThreadSpawnEdgeStatus>,
) -> AgentGraphStoreResult<Vec<ThreadId>> {
match status_filter {
Some(status) => self
.state_db
.list_thread_spawn_descendants_with_status(root_thread_id, to_state_status(status))
.await
.map_err(internal_error),
None => self
.state_db
.list_thread_spawn_descendants(root_thread_id)
.await
.map_err(internal_error),
}
}
}
fn to_state_status(status: ThreadSpawnEdgeStatus) -> codex_state::DirectionalThreadSpawnEdgeStatus {
match status {
ThreadSpawnEdgeStatus::Open => codex_state::DirectionalThreadSpawnEdgeStatus::Open,
ThreadSpawnEdgeStatus::Closed => codex_state::DirectionalThreadSpawnEdgeStatus::Closed,
}
}
fn internal_error(err: impl std::fmt::Display) -> AgentGraphStoreError {
AgentGraphStoreError::Internal {
message: err.to_string(),
}
}
#[cfg(test)]
mod tests {
use super::*;
use codex_state::DirectionalThreadSpawnEdgeStatus;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
struct TestRuntime {
state_db: Arc<StateRuntime>,
_codex_home: TempDir,
}
fn thread_id(suffix: u128) -> ThreadId {
ThreadId::from_string(&format!("00000000-0000-0000-0000-{suffix:012}"))
.expect("valid thread id")
}
async fn state_runtime() -> TestRuntime {
let codex_home = TempDir::new().expect("tempdir should be created");
let state_db =
StateRuntime::init(codex_home.path().to_path_buf(), "test-provider".to_string())
.await
.expect("state db should initialize");
TestRuntime {
state_db,
_codex_home: codex_home,
}
}
#[tokio::test]
async fn local_store_upserts_and_lists_direct_children_with_status_filters() {
let fixture = state_runtime().await;
let state_db = fixture.state_db;
let store = LocalAgentGraphStore::new(state_db.clone());
let parent_thread_id = thread_id(/*suffix*/ 1);
let first_child_thread_id = thread_id(/*suffix*/ 2);
let second_child_thread_id = thread_id(/*suffix*/ 3);
store
.upsert_thread_spawn_edge(
parent_thread_id,
second_child_thread_id,
ThreadSpawnEdgeStatus::Closed,
)
.await
.expect("closed child edge should insert");
store
.upsert_thread_spawn_edge(
parent_thread_id,
first_child_thread_id,
ThreadSpawnEdgeStatus::Open,
)
.await
.expect("open child edge should insert");
let all_children = store
.list_thread_spawn_children(parent_thread_id, /*status_filter*/ None)
.await
.expect("all children should load");
assert_eq!(
all_children,
vec![first_child_thread_id, second_child_thread_id]
);
let open_children = store
.list_thread_spawn_children(parent_thread_id, Some(ThreadSpawnEdgeStatus::Open))
.await
.expect("open children should load");
let state_open_children = state_db
.list_thread_spawn_children_with_status(
parent_thread_id,
DirectionalThreadSpawnEdgeStatus::Open,
)
.await
.expect("state open children should load");
assert_eq!(open_children, state_open_children);
assert_eq!(open_children, vec![first_child_thread_id]);
let closed_children = store
.list_thread_spawn_children(parent_thread_id, Some(ThreadSpawnEdgeStatus::Closed))
.await
.expect("closed children should load");
assert_eq!(closed_children, vec![second_child_thread_id]);
}
#[tokio::test]
async fn local_store_updates_edge_status() {
let fixture = state_runtime().await;
let state_db = fixture.state_db;
let store = LocalAgentGraphStore::new(state_db);
let parent_thread_id = thread_id(/*suffix*/ 10);
let child_thread_id = thread_id(/*suffix*/ 11);
store
.upsert_thread_spawn_edge(
parent_thread_id,
child_thread_id,
ThreadSpawnEdgeStatus::Open,
)
.await
.expect("child edge should insert");
store
.set_thread_spawn_edge_status(child_thread_id, ThreadSpawnEdgeStatus::Closed)
.await
.expect("child edge should close");
let open_children = store
.list_thread_spawn_children(parent_thread_id, Some(ThreadSpawnEdgeStatus::Open))
.await
.expect("open children should load");
assert_eq!(open_children, Vec::<ThreadId>::new());
let closed_children = store
.list_thread_spawn_children(parent_thread_id, Some(ThreadSpawnEdgeStatus::Closed))
.await
.expect("closed children should load");
assert_eq!(closed_children, vec![child_thread_id]);
}
#[tokio::test]
async fn local_store_lists_descendants_breadth_first_with_status_filters() {
let fixture = state_runtime().await;
let state_db = fixture.state_db;
let store = LocalAgentGraphStore::new(state_db.clone());
let root_thread_id = thread_id(/*suffix*/ 20);
let later_child_thread_id = thread_id(/*suffix*/ 22);
let earlier_child_thread_id = thread_id(/*suffix*/ 21);
let closed_grandchild_thread_id = thread_id(/*suffix*/ 23);
let open_grandchild_thread_id = thread_id(/*suffix*/ 24);
let closed_child_thread_id = thread_id(/*suffix*/ 25);
let closed_great_grandchild_thread_id = thread_id(/*suffix*/ 26);
for (parent_thread_id, child_thread_id, status) in [
(
root_thread_id,
later_child_thread_id,
ThreadSpawnEdgeStatus::Open,
),
(
root_thread_id,
earlier_child_thread_id,
ThreadSpawnEdgeStatus::Open,
),
(
earlier_child_thread_id,
open_grandchild_thread_id,
ThreadSpawnEdgeStatus::Open,
),
(
later_child_thread_id,
closed_grandchild_thread_id,
ThreadSpawnEdgeStatus::Closed,
),
(
root_thread_id,
closed_child_thread_id,
ThreadSpawnEdgeStatus::Closed,
),
(
closed_child_thread_id,
closed_great_grandchild_thread_id,
ThreadSpawnEdgeStatus::Closed,
),
] {
store
.upsert_thread_spawn_edge(parent_thread_id, child_thread_id, status)
.await
.expect("edge should insert");
}
let all_descendants = store
.list_thread_spawn_descendants(root_thread_id, /*status_filter*/ None)
.await
.expect("all descendants should load");
assert_eq!(
all_descendants,
vec![
earlier_child_thread_id,
later_child_thread_id,
closed_child_thread_id,
closed_grandchild_thread_id,
open_grandchild_thread_id,
closed_great_grandchild_thread_id,
]
);
let open_descendants = store
.list_thread_spawn_descendants(root_thread_id, Some(ThreadSpawnEdgeStatus::Open))
.await
.expect("open descendants should load");
let state_open_descendants = state_db
.list_thread_spawn_descendants_with_status(
root_thread_id,
DirectionalThreadSpawnEdgeStatus::Open,
)
.await
.expect("state open descendants should load");
assert_eq!(open_descendants, state_open_descendants);
assert_eq!(
open_descendants,
vec![
earlier_child_thread_id,
later_child_thread_id,
open_grandchild_thread_id,
]
);
let closed_descendants = store
.list_thread_spawn_descendants(root_thread_id, Some(ThreadSpawnEdgeStatus::Closed))
.await
.expect("closed descendants should load");
assert_eq!(
closed_descendants,
vec![closed_child_thread_id, closed_great_grandchild_thread_id]
);
}
}


@@ -1,55 +0,0 @@
use async_trait::async_trait;
use codex_protocol::ThreadId;
use crate::AgentGraphStoreResult;
use crate::ThreadSpawnEdgeStatus;
/// Storage-neutral boundary for persisted thread-spawn parent/child topology.
///
/// Implementations are expected to return stable ordering for list methods so callers can merge
/// persisted graph state with live in-memory state without introducing nondeterministic output.
#[async_trait]
pub trait AgentGraphStore: Send + Sync {
/// Insert or replace the directional parent/child edge for a spawned thread.
///
/// `child_thread_id` has at most one persisted parent. Re-inserting the same child should
/// update both the parent and status to match the supplied values.
async fn upsert_thread_spawn_edge(
&self,
parent_thread_id: ThreadId,
child_thread_id: ThreadId,
status: ThreadSpawnEdgeStatus,
) -> AgentGraphStoreResult<()>;
/// Update the persisted lifecycle status of a spawned thread's incoming edge.
///
/// Implementations should treat missing children as a successful no-op.
async fn set_thread_spawn_edge_status(
&self,
child_thread_id: ThreadId,
status: ThreadSpawnEdgeStatus,
) -> AgentGraphStoreResult<()>;
/// List direct spawned children of a parent thread.
///
/// When `status_filter` is `Some`, only child edges with that exact status are returned. When
/// it is `None`, all direct child edges are returned regardless of status, including statuses
/// that may be added by a future store implementation.
async fn list_thread_spawn_children(
&self,
parent_thread_id: ThreadId,
status_filter: Option<ThreadSpawnEdgeStatus>,
) -> AgentGraphStoreResult<Vec<ThreadId>>;
/// List spawned descendants breadth-first by depth, then by thread id.
///
/// `status_filter` is applied to every traversed edge, not just to the returned descendants.
/// For example, `Some(Open)` walks only open edges, so descendants under a closed edge are not
/// included even if their own incoming edge is open. `None` walks and returns every persisted
/// edge regardless of status.
async fn list_thread_spawn_descendants(
&self,
root_thread_id: ThreadId,
status_filter: Option<ThreadSpawnEdgeStatus>,
) -> AgentGraphStoreResult<Vec<ThreadId>>;
}


@@ -1,42 +0,0 @@
use serde::Deserialize;
use serde::Serialize;
/// Lifecycle status attached to a directional thread-spawn edge.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ThreadSpawnEdgeStatus {
/// The child thread is still live or resumable as an open spawned agent.
Open,
/// The child thread has been closed from the parent/child graph's perspective.
Closed,
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn thread_spawn_edge_status_serializes_as_snake_case() {
assert_eq!(
serde_json::to_string(&ThreadSpawnEdgeStatus::Open)
.expect("open status should serialize"),
"\"open\""
);
assert_eq!(
serde_json::to_string(&ThreadSpawnEdgeStatus::Closed)
.expect("closed status should serialize"),
"\"closed\""
);
assert_eq!(
serde_json::from_str::<ThreadSpawnEdgeStatus>("\"open\"")
.expect("open status should deserialize"),
ThreadSpawnEdgeStatus::Open
);
assert_eq!(
serde_json::from_str::<ThreadSpawnEdgeStatus>("\"closed\"")
.expect("closed status should deserialize"),
ThreadSpawnEdgeStatus::Closed
);
}
}


@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "agent-identity",
crate_name = "codex_agent_identity",
)


@@ -1,30 +0,0 @@
[package]
edition.workspace = true
license.workspace = true
name = "codex-agent-identity"
version.workspace = true
[lib]
doctest = false
name = "codex_agent_identity"
path = "src/lib.rs"
[lints]
workspace = true
[dependencies]
anyhow = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-protocol = { workspace = true }
crypto_box = { workspace = true }
ed25519-dalek = { workspace = true }
jsonwebtoken = { workspace = true }
rand = { workspace = true }
reqwest = { workspace = true, features = ["json"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }

Some files were not shown because too many files have changed in this diff.