Compare commits

..

1 Commits

Author SHA1 Message Date
Michael Bolin
0d2ceb199f Release 0.34.0 2025-09-10 14:15:41 -07:00
4670 changed files with 73870 additions and 873629 deletions

View File

@@ -1,4 +0,0 @@
# Without this, Bazel will consider BUILD.bazel files in
# .git/sl/origbackups (which can be populated by Sapling SCM).
.git
codex-rs/target

View File

@@ -1,62 +0,0 @@
common --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
common --repo_env=BAZEL_NO_APPLE_CPP_TOOLCHAIN=1
# Dummy xcode config so we don't need to build xcode_locator in repo rule.
common --xcode_version_config=//:disable_xcode
common --disk_cache=~/.cache/bazel-disk-cache
common --repo_contents_cache=~/.cache/bazel-repo-contents-cache
common --repository_cache=~/.cache/bazel-repo-cache
common --remote_cache_compression
startup --experimental_remote_repo_contents_cache
common --experimental_platform_in_output_dir
# Runfiles strategy rationale: codex-rs/utils/cargo-bin/README.md
common --noenable_runfiles
common --enable_platform_specific_config
common:linux --host_platform=//:local_linux
common:windows --host_platform=//:local_windows
common --@rules_cc//cc/toolchains/args/archiver_flags:use_libtool_on_macos=False
common --@llvm//config:experimental_stub_libgcc_s
# We need to use the sh toolchain on windows so we don't send host bash paths to the linux executor.
common:windows --@rules_rust//rust/settings:experimental_use_sh_toolchain_for_bootstrap_process_wrapper
# TODO(zbarsky): rules_rust doesn't implement this flag properly with remote exec...
# common --@rules_rust//rust/settings:pipelined_compilation
common --incompatible_strict_action_env
# Not ideal, but we need to allow dotslash to be found
common:linux --test_env=PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
common:macos --test_env=PATH=/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
# Pass through some env vars that Windows needs in order to use PowerShell.
common:windows --test_env=PATH
common:windows --test_env=SYSTEMROOT
common:windows --test_env=COMSPEC
common:windows --test_env=WINDIR
common --test_output=errors
common --bes_results_url=https://app.buildbuddy.io/invocation/
common --bes_backend=grpcs://remote.buildbuddy.io
common --remote_cache=grpcs://remote.buildbuddy.io
common --remote_download_toplevel
common --nobuild_runfile_links
common --remote_timeout=3600
common --noexperimental_throttle_remote_action_building
common --experimental_remote_execution_keepalive
common --grpc_keepalive_time=30s
# This limits both in-flight executions and concurrent downloads. Even with high number
# of jobs execution will still be limited by CPU cores, so this just pays a bit of
# memory in exchange for higher download concurrency.
common --jobs=30
common:remote --extra_execution_platforms=//:rbe
common:remote --remote_executor=grpcs://remote.buildbuddy.io
common:remote --jobs=800
# TODO(team): Evaluate if this actually helps, zbarsky is not sure, everything seems bottlenecked on `core` either way.
# Enable pipelined compilation since we are not bound by local CPU count.
#common:remote --@rules_rust//rust/settings:pipelined_compilation

View File

@@ -1 +0,0 @@
9.0.0

View File

@@ -1,5 +1 @@
iTerm
iTerm2
psuedo
te
TE

View File

@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new,*meriyah.umd.min.js
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm,te,TE
ignore-words-list = ratatui,ser

View File

@@ -1,185 +0,0 @@
---
name: babysit-pr
description: Babysit a GitHub pull request after creation by continuously polling CI checks/workflow runs, new review comments, and mergeability state until the PR is ready to merge (or merged/closed). Diagnose failures, retry likely flaky failures up to 3 times, auto-fix/push branch-related issues when appropriate, and stop only when user help is required (for example CI infrastructure issues, exhausted flaky retries, or ambiguous/blocking situations). Use when the user asks Codex to monitor a PR, watch CI, handle review comments, or keep an eye on failures and feedback on an open PR.
---
# PR Babysitter
## Objective
Babysit a PR persistently until one of these terminal outcomes occurs:
- The PR is merged or closed.
- CI is successful, there are no unaddressed review comments surfaced by the watcher, required review approval is not blocking merge, and there are no potential merge conflicts (PR is mergeable / not reporting conflict risk).
- A situation requires user help (for example CI infrastructure issues, repeated flaky failures after retry budget is exhausted, permission problems, or ambiguity that cannot be resolved safely).
Do not stop merely because a single snapshot returns `idle` while checks are still pending.
## Inputs
Accept any of the following:
- No PR argument: infer the PR from the current branch (`--pr auto`)
- PR number
- PR URL
## Core Workflow
1. When the user asks to "monitor"/"watch"/"babysit" a PR, start with the watcher's continuous mode (`--watch`) unless you are intentionally doing a one-shot diagnostic snapshot.
2. Run the watcher script to snapshot PR/CI/review state (or consume each streamed snapshot from `--watch`).
3. Inspect the `actions` list in the JSON response.
4. If `diagnose_ci_failure` is present, inspect failed run logs and classify the failure.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push.
6. If `process_review_comment` is present, inspect surfaced review items and decide whether to address them.
7. If a review item is actionable and correct, patch code locally, commit, and push.
8. If the failure is likely flaky/unrelated and `retry_failed_checks` is present, rerun failed jobs with `--retry-failed-now`.
9. If both actionable review feedback and `retry_failed_checks` are present, prioritize review feedback first; a new commit will retrigger CI, so avoid rerunning flaky checks on the old SHA unless you intentionally defer the review change.
10. On every loop, verify mergeability / merge-conflict status (for example via `gh pr view`) in addition to CI and review state.
11. After any push or rerun action, immediately return to step 1 and continue polling on the updated SHA/state.
12. If you had been using `--watch` before pausing to patch/commit/push, relaunch `--watch` yourself in the same turn immediately after the push (do not wait for the user to re-invoke the skill).
13. Repeat polling until the PR is green + review-clean + mergeable, `stop_pr_closed` appears, or a user-help-required blocker is reached.
14. Maintain terminal/session ownership: while babysitting is active, keep consuming watcher output in the same turn; do not leave a detached `--watch` process running and then end the turn as if monitoring were complete.
## Commands
### One-shot snapshot
```bash
python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr auto --once
```
### Continuous watch (JSONL)
```bash
python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr auto --watch
```
### Trigger flaky retry cycle (only when watcher indicates)
```bash
python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr auto --retry-failed-now
```
### Explicit PR target
```bash
python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr <number-or-url> --once
```
## CI Failure Classification
Use `gh` commands to inspect failed runs before deciding to rerun.
- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh run view <run-id> --log-failed`
Prefer treating failures as branch-related when logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).
Prefer treating failures as flaky/unrelated when logs show transient infra/external issues (timeouts, runner provisioning failures, registry/network outages, GitHub Actions infra errors).
If classification is ambiguous, perform one manual diagnosis attempt before choosing rerun.
Read `.codex/skills/babysit-pr/references/heuristics.md` for a concise checklist.
## Review Comment Handling
The watcher surfaces review items from:
- PR issue comments
- Inline review comments
- Review submissions (COMMENT / APPROVED / CHANGES_REQUESTED)
It intentionally surfaces Codex reviewer bot feedback (for example comments/reviews from `chatgpt-codex-connector[bot]`) in addition to human reviewer feedback. Most unrelated bot noise should still be ignored.
For safety, the watcher only auto-surfaces trusted human review authors (for example repo OWNER/MEMBER/COLLABORATOR, plus the authenticated operator) and approved review bots such as Codex.
On a fresh watcher state file, existing pending review feedback may be surfaced immediately (not only comments that arrive after monitoring starts). This is intentional so already-open review comments are not missed.
When you agree with a comment and it is actionable:
1. Patch code locally.
2. Commit with `codex: address PR review feedback (#<n>)`.
3. Push to the PR head branch.
4. Resume watching on the new SHA immediately (do not stop after reporting the push).
5. If monitoring was running in `--watch` mode, restart `--watch` immediately after the push in the same turn; do not wait for the user to ask again.
If you disagree or the comment is non-actionable/already addressed, record it as handled by continuing the watcher loop (the script de-duplicates surfaced items via state after surfacing them).
If a code review comment/thread is already marked as resolved in GitHub, treat it as non-actionable and safely ignore it unless new unresolved follow-up feedback appears.
## Git Safety Rules
- Work only on the PR head branch.
- Avoid destructive git commands.
- Do not switch branches unless necessary to recover context.
- Before editing, check for unrelated uncommitted changes. If present, stop and ask the user.
- After each successful fix, commit and `git push`, then re-run the watcher.
- If you interrupted a live `--watch` session to make the fix, restart `--watch` immediately after the push in the same turn.
- Do not run multiple concurrent `--watch` processes for the same PR/state file; keep one watcher session active and reuse it until it stops or you intentionally restart it.
- A push is not a terminal outcome; continue the monitoring loop unless a strict stop condition is met.
Commit message defaults:
- `codex: fix CI failure on PR #<n>`
- `codex: address PR review feedback (#<n>)`
## Monitoring Loop Pattern
Use this loop in a live Codex session:
1. Run `--once`.
2. Read `actions`.
3. First check whether the PR is now merged or otherwise closed; if so, report that terminal state and stop polling immediately.
4. Check CI summary, new review items, and mergeability/conflict status.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated.
6. Process actionable review comments before flaky reruns when both are present; if a review fix requires a commit, push it and skip rerunning failed checks on the old SHA.
7. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit.
8. If you pushed a commit or triggered a rerun, report the action briefly and continue polling (do not stop).
9. After a review-fix push, proactively restart continuous monitoring (`--watch`) in the same turn unless a strict stop condition has already been reached.
10. If everything is passing, mergeable, not blocked on required review approval, and there are no unaddressed review items, report success and stop.
11. If blocked on a user-help-required issue (infra outage, exhausted flaky retries, unclear reviewer request, permissions), report the blocker and stop.
12. Otherwise sleep according to the polling cadence below and repeat.
When the user explicitly asks to monitor/watch/babysit a PR, prefer `--watch` so polling continues autonomously in one command. Use repeated `--once` snapshots only for debugging, local testing, or when the user explicitly asks for a one-shot check.
Do not stop to ask the user whether to continue polling; continue autonomously until a strict stop condition is met or the user explicitly interrupts.
Do not hand control back to the user after a review-fix push just because a new SHA was created; restarting the watcher and re-entering the poll loop is part of the same babysitting task.
If a `--watch` process is still running and no strict stop condition has been reached, the babysitting task is still in progress; keep streaming/consuming watcher output instead of ending the turn.
## Polling Cadence
Use adaptive polling and continue monitoring even after CI turns green:
- While CI is not green (pending/running/queued or failing): poll every 1 minute.
- After CI turns green: start at every 1 minute, then back off exponentially when there is no change (for example 1m, 2m, 4m, 8m, 16m, 32m), capping at every 1 hour.
- Reset the green-state polling interval back to 1 minute whenever anything changes (new commit/SHA, check status changes, new review comments, mergeability changes, review decision changes).
- If CI stops being green again (new commit, rerun, or regression): return to 1-minute polling.
- If any poll shows the PR is merged or otherwise closed: stop polling immediately and report the terminal state.
## Stop Conditions (Strict)
Stop only when one of the following is true:
- PR merged or closed (stop as soon as a poll/snapshot confirms this).
- PR is ready to merge: CI succeeded, no surfaced unaddressed review comments, not blocked on required review approval, and no merge conflict risk.
- User intervention is required and Codex cannot safely proceed alone.
Keep polling when:
- `actions` contains only `idle` but checks are still pending.
- CI is still running/queued.
- Review state is quiet but CI is not terminal.
- CI is green but mergeability is unknown/pending.
- CI is green and mergeable, but the PR is still open and you are waiting for possible new review comments or merge-conflict changes per the green-state cadence.
- The PR is green but blocked on review approval (`REVIEW_REQUIRED` / similar); continue polling on the green-state cadence and surface any new review comments without asking for confirmation to keep watching.
## Output Expectations
Provide concise progress updates while monitoring and a final summary that includes:
- During long unchanged monitoring periods, avoid emitting a full update on every poll; summarize only status changes plus occasional heartbeat updates.
- Treat push confirmations, intermediate CI snapshots, and review-action updates as progress updates only; do not emit the final summary or end the babysitting session unless a strict stop condition is met.
- A user request to "monitor" is not satisfied by a couple of sample polls; remain in the loop until a strict stop condition or an explicit user interruption.
- A review-fix commit + push is not a completion event; immediately resume live monitoring (`--watch`) in the same turn and continue reporting progress updates.
- When CI first transitions to all green for the current SHA, emit a one-time celebratory progress update (do not repeat it on every green poll). Preferred style: `🚀 CI is all green! 33/33 passed. Still on watch for review approval.`
- Do not send the final summary while a watcher terminal is still running unless the watcher has emitted/confirmed a strict stop condition; otherwise continue with progress updates.
- Final PR SHA
- CI status summary
- Mergeability / conflict status
- Fixes pushed
- Flaky retry cycles used
- Remaining unresolved failures or review comments
## References
- Heuristics and decision tree: `.codex/skills/babysit-pr/references/heuristics.md`
- GitHub CLI/API details used by the watcher: `.codex/skills/babysit-pr/references/github-api-notes.md`

View File

@@ -1,4 +0,0 @@
interface:
display_name: "PR Babysitter"
short_description: "Watch PR CI, reviews, and merge conflicts"
default_prompt: "Babysit the current PR: monitor CI, reviewer comments, and merge-conflict status (prefer the watcher's --watch mode for live monitoring); fix valid issues, push updates, and rerun flaky failures up to 3 times. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Continue polling autonomously after any push/rerun until a strict terminal stop condition is reached or the user interrupts."

View File

@@ -1,72 +0,0 @@
# GitHub CLI / API Notes For `babysit-pr`
## Primary commands used
### PR metadata
- `gh pr view --json number,url,state,mergedAt,closedAt,headRefName,headRefOid,headRepository,headRepositoryOwner`
Used to resolve PR number, URL, branch, head SHA, and closed/merged state.
### PR checks summary
- `gh pr checks --json name,state,bucket,link,workflow,event,startedAt,completedAt`
Used to compute pending/failed/passed counts and whether the current CI round is terminal.
### Workflow runs for head SHA
- `gh api repos/{owner}/{repo}/actions/runs -X GET -f head_sha=<sha> -f per_page=100`
Used to discover failed workflow runs and rerunnable run IDs.
### Failed log inspection
- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh run view <run-id> --log-failed`
Used by Codex to classify branch-related vs flaky/unrelated failures.
### Retry failed jobs only
- `gh run rerun <run-id> --failed`
Reruns only failed jobs (and dependencies) for a workflow run.
## Review-related endpoints
- Issue comments on PR:
- `gh api repos/{owner}/{repo}/issues/<pr_number>/comments?per_page=100`
- Inline PR review comments:
- `gh api repos/{owner}/{repo}/pulls/<pr_number>/comments?per_page=100`
- Review submissions:
- `gh api repos/{owner}/{repo}/pulls/<pr_number>/reviews?per_page=100`
## JSON fields consumed by the watcher
### `gh pr view`
- `number`
- `url`
- `state`
- `mergedAt`
- `closedAt`
- `headRefName`
- `headRefOid`
### `gh pr checks`
- `bucket` (`pass`, `fail`, `pending`, `skipping`)
- `state`
- `name`
- `workflow`
- `link`
### Actions runs API (`workflow_runs[]`)
- `id`
- `name`
- `status`
- `conclusion`
- `html_url`
- `head_sha`

View File

@@ -1,58 +0,0 @@
# CI / Review Heuristics
## CI classification checklist
Treat as **branch-related** when logs clearly indicate a regression caused by the PR branch:
- Compile/typecheck/lint failures in files or modules touched by the branch
- Deterministic unit/integration test failures in changed areas
- Snapshot output changes caused by UI/text changes in the branch
- Static analysis violations introduced by the latest push
- Build script/config changes in the PR causing a deterministic failure
Treat as **likely flaky or unrelated** when evidence points to transient or external issues:
- DNS/network/registry timeout errors while fetching dependencies
- Runner image provisioning or startup failures
- GitHub Actions infrastructure/service outages
- Cloud/service rate limits or transient API outages
- Non-deterministic failures in unrelated integration tests with known flake patterns
If uncertain, inspect failed logs once before choosing rerun.
## Decision tree (fix vs rerun vs stop)
1. If PR is merged/closed: stop.
2. If there are failed checks:
- Diagnose first.
- If branch-related: fix locally, commit, push.
- If likely flaky/unrelated and all checks for the current SHA are terminal: rerun failed jobs.
- If checks are still pending: wait.
3. If flaky reruns for the same SHA reach the configured limit (default 3): stop and report persistent failure.
4. Independently, process any new human review comments.
## Review comment agreement criteria
Address the comment when:
- The comment is technically correct.
- The change is actionable in the current branch.
- The requested change does not conflict with the user's intent or recent guidance.
- The change can be made safely without unrelated refactors.
Do not auto-fix when:
- The comment is ambiguous and needs clarification.
- The request conflicts with explicit user instructions.
- The proposed change requires product/design decisions the user has not made.
- The codebase is in a dirty/unrelated state that makes safe editing uncertain.
## Stop-and-ask conditions
Stop and ask the user instead of continuing automatically when:
- The local worktree has unrelated uncommitted changes.
- `gh` auth/permissions fail.
- The PR branch cannot be pushed.
- CI failures persist after the flaky retry budget.
- Reviewer feedback requires a product decision or cross-team coordination.

View File

@@ -1,805 +0,0 @@
#!/usr/bin/env python3
"""Watch GitHub PR CI and review activity for Codex PR babysitting workflows."""
import argparse
import json
import os
import re
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from urllib.parse import urlparse
# Workflow-run `conclusion` values treated as a failed CI run (candidates for
# diagnosis and, when classified as flaky, a rerun).
FAILED_RUN_CONCLUSIONS = {
    "failure",
    "timed_out",
    "cancelled",
    "action_required",
    "startup_failure",
    "stale",
}
# `gh pr checks` state values that mean a check has not finished yet.
PENDING_CHECK_STATES = {
    "QUEUED",
    "IN_PROGRESS",
    "PENDING",
    "WAITING",
    "REQUESTED",
}
# Lowercase substrings identifying review bots whose feedback is surfaced
# (matched against the bot's login in is_actionable_review_bot_login).
REVIEW_BOT_LOGIN_KEYWORDS = {
    "codex",
}
# GitHub `author_association` values considered trusted human reviewers.
TRUSTED_AUTHOR_ASSOCIATIONS = {
    "OWNER",
    "MEMBER",
    "COLLABORATOR",
}
# `gh pr view` reviewDecision values that block merging.
MERGE_BLOCKING_REVIEW_DECISIONS = {
    "REVIEW_REQUIRED",
    "CHANGES_REQUESTED",
}
# mergeStateStatus values indicating a conflict or other merge blocker.
MERGE_CONFLICT_OR_BLOCKING_STATES = {
    "BLOCKED",
    "DIRTY",
    "DRAFT",
    "UNKNOWN",
}
# Cap for green-state polling backoff (one hour), per the skill's cadence.
GREEN_STATE_MAX_POLL_SECONDS = 60 * 60


class GhCommandError(RuntimeError):
    """Raised when a `gh` invocation fails or returns an unexpected payload."""

    pass
def parse_args():
    """Parse CLI arguments, validate them, and default to --once mode.

    Returns the argparse namespace; exits via parser.error() on bad input.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Normalize PR/CI/review state for Codex PR babysitting and optionally "
            "trigger flaky reruns."
        )
    )
    parser.add_argument("--pr", default="auto", help="auto, PR number, or PR URL")
    parser.add_argument("--repo", help="Optional OWNER/REPO override")
    parser.add_argument("--poll-seconds", type=int, default=30, help="Watch poll interval")
    parser.add_argument(
        "--max-flaky-retries",
        type=int,
        default=3,
        help="Max rerun cycles per head SHA before stop recommendation",
    )
    parser.add_argument("--state-file", help="Path to state JSON file")
    parser.add_argument("--once", action="store_true", help="Emit one snapshot and exit")
    parser.add_argument("--watch", action="store_true", help="Continuously emit JSONL snapshots")
    parser.add_argument(
        "--retry-failed-now",
        action="store_true",
        help="Rerun failed jobs for current failed workflow runs when policy allows",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Emit machine-readable output (default behavior for --once and --retry-failed-now)",
    )
    parsed = parser.parse_args()
    # Cross-argument validation beyond what argparse handles natively.
    if parsed.poll_seconds <= 0:
        parser.error("--poll-seconds must be > 0")
    if parsed.max_flaky_retries < 0:
        parser.error("--max-flaky-retries must be >= 0")
    if parsed.watch and parsed.retry_failed_now:
        parser.error("--watch cannot be combined with --retry-failed-now")
    # With no mode flag supplied, fall back to a single snapshot.
    if not (parsed.once or parsed.watch or parsed.retry_failed_now):
        parsed.once = True
    return parsed
def _format_gh_error(cmd, err):
stdout = (err.stdout or "").strip()
stderr = (err.stderr or "").strip()
parts = [f"GitHub CLI command failed: {' '.join(cmd)}"]
if stdout:
parts.append(f"stdout: {stdout}")
if stderr:
parts.append(f"stderr: {stderr}")
return "\n".join(parts)
def gh_text(args, repo=None):
    """Run a `gh` CLI command and return its raw stdout.

    Raises GhCommandError when `gh` is missing or exits non-zero.
    """
    command = ["gh"]
    # `gh api` does not accept `-R/--repo` on all gh versions; the watcher's
    # API calls use explicit repos/{owner}/{repo}/... endpoints, so the flag
    # is skipped there.
    if repo and not (args and args[0] == "api"):
        command += ["-R", repo]
    command += list(args)
    try:
        completed = subprocess.run(command, check=True, capture_output=True, text=True)
    except FileNotFoundError as err:
        raise GhCommandError("`gh` command not found") from err
    except subprocess.CalledProcessError as err:
        raise GhCommandError(_format_gh_error(command, err)) from err
    return completed.stdout
def gh_json(args, repo=None):
    """Run a `gh` command and decode its stdout as JSON.

    Returns None when the command produced no output.
    """
    output = gh_text(args, repo=repo).strip()
    if not output:
        return None
    try:
        return json.loads(output)
    except json.JSONDecodeError as err:
        raise GhCommandError(f"Failed to parse JSON from gh output for {' '.join(args)}") from err
def parse_pr_spec(pr_spec):
    """Classify the --pr argument as 'auto', a PR number, or a PR URL.

    Returns {"mode": ..., "value": ...}; raises ValueError otherwise.
    """
    if pr_spec == "auto":
        return {"mode": "auto", "value": None}
    if re.fullmatch(r"\d+", pr_spec) is not None:
        return {"mode": "number", "value": pr_spec}
    parts = urlparse(pr_spec)
    looks_like_pr_url = bool(parts.scheme) and bool(parts.netloc) and "/pull/" in parts.path
    if looks_like_pr_url:
        return {"mode": "url", "value": pr_spec}
    raise ValueError("--pr must be 'auto', a PR number, or a PR URL")
def pr_view_fields():
    """Comma-joined JSON field list requested from `gh pr view`."""
    fields = [
        "number",
        "url",
        "state",
        "mergedAt",
        "closedAt",
        "headRefName",
        "headRefOid",
        "headRepository",
        "headRepositoryOwner",
        "mergeable",
        "mergeStateStatus",
        "reviewDecision",
    ]
    return ",".join(fields)
def checks_fields():
    """Comma-joined JSON field list requested from `gh pr checks`."""
    return ",".join(
        ("name", "state", "bucket", "link", "workflow", "event", "startedAt", "completedAt")
    )
def resolve_pr(pr_spec, repo_override=None):
    """Resolve the target PR into a normalized metadata dict via `gh pr view`.

    Raises GhCommandError when the payload is malformed or the repo cannot
    be determined.
    """
    spec = parse_pr_spec(pr_spec)
    view_cmd = ["pr", "view"]
    if spec["value"] is not None:
        view_cmd.append(spec["value"])
    view_cmd += ["--json", pr_view_fields()]
    payload = gh_json(view_cmd, repo=repo_override)
    if not isinstance(payload, dict):
        raise GhCommandError("Unexpected PR payload from `gh pr view`")
    pr_url = str(payload.get("url") or "")
    # Repo resolution precedence: explicit override, then the PR URL, then
    # the head-repository metadata in the payload.
    repo = (
        repo_override
        or extract_repo_from_pr_url(pr_url)
        or extract_repo_from_pr_view(payload)
    )
    if not repo:
        raise GhCommandError("Unable to determine OWNER/REPO for the PR")
    pr_state = str(payload.get("state") or "")
    return {
        "number": int(payload["number"]),
        "url": pr_url,
        "repo": repo,
        "head_sha": str(payload.get("headRefOid") or ""),
        "head_branch": str(payload.get("headRefName") or ""),
        "state": pr_state,
        "merged": bool(payload.get("mergedAt")),
        # Closed when GitHub reports a close timestamp or a CLOSED state.
        "closed": bool(payload.get("closedAt")) or pr_state.upper() == "CLOSED",
        "mergeable": str(payload.get("mergeable") or ""),
        "merge_state_status": str(payload.get("mergeStateStatus") or ""),
        "review_decision": str(payload.get("reviewDecision") or ""),
    }
def extract_repo_from_pr_view(data):
    """Derive "OWNER/NAME" from `gh pr view` head-repository fields, or None."""
    owner_field = data.get("headRepositoryOwner")
    repo_field = data.get("headRepository")
    # Owner may arrive as a dict ({"login"/"name"}) or a bare string.
    owner = (
        (owner_field.get("login") or owner_field.get("name"))
        if isinstance(owner_field, dict)
        else owner_field if isinstance(owner_field, str) else None
    )
    name = None
    if isinstance(repo_field, str):
        name = repo_field
    elif isinstance(repo_field, dict):
        name = repo_field.get("name")
        # Fall back to the owner nested inside headRepository.
        if not owner:
            nested_owner = repo_field.get("owner")
            if isinstance(nested_owner, dict):
                owner = nested_owner.get("login") or nested_owner.get("name")
    if owner and name:
        return f"{owner}/{name}"
    return None
def extract_repo_from_pr_url(pr_url):
    """Derive "OWNER/NAME" from a PR URL path, or None if it doesn't match."""
    segments = [segment for segment in urlparse(pr_url).path.split("/") if segment]
    # Expect at least OWNER/REPO/pull/NUMBER in the path.
    if len(segments) >= 4 and segments[2] == "pull":
        return f"{segments[0]}/{segments[1]}"
    return None
def load_state(path):
    """Load watcher state JSON from `path`.

    Returns (state_dict, is_fresh): is_fresh is True when no state file
    existed and a default skeleton was created in memory.
    """
    if not path.exists():
        # Fresh default skeleton; persisted later by save_state.
        return (
            {
                "pr": {},
                "started_at": None,
                "last_seen_head_sha": None,
                "retries_by_sha": {},
                "seen_issue_comment_ids": [],
                "seen_review_comment_ids": [],
                "seen_review_ids": [],
                "last_snapshot_at": None,
            },
            True,
        )
    try:
        loaded = json.loads(path.read_text())
    except json.JSONDecodeError as err:
        raise RuntimeError(f"State file is not valid JSON: {path}") from err
    if not isinstance(loaded, dict):
        raise RuntimeError(f"State file must contain an object: {path}")
    return loaded, False
def save_state(path, state):
    """Atomically write `state` as pretty-printed, key-sorted JSON to `path`."""
    path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(state, indent=2, sort_keys=True) + "\n"
    # Write to a sibling temp file, then rename, so readers never observe a
    # partially written state file.
    fd, tmp_name = tempfile.mkstemp(prefix=f"{path.name}.", suffix=".tmp", dir=path.parent)
    tmp_path = Path(tmp_name)
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as handle:
            handle.write(serialized)
        os.replace(tmp_path, path)
    except Exception:
        # Best-effort cleanup of the temp file; re-raise the original error.
        try:
            tmp_path.unlink(missing_ok=True)
        except OSError:
            pass
        raise
def default_state_file_for(pr):
    """Default per-PR state-file path under /tmp, keyed by repo slug + number."""
    slug = pr["repo"].replace("/", "-")
    return Path(f"/tmp/codex-babysit-pr-{slug}-pr{pr['number']}.json")
def get_pr_checks(pr_spec, repo):
    """Fetch `gh pr checks` entries for the PR; [] when nothing is reported."""
    spec = parse_pr_spec(pr_spec)
    cmd = ["pr", "checks"]
    if spec["value"] is not None:
        cmd.append(spec["value"])
    cmd += ["--json", checks_fields()]
    payload = gh_json(cmd, repo=repo)
    if payload is None:
        return []
    if not isinstance(payload, list):
        raise GhCommandError("Unexpected payload from `gh pr checks`")
    return payload
def is_pending_check(check):
    """True when a `gh pr checks` entry is still queued, running, or waiting."""
    if str(check.get("bucket") or "").lower() == "pending":
        return True
    return str(check.get("state") or "").upper() in PENDING_CHECK_STATES
def summarize_checks(checks):
    """Aggregate `gh pr checks` entries into pending/failed/passed counts."""
    pending = sum(1 for check in checks if is_pending_check(check))
    buckets = [str(check.get("bucket") or "").lower() for check in checks]
    return {
        "pending_count": pending,
        "failed_count": buckets.count("fail"),
        "passed_count": buckets.count("pass"),
        # The current CI round is terminal once nothing remains pending.
        "all_terminal": pending == 0,
    }
def get_workflow_runs_for_sha(repo, head_sha):
    """List GitHub Actions workflow runs for `head_sha` via the REST API."""
    endpoint = f"repos/{repo}/actions/runs"
    payload = gh_json(
        ["api", endpoint, "-X", "GET", "-f", f"head_sha={head_sha}", "-f", "per_page=100"],
        repo=repo,
    )
    if not isinstance(payload, dict):
        raise GhCommandError("Unexpected payload from actions runs API")
    runs = payload.get("workflow_runs") or []
    if not isinstance(runs, list):
        raise GhCommandError("Expected `workflow_runs` to be a list")
    return runs
def failed_runs_from_workflow_runs(runs, head_sha):
    """Filter workflow runs down to failed runs for `head_sha`.

    Returns normalized dicts sorted by (workflow_name, run_id) for stable output.
    """
    failed = []
    for run in runs:
        if not isinstance(run, dict):
            continue
        if str(run.get("head_sha") or "") != head_sha:
            continue
        conclusion = str(run.get("conclusion") or "")
        if conclusion not in FAILED_RUN_CONCLUSIONS:
            continue
        failed.append(
            {
                "run_id": run.get("id"),
                "workflow_name": run.get("name") or run.get("display_title") or "",
                "status": str(run.get("status") or ""),
                "conclusion": conclusion,
                "html_url": str(run.get("html_url") or ""),
            }
        )

    def sort_key(entry):
        return (str(entry.get("workflow_name") or ""), str(entry.get("run_id") or ""))

    return sorted(failed, key=sort_key)
def get_authenticated_login():
    """Return the login of the currently authenticated `gh` user."""
    payload = gh_json(["api", "user"])
    if not (isinstance(payload, dict) and payload.get("login")):
        raise GhCommandError("Unable to determine authenticated GitHub login from `gh api user`")
    return str(payload["login"])
def comment_endpoints(repo, pr_number):
    """REST endpoints for the three review-feedback sources on a PR."""
    base = f"repos/{repo}"
    return {
        "issue_comment": f"{base}/issues/{pr_number}/comments",
        "review_comment": f"{base}/pulls/{pr_number}/comments",
        "review": f"{base}/pulls/{pr_number}/reviews",
    }
def gh_api_list_paginated(endpoint, repo=None, per_page=100):
    """Fetch every page of a list-returning GitHub REST endpoint via `gh api`."""
    collected = []
    page = 1
    while True:
        joiner = "&" if "?" in endpoint else "?"
        paged_endpoint = f"{endpoint}{joiner}per_page={per_page}&page={page}"
        batch = gh_json(["api", paged_endpoint], repo=repo)
        if batch is None:
            break
        if not isinstance(batch, list):
            raise GhCommandError(f"Unexpected paginated payload from gh api {endpoint}")
        collected.extend(batch)
        # A short page means the final page has been reached.
        if len(batch) < per_page:
            break
        page += 1
    return collected
def normalize_issue_comments(items):
    """Normalize PR issue comments into the watcher's common review-item shape."""
    return [
        {
            "kind": "issue_comment",
            "id": str(item.get("id") or ""),
            "author": extract_login(item.get("user")),
            "author_association": str(item.get("author_association") or ""),
            "created_at": str(item.get("created_at") or ""),
            "body": str(item.get("body") or ""),
            # Issue comments have no file/line anchor.
            "path": None,
            "line": None,
            "url": str(item.get("html_url") or ""),
        }
        for item in items
        if isinstance(item, dict)
    ]
def normalize_review_comments(items):
    """Normalize inline review comments, preserving their file path and line."""
    normalized = []
    for item in items:
        if not isinstance(item, dict):
            continue
        # Prefer the current line; fall back to the line at comment creation.
        line = item.get("line")
        if line is None:
            line = item.get("original_line")
        normalized.append(
            {
                "kind": "review_comment",
                "id": str(item.get("id") or ""),
                "author": extract_login(item.get("user")),
                "author_association": str(item.get("author_association") or ""),
                "created_at": str(item.get("created_at") or ""),
                "body": str(item.get("body") or ""),
                "path": item.get("path"),
                "line": line,
                "url": str(item.get("html_url") or ""),
            }
        )
    return normalized
def normalize_reviews(items):
    """Map raw PR review payloads to the uniform review-item dict shape."""
    return [
        {
            "kind": "review",
            "id": str(raw.get("id") or ""),
            "author": extract_login(raw.get("user")),
            "author_association": str(raw.get("author_association") or ""),
            # Reviews carry `submitted_at`; fall back to `created_at` when absent.
            "created_at": str(raw.get("submitted_at") or raw.get("created_at") or ""),
            "body": str(raw.get("body") or ""),
            "path": None,
            "line": None,
            "url": str(raw.get("html_url") or ""),
        }
        for raw in items
        if isinstance(raw, dict)
    ]
def extract_login(user_obj):
    """Pull the `login` field out of a GitHub user object; "" on anything else."""
    if not isinstance(user_obj, dict):
        return ""
    return str(user_obj.get("login") or "")
def is_bot_login(login):
    """True when the login names a bot account (GitHub suffixes bots with "[bot]")."""
    if not login:
        return False
    return login.endswith("[bot]")
def is_actionable_review_bot_login(login):
    """True for bot logins whose name contains a known review-bot keyword."""
    if not is_bot_login(login):
        return False
    lowered = login.lower()
    for keyword in REVIEW_BOT_LOGIN_KEYWORDS:
        if keyword in lowered:
            return True
    return False
def is_trusted_human_review_author(item, authenticated_login):
    """Accept review items authored by ourselves or by trusted-association humans."""
    author = str(item.get("author") or "")
    if not author:
        return False
    # Comments we authored ourselves are always trusted.
    if authenticated_login and author == authenticated_login:
        return True
    return str(item.get("author_association") or "").upper() in TRUSTED_AUTHOR_ASSOCIATIONS
def fetch_new_review_items(pr, state, fresh_state, authenticated_login=None):
    """Fetch comments and reviews for `pr` and return the not-yet-seen items.

    Pulls all three surfaces (issue comments, review comments, reviews),
    normalizes them, filters to actionable review bots and trusted human
    authors, and de-duplicates against the per-kind seen-id sets in `state`.
    Mutates `state` in place: every returned item's id is folded into the
    matching seen set. The result is sorted by (created_at, kind, id).

    NOTE(review): `fresh_state` is accepted but never referenced in this body;
    a fresh state just has empty seen-sets, so existing activity surfaces anyway.
    """
    repo = pr["repo"]
    pr_number = pr["number"]
    endpoints = comment_endpoints(repo, pr_number)
    issue_payload = gh_api_list_paginated(endpoints["issue_comment"], repo=repo)
    review_comment_payload = gh_api_list_paginated(endpoints["review_comment"], repo=repo)
    review_payload = gh_api_list_paginated(endpoints["review"], repo=repo)
    issue_items = normalize_issue_comments(issue_payload)
    review_comment_items = normalize_review_comments(review_comment_payload)
    review_items = normalize_reviews(review_payload)
    all_items = issue_items + review_comment_items + review_items
    # Seen ids are compared as strings; coerce in case older state stored ints.
    seen_issue = {str(x) for x in state.get("seen_issue_comment_ids") or []}
    seen_review_comment = {str(x) for x in state.get("seen_review_comment_ids") or []}
    seen_review = {str(x) for x in state.get("seen_review_ids") or []}
    # On a brand-new state file, surface existing review activity instead of
    # silently treating it as seen. This avoids missing already-pending review
    # feedback when monitoring starts after comments were posted.
    new_items = []
    for item in all_items:
        item_id = item.get("id")
        if not item_id:
            continue
        author = item.get("author") or ""
        if not author:
            continue
        # Bots must match the review-bot allowlist; humans must be trusted.
        if is_bot_login(author):
            if not is_actionable_review_bot_login(author):
                continue
        elif not is_trusted_human_review_author(item, authenticated_login):
            continue
        kind = item["kind"]
        if kind == "issue_comment" and item_id in seen_issue:
            continue
        if kind == "review_comment" and item_id in seen_review_comment:
            continue
        if kind == "review" and item_id in seen_review:
            continue
        new_items.append(item)
        # Mark the item seen in the set matching its kind.
        if kind == "issue_comment":
            seen_issue.add(item_id)
        elif kind == "review_comment":
            seen_review_comment.add(item_id)
        elif kind == "review":
            seen_review.add(item_id)
    new_items.sort(key=lambda item: (item.get("created_at") or "", item.get("kind") or "", item.get("id") or ""))
    # Persist the updated seen sets back into state as sorted lists (deterministic output).
    state["seen_issue_comment_ids"] = sorted(seen_issue)
    state["seen_review_comment_ids"] = sorted(seen_review_comment)
    state["seen_review_ids"] = sorted(seen_review)
    return new_items
def current_retry_count(state, head_sha):
    """Return how many flaky-check retries were spent on `head_sha`; 0 on bad data."""
    value = (state.get("retries_by_sha") or {}).get(head_sha, 0)
    try:
        return int(value)
    except (TypeError, ValueError):
        return 0
def set_retry_count(state, head_sha, count):
    """Record `count` retries for `head_sha` in `state`, repairing a bad retry map."""
    existing = state.get("retries_by_sha")
    retries = existing if isinstance(existing, dict) else {}
    retries[head_sha] = int(count)
    state["retries_by_sha"] = retries
def unique_actions(actions):
    """Drop duplicate actions while preserving first-seen order."""
    # dict preserves insertion order, so fromkeys gives an ordered de-dup.
    return list(dict.fromkeys(actions))
def is_pr_ready_to_merge(pr, checks_summary, new_review_items):
    """True when the PR is open, all checks passed, and nothing blocks merging."""
    if pr["closed"] or pr["merged"]:
        return False
    blocked = (
        not checks_summary["all_terminal"]
        or checks_summary["failed_count"] > 0
        or checks_summary["pending_count"] > 0
        or bool(new_review_items)
        or str(pr.get("mergeable") or "") != "MERGEABLE"
        or str(pr.get("merge_state_status") or "") in MERGE_CONFLICT_OR_BLOCKING_STATES
        or str(pr.get("review_decision") or "") in MERGE_BLOCKING_REVIEW_DECISIONS
    )
    return not blocked
def recommend_actions(pr, checks_summary, failed_runs, new_review_items, retries_used, max_retries):
    """Derive the ordered, de-duplicated list of next actions for the watcher."""
    actions = []
    # A closed/merged PR stops the loop, after surfacing any pending reviews.
    if pr["closed"] or pr["merged"]:
        if new_review_items:
            actions.append("process_review_comment")
        actions.append("stop_pr_closed")
        return unique_actions(actions)
    if is_pr_ready_to_merge(pr, checks_summary, new_review_items):
        return unique_actions(["stop_ready_to_merge"])
    if new_review_items:
        actions.append("process_review_comment")
    if checks_summary["failed_count"] > 0:
        terminal = checks_summary["all_terminal"]
        budget_left = retries_used < max_retries
        if terminal and not budget_left:
            actions.append("stop_exhausted_retries")
        else:
            actions.append("diagnose_ci_failure")
        if terminal and failed_runs and budget_left:
            actions.append("retry_failed_checks")
    return unique_actions(actions or ["idle"])
def collect_snapshot(args):
    """Gather one full status snapshot for the target PR and persist watcher state.

    Resolves the PR, loads (or initializes) the on-disk state file, queries
    checks, workflow runs, and new review activity, computes the recommended
    actions, then saves the updated state back to disk.

    Returns a `(snapshot, state_path)` tuple.
    """
    pr = resolve_pr(args.pr, repo_override=args.repo)
    state_path = Path(args.state_file) if args.state_file else default_state_file_for(pr)
    state, fresh_state = load_state(state_path)
    if not state.get("started_at"):
        state["started_at"] = int(time.time())
    # `gh pr checks -R <repo>` requires an explicit PR/branch/url argument.
    # After resolving `--pr auto`, reuse the concrete PR number.
    checks = get_pr_checks(str(pr["number"]), repo=pr["repo"])
    checks_summary = summarize_checks(checks)
    workflow_runs = get_workflow_runs_for_sha(pr["repo"], pr["head_sha"])
    failed_runs = failed_runs_from_workflow_runs(workflow_runs, pr["head_sha"])
    authenticated_login = get_authenticated_login()
    # Mutates `state`: newly-seen comment/review ids are folded into its seen sets.
    new_review_items = fetch_new_review_items(
        pr,
        state,
        fresh_state=fresh_state,
        authenticated_login=authenticated_login,
    )
    retries_used = current_retry_count(state, pr["head_sha"])
    actions = recommend_actions(
        pr,
        checks_summary,
        failed_runs,
        new_review_items,
        retries_used,
        args.max_flaky_retries,
    )
    # Persist bookkeeping so the next invocation resumes from this point.
    state["pr"] = {"repo": pr["repo"], "number": pr["number"]}
    state["last_seen_head_sha"] = pr["head_sha"]
    state["last_snapshot_at"] = int(time.time())
    save_state(state_path, state)
    snapshot = {
        "pr": pr,
        "checks": checks_summary,
        "failed_runs": failed_runs,
        "new_review_items": new_review_items,
        "actions": actions,
        "retry_state": {
            "current_sha_retries_used": retries_used,
            "max_flaky_retries": args.max_flaky_retries,
        },
    }
    return snapshot, state_path
def retry_failed_now(args):
    """Re-run failed workflow runs for the PR's head SHA, guarded by the retry budget.

    Takes a fresh snapshot first, then bails out with a machine-readable
    `reason` when the PR is closed, nothing failed, checks are still running,
    or the flaky-retry budget is spent. Otherwise triggers
    `gh run rerun --failed` for each failed run and bumps the retry counter.
    """
    snapshot, state_path = collect_snapshot(args)
    pr = snapshot["pr"]
    checks_summary = snapshot["checks"]
    failed_runs = snapshot["failed_runs"]
    retries_used = snapshot["retry_state"]["current_sha_retries_used"]
    max_retries = snapshot["retry_state"]["max_flaky_retries"]
    result = {
        "snapshot": snapshot,
        "state_file": str(state_path),
        "rerun_attempted": False,
        "rerun_count": 0,
        "rerun_run_ids": [],
        "reason": None,
    }
    # Guard ladder: each early return records why nothing was re-run.
    if pr["closed"] or pr["merged"]:
        result["reason"] = "pr_closed"
        return result
    if checks_summary["failed_count"] <= 0:
        result["reason"] = "no_failed_pr_checks"
        return result
    if not failed_runs:
        result["reason"] = "no_failed_runs"
        return result
    if not checks_summary["all_terminal"]:
        result["reason"] = "checks_still_pending"
        return result
    if retries_used >= max_retries:
        result["reason"] = "retry_budget_exhausted"
        return result
    for run in failed_runs:
        run_id = run.get("run_id")
        if run_id in (None, ""):
            continue
        gh_text(["run", "rerun", str(run_id), "--failed"], repo=pr["repo"])
        result["rerun_run_ids"].append(run_id)
    if result["rerun_run_ids"]:
        # Reload state from disk (collect_snapshot already saved it) before
        # bumping the retry counter, so we update the latest persisted copy.
        state, _ = load_state(state_path)
        new_count = current_retry_count(state, pr["head_sha"]) + 1
        set_retry_count(state, pr["head_sha"], new_count)
        state["last_snapshot_at"] = int(time.time())
        save_state(state_path, state)
        result["rerun_attempted"] = True
        result["rerun_count"] = len(result["rerun_run_ids"])
        result["reason"] = "rerun_triggered"
    else:
        result["reason"] = "failed_runs_missing_ids"
    return result
def print_json(obj):
    """Write `obj` as one deterministic (sorted-keys) JSON line to stdout and flush."""
    serialized = json.dumps(obj, sort_keys=True)
    sys.stdout.write(serialized + "\n")
    sys.stdout.flush()
def print_event(event, payload):
    """Emit a watcher event as a single JSON line with a stable envelope."""
    envelope = {"event": event, "payload": payload}
    print_json(envelope)
def is_ci_green(snapshot):
    """True when every check has finished and none failed or is still pending."""
    checks = snapshot.get("checks") or {}
    if not checks.get("all_terminal"):
        return False
    failed = int(checks.get("failed_count") or 0)
    pending = int(checks.get("pending_count") or 0)
    return failed == 0 and pending == 0
def snapshot_change_key(snapshot):
    """Build a hashable fingerprint of the watcher-relevant parts of a snapshot."""
    pr = snapshot.get("pr") or {}
    checks = snapshot.get("checks") or {}

    def _text(mapping, key):
        return str(mapping.get(key) or "")

    def _count(key):
        return int(checks.get(key) or 0)

    review_ids = tuple(
        (str(item.get("kind") or ""), str(item.get("id") or ""))
        for item in (snapshot.get("new_review_items") or [])
        if isinstance(item, dict)
    )
    return (
        _text(pr, "head_sha"),
        _text(pr, "state"),
        _text(pr, "mergeable"),
        _text(pr, "merge_state_status"),
        _text(pr, "review_decision"),
        _count("passed_count"),
        _count("failed_count"),
        _count("pending_count"),
        review_ids,
        tuple(snapshot.get("actions") or []),
    )
def run_watch(args):
    """Poll the PR in a loop, emitting JSON events until a stop action appears.

    Polling backs off exponentially (capped at GREEN_STATE_MAX_POLL_SECONDS)
    only while CI is green AND nothing changed between snapshots; any change
    or non-green CI resets the interval to `args.poll_seconds`.
    Returns 0 when a stop action is reached.
    """
    poll_seconds = args.poll_seconds
    last_change_key = None
    while True:
        snapshot, state_path = collect_snapshot(args)
        print_event(
            "snapshot",
            {
                "snapshot": snapshot,
                "state_file": str(state_path),
                "next_poll_seconds": poll_seconds,
            },
        )
        actions = set(snapshot.get("actions") or [])
        # Terminal conditions: PR closed/merged, retry budget spent, or ready to merge.
        if (
            "stop_pr_closed" in actions
            or "stop_exhausted_retries" in actions
            or "stop_ready_to_merge" in actions
        ):
            print_event("stop", {"actions": snapshot.get("actions"), "pr": snapshot.get("pr")})
            return 0
        current_change_key = snapshot_change_key(snapshot)
        changed = current_change_key != last_change_key
        green = is_ci_green(snapshot)
        if not green:
            poll_seconds = args.poll_seconds
        elif changed or last_change_key is None:
            poll_seconds = args.poll_seconds
        else:
            # Quiet and green: double the interval, capped at the maximum.
            poll_seconds = min(poll_seconds * 2, GREEN_STATE_MAX_POLL_SECONDS)
        last_change_key = current_change_key
        time.sleep(poll_seconds)
def main():
    """CLI entry point: dispatch to retry mode, the watch loop, or a one-shot snapshot."""
    args = parse_args()
    try:
        if args.retry_failed_now:
            print_json(retry_failed_now(args))
            return 0
        if args.watch:
            return run_watch(args)
        # Default mode: print a single snapshot and exit.
        snapshot, state_path = collect_snapshot(args)
        snapshot["state_file"] = str(state_path)
        print_json(snapshot)
        return 0
    except KeyboardInterrupt:
        sys.stderr.write("gh_pr_watch.py interrupted\n")
        return 130
    except (GhCommandError, RuntimeError, ValueError) as err:
        sys.stderr.write(f"gh_pr_watch.py error: {err}\n")
        return 1
if __name__ == "__main__":
    # sys.exit raises SystemExit with main()'s return code, same as the original.
    sys.exit(main())

View File

@@ -1,16 +0,0 @@
---
name: remote-tests
description: How to run tests using remote executor.
---
Some codex integration tests support running against a remote executor.
This means that when CODEX_TEST_REMOTE_ENV environment variable is set they will attempt to start an executor process in a docker container CODEX_TEST_REMOTE_ENV points to and use it in tests.
Docker container is built and initialized via ./scripts/test-remote-env.sh
Currently running remote tests is only supported on Linux, so you need to use a devbox to run them
You can list devboxes via `applied_devbox ls`, pick the one with `codex` in the name.
Connect to devbox via `ssh <devbox_name>`.
Reuse the same checkout of codex in `~/code/codex`. Reset files if needed. Multiple checkouts take longer to build and take up more space.
Check whether the SHA and modified files are in sync between remote and local.

View File

@@ -1,14 +0,0 @@
---
name: test-tui
description: Guide for testing Codex TUI interactively
---
You can start and use Codex TUI to verify changes.
Important notes:
Start interactively.
Always set RUST_LOG="trace" when starting the process.
Pass `-c log_dir=<some_temp_dir>` argument to have logs written to a specific directory to help with debugging.
When sending a test message programmatically, send text first, then send Enter in a separate write (do not send text + Enter in one burst).
Use `just codex` target to run - `just codex -c ...`

View File

@@ -11,7 +11,7 @@ RUN apt-get update && \
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential curl git ca-certificates \
pkg-config libcap-dev clang musl-tools libssl-dev just && \
pkg-config clang musl-tools libssl-dev just && \
rm -rf /var/lib/apt/lists/*
# Ubuntu 24.04 ships with user 'ubuntu' already created with UID 1000.

View File

@@ -1,54 +0,0 @@
name: 🖥️ Codex App Bug
description: Report an issue with the Codex App
labels:
- app
body:
- type: markdown
attributes:
value: |
Before submitting a new issue, please search for existing issues to see if your issue has already been reported.
If it has, please add a 👍 reaction (no need to leave a comment) to the existing issue instead of creating a new one.
- type: input
id: version
attributes:
label: What version of the Codex App are you using (From “About Codex” dialog)?
validations:
required: true
- type: input
id: plan
attributes:
label: What subscription do you have?
validations:
required: true
- type: input
id: platform
attributes:
label: What platform is your computer?
description: |
For macOS and Linux: copy the output of `uname -mprs`
For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
- type: textarea
id: actual
attributes:
label: What issue are you seeing?
description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
validations:
required: true
- type: textarea
id: steps
attributes:
label: What steps can reproduce the bug?
description: Explain the bug and provide a code snippet that can reproduce it. Please include session id, token limit usage, context window usage if applicable.
validations:
required: true
- type: textarea
id: expected
attributes:
label: What is the expected behavior?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: notes
attributes:
label: Additional information
description: Is there anything else you think we should know?

56
.github/ISSUE_TEMPLATE/2-bug-report.yml vendored Normal file
View File

@@ -0,0 +1,56 @@
name: 🪲 Bug Report
description: Report an issue that should be fixed
labels:
- bug
- needs triage
body:
- type: markdown
attributes:
value: |
Thank you for submitting a bug report! It helps make Codex better for everyone.
If you need help or support using Codex, and are not reporting a bug, please post on [codex/discussions](https://github.com/openai/codex/discussions), where you can ask questions or engage with others on ideas for how to improve codex.
Make sure you are running the [latest](https://npmjs.com/package/@openai/codex) version of Codex CLI. The bug you are experiencing may already have been fixed.
Please try to include as much information as possible.
- type: input
id: version
attributes:
label: What version of Codex is running?
description: Copy the output of `codex --version`
- type: input
id: model
attributes:
label: Which model were you using?
description: Like `gpt-4.1`, `o4-mini`, `o3`, etc.
- type: input
id: platform
attributes:
label: What platform is your computer?
description: |
For MacOS and Linux: copy the output of `uname -mprs`
For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
- type: textarea
id: steps
attributes:
label: What steps can reproduce the bug?
description: Explain the bug and provide a code snippet that can reproduce it.
validations:
required: true
- type: textarea
id: expected
attributes:
label: What is the expected behavior?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: actual
attributes:
label: What do you see instead?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: notes
attributes:
label: Additional information
description: Is there anything else you think we should know?

View File

@@ -1,70 +0,0 @@
name: 💻 CLI Bug
description: Report an issue in the Codex CLI
labels:
- bug
- needs triage
body:
- type: markdown
attributes:
value: |
Before submitting a new issue, please search for existing issues to see if your issue has already been reported.
If it has, please add a 👍 reaction (no need to leave a comment) to the existing issue instead of creating a new one.
Make sure you are running the [latest](https://npmjs.com/package/@openai/codex) version of Codex CLI. The bug you are experiencing may already have been fixed.
- type: input
id: version
attributes:
label: What version of Codex CLI is running?
description: use `codex --version`
validations:
required: true
- type: input
id: plan
attributes:
label: What subscription do you have?
validations:
required: true
- type: input
id: model
attributes:
label: Which model were you using?
description: Like `gpt-5.2`, `gpt-5.2-codex`, etc.
- type: input
id: platform
attributes:
label: What platform is your computer?
description: |
For macOS and Linux: copy the output of `uname -mprs`
For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
- type: input
id: terminal
attributes:
label: What terminal emulator and version are you using (if applicable)?
description: Also note any multiplexer in use (screen / tmux / zellij)
description: |
E.g, VSCode, Terminal.app, iTerm2, Ghostty, Windows Terminal (WSL / PowerShell)
- type: textarea
id: actual
attributes:
label: What issue are you seeing?
description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
validations:
required: true
- type: textarea
id: steps
attributes:
label: What steps can reproduce the bug?
description: Explain the bug and provide a code snippet that can reproduce it. Please include thread id if applicable.
validations:
required: true
- type: textarea
id: expected
attributes:
label: What is the expected behavior?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: notes
attributes:
label: Additional information
description: Is there anything else you think we should know?

View File

@@ -1,37 +0,0 @@
name: 🪲 Other Bug
description: Report an issue in Codex Web, integrations, or other Codex components
labels:
- bug
body:
- type: markdown
attributes:
value: |
Before submitting a new issue, please search for existing issues to see if your issue has already been reported.
If it has, please add a 👍 reaction (no need to leave a comment) to the existing issue instead of creating a new one.
If you need help or support using Codex and are not reporting a bug, please post on [codex/discussions](https://github.com/openai/codex/discussions), where you can ask questions or engage with others on ideas for how to improve codex.
- type: textarea
id: actual
attributes:
label: What issue are you seeing?
description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
validations:
required: true
- type: textarea
id: steps
attributes:
label: What steps can reproduce the bug?
description: Explain the bug and provide a code snippet that can reproduce it.
validations:
required: true
- type: textarea
id: expected
attributes:
label: What is the expected behavior?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: notes
attributes:
label: Additional information
description: Is there anything else you think we should know?

View File

@@ -2,6 +2,7 @@ name: 🎁 Feature Request
description: Propose a new feature for Codex
labels:
- enhancement
- needs triage
body:
- type: markdown
attributes:
@@ -12,19 +13,17 @@ body:
1. Search existing issues for similar features. If you find one, 👍 it rather than opening a new one.
2. The Codex team will try to balance the varying needs of the community when prioritizing or rejecting new features. Not all features will be accepted. See [Contributing](https://github.com/openai/codex#contributing) for more details.
- type: input
id: variant
attributes:
label: What variant of Codex are you using?
description: (e.g., App, IDE Extension, CLI, Web)
validations:
required: true
- type: textarea
id: feature
attributes:
label: What feature would you like to see?
validations:
required: true
- type: textarea
id: author
attributes:
label: Are you interested in implementing this feature?
description: Please wait for acknowledgement before implementing or opening a PR.
- type: textarea
id: notes
attributes:

View File

@@ -1,7 +1,8 @@
name: 🧑‍💻 IDE Extension Bug
description: Report an issue with the IDE extension
name: 🧑‍💻 VS Code Extension
description: Report an issue with the VS Code extension
labels:
- extension
- needs triage
body:
- type: markdown
attributes:
@@ -12,36 +13,19 @@ body:
- type: input
id: version
attributes:
label: What version of the IDE extension are you using?
validations:
required: true
- type: input
id: plan
attributes:
label: What subscription do you have?
validations:
required: true
label: What version of the VS Code extension are you using?
- type: input
id: ide
attributes:
label: Which IDE are you using?
description: Like `VS Code`, `Cursor`, `Windsurf`, etc.
validations:
required: true
- type: input
id: platform
attributes:
label: What platform is your computer?
description: |
For macOS and Linux: copy the output of `uname -mprs`
For MacOS and Linux: copy the output of `uname -mprs`
For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
- type: textarea
id: actual
attributes:
label: What issue are you seeing?
description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
validations:
required: true
- type: textarea
id: steps
attributes:
@@ -54,6 +38,11 @@ body:
attributes:
label: What is the expected behavior?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: actual
attributes:
label: What do you see instead?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: notes
attributes:

View File

@@ -1,45 +0,0 @@
name: linux-code-sign
description: Sign Linux artifacts with cosign.
inputs:
target:
description: Target triple for the artifacts to sign.
required: true
artifacts-dir:
description: Absolute path to the directory containing built binaries to sign.
required: true
runs:
using: composite
steps:
- name: Install cosign
uses: sigstore/cosign-installer@v3.7.0
- name: Cosign Linux artifacts
shell: bash
env:
ARTIFACTS_DIR: ${{ inputs.artifacts-dir }}
COSIGN_EXPERIMENTAL: "1"
COSIGN_YES: "true"
COSIGN_OIDC_CLIENT_ID: "sigstore"
COSIGN_OIDC_ISSUER: "https://oauth2.sigstore.dev/auth"
run: |
set -euo pipefail
dest="$ARTIFACTS_DIR"
if [[ ! -d "$dest" ]]; then
echo "Destination $dest does not exist"
exit 1
fi
for binary in codex codex-responses-api-proxy; do
artifact="${dest}/${binary}"
if [[ ! -f "$artifact" ]]; then
echo "Binary $artifact not found"
exit 1
fi
cosign sign-blob \
--yes \
--bundle "${artifact}.sigstore" \
"$artifact"
done

View File

@@ -1,253 +0,0 @@
name: macos-code-sign
description: Configure, sign, notarize, and clean up macOS code signing artifacts.
inputs:
target:
description: Rust compilation target triple (e.g. aarch64-apple-darwin).
required: true
sign-binaries:
description: Whether to sign and notarize the macOS binaries.
required: false
default: "true"
sign-dmg:
description: Whether to sign and notarize the macOS dmg.
required: false
default: "true"
apple-certificate:
description: Base64-encoded Apple signing certificate (P12).
required: true
apple-certificate-password:
description: Password for the signing certificate.
required: true
apple-notarization-key-p8:
description: Base64-encoded Apple notarization key (P8).
required: true
apple-notarization-key-id:
description: Apple notarization key ID.
required: true
apple-notarization-issuer-id:
description: Apple notarization issuer ID.
required: true
runs:
using: composite
steps:
- name: Configure Apple code signing
shell: bash
env:
KEYCHAIN_PASSWORD: actions
APPLE_CERTIFICATE: ${{ inputs.apple-certificate }}
APPLE_CERTIFICATE_PASSWORD: ${{ inputs.apple-certificate-password }}
run: |
set -euo pipefail
if [[ -z "${APPLE_CERTIFICATE:-}" ]]; then
echo "APPLE_CERTIFICATE is required for macOS signing"
exit 1
fi
if [[ -z "${APPLE_CERTIFICATE_PASSWORD:-}" ]]; then
echo "APPLE_CERTIFICATE_PASSWORD is required for macOS signing"
exit 1
fi
cert_path="${RUNNER_TEMP}/apple_signing_certificate.p12"
echo "$APPLE_CERTIFICATE" | base64 -d > "$cert_path"
keychain_path="${RUNNER_TEMP}/codex-signing.keychain-db"
security create-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
security set-keychain-settings -lut 21600 "$keychain_path"
security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
keychain_args=()
cleanup_keychain() {
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "${keychain_args[@]}" || true
security default-keychain -s "${keychain_args[0]}" || true
else
security list-keychains -s || true
fi
if [[ -f "$keychain_path" ]]; then
security delete-keychain "$keychain_path" || true
fi
}
while IFS= read -r keychain; do
[[ -n "$keychain" ]] && keychain_args+=("$keychain")
done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "$keychain_path" "${keychain_args[@]}"
else
security list-keychains -s "$keychain_path"
fi
security default-keychain -s "$keychain_path"
security import "$cert_path" -k "$keychain_path" -P "$APPLE_CERTIFICATE_PASSWORD" -T /usr/bin/codesign -T /usr/bin/security
security set-key-partition-list -S apple-tool:,apple: -s -k "$KEYCHAIN_PASSWORD" "$keychain_path" > /dev/null
codesign_hashes=()
while IFS= read -r hash; do
[[ -n "$hash" ]] && codesign_hashes+=("$hash")
done < <(security find-identity -v -p codesigning "$keychain_path" \
| sed -n 's/.*\([0-9A-F]\{40\}\).*/\1/p' \
| sort -u)
if ((${#codesign_hashes[@]} == 0)); then
echo "No signing identities found in $keychain_path"
cleanup_keychain
rm -f "$cert_path"
exit 1
fi
if ((${#codesign_hashes[@]} > 1)); then
echo "Multiple signing identities found in $keychain_path:"
printf ' %s\n' "${codesign_hashes[@]}"
cleanup_keychain
rm -f "$cert_path"
exit 1
fi
APPLE_CODESIGN_IDENTITY="${codesign_hashes[0]}"
rm -f "$cert_path"
echo "APPLE_CODESIGN_IDENTITY=$APPLE_CODESIGN_IDENTITY" >> "$GITHUB_ENV"
echo "APPLE_CODESIGN_KEYCHAIN=$keychain_path" >> "$GITHUB_ENV"
echo "::add-mask::$APPLE_CODESIGN_IDENTITY"
- name: Sign macOS binaries
if: ${{ inputs.sign-binaries == 'true' }}
shell: bash
env:
TARGET: ${{ inputs.target }}
run: |
set -euo pipefail
if [[ -z "${APPLE_CODESIGN_IDENTITY:-}" ]]; then
echo "APPLE_CODESIGN_IDENTITY is required for macOS signing"
exit 1
fi
keychain_args=()
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" && -f "${APPLE_CODESIGN_KEYCHAIN}" ]]; then
keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
fi
entitlements_path="$GITHUB_ACTION_PATH/codex.entitlements.plist"
for binary in codex codex-responses-api-proxy; do
path="codex-rs/target/${TARGET}/release/${binary}"
codesign --force --options runtime --timestamp --entitlements "$entitlements_path" --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
done
- name: Notarize macOS binaries
if: ${{ inputs.sign-binaries == 'true' }}
shell: bash
env:
TARGET: ${{ inputs.target }}
APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
APPLE_NOTARIZATION_KEY_ID: ${{ inputs.apple-notarization-key-id }}
APPLE_NOTARIZATION_ISSUER_ID: ${{ inputs.apple-notarization-issuer-id }}
run: |
set -euo pipefail
for var in APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
if [[ -z "${!var:-}" ]]; then
echo "$var is required for notarization"
exit 1
fi
done
notary_key_path="${RUNNER_TEMP}/notarytool.key.p8"
echo "$APPLE_NOTARIZATION_KEY_P8" | base64 -d > "$notary_key_path"
cleanup_notary() {
rm -f "$notary_key_path"
}
trap cleanup_notary EXIT
source "$GITHUB_ACTION_PATH/notary_helpers.sh"
notarize_binary() {
local binary="$1"
local source_path="codex-rs/target/${TARGET}/release/${binary}"
local archive_path="${RUNNER_TEMP}/${binary}.zip"
if [[ ! -f "$source_path" ]]; then
echo "Binary $source_path not found"
exit 1
fi
rm -f "$archive_path"
ditto -c -k --keepParent "$source_path" "$archive_path"
notarize_submission "$binary" "$archive_path" "$notary_key_path"
}
notarize_binary "codex"
notarize_binary "codex-responses-api-proxy"
- name: Sign and notarize macOS dmg
if: ${{ inputs.sign-dmg == 'true' }}
shell: bash
env:
TARGET: ${{ inputs.target }}
APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
APPLE_NOTARIZATION_KEY_ID: ${{ inputs.apple-notarization-key-id }}
APPLE_NOTARIZATION_ISSUER_ID: ${{ inputs.apple-notarization-issuer-id }}
run: |
set -euo pipefail
for var in APPLE_CODESIGN_IDENTITY APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
if [[ -z "${!var:-}" ]]; then
echo "$var is required"
exit 1
fi
done
notary_key_path="${RUNNER_TEMP}/notarytool.key.p8"
echo "$APPLE_NOTARIZATION_KEY_P8" | base64 -d > "$notary_key_path"
cleanup_notary() {
rm -f "$notary_key_path"
}
trap cleanup_notary EXIT
source "$GITHUB_ACTION_PATH/notary_helpers.sh"
dmg_name="codex-${TARGET}.dmg"
dmg_path="codex-rs/target/${TARGET}/release/${dmg_name}"
if [[ ! -f "$dmg_path" ]]; then
echo "dmg $dmg_path not found"
exit 1
fi
keychain_args=()
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" && -f "${APPLE_CODESIGN_KEYCHAIN}" ]]; then
keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
fi
codesign --force --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$dmg_path"
notarize_submission "$dmg_name" "$dmg_path" "$notary_key_path"
xcrun stapler staple "$dmg_path"
- name: Remove signing keychain
if: ${{ always() }}
shell: bash
env:
APPLE_CODESIGN_KEYCHAIN: ${{ env.APPLE_CODESIGN_KEYCHAIN }}
run: |
set -euo pipefail
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" ]]; then
keychain_args=()
while IFS= read -r keychain; do
[[ "$keychain" == "$APPLE_CODESIGN_KEYCHAIN" ]] && continue
[[ -n "$keychain" ]] && keychain_args+=("$keychain")
done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "${keychain_args[@]}"
security default-keychain -s "${keychain_args[0]}"
fi
if [[ -f "$APPLE_CODESIGN_KEYCHAIN" ]]; then
security delete-keychain "$APPLE_CODESIGN_KEYCHAIN"
fi
fi

View File

@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>com.apple.security.cs.allow-jit</key>
<true/>
</dict>
</plist>

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env bash
notarize_submission() {
local label="$1"
local path="$2"
local notary_key_path="$3"
if [[ -z "${APPLE_NOTARIZATION_KEY_ID:-}" || -z "${APPLE_NOTARIZATION_ISSUER_ID:-}" ]]; then
echo "APPLE_NOTARIZATION_KEY_ID and APPLE_NOTARIZATION_ISSUER_ID are required for notarization"
exit 1
fi
if [[ -z "$notary_key_path" || ! -f "$notary_key_path" ]]; then
echo "Notary key file $notary_key_path not found"
exit 1
fi
if [[ ! -f "$path" ]]; then
echo "Notarization payload $path not found"
exit 1
fi
local submission_json
submission_json=$(xcrun notarytool submit "$path" \
--key "$notary_key_path" \
--key-id "$APPLE_NOTARIZATION_KEY_ID" \
--issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
--output-format json \
--wait)
local status submission_id
status=$(printf '%s\n' "$submission_json" | jq -r '.status // "Unknown"')
submission_id=$(printf '%s\n' "$submission_json" | jq -r '.id // ""')
if [[ -z "$submission_id" ]]; then
echo "Failed to retrieve submission ID for $label"
exit 1
fi
echo "::notice title=Notarization::$label submission ${submission_id} completed with status ${status}"
if [[ "$status" != "Accepted" ]]; then
echo "Notarization failed for ${label} (submission ${submission_id}, status ${status})"
exit 1
fi
}

View File

@@ -1,57 +0,0 @@
# Composite action: signs the Windows release binaries for a given target
# triple with Azure Trusted Signing, authenticating via OIDC federation.
name: windows-code-sign
description: Sign Windows binaries with Azure Trusted Signing.
inputs:
  target:
    description: Target triple for the artifacts to sign.
    required: true
  client-id:
    description: Azure Trusted Signing client ID.
    required: true
  tenant-id:
    description: Azure tenant ID for Trusted Signing.
    required: true
  subscription-id:
    description: Azure subscription ID for Trusted Signing.
    required: true
  endpoint:
    description: Azure Trusted Signing endpoint.
    required: true
  account-name:
    description: Azure Trusted Signing account name.
    required: true
  certificate-profile-name:
    description: Certificate profile name for signing.
    required: true
runs:
  using: composite
  steps:
    # Federated (OIDC) login — no long-lived client secret is stored in CI.
    - name: Azure login for Trusted Signing (OIDC)
      uses: azure/login@v2
      with:
        client-id: ${{ inputs.client-id }}
        tenant-id: ${{ inputs.tenant-id }}
        subscription-id: ${{ inputs.subscription-id }}
    - name: Sign Windows binaries with Azure Trusted Signing
      uses: azure/trusted-signing-action@v0
      with:
        endpoint: ${{ inputs.endpoint }}
        trusted-signing-account-name: ${{ inputs.account-name }}
        certificate-profile-name: ${{ inputs.certificate-profile-name }}
        # Restrict credential discovery to the Azure CLI session created by
        # the login step above; every other credential source is excluded.
        exclude-environment-credential: true
        exclude-workload-identity-credential: true
        exclude-managed-identity-credential: true
        exclude-shared-token-cache-credential: true
        exclude-visual-studio-credential: true
        exclude-visual-studio-code-credential: true
        exclude-azure-cli-credential: false
        exclude-azure-powershell-credential: true
        exclude-azure-developer-cli-credential: true
        exclude-interactive-browser-credential: true
        cache-dependencies: false
        # Release binaries produced by the build for `target`.
        files: |
          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex.exe
          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-responses-api-proxy.exe
          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-windows-sandbox-setup.exe
          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-command-runner.exe

View File

@@ -1,9 +0,0 @@
# Paths are matched exactly, relative to the repository root.
# Keep this list short and limited to intentional large checked-in assets.
.github/codex-cli-splash.png
MODULE.bazel.lock
codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json
codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json
codex-rs/tui/tests/fixtures/oss-story.jsonl
codex-rs/tui_app_server/tests/fixtures/oss-story.jsonl

BIN
.github/codex-cli-login.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.9 MiB

BIN
.github/codex-cli-permissions.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 408 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 818 KiB

After

Width:  |  Height:  |  Size: 3.1 MiB

View File

@@ -1,3 +1,3 @@
model = "gpt-5.1"
model = "gpt-5"
# Consider setting [mcp_servers] here!

View File

@@ -15,10 +15,10 @@ Things to look out for when doing the review:
## Code Organization
- Each crate in the Cargo workspace in `codex-rs` has a specific purpose: make a note if you believe new code is not introduced in the correct crate.
- Each crate in the Cargo workspace in `codex-rs` has a specific purpose: make a note if you believe new code is not introduced in the correct crate.
- When possible, try to keep the `core` crate as small as possible. Non-core but shared logic is often a good candidate for `codex-rs/common`.
- Be wary of large files and offer suggestions for how to break things into more reasonably-sized files.
- Rust files should generally be organized such that the public parts of the API appear near the top of the file and helper functions go below. This is analogous to the "inverted pyramid" structure that is favored in journalism.
- Rust files should generally be organized such that the public parts of the API appear near the top of the file and helper functions go below. This is analogous to the "inverted pyramid" structure that is favored in journalism.
## Assertions in Tests

BIN
.github/demo.gif vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 MiB

View File

@@ -1,24 +0,0 @@
{
"outputs": {
"argument-comment-lint": {
"platforms": {
"macos-aarch64": {
"regex": "^argument-comment-lint-aarch64-apple-darwin\\.tar\\.gz$",
"path": "argument-comment-lint/bin/argument-comment-lint"
},
"linux-x86_64": {
"regex": "^argument-comment-lint-x86_64-unknown-linux-gnu\\.tar\\.gz$",
"path": "argument-comment-lint/bin/argument-comment-lint"
},
"linux-aarch64": {
"regex": "^argument-comment-lint-aarch64-unknown-linux-gnu\\.tar\\.gz$",
"path": "argument-comment-lint/bin/argument-comment-lint"
},
"windows-x86_64": {
"regex": "^argument-comment-lint-x86_64-pc-windows-msvc\\.zip$",
"path": "argument-comment-lint/bin/argument-comment-lint.exe"
}
}
}
}
}

View File

@@ -27,58 +27,6 @@
"path": "codex.exe"
}
}
},
"codex-responses-api-proxy": {
"platforms": {
"macos-aarch64": {
"regex": "^codex-responses-api-proxy-aarch64-apple-darwin\\.zst$",
"path": "codex-responses-api-proxy"
},
"macos-x86_64": {
"regex": "^codex-responses-api-proxy-x86_64-apple-darwin\\.zst$",
"path": "codex-responses-api-proxy"
},
"linux-x86_64": {
"regex": "^codex-responses-api-proxy-x86_64-unknown-linux-musl\\.zst$",
"path": "codex-responses-api-proxy"
},
"linux-aarch64": {
"regex": "^codex-responses-api-proxy-aarch64-unknown-linux-musl\\.zst$",
"path": "codex-responses-api-proxy"
},
"windows-x86_64": {
"regex": "^codex-responses-api-proxy-x86_64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-responses-api-proxy.exe"
},
"windows-aarch64": {
"regex": "^codex-responses-api-proxy-aarch64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-responses-api-proxy.exe"
}
}
},
"codex-command-runner": {
"platforms": {
"windows-x86_64": {
"regex": "^codex-command-runner-x86_64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-command-runner.exe"
},
"windows-aarch64": {
"regex": "^codex-command-runner-aarch64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-command-runner.exe"
}
}
},
"codex-windows-sandbox-setup": {
"platforms": {
"windows-x86_64": {
"regex": "^codex-windows-sandbox-setup-x86_64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-windows-sandbox-setup.exe"
},
"windows-aarch64": {
"regex": "^codex-windows-sandbox-setup-aarch64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-windows-sandbox-setup.exe"
}
}
}
}
}

View File

@@ -1,23 +0,0 @@
{
"outputs": {
"codex-zsh": {
"platforms": {
"macos-aarch64": {
"name": "codex-zsh-aarch64-apple-darwin.tar.gz",
"format": "tar.gz",
"path": "codex-zsh/bin/zsh"
},
"linux-x86_64": {
"name": "codex-zsh-x86_64-unknown-linux-musl.tar.gz",
"format": "tar.gz",
"path": "codex-zsh/bin/zsh"
},
"linux-aarch64": {
"name": "codex-zsh-aarch64-unknown-linux-musl.tar.gz",
"format": "tar.gz",
"path": "codex-zsh/bin/zsh"
}
}
}
}
}

View File

@@ -4,5 +4,3 @@ Before opening this Pull Request, please read the dedicated "Contributing" markd
https://github.com/openai/codex/blob/main/docs/contributing.md
If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.
Include a link to a bug report or enhancement request.

View File

@@ -1,61 +0,0 @@
#!/usr/bin/env bash
# Build a patched zsh from source and package it as <archive-path> (a tar.gz
# containing codex-zsh/bin/zsh), after smoke-testing the EXEC_WRAPPER hook
# added by the patch.
#
# Required environment:
#   GITHUB_WORKSPACE  repo checkout root (archive path is relative to it)
#   ZSH_COMMIT        upstream zsh commit to build
#   ZSH_PATCH         path (relative to workspace) of the patch to apply
set -euo pipefail
if [[ "$#" -ne 1 ]]; then
  echo "usage: $0 <archive-path>" >&2
  exit 1
fi
archive_path="$1"
workspace="${GITHUB_WORKSPACE:?missing GITHUB_WORKSPACE}"
zsh_commit="${ZSH_COMMIT:?missing ZSH_COMMIT}"
zsh_patch="${ZSH_PATCH:?missing ZSH_PATCH}"
temp_root="${RUNNER_TEMP:-/tmp}"
# All scratch state lives under one mktemp dir that is removed on exit.
work_root="$(mktemp -d "${temp_root%/}/codex-zsh-release.XXXXXX")"
trap 'rm -rf "$work_root"' EXIT
source_root="${work_root}/zsh"
package_root="${work_root}/codex-zsh"
wrapper_path="${work_root}/exec-wrapper"
stdout_path="${work_root}/stdout.txt"
wrapper_log_path="${work_root}/wrapper.log"
# Fetch upstream zsh, pin the commit, and apply our patch.
git clone https://git.code.sf.net/p/zsh/code "$source_root"
cd "$source_root"
git checkout "$zsh_commit"
git apply "${workspace}/${zsh_patch}"
./Util/preconfig
./configure
# nproc is Linux; getconf covers macOS.
cores="$(command -v nproc >/dev/null 2>&1 && nproc || getconf _NPROCESSORS_ONLN)"
make -j"${cores}"
# Smoke-test wrapper: logs its argv, then execs the real command (preserving
# argv[0] when extra args are present). Quoted 'EOF' => no expansion here.
cat > "$wrapper_path" <<'EOF'
#!/usr/bin/env bash
set -euo pipefail
: "${CODEX_WRAPPER_LOG:?missing CODEX_WRAPPER_LOG}"
printf '%s\n' "$@" > "$CODEX_WRAPPER_LOG"
file="$1"
shift
if [[ "$#" -eq 0 ]]; then
  exec "$file"
fi
arg0="$1"
shift
exec -a "$arg0" "$file" "$@"
EOF
chmod +x "$wrapper_path"
# Run the freshly built zsh once and verify both the command output and that
# the EXEC_WRAPPER hook actually fired (wrapper log contains the command).
CODEX_WRAPPER_LOG="$wrapper_log_path" \
EXEC_WRAPPER="$wrapper_path" \
"${source_root}/Src/zsh" -fc '/bin/echo smoke-zsh' > "$stdout_path"
grep -Fx "smoke-zsh" "$stdout_path"
grep -Fx "/bin/echo" "$wrapper_log_path"
# Package the binary into the archive layout expected by the release.
mkdir -p "$package_root/bin" "$(dirname "${workspace}/${archive_path}")"
cp "${source_root}/Src/zsh" "$package_root/bin/zsh"
chmod +x "$package_root/bin/zsh"
(cd "$work_root" && tar -czf "${workspace}/${archive_path}" codex-zsh)

View File

@@ -1,279 +0,0 @@
#!/usr/bin/env bash
# CI helper: prepare a musl cross-compilation environment for ${TARGET}.
# Installs toolchain packages, builds a static libcap against musl, prefers
# `zig cc`/`zig c++` (via generated wrapper scripts) as the C/C++ compilers
# when zig is available, and exports the resulting compiler / pkg-config /
# cargo-linker settings into $GITHUB_ENV for later build steps.
set -euo pipefail
: "${TARGET:?TARGET environment variable is required}"
: "${GITHUB_ENV:?GITHUB_ENV environment variable is required}"
# Optional extra apt args are passed as whitespace-separated strings; the
# unquoted expansion (SC2206) is the intended word splitting.
apt_update_args=()
if [[ -n "${APT_UPDATE_ARGS:-}" ]]; then
  # shellcheck disable=SC2206
  apt_update_args=(${APT_UPDATE_ARGS})
fi
apt_install_args=()
if [[ -n "${APT_INSTALL_ARGS:-}" ]]; then
  # shellcheck disable=SC2206
  apt_install_args=(${APT_INSTALL_ARGS})
fi
sudo apt-get update "${apt_update_args[@]}"
sudo apt-get install -y "${apt_install_args[@]}" ca-certificates curl musl-tools pkg-config libcap-dev g++ clang libc++-dev libc++abi-dev lld xz-utils
# Only the two musl triples below are supported.
case "${TARGET}" in
  x86_64-unknown-linux-musl)
    arch="x86_64"
    ;;
  aarch64-unknown-linux-musl)
    arch="aarch64"
    ;;
  *)
    echo "Unexpected musl target: ${TARGET}" >&2
    exit 1
    ;;
esac
# Pinned libcap source release, verified by sha256 before building.
libcap_version="2.75"
libcap_sha256="de4e7e064c9ba451d5234dd46e897d7c71c96a9ebf9a0c445bc04f4742d83632"
libcap_tarball_name="libcap-${libcap_version}.tar.xz"
libcap_download_url="https://mirrors.edge.kernel.org/pub/linux/libs/security/linux-privs/libcap2/${libcap_tarball_name}"
# Use the musl toolchain as the Rust linker to avoid Zig injecting its own CRT.
if command -v "${arch}-linux-musl-gcc" >/dev/null; then
  musl_linker="$(command -v "${arch}-linux-musl-gcc")"
elif command -v musl-gcc >/dev/null; then
  musl_linker="$(command -v musl-gcc)"
else
  echo "musl gcc not found after install; arch=${arch}" >&2
  exit 1
fi
# Zig spells the triple without the "unknown" vendor component.
zig_target="${TARGET/-unknown-linux-musl/-linux-musl}"
runner_temp="${RUNNER_TEMP:-/tmp}"
tool_root="${runner_temp}/codex-musl-tools-${TARGET}"
mkdir -p "${tool_root}"
libcap_root="${tool_root}/libcap-${libcap_version}"
libcap_src_root="${libcap_root}/src"
libcap_prefix="${libcap_root}/prefix"
libcap_pkgconfig_dir="${libcap_prefix}/lib/pkgconfig"
# Build a static libcap with the musl compiler once per tool_root; the
# prefix also gets a hand-written libcap.pc so pkg-config can find it.
if [[ ! -f "${libcap_prefix}/lib/libcap.a" ]]; then
  mkdir -p "${libcap_src_root}" "${libcap_prefix}/lib" "${libcap_prefix}/include/sys" "${libcap_prefix}/include/linux" "${libcap_pkgconfig_dir}"
  libcap_tarball="${libcap_root}/${libcap_tarball_name}"
  curl -fsSL "${libcap_download_url}" -o "${libcap_tarball}"
  echo "${libcap_sha256}  ${libcap_tarball}" | sha256sum -c -
  tar -xJf "${libcap_tarball}" -C "${libcap_src_root}"
  libcap_source_dir="${libcap_src_root}/libcap-${libcap_version}"
  make -C "${libcap_source_dir}/libcap" -j"$(nproc)" \
    CC="${musl_linker}" \
    AR=ar \
    RANLIB=ranlib
  cp "${libcap_source_dir}/libcap/libcap.a" "${libcap_prefix}/lib/libcap.a"
  cp "${libcap_source_dir}/libcap/include/uapi/linux/capability.h" "${libcap_prefix}/include/linux/capability.h"
  cp "${libcap_source_dir}/libcap/../libcap/include/sys/capability.h" "${libcap_prefix}/include/sys/capability.h"
  cat > "${libcap_pkgconfig_dir}/libcap.pc" <<EOF
prefix=${libcap_prefix}
exec_prefix=\${prefix}
libdir=\${prefix}/lib
includedir=\${prefix}/include
Name: libcap
Description: Linux capabilities
Version: ${libcap_version}
Libs: -L\${libdir} -lcap
Cflags: -I\${includedir}
EOF
fi
sysroot=""
# Prefer zig as the cross C/C++ compiler when present. The generated wrapper
# scripts normalize flags that build scripts pass in GCC form: they drop
# --target/-target (zig gets its own -target below), demote host /usr/include
# -I paths to -idirafter so musl headers win, and rewrite -Wp,-U_FORTIFY_SOURCE.
# NOTE: the heredocs are unquoted on purpose — ${zig_bin}/${zig_target} expand
# now; the backslash-escaped \$ expressions expand when the wrapper runs.
if command -v zig >/dev/null; then
  zig_bin="$(command -v zig)"
  cc="${tool_root}/zigcc"
  cxx="${tool_root}/zigcxx"
  cat >"${cc}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
pending_include=0
for arg in "\$@"; do
  if [[ "\${pending_include}" -eq 1 ]]; then
    pending_include=0
    if [[ "\${arg}" == /usr/include || "\${arg}" == /usr/include/* ]]; then
      # Keep host-only headers available, but after the target sysroot headers.
      args+=("-idirafter" "\${arg}")
    else
      args+=("-I" "\${arg}")
    fi
    continue
  fi
  if [[ "\${skip_next}" -eq 1 ]]; then
    skip_next=0
    continue
  fi
  case "\${arg}" in
    --target)
      skip_next=1
      continue
      ;;
    --target=*|-target=*|-target)
      # Drop any explicit --target/-target flags. Zig expects -target and
      # rejects Rust triples like *-unknown-linux-musl.
      if [[ "\${arg}" == "-target" ]]; then
        skip_next=1
      fi
      continue
      ;;
    -I)
      pending_include=1
      continue
      ;;
    -I/usr/include|-I/usr/include/*)
      # Avoid making glibc headers win over musl headers.
      args+=("-idirafter" "\${arg#-I}")
      continue
      ;;
    -Wp,-U_FORTIFY_SOURCE)
      # aws-lc-sys emits this GCC preprocessor forwarding form in debug
      # builds, but zig cc expects the define flag directly.
      args+=("-U_FORTIFY_SOURCE")
      continue
      ;;
  esac
  args+=("\${arg}")
done
exec "${zig_bin}" cc -target "${zig_target}" "\${args[@]}"
EOF
  cat >"${cxx}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
pending_include=0
for arg in "\$@"; do
  if [[ "\${pending_include}" -eq 1 ]]; then
    pending_include=0
    if [[ "\${arg}" == /usr/include || "\${arg}" == /usr/include/* ]]; then
      # Keep host-only headers available, but after the target sysroot headers.
      args+=("-idirafter" "\${arg}")
    else
      args+=("-I" "\${arg}")
    fi
    continue
  fi
  if [[ "\${skip_next}" -eq 1 ]]; then
    skip_next=0
    continue
  fi
  case "\${arg}" in
    --target)
      # Drop explicit --target and its value: we always pass zig's -target below.
      skip_next=1
      continue
      ;;
    --target=*|-target=*|-target)
      # Zig expects -target and rejects Rust triples like *-unknown-linux-musl.
      if [[ "\${arg}" == "-target" ]]; then
        skip_next=1
      fi
      continue
      ;;
    -I)
      pending_include=1
      continue
      ;;
    -I/usr/include|-I/usr/include/*)
      # Avoid making glibc headers win over musl headers.
      args+=("-idirafter" "\${arg#-I}")
      continue
      ;;
    -Wp,-U_FORTIFY_SOURCE)
      # aws-lc-sys emits this GCC forwarding form in debug builds; zig c++
      # expects the define flag directly.
      args+=("-U_FORTIFY_SOURCE")
      continue
      ;;
  esac
  args+=("\${arg}")
done
exec "${zig_bin}" c++ -target "${zig_target}" "\${args[@]}"
EOF
  chmod +x "${cc}" "${cxx}"
  sysroot="$("${zig_bin}" cc -target "${zig_target}" -print-sysroot 2>/dev/null || true)"
else
  # No zig: fall back to the musl gcc/g++ toolchain directly.
  cc="${musl_linker}"
  if command -v "${arch}-linux-musl-g++" >/dev/null; then
    cxx="$(command -v "${arch}-linux-musl-g++")"
  elif command -v musl-g++ >/dev/null; then
    cxx="$(command -v musl-g++)"
  else
    cxx="${cc}"
  fi
fi
# Export everything downstream builds need via $GITHUB_ENV. Several settings
# are emitted both generically (CC, CXX, ...) and in the per-target form
# (CC_<triple>, PKG_CONFIG_PATH_<triple>, ...) with dashes mapped to
# underscores, which is the spelling cargo and the cc crate look up.
if [[ -n "${sysroot}" && "${sysroot}" != "/" ]]; then
  echo "BORING_BSSL_SYSROOT=${sysroot}" >> "$GITHUB_ENV"
  boring_sysroot_var="BORING_BSSL_SYSROOT_${TARGET}"
  boring_sysroot_var="${boring_sysroot_var//-/_}"
  echo "${boring_sysroot_var}=${sysroot}" >> "$GITHUB_ENV"
fi
cflags="-pthread"
cxxflags="-pthread"
if [[ "${TARGET}" == "aarch64-unknown-linux-musl" ]]; then
  # BoringSSL enables -Wframe-larger-than=25344 under clang and treats warnings as errors.
  cflags="${cflags} -Wno-error=frame-larger-than"
  cxxflags="${cxxflags} -Wno-error=frame-larger-than"
fi
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
echo "CC=${cc}" >> "$GITHUB_ENV"
echo "TARGET_CC=${cc}" >> "$GITHUB_ENV"
target_cc_var="CC_${TARGET}"
target_cc_var="${target_cc_var//-/_}"
echo "${target_cc_var}=${cc}" >> "$GITHUB_ENV"
echo "CXX=${cxx}" >> "$GITHUB_ENV"
echo "TARGET_CXX=${cxx}" >> "$GITHUB_ENV"
target_cxx_var="CXX_${TARGET}"
target_cxx_var="${target_cxx_var//-/_}"
echo "${target_cxx_var}=${cxx}" >> "$GITHUB_ENV"
cargo_linker_var="CARGO_TARGET_${TARGET^^}_LINKER"
cargo_linker_var="${cargo_linker_var//-/_}"
echo "${cargo_linker_var}=${musl_linker}" >> "$GITHUB_ENV"
echo "CMAKE_C_COMPILER=${cc}" >> "$GITHUB_ENV"
echo "CMAKE_CXX_COMPILER=${cxx}" >> "$GITHUB_ENV"
echo "CMAKE_ARGS=-DCMAKE_HAVE_THREADS_LIBRARY=1 -DCMAKE_USE_PTHREADS_INIT=1 -DCMAKE_THREAD_LIBS_INIT=-pthread -DTHREADS_PREFER_PTHREAD_FLAG=ON" >> "$GITHUB_ENV"
# Allow pkg-config resolution during cross-compilation.
echo "PKG_CONFIG_ALLOW_CROSS=1" >> "$GITHUB_ENV"
pkg_config_path="${libcap_pkgconfig_dir}"
if [[ -n "${PKG_CONFIG_PATH:-}" ]]; then
  pkg_config_path="${pkg_config_path}:${PKG_CONFIG_PATH}"
fi
echo "PKG_CONFIG_PATH=${pkg_config_path}" >> "$GITHUB_ENV"
pkg_config_path_var="PKG_CONFIG_PATH_${TARGET}"
pkg_config_path_var="${pkg_config_path_var//-/_}"
echo "${pkg_config_path_var}=${libcap_pkgconfig_dir}" >> "$GITHUB_ENV"
if [[ -n "${sysroot}" && "${sysroot}" != "/" ]]; then
  echo "PKG_CONFIG_SYSROOT_DIR=${sysroot}" >> "$GITHUB_ENV"
  pkg_config_sysroot_var="PKG_CONFIG_SYSROOT_DIR_${TARGET}"
  pkg_config_sysroot_var="${pkg_config_sysroot_var//-/_}"
  echo "${pkg_config_sysroot_var}=${sysroot}" >> "$GITHUB_ENV"
fi

View File

@@ -1,287 +0,0 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import gzip
import re
import shutil
import subprocess
import sys
import tempfile
import tomllib
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
MUSL_RUNTIME_ARCHIVE_LABELS = [
"@llvm//runtimes/libcxx:libcxx.static",
"@llvm//runtimes/libcxx:libcxxabi.static",
]
LLVM_AR_LABEL = "@llvm//tools:llvm-ar"
LLVM_RANLIB_LABEL = "@llvm//tools:llvm-ranlib"
def bazel_execroot() -> Path:
    """Return Bazel's execution root directory for this workspace."""
    proc = subprocess.run(
        ["bazel", "info", "execution_root"],
        cwd=ROOT,
        capture_output=True,
        check=True,
        text=True,
    )
    return Path(proc.stdout.strip())
def bazel_output_base() -> Path:
    """Return Bazel's output base directory for this workspace."""
    proc = subprocess.run(
        ["bazel", "info", "output_base"],
        cwd=ROOT,
        capture_output=True,
        check=True,
        text=True,
    )
    return Path(proc.stdout.strip())
def bazel_output_path(path: str) -> Path:
    """Map a Bazel-reported relative path to an absolute filesystem path.

    Paths under ``external/`` resolve against the output base; everything
    else resolves against the execution root.
    """
    base = bazel_output_base() if path.startswith("external/") else bazel_execroot()
    return base / path
def bazel_output_files(
    platform: str,
    labels: list[str],
    compilation_mode: str = "fastbuild",
) -> list[Path]:
    """Resolve `labels` to their output file paths via ``bazel cquery``.

    Queries with ``--output=files`` for the given LLVM platform and
    compilation mode, then maps every non-empty reported line to an
    absolute path.
    """
    expression = "set(" + " ".join(labels) + ")"
    command = [
        "bazel",
        "cquery",
        "-c",
        compilation_mode,
        f"--platforms=@llvm//platforms:{platform}",
        "--output=files",
        expression,
    ]
    proc = subprocess.run(command, cwd=ROOT, capture_output=True, check=True, text=True)
    paths: list[Path] = []
    for raw_line in proc.stdout.splitlines():
        candidate = raw_line.strip()
        if candidate:
            paths.append(bazel_output_path(candidate))
    return paths
def bazel_build(
    platform: str,
    labels: list[str],
    compilation_mode: str = "fastbuild",
) -> None:
    """Build `labels` with Bazel for the given LLVM platform and mode."""
    command = [
        "bazel",
        "build",
        "-c",
        compilation_mode,
        f"--platforms=@llvm//platforms:{platform}",
        *labels,
    ]
    subprocess.run(command, cwd=ROOT, check=True)
def ensure_bazel_output_files(
    platform: str,
    labels: list[str],
    compilation_mode: str = "fastbuild",
) -> list[Path]:
    """Return output paths for `labels`, building them first when absent.

    Raises SystemExit if any expected output is still missing after the
    build.
    """
    outputs = bazel_output_files(platform, labels, compilation_mode)
    if any(not path.exists() for path in outputs):
        bazel_build(platform, labels, compilation_mode)
        outputs = bazel_output_files(platform, labels, compilation_mode)
        missing = [str(path) for path in outputs if not path.exists()]
        if missing:
            raise SystemExit(f"missing built outputs for {labels}: {missing}")
    return outputs
def release_pair_label(target: str) -> str:
    """Bazel label of the rusty_v8 release (library, binding) pair for `target`.

    Dashes in the Rust triple become underscores, matching Bazel target
    naming.
    """
    return "//third_party/v8:rusty_v8_release_pair_" + target.replace("-", "_")
def resolved_v8_crate_version() -> str:
    """Return the single `v8` crate version this repository is pinned to.

    Prefers the resolved version in codex-rs/Cargo.lock; falls back to the
    crate tarball URL pinned in MODULE.bazel when Cargo.lock contains no
    `v8` package. Exits (SystemExit) unless each consulted source yields
    exactly one version, since ambiguity would make staging unreproducible.
    """
    cargo_lock = tomllib.loads((ROOT / "codex-rs" / "Cargo.lock").read_text())
    versions = sorted(
        {
            package["version"]
            for package in cargo_lock["package"]
            if package["name"] == "v8"
        }
    )
    if len(versions) == 1:
        return versions[0]
    if len(versions) > 1:
        raise SystemExit(f"expected exactly one resolved v8 version, found: {versions}")
    # No v8 entry in Cargo.lock: fall back to the version embedded in the
    # crates.io download URL pinned in MODULE.bazel.
    module_bazel = (ROOT / "MODULE.bazel").read_text()
    matches = sorted(
        set(
            re.findall(
                r'https://static\.crates\.io/crates/v8/v8-([0-9]+\.[0-9]+\.[0-9]+)\.crate',
                module_bazel,
            )
        )
    )
    if len(matches) != 1:
        raise SystemExit(
            "expected exactly one pinned v8 crate version in MODULE.bazel, "
            f"found: {matches}"
        )
    return matches[0]
def staged_archive_name(target: str, source_path: Path) -> str:
    """File name for the gzip-compressed static library staged for `target`.

    MSVC ``.lib`` archives keep their bare name; everything else gets the
    Unix ``lib`` prefix and ``.a`` extension.
    """
    if source_path.suffix == ".lib":
        prefix, ext = "rusty_v8_release_", ".lib.gz"
    else:
        prefix, ext = "librusty_v8_release_", ".a.gz"
    return f"{prefix}{target}{ext}"
def is_musl_archive_target(target: str, source_path: Path) -> bool:
    """True when `target` is a musl triple whose artifact is a ``.a`` archive."""
    if not target.endswith("-unknown-linux-musl"):
        return False
    return source_path.suffix == ".a"
def single_bazel_output_file(
    platform: str,
    label: str,
    compilation_mode: str = "fastbuild",
) -> Path:
    """Resolve `label` to its sole output file, building it if needed.

    Raises SystemExit when the label does not produce exactly one output.
    """
    outputs = ensure_bazel_output_files(platform, [label], compilation_mode)
    if len(outputs) != 1:
        raise SystemExit(f"expected exactly one output for {label}, found {outputs}")
    (only_output,) = outputs
    return only_output
def merged_musl_archive(
    platform: str,
    lib_path: Path,
    compilation_mode: str = "fastbuild",
) -> Path:
    """Merge the musl static library with the LLVM C++ runtime archives.

    Locates (building if necessary) llvm-ar, llvm-ranlib, and the static
    libcxx/libcxxabi archives for `platform`, then appends the runtimes
    into a copy of `lib_path` so consumers link one self-contained archive.
    Returns the merged archive inside a fresh temporary directory; the
    directory is intentionally not cleaned up here so the caller can read it.
    """
    llvm_ar = single_bazel_output_file(platform, LLVM_AR_LABEL, compilation_mode)
    llvm_ranlib = single_bazel_output_file(platform, LLVM_RANLIB_LABEL, compilation_mode)
    runtime_archives = [
        single_bazel_output_file(platform, label, compilation_mode)
        for label in MUSL_RUNTIME_ARCHIVE_LABELS
    ]
    temp_dir = Path(tempfile.mkdtemp(prefix="rusty-v8-musl-stage-"))
    merged_archive = temp_dir / lib_path.name
    # llvm-ar MRI script: create the output archive, inline every member of
    # each input archive (addlib), then write it out.
    merge_commands = "\n".join(
        [
            f"create {merged_archive}",
            f"addlib {lib_path}",
            *[f"addlib {archive}" for archive in runtime_archives],
            "save",
            "end",
        ]
    )
    subprocess.run(
        [str(llvm_ar), "-M"],
        cwd=ROOT,
        check=True,
        input=merge_commands,
        text=True,
    )
    # Regenerate the archive symbol index after merging.
    subprocess.run([str(llvm_ranlib), str(merged_archive)], cwd=ROOT, check=True)
    return merged_archive
def stage_release_pair(
    platform: str,
    target: str,
    output_dir: Path,
    compilation_mode: str = "fastbuild",
) -> None:
    """Stage the (static library, Rust binding) release pair for `target`.

    Builds the release-pair label, gzips the static library into
    `output_dir` (merging in the LLVM C++ runtimes first for musl targets),
    copies the generated Rust binding alongside it, and prints both staged
    paths. Exits via SystemExit when an expected output is missing.
    """
    outputs = ensure_bazel_output_files(
        platform,
        [release_pair_label(target)],
        compilation_mode,
    )
    # The pair label is expected to yield one static library (.a/.lib) and
    # one generated Rust binding (.rs).
    try:
        lib_path = next(path for path in outputs if path.suffix in {".a", ".lib"})
    except StopIteration as exc:
        raise SystemExit(f"missing static library output for {target}") from exc
    try:
        binding_path = next(path for path in outputs if path.suffix == ".rs")
    except StopIteration as exc:
        raise SystemExit(f"missing Rust binding output for {target}") from exc
    output_dir.mkdir(parents=True, exist_ok=True)
    staged_library = output_dir / staged_archive_name(target, lib_path)
    staged_binding = output_dir / f"src_binding_release_{target}.rs"
    # musl .a archives get libc++/libc++abi merged in; other targets ship as-is.
    source_archive = (
        merged_musl_archive(platform, lib_path, compilation_mode)
        if is_musl_archive_target(target, lib_path)
        else lib_path
    )
    # filename="" and mtime=0 make the gzip stream byte-for-byte reproducible
    # across runs (no embedded name or timestamp).
    with source_archive.open("rb") as src, staged_library.open("wb") as dst:
        with gzip.GzipFile(
            filename="",
            mode="wb",
            fileobj=dst,
            compresslevel=6,
            mtime=0,
        ) as gz:
            shutil.copyfileobj(src, gz)
    shutil.copyfile(binding_path, staged_binding)
    # Emit the staged paths for the calling workflow to capture.
    print(staged_library)
    print(staged_binding)
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for the two supported subcommands."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="command", required=True)
    stage_parser = subparsers.add_parser("stage-release-pair")
    for flag in ("--platform", "--target", "--output-dir"):
        stage_parser.add_argument(flag, required=True)
    stage_parser.add_argument(
        "--compilation-mode",
        choices=["fastbuild", "opt", "dbg"],
        default="fastbuild",
    )
    subparsers.add_parser("resolved-v8-crate-version")
    return parser.parse_args()
def main() -> int:
    """Dispatch to the selected subcommand; returns a process exit code."""
    args = parse_args()
    command = args.command
    if command == "resolved-v8-crate-version":
        print(resolved_v8_crate_version())
        return 0
    if command == "stage-release-pair":
        stage_release_pair(
            platform=args.platform,
            target=args.target,
            output_dir=Path(args.output_dir),
            compilation_mode=args.compilation_mode,
        )
        return 0
    raise SystemExit(f"unsupported command: {command}")


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,36 +0,0 @@
# CI image for the Bazel workflow: Ubuntu 24.04 plus git/python3, a pinned
# Node.js runtime, and DotSlash, running as the non-root 'ubuntu' user.
FROM ubuntu:24.04
# TODO(mbolin): Published to docker.io/mbolin491/codex-bazel:latest for
# initial debugging, but we should publish to a more proper location.
#
#     docker buildx create --use
#     docker buildx build --platform linux/amd64,linux/arm64 -f .github/workflows/Dockerfile.bazel -t mbolin491/codex-bazel:latest --push .
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    curl git python3 ca-certificates xz-utils && \
    rm -rf /var/lib/apt/lists/*
# Install the Node.js version pinned by the repo (codex-rs/node-version.txt),
# picking the official tarball matching the image architecture.
COPY codex-rs/node-version.txt /tmp/node-version.txt
RUN set -eux; \
    node_arch="$(dpkg --print-architecture)"; \
    case "${node_arch}" in \
    amd64) node_dist_arch="x64" ;; \
    arm64) node_dist_arch="arm64" ;; \
    *) echo "unsupported architecture: ${node_arch}"; exit 1 ;; \
    esac; \
    node_version="$(tr -d '[:space:]' </tmp/node-version.txt)"; \
    curl -fsSLO "https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-${node_dist_arch}.tar.xz"; \
    tar -xJf "node-v${node_version}-linux-${node_dist_arch}.tar.xz" -C /usr/local --strip-components=1; \
    rm "node-v${node_version}-linux-${node_dist_arch}.tar.xz" /tmp/node-version.txt; \
    node --version; \
    npm --version
# Install dotslash.
RUN curl -LSfs "https://github.com/facebook/dotslash/releases/download/v0.5.8/dotslash-ubuntu-22.04.$(uname -m).tar.gz" | tar fxz - -C /usr/local/bin
# Ubuntu 24.04 ships with user 'ubuntu' already created with UID 1000.
USER ubuntu
WORKDIR /workspace

View File

@@ -1,231 +0,0 @@
# CI: build and test the whole repo with Bazel on a matrix of OS/target
# pairs, preferring BuildBuddy remote execution/caching when credentials are
# available and falling back to a purely local Bazel run on forks.
name: Bazel (experimental)
# Note this workflow was originally derived from:
# https://github.com/cerisier/toolchains_llvm_bootstrapped/blob/main/.github/workflows/ci.yaml
on:
  pull_request: {}
  push:
    branches:
      - main
  workflow_dispatch:
concurrency:
  # Cancel previous actions from the same PR or branch except 'main' branch.
  # See https://docs.github.com/en/actions/using-jobs/using-concurrency and https://docs.github.com/en/actions/learn-github-actions/contexts for more info.
  group: concurrency-group::${{ github.workflow }}::${{ github.event.pull_request.number > 0 && format('pr-{0}', github.event.pull_request.number) || github.ref_name }}${{ github.ref_name == 'main' && format('::{0}', github.run_id) || ''}}
  cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
  test:
    strategy:
      fail-fast: false
      matrix:
        include:
          # macOS
          - os: macos-15-xlarge
            target: aarch64-apple-darwin
          - os: macos-15-xlarge
            target: x86_64-apple-darwin
          # Linux
          - os: ubuntu-24.04
            target: x86_64-unknown-linux-gnu
          - os: ubuntu-24.04
            target: x86_64-unknown-linux-musl
          # 2026-02-27 Bazel tests have been flaky on arm in CI.
          # Disable until we can investigate and stabilize them.
          # - os: ubuntu-24.04-arm
          #   target: aarch64-unknown-linux-musl
          # - os: ubuntu-24.04-arm
          #   target: aarch64-unknown-linux-gnu
          # TODO: Enable Windows once we fix the toolchain issues there.
          #- os: windows-latest
          #  target: x86_64-pc-windows-gnullvm
    runs-on: ${{ matrix.os }}
    # Configure a human readable name for each job
    name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}
    steps:
      - uses: actions/checkout@v6
      - name: Set up Node.js for js_repl tests
        uses: actions/setup-node@v6
        with:
          node-version-file: codex-rs/node-version.txt
      # Some integration tests rely on DotSlash being installed.
      # See https://github.com/openai/codex/pull/7617.
      - name: Install DotSlash
        uses: facebook/install-dotslash@v2
      - name: Make DotSlash available in PATH (Unix)
        if: runner.os != 'Windows'
        run: cp "$(which dotslash)" /usr/local/bin
      - name: Make DotSlash available in PATH (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: Copy-Item (Get-Command dotslash).Source -Destination "$env:LOCALAPPDATA\Microsoft\WindowsApps\dotslash.exe"
      # Install Bazel via Bazelisk
      - name: Set up Bazel
        uses: bazelbuild/setup-bazelisk@v3
      # Lockfile freshness is only checked on one matrix cell to avoid
      # redundant work.
      - name: Check MODULE.bazel.lock is up to date
        if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu'
        shell: bash
        run: ./scripts/check-module-bazel-lock.sh
      # TODO(mbolin): Bring this back once we have caching working. Currently,
      # we never seem to get a cache hit but we still end up paying the cost of
      # uploading at the end of the build, which takes over a minute!
      #
      # Cache build and external artifacts so that the next ci build is incremental.
      # Because github action caches cannot be updated after a build, we need to
      # store the contents of each build in a unique cache key, then fall back to loading
      # it on the next ci run. We use hashFiles(...) in the key and restore-keys- with
      # the prefix to load the most recent cache for the branch on a cache miss. You
      # should customize the contents of hashFiles to capture any bazel input sources,
      # although this doesn't need to be perfect. If none of the input sources change
      # then a cache hit will load an existing cache and bazel won't have to do any work.
      # In the case of a cache miss, you want the fallback cache to contain most of the
      # previously built artifacts to minimize build time. The more precise you are with
      # hashFiles sources the less work bazel will have to do.
      # - name: Mount bazel caches
      #   uses: actions/cache@v5
      #   with:
      #     path: |
      #       ~/.cache/bazel-repo-cache
      #       ~/.cache/bazel-repo-contents-cache
      #     key: bazel-cache-${{ matrix.os }}-${{ hashFiles('**/BUILD.bazel', '**/*.bzl', 'MODULE.bazel') }}
      #     restore-keys: |
      #       bazel-cache-${{ matrix.os }}
      - name: Configure Bazel startup args (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          # Use a very short path to reduce argv/path length issues.
          "BAZEL_STARTUP_ARGS=--output_user_root=C:\" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
      # Runs `bazel test` (remote when BUILDBUDDY_API_KEY is present, local
      # otherwise) and, on failure, dumps the tail of each failed target's
      # test.log into collapsible log groups.
      - name: bazel test //...
        env:
          BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
        shell: bash
        run: |
          set -o pipefail
          bazel_console_log="$(mktemp)"
          print_failed_bazel_test_logs() {
            local console_log="$1"
            local testlogs_dir
            testlogs_dir="$(bazel $BAZEL_STARTUP_ARGS info bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
            local failed_targets=()
            while IFS= read -r target; do
              failed_targets+=("$target")
            done < <(
              grep -E '^FAIL: //' "$console_log" \
                | sed -E 's#^FAIL: (//[^ ]+).*#\1#' \
                | sort -u
            )
            if [[ ${#failed_targets[@]} -eq 0 ]]; then
              echo "No failed Bazel test targets were found in console output."
              return
            fi
            for target in "${failed_targets[@]}"; do
              local rel_path="${target#//}"
              rel_path="${rel_path/:/\/}"
              local test_log="${testlogs_dir}/${rel_path}/test.log"
              echo "::group::Bazel test log tail for ${target}"
              if [[ -f "$test_log" ]]; then
                tail -n 200 "$test_log"
              else
                echo "Missing test log: $test_log"
              fi
              echo "::endgroup::"
            done
          }
          bazel_args=(
            test
            --test_verbose_timeout_warnings
            --build_metadata=REPO_URL=https://github.com/openai/codex.git
            --build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
            --build_metadata=ROLE=CI
            --build_metadata=VISIBILITY=PUBLIC
          )
          bazel_targets=(
            //...
            # Keep V8 out of the ordinary Bazel CI path. Only the dedicated
            # canary and release workflows should build `third_party/v8`.
            -//third_party/v8:all
          )
          if [[ "${RUNNER_OS:-}" != "Windows" ]]; then
            # Bazel test sandboxes on macOS may resolve an older Homebrew `node`
            # before the `actions/setup-node` runtime on PATH.
            node_bin="$(which node)"
            bazel_args+=("--test_env=CODEX_JS_REPL_NODE_PATH=${node_bin}")
          fi
          if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
            echo "BuildBuddy API key is available; using remote Bazel configuration."
            # Work around Bazel 9 remote repo contents cache / overlay materialization failures
            # seen in CI (for example "is not a symlink" or permission errors while
            # materializing external repos such as rules_perl). We still use BuildBuddy for
            # remote execution/cache; this only disables the startup-level repo contents cache.
            set +e
            bazel $BAZEL_STARTUP_ARGS \
              --noexperimental_remote_repo_contents_cache \
              --bazelrc=.github/workflows/ci.bazelrc \
              "${bazel_args[@]}" \
              "--remote_header=x-buildbuddy-api-key=$BUILDBUDDY_API_KEY" \
              -- \
              "${bazel_targets[@]}" \
              2>&1 | tee "$bazel_console_log"
            bazel_status=${PIPESTATUS[0]}
            set -e
          else
            echo "BuildBuddy API key is not available; using local Bazel configuration."
            # Keep fork/community PRs on Bazel but disable remote services that are
            # configured in .bazelrc and require auth.
            #
            # Flag docs:
            #   - Command-line reference: https://bazel.build/reference/command-line-reference
            #   - Remote caching overview: https://bazel.build/remote/caching
            #   - Remote execution overview: https://bazel.build/remote/rbe
            #   - Build Event Protocol overview: https://bazel.build/remote/bep
            #
            # --noexperimental_remote_repo_contents_cache:
            #   disable remote repo contents cache enabled in .bazelrc startup options.
            #   https://bazel.build/reference/command-line-reference#startup_options-flag--experimental_remote_repo_contents_cache
            # --remote_cache= and --remote_executor=:
            #   clear remote cache/execution endpoints configured in .bazelrc.
            #   https://bazel.build/reference/command-line-reference#common_options-flag--remote_cache
            #   https://bazel.build/reference/command-line-reference#common_options-flag--remote_executor
            set +e
            bazel $BAZEL_STARTUP_ARGS \
              --noexperimental_remote_repo_contents_cache \
              "${bazel_args[@]}" \
              --remote_cache= \
              --remote_executor= \
              -- \
              "${bazel_targets[@]}" \
              2>&1 | tee "$bazel_console_log"
            bazel_status=${PIPESTATUS[0]}
            set -e
          fi
          if [[ ${bazel_status:-0} -ne 0 ]]; then
            print_failed_bazel_test_logs "$bazel_console_log"
            exit "$bazel_status"
          fi

View File

@@ -1,32 +0,0 @@
# Blocks pull requests that add or modify blobs larger than the size cap,
# unless the path is explicitly allowlisted.
name: blob-size-policy
on:
  pull_request: {}
jobs:
  check:
    name: Blob size policy
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v6
        with:
          # Full history so the base/head commits resolved below are available.
          fetch-depth: 0
      - name: Determine PR comparison range
        id: range
        shell: bash
        run: |
          set -euo pipefail
          # On pull_request events HEAD is the synthetic merge commit:
          # parent 1 is the base branch tip, parent 2 is the PR head.
          echo "base=$(git rev-parse HEAD^1)" >> "$GITHUB_OUTPUT"
          echo "head=$(git rev-parse HEAD^2)" >> "$GITHUB_OUTPUT"
      - name: Check changed blob sizes
        env:
          BASE_SHA: ${{ steps.range.outputs.base }}
          HEAD_SHA: ${{ steps.range.outputs.head }}
        run: |
          # Fails when a blob changed between base and head exceeds
          # --max-bytes and is not listed in the allowlist file.
          python3 scripts/check_blob_size.py \
            --base "$BASE_SHA" \
            --head "$HEAD_SHA" \
            --max-bytes 512000 \
            --allowlist .github/blob-size-allowlist.txt

View File

@@ -1,26 +0,0 @@
# Runs cargo-deny against the Rust workspace on every PR and on pushes to main.
name: cargo-deny
on:
  pull_request:
  push:
    branches:
      - main
jobs:
  cargo-deny:
    runs-on: ubuntu-latest
    defaults:
      run:
        # The Rust workspace lives under codex-rs/.
        working-directory: ./codex-rs
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable
      - name: Run cargo-deny
        uses: EmbarkStudios/cargo-deny-action@v2
        with:
          rust-version: stable
          # Point the action at the workspace manifest (paths here are
          # repo-root relative, independent of the job's working-directory).
          manifest-path: ./codex-rs/Cargo.toml

View File

@@ -1,27 +0,0 @@
# CI-only Bazel options, layered on top of the repository .bazelrc
# (passed via --bazelrc in the CI driver script).
common --remote_download_minimal
common --keep_going
common --verbose_failures
# Disable the disk cache since we have a remote one and aren't using persistent workers.
common --disk_cache=
# Rearrange caches on Windows so they're on the same volume as the checkout.
common:windows --repo_contents_cache=D:/a/.cache/bazel-repo-contents-cache
common:windows --repository_cache=D:/a/.cache/bazel-repo-cache
# We prefer to run the build actions entirely remotely so we can dial up the concurrency.
# We have platform-specific tests, so we want to execute the tests on all platforms using the strongest sandboxing available on each platform.
# On linux, we can do a full remote build/test, by targeting the right (x86/arm) runners, so we have coverage of both.
# Linux crossbuilds don't work until we untangle the libc constraint mess.
common:linux --config=remote
common:linux --strategy=remote
common:linux --platforms=//:rbe
# On mac, we can run all the build actions remotely but test actions locally.
common:macos --config=remote
common:macos --strategy=remote
common:macos --strategy=TestRunner=darwin-sandbox,local
# On Windows we cannot cross-build the tests, so we run them locally, due to what appears
# to be a Bazel bug (Windows vs Unix path confusion).

View File

@@ -1,7 +1,7 @@
name: ci
on:
pull_request: {}
pull_request: { branches: [main] }
push: { branches: [main] }
jobs:
@@ -12,45 +12,27 @@ jobs:
NODE_OPTIONS: --max-old-space-size=4096
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Setup pnpm
uses: pnpm/action-setup@v5
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v6
uses: actions/setup-node@v5
with:
node-version: 22
- name: Install dependencies
run: pnpm install --frozen-lockfile
# stage_npm_packages.py requires DotSlash when staging releases.
- uses: facebook/install-dotslash@v2
# Run all tasks using workspace filters
- name: Stage npm package
id: stage_npm_package
- name: Ensure staging a release works.
env:
GH_TOKEN: ${{ github.token }}
run: |
set -euo pipefail
# Use a rust-release version that includes all native binaries.
CODEX_VERSION=0.115.0
OUTPUT_DIR="${RUNNER_TEMP}"
python3 ./scripts/stage_npm_packages.py \
--release-version "$CODEX_VERSION" \
--package codex \
--output-dir "$OUTPUT_DIR"
PACK_OUTPUT="${OUTPUT_DIR}/codex-npm-${CODEX_VERSION}.tgz"
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
- name: Upload staged npm package artifact
uses: actions/upload-artifact@v7
with:
name: codex-npm-staging
path: ${{ steps.stage_npm_package.outputs.pack_output }}
run: ./codex-cli/scripts/stage_release.sh
- name: Ensure root README.md contains only ASCII and certain Unicode code points
run: ./scripts/asciicheck.py README.md
@@ -61,6 +43,3 @@ jobs:
run: ./scripts/asciicheck.py codex-cli/README.md
- name: Check codex-cli/README ToC
run: python3 scripts/readme_toc.py codex-cli/README.md
- name: Prettier (run `pnpm run format:fix` to fix)
run: pnpm run format

View File

@@ -13,37 +13,17 @@ permissions:
jobs:
cla:
# Only run the CLA assistant for the canonical openai repo so forks are not blocked
# and contributors who signed previously do not receive duplicate CLA notifications.
if: ${{ github.repository_owner == 'openai' }}
runs-on: ubuntu-latest
steps:
- uses: contributor-assistant/github-action@v2.6.1
# Run on close only if the PR was merged. This will lock the PR to preserve
# the CLA agreement. We don't want to lock PRs that have been closed without
# merging because the contributor may want to respond with additional comments.
# This action has a "lock-pullrequest-aftermerge" option that can be set to false,
# but that would unconditionally skip locking even in cases where the PR was merged.
if: |
(
github.event_name == 'pull_request_target' &&
(
github.event.action == 'opened' ||
github.event.action == 'synchronize' ||
(github.event.action == 'closed' && github.event.pull_request.merged == true)
)
) ||
(
github.event_name == 'issue_comment' &&
(
github.event.comment.body == 'recheck' ||
github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA'
)
)
github.event_name == 'pull_request_target' ||
github.event.comment.body == 'recheck' ||
github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
path-to-document: https://github.com/openai/codex/blob/main/docs/CLA.md
path-to-signatures: signatures/cla.json
branch: cla-signatures
allowlist: codex,dependabot,dependabot[bot],github-actions[bot]
allowlist: dependabot[bot]

View File

@@ -1,107 +0,0 @@
name: Close stale contributor PRs
on:
workflow_dispatch:
schedule:
- cron: "0 6 * * *"
permissions:
contents: read
issues: write
pull-requests: write
jobs:
close-stale-contributor-prs:
# Prevent scheduled runs on forks
if: github.repository == 'openai/codex'
runs-on: ubuntu-latest
steps:
- name: Close inactive PRs from contributors
uses: actions/github-script@v8
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const DAYS_INACTIVE = 14;
const cutoff = new Date(Date.now() - DAYS_INACTIVE * 24 * 60 * 60 * 1000);
const { owner, repo } = context.repo;
const dryRun = false;
const stalePrs = [];
core.info(`Dry run mode: ${dryRun}`);
const prs = await github.paginate(github.rest.pulls.list, {
owner,
repo,
state: "open",
per_page: 100,
sort: "updated",
direction: "asc",
});
for (const pr of prs) {
const lastUpdated = new Date(pr.updated_at);
if (lastUpdated > cutoff) {
core.info(`PR ${pr.number} is fresh`);
continue;
}
if (!pr.user || pr.user.type !== "User") {
core.info(`PR ${pr.number} wasn't created by a user`);
continue;
}
let permission;
try {
const permissionResponse = await github.rest.repos.getCollaboratorPermissionLevel({
owner,
repo,
username: pr.user.login,
});
permission = permissionResponse.data.permission;
} catch (error) {
if (error.status === 404) {
core.info(`Author ${pr.user.login} is not a collaborator; skipping #${pr.number}`);
continue;
}
throw error;
}
const hasContributorAccess = ["admin", "maintain", "write"].includes(permission);
if (!hasContributorAccess) {
core.info(`Author ${pr.user.login} has ${permission} access; skipping #${pr.number}`);
continue;
}
stalePrs.push(pr);
}
if (!stalePrs.length) {
core.info("No stale contributor pull requests found.");
return;
}
for (const pr of stalePrs) {
const issue_number = pr.number;
const closeComment = `Closing this pull request because it has had no updates for more than ${DAYS_INACTIVE} days. If you plan to continue working on it, feel free to reopen or open a new PR.`;
if (dryRun) {
core.info(`[dry-run] Would close contributor PR #${issue_number} from ${pr.user.login}`);
continue;
}
await github.rest.issues.createComment({
owner,
repo,
issue_number,
body: closeComment,
});
await github.rest.pulls.update({
owner,
repo,
pull_number: issue_number,
state: "closed",
});
core.info(`Closed contributor PR #${issue_number} from ${pr.user.login}`);
}

View File

@@ -18,10 +18,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Annotate locations with typos
uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
- name: Codespell
uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 # v2.2
uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2
with:
ignore_words_file: .codespellignore

View File

@@ -1,402 +0,0 @@
name: Issue Deduplicator
on:
issues:
types:
- opened
- labeled
jobs:
gather-duplicates-all:
name: Identify potential duplicates (all issues)
# Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate'))
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
issues_json: ${{ steps.normalize-all.outputs.issues_json }}
reason: ${{ steps.normalize-all.outputs.reason }}
has_matches: ${{ steps.normalize-all.outputs.has_matches }}
steps:
- uses: actions/checkout@v6
- name: Prepare Codex inputs
env:
GH_TOKEN: ${{ github.token }}
REPO: ${{ github.repository }}
ISSUE_NUMBER: ${{ github.event.issue.number }}
run: |
set -eo pipefail
CURRENT_ISSUE_FILE=codex-current-issue.json
EXISTING_ALL_FILE=codex-existing-issues-all.json
gh issue list --repo "$REPO" \
--json number,title,body,createdAt,updatedAt,state,labels \
--limit 1000 \
--state all \
--search "sort:created-desc" \
| jq '[.[] | {
number,
title,
body: ((.body // "")[0:4000]),
createdAt,
updatedAt,
state,
labels: ((.labels // []) | map(.name))
}]' \
> "$EXISTING_ALL_FILE"
gh issue view "$ISSUE_NUMBER" \
--repo "$REPO" \
--json number,title,body \
| jq '{number, title, body: ((.body // "")[0:4000])}' \
> "$CURRENT_ISSUE_FILE"
echo "Prepared duplicate detection input files."
echo "all_issue_count=$(jq 'length' "$EXISTING_ALL_FILE")"
# Prompt instructions are intentionally inline in this workflow. The old
# .github/prompts/issue-deduplicator.txt file is obsolete and removed.
- id: codex-all
name: Find duplicates (pass 1, all issues)
uses: openai/codex-action@main
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
prompt: |
You are an assistant that triages new GitHub issues by identifying potential duplicates.
You will receive the following JSON files located in the current working directory:
- `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
- `codex-existing-issues-all.json`: JSON array of recent issues with states, timestamps, and labels.
Instructions:
- Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
- Prioritize concrete overlap in symptoms, reproduction details, error signatures, and user intent.
- Prefer active unresolved issues when confidence is similar.
- Closed issues can still be valid duplicates if they clearly match.
- Return fewer matches rather than speculative ones.
- If confidence is low, return an empty list.
- Include at most five issue numbers.
- After analysis, provide a short reason for your decision.
output-schema: |
{
"type": "object",
"properties": {
"issues": {
"type": "array",
"items": {
"type": "string"
}
},
"reason": { "type": "string" }
},
"required": ["issues", "reason"],
"additionalProperties": false
}
- id: normalize-all
name: Normalize pass 1 output
env:
CODEX_OUTPUT: ${{ steps.codex-all.outputs.final-message }}
CURRENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
run: |
set -eo pipefail
raw=${CODEX_OUTPUT//$'\r'/}
parsed=false
issues='[]'
reason=''
if [ -n "$raw" ] && printf '%s' "$raw" | jq -e 'type == "object" and (.issues | type == "array")' >/dev/null 2>&1; then
parsed=true
issues=$(printf '%s' "$raw" | jq -c '[.issues[] | tostring]')
reason=$(printf '%s' "$raw" | jq -r '.reason // ""')
else
reason='Pass 1 output was empty or invalid JSON.'
fi
filtered=$(jq -cn --argjson issues "$issues" --arg current "$CURRENT_ISSUE_NUMBER" '[
$issues[]
| tostring
| select(. != $current)
] | reduce .[] as $issue ([]; if index($issue) then . else . + [$issue] end) | .[:5]')
has_matches=false
if [ "$(jq 'length' <<< "$filtered")" -gt 0 ]; then
has_matches=true
fi
echo "Pass 1 parsed: $parsed"
echo "Pass 1 matches after filtering: $(jq 'length' <<< "$filtered")"
echo "Pass 1 reason: $reason"
{
echo "issues_json=$filtered"
echo "reason<<EOF"
echo "$reason"
echo "EOF"
echo "has_matches=$has_matches"
} >> "$GITHUB_OUTPUT"
gather-duplicates-open:
name: Identify potential duplicates (open issues fallback)
# Pass 1 may drop sudo on the runner, so run the fallback in a fresh job.
needs: gather-duplicates-all
if: ${{ needs.gather-duplicates-all.result == 'success' && needs.gather-duplicates-all.outputs.has_matches != 'true' }}
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
issues_json: ${{ steps.normalize-open.outputs.issues_json }}
reason: ${{ steps.normalize-open.outputs.reason }}
has_matches: ${{ steps.normalize-open.outputs.has_matches }}
steps:
- uses: actions/checkout@v6
- name: Prepare Codex inputs
env:
GH_TOKEN: ${{ github.token }}
REPO: ${{ github.repository }}
ISSUE_NUMBER: ${{ github.event.issue.number }}
run: |
set -eo pipefail
CURRENT_ISSUE_FILE=codex-current-issue.json
EXISTING_OPEN_FILE=codex-existing-issues-open.json
gh issue list --repo "$REPO" \
--json number,title,body,createdAt,updatedAt,state,labels \
--limit 1000 \
--state open \
--search "sort:created-desc" \
| jq '[.[] | {
number,
title,
body: ((.body // "")[0:4000]),
createdAt,
updatedAt,
state,
labels: ((.labels // []) | map(.name))
}]' \
> "$EXISTING_OPEN_FILE"
gh issue view "$ISSUE_NUMBER" \
--repo "$REPO" \
--json number,title,body \
| jq '{number, title, body: ((.body // "")[0:4000])}' \
> "$CURRENT_ISSUE_FILE"
echo "Prepared fallback duplicate detection input files."
echo "open_issue_count=$(jq 'length' "$EXISTING_OPEN_FILE")"
- id: codex-open
name: Find duplicates (pass 2, open issues)
uses: openai/codex-action@main
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
prompt: |
You are an assistant that triages new GitHub issues by identifying potential duplicates.
This is a fallback pass because a broad search did not find convincing matches.
You will receive the following JSON files located in the current working directory:
- `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
- `codex-existing-issues-open.json`: JSON array of open issues only.
Instructions:
- Search only these active unresolved issues for duplicates of the current issue.
- Prioritize concrete overlap in symptoms, reproduction details, error signatures, and user intent.
- Prefer fewer, higher-confidence matches.
- If confidence is low, return an empty list.
- Include at most five issue numbers.
- After analysis, provide a short reason for your decision.
output-schema: |
{
"type": "object",
"properties": {
"issues": {
"type": "array",
"items": {
"type": "string"
}
},
"reason": { "type": "string" }
},
"required": ["issues", "reason"],
"additionalProperties": false
}
- id: normalize-open
name: Normalize pass 2 output
env:
CODEX_OUTPUT: ${{ steps.codex-open.outputs.final-message }}
CURRENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
run: |
set -eo pipefail
raw=${CODEX_OUTPUT//$'\r'/}
parsed=false
issues='[]'
reason=''
if [ -n "$raw" ] && printf '%s' "$raw" | jq -e 'type == "object" and (.issues | type == "array")' >/dev/null 2>&1; then
parsed=true
issues=$(printf '%s' "$raw" | jq -c '[.issues[] | tostring]')
reason=$(printf '%s' "$raw" | jq -r '.reason // ""')
else
reason='Pass 2 output was empty or invalid JSON.'
fi
filtered=$(jq -cn --argjson issues "$issues" --arg current "$CURRENT_ISSUE_NUMBER" '[
$issues[]
| tostring
| select(. != $current)
] | reduce .[] as $issue ([]; if index($issue) then . else . + [$issue] end) | .[:5]')
has_matches=false
if [ "$(jq 'length' <<< "$filtered")" -gt 0 ]; then
has_matches=true
fi
echo "Pass 2 parsed: $parsed"
echo "Pass 2 matches after filtering: $(jq 'length' <<< "$filtered")"
echo "Pass 2 reason: $reason"
{
echo "issues_json=$filtered"
echo "reason<<EOF"
echo "$reason"
echo "EOF"
echo "has_matches=$has_matches"
} >> "$GITHUB_OUTPUT"
select-final:
name: Select final duplicate set
needs:
- gather-duplicates-all
- gather-duplicates-open
if: ${{ always() && needs.gather-duplicates-all.result == 'success' && (needs.gather-duplicates-open.result == 'success' || needs.gather-duplicates-open.result == 'skipped') }}
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
codex_output: ${{ steps.select-final.outputs.codex_output }}
steps:
- id: select-final
name: Select final duplicate set
env:
PASS1_ISSUES: ${{ needs.gather-duplicates-all.outputs.issues_json }}
PASS1_REASON: ${{ needs.gather-duplicates-all.outputs.reason }}
PASS2_ISSUES: ${{ needs.gather-duplicates-open.outputs.issues_json }}
PASS2_REASON: ${{ needs.gather-duplicates-open.outputs.reason }}
PASS1_HAS_MATCHES: ${{ needs.gather-duplicates-all.outputs.has_matches }}
PASS2_HAS_MATCHES: ${{ needs.gather-duplicates-open.outputs.has_matches }}
run: |
set -eo pipefail
selected_issues='[]'
selected_reason='No plausible duplicates found.'
selected_pass='none'
if [ "$PASS1_HAS_MATCHES" = "true" ]; then
selected_issues=${PASS1_ISSUES:-'[]'}
selected_reason=${PASS1_REASON:-'Pass 1 found duplicates.'}
selected_pass='all'
fi
if [ "$PASS2_HAS_MATCHES" = "true" ]; then
selected_issues=${PASS2_ISSUES:-'[]'}
selected_reason=${PASS2_REASON:-'Pass 2 found duplicates.'}
selected_pass='open-fallback'
fi
final_json=$(jq -cn \
--argjson issues "$selected_issues" \
--arg reason "$selected_reason" \
--arg pass "$selected_pass" \
'{issues: $issues, reason: $reason, pass: $pass}')
echo "Final pass used: $selected_pass"
echo "Final duplicate count: $(jq '.issues | length' <<< "$final_json")"
echo "Final reason: $(jq -r '.reason' <<< "$final_json")"
{
echo "codex_output<<EOF"
echo "$final_json"
echo "EOF"
} >> "$GITHUB_OUTPUT"
comment-on-issue:
name: Comment with potential duplicates
needs: select-final
if: ${{ always() && needs.select-final.result == 'success' }}
runs-on: ubuntu-latest
permissions:
contents: read
issues: write
steps:
- name: Comment on issue
uses: actions/github-script@v8
env:
CODEX_OUTPUT: ${{ needs.select-final.outputs.codex_output }}
with:
github-token: ${{ github.token }}
script: |
const raw = process.env.CODEX_OUTPUT ?? '';
let parsed;
try {
parsed = JSON.parse(raw);
} catch (error) {
core.info(`Codex output was not valid JSON. Raw output: ${raw}`);
core.info(`Parse error: ${error.message}`);
return;
}
const issues = Array.isArray(parsed?.issues) ? parsed.issues : [];
const currentIssueNumber = String(context.payload.issue.number);
const passUsed = typeof parsed?.pass === 'string' ? parsed.pass : 'unknown';
const reason = typeof parsed?.reason === 'string' ? parsed.reason : '';
console.log(`Current issue number: ${currentIssueNumber}`);
console.log(`Pass used: ${passUsed}`);
if (reason) {
console.log(`Reason: ${reason}`);
}
console.log(issues);
const filteredIssues = [...new Set(issues.map((value) => String(value)))].filter((value) => value !== currentIssueNumber).slice(0, 5);
if (filteredIssues.length === 0) {
core.info('Codex reported no potential duplicates.');
return;
}
const lines = [
'Potential duplicates detected. Please review them and close your issue if it is a duplicate.',
'',
...filteredIssues.map((value) => `- #${String(value)}`),
'',
'*Powered by [Codex Action](https://github.com/openai/codex-action)*'];
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.payload.issue.number,
body: lines.join("\n"),
});
- name: Remove codex-deduplicate label
if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate' }}
env:
GH_TOKEN: ${{ github.token }}
GH_REPO: ${{ github.repository }}
ISSUE_NUMBER: ${{ github.event.issue.number }}
run: |
gh issue edit "$ISSUE_NUMBER" --remove-label codex-deduplicate || true
echo "Attempted to remove label: codex-deduplicate"

View File

@@ -1,133 +0,0 @@
name: Issue Labeler
on:
issues:
types:
- opened
- labeled
jobs:
gather-labels:
name: Generate label suggestions
# Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label'))
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
codex_output: ${{ steps.codex.outputs.final-message }}
steps:
- uses: actions/checkout@v6
- id: codex
uses: openai/codex-action@main
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
prompt: |
You are an assistant that reviews GitHub issues for the repository.
Your job is to choose the most appropriate labels for the issue described later in this prompt.
Follow these rules:
- Add one (and only one) of the following three labels to distinguish the type of issue. Default to "bug" if unsure.
1. bug — Reproducible defects in Codex products (CLI, VS Code extension, web, auth).
2. enhancement — Feature requests or usability improvements that ask for new capabilities, better ergonomics, or quality-of-life tweaks.
3. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
- If applicable, add one of the following labels to specify which sub-product or product surface the issue relates to.
1. CLI — the Codex command line interface.
2. extension — VS Code (or other IDE) extension-specific issues.
3. app - Issues related to the Codex desktop application.
4. codex-web — Issues targeting the Codex web UI/Cloud experience.
5. github-action — Issues with the Codex GitHub action.
6. iOS — Issues with the Codex iOS app.
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
2. mcp — Topics involving Model Context Protocol servers/clients.
3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
4. azure — Problems or requests tied to Azure OpenAI deployments.
5. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.
6. code-review — Issues related to the code review feature or functionality.
7. safety-check - Issues related to cyber risk detection or trusted access verification.
8. auth - Problems related to authentication, login, or access tokens.
9. codex-exec - Problems related to the "codex exec" command or functionality.
10. context-management - Problems related to compaction, context windows, or available context reporting.
11. custom-model - Problems that involve using custom model providers, local models, or OSS models.
12. rate-limits - Problems related to token limits, rate limits, or token usage reporting.
13. sandbox - Issues related to local sandbox environments or tool call approvals to override sandbox restrictions.
14. tool-calls - Problems related to specific tool call invocations including unexpected errors, failures, or hangs.
15. TUI - Problems with the terminal user interface (TUI) including keyboard shortcuts, copy & pasting, menus, or screen update issues.
Issue number: ${{ github.event.issue.number }}
Issue title:
${{ github.event.issue.title }}
Issue body:
${{ github.event.issue.body }}
Repository full name:
${{ github.repository }}
output-schema: |
{
"type": "object",
"properties": {
"labels": {
"type": "array",
"items": {
"type": "string"
}
}
},
"required": ["labels"],
"additionalProperties": false
}
apply-labels:
name: Apply labels from Codex output
needs: gather-labels
if: ${{ needs.gather-labels.result != 'skipped' }}
runs-on: ubuntu-latest
permissions:
contents: read
issues: write
env:
GH_TOKEN: ${{ github.token }}
GH_REPO: ${{ github.repository }}
ISSUE_NUMBER: ${{ github.event.issue.number }}
CODEX_OUTPUT: ${{ needs.gather-labels.outputs.codex_output }}
steps:
- name: Apply labels
run: |
json=${CODEX_OUTPUT//$'\r'/}
if [ -z "$json" ]; then
echo "Codex produced no output. Skipping label application."
exit 0
fi
if ! printf '%s' "$json" | jq -e 'type == "object" and (.labels | type == "array")' >/dev/null 2>&1; then
echo "Codex output did not include a labels array. Raw output: $json"
exit 0
fi
labels=$(printf '%s' "$json" | jq -r '.labels[] | tostring')
if [ -z "$labels" ]; then
echo "Codex returned an empty array. Nothing to do."
exit 0
fi
cmd=(gh issue edit "$ISSUE_NUMBER")
while IFS= read -r label; do
cmd+=(--add-label "$label")
done <<< "$labels"
"${cmd[@]}" || true
- name: Remove codex-label trigger
if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'codex-label' }}
run: |
gh issue edit "$ISSUE_NUMBER" --remove-label codex-label || true
echo "Attempted to remove label: codex-label"

View File

@@ -9,17 +9,15 @@ on:
# CI builds in debug (dev) for faster signal.
jobs:
# --- Detect what changed to detect which tests to run (always runs) -------------------------------------
# --- Detect what changed (always runs) -------------------------------------
changed:
name: Detect changed areas
runs-on: ubuntu-24.04
outputs:
argument_comment_lint: ${{ steps.detect.outputs.argument_comment_lint }}
argument_comment_lint_package: ${{ steps.detect.outputs.argument_comment_lint_package }}
codex: ${{ steps.detect.outputs.codex }}
workflows: ${{ steps.detect.outputs.workflows }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Detect changed paths (no external action)
@@ -30,29 +28,21 @@ jobs:
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
BASE_SHA='${{ github.event.pull_request.base.sha }}'
HEAD_SHA='${{ github.event.pull_request.head.sha }}'
echo "Base SHA: $BASE_SHA"
echo "Head SHA: $HEAD_SHA"
# List files changed between base and PR head
mapfile -t files < <(git diff --name-only --no-renames "$BASE_SHA" "$HEAD_SHA")
# List files changed between base and current HEAD (merge-base aware)
mapfile -t files < <(git diff --name-only --no-renames "$BASE_SHA"...HEAD)
else
# On push / manual runs, default to running everything
files=("codex-rs/force" ".github/force")
fi
codex=false
argument_comment_lint=false
argument_comment_lint_package=false
workflows=false
for f in "${files[@]}"; do
[[ $f == codex-rs/* ]] && codex=true
[[ $f == codex-rs/* || $f == tools/argument-comment-lint/* || $f == justfile ]] && argument_comment_lint=true
[[ $f == tools/argument-comment-lint/* || $f == .github/workflows/rust-ci.yml ]] && argument_comment_lint_package=true
[[ $f == .github/* ]] && workflows=true
done
echo "argument_comment_lint=$argument_comment_lint" >> "$GITHUB_OUTPUT"
echo "argument_comment_lint_package=$argument_comment_lint_package" >> "$GITHUB_OUTPUT"
echo "codex=$codex" >> "$GITHUB_OUTPUT"
echo "workflows=$workflows" >> "$GITHUB_OUTPUT"
@@ -66,8 +56,8 @@ jobs:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93.0
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
with:
components: rustfmt
- name: cargo fmt
@@ -82,85 +72,19 @@ jobs:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93.0
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: cargo-shear
version: 1.5.1
- name: cargo shear
run: cargo shear
argument_comment_lint_package:
name: Argument comment lint package
runs-on: ubuntu-24.04
needs: changed
if: ${{ needs.changed.outputs.argument_comment_lint_package == 'true' || github.event_name == 'push' }}
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93.0
with:
toolchain: nightly-2025-09-18
components: llvm-tools-preview, rustc-dev, rust-src
- name: Cache cargo-dylint tooling
id: cargo_dylint_cache
uses: actions/cache@v5
with:
path: |
~/.cargo/bin/cargo-dylint
~/.cargo/bin/dylint-link
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: argument-comment-lint-${{ runner.os }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml') }}
- name: Install cargo-dylint tooling
if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }}
run: cargo install --locked cargo-dylint dylint-link
- name: Check source wrapper syntax
run: bash -n tools/argument-comment-lint/run.sh
- name: Test argument comment lint package
working-directory: tools/argument-comment-lint
run: cargo test
argument_comment_lint_prebuilt:
name: Argument comment lint - ${{ matrix.name }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
needs: changed
if: ${{ needs.changed.outputs.argument_comment_lint == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
strategy:
fail-fast: false
matrix:
include:
- name: Linux
runner: ubuntu-24.04
- name: macOS
runner: macos-15-xlarge
- name: Windows
runner: windows-x64
runs_on:
group: codex-runners
labels: codex-windows-x64
steps:
- uses: actions/checkout@v6
- name: Install Linux sandbox build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
sudo DEBIAN_FRONTEND=noninteractive apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- uses: dtolnay/rust-toolchain@1.93.0
with:
toolchain: nightly-2025-09-18
components: llvm-tools-preview, rustc-dev, rust-src
- uses: facebook/install-dotslash@v2
- name: Run argument comment lint on codex-rs
shell: bash
run: ./tools/argument-comment-lint/run-prebuilt-linter.sh
# --- CI to validate on different os/targets --------------------------------
lint_build:
name: Lint/Build — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
lint_build_test:
name: ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
# Keep job-level if to avoid spinning up runners when not needed
@@ -168,658 +92,138 @@ jobs:
defaults:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects, except on
# arm64 macOS runners cross-targeting x86_64 where ring/cc-rs can produce
# mixed-architecture archives under sccache.
USE_SCCACHE: ${{ (startsWith(matrix.runner, 'windows') || (matrix.runner == 'macos-15-xlarge' && matrix.target == 'x86_64-apple-darwin')) && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
# In rust-ci, representative release-profile checks use thin LTO for faster feedback.
CARGO_PROFILE_RELEASE_LTO: ${{ matrix.profile == 'release' && 'thin' || 'fat' }}
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
profile: dev
- runner: macos-15-xlarge
- runner: macos-14
target: x86_64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
# Also run representative release builds on Mac and Linux because
# there could be release-only build errors we want to catch.
# Hopefully this also pre-populates the build cache to speed up
# releases.
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
profile: release
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: release
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
profile: release
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
- name: Install Linux build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
packages=(pkg-config libcap-dev)
if [[ "${{ matrix.target }}" == 'x86_64-unknown-linux-musl' || "${{ matrix.target }}" == 'aarch64-unknown-linux-musl' ]]; then
packages+=(libubsan1)
fi
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends "${packages[@]}"
fi
- uses: dtolnay/rust-toolchain@1.93.0
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
with:
targets: ${{ matrix.target }}
components: clippy
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
shell: bash
run: |
set -euo pipefail
echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
# Explicit cache restore: split cargo home vs target, so we can
# avoid caching the large target dir on the gnu-dev job.
- name: Restore cargo home cache
id: cache_cargo_home_restore
uses: actions/cache/restore@v5
- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
# Install and restore sccache cache
- name: Install sccache
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
- name: Enable sccache wrapper
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV"
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
uses: actions/cache/restore@v5
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Disable sccache wrapper (musl)
shell: bash
run: |
set -euo pipefail
echo "RUSTC_WRAPPER=" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Prepare APT cache directories (musl)
shell: bash
run: |
set -euo pipefail
sudo mkdir -p /var/cache/apt/archives /var/lib/apt/lists
sudo chown -R "$USER:$USER" /var/cache/apt /var/lib/apt/lists
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Restore APT cache (musl)
id: cache_apt_restore
uses: actions/cache/restore@v5
with:
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@d1434d08867e3ee9daa34448df10607b98908d29 # v2
with:
version: 0.14.0
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
DEBIAN_FRONTEND: noninteractive
TARGET: ${{ matrix.target }}
APT_UPDATE_ARGS: -o Acquire::Retries=3
APT_INSTALL_ARGS: --no-install-recommends
shell: bash
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
name: Configure musl rusty_v8 artifact overrides
env:
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)"
release_tag="rusty-v8-v${version}"
base_url="https://github.com/openai/codex/releases/download/${release_tag}"
archive="https://github.com/openai/codex/releases/download/rusty-v8-v${version}/librusty_v8_release_${TARGET}.a.gz"
binding_dir="${RUNNER_TEMP}/rusty_v8"
binding_path="${binding_dir}/src_binding_release_${TARGET}.rs"
mkdir -p "${binding_dir}"
curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}"
echo "RUSTY_V8_ARCHIVE=${archive}" >> "$GITHUB_ENV"
echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "$GITHUB_ENV"
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-chef
version: 0.1.71
- name: Pre-warm dependency cache (cargo-chef)
if: ${{ matrix.profile == 'release' }}
shell: bash
run: |
set -euo pipefail
RECIPE="${RUNNER_TEMP}/chef-recipe.json"
cargo chef prepare --recipe-path "$RECIPE"
cargo chef cook --recipe-path "$RECIPE" --target ${{ matrix.target }} --release --all-features
sudo apt install -y musl-tools pkg-config && sudo rm -rf /var/lib/apt/lists/*
- name: cargo clippy
run: cargo clippy --target ${{ matrix.target }} --all-features --tests --profile ${{ matrix.profile }} --timings -- -D warnings
id: clippy
run: cargo clippy --target ${{ matrix.target }} --all-features --tests --profile ${{ matrix.profile }} -- -D warnings
- name: Upload Cargo timings (clippy)
if: always()
uses: actions/upload-artifact@v7
with:
name: cargo-timings-rust-ci-clippy-${{ matrix.target }}-${{ matrix.profile }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
# Save caches explicitly; make non-fatal so cache packaging
# never fails the overall job. Only save when key wasn't hit.
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
# Running `cargo build` from the workspace root builds the workspace using
# the union of all features from third-party crates. This can mask errors
# where individual crates have underspecified features. To avoid this, we
# run `cargo check` for each crate individually, though because this is
# slower, we only do this for the x86_64-unknown-linux-gnu target.
- name: cargo check individual crates
id: cargo_check_all_crates
if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release' }}
continue-on-error: true
uses: actions/cache/save@v5
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
uses: actions/cache/save@v5
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
- name: sccache stats
if: always() && env.USE_SCCACHE == 'true'
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always() && env.USE_SCCACHE == 'true'
shell: bash
run: |
{
echo "### sccache stats — ${{ matrix.target }} (${{ matrix.profile }})";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 \
| xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo check --profile ${{ matrix.profile }}'
- name: Save APT cache (musl)
if: always() && !cancelled() && (matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl') && steps.cache_apt_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v5
with:
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
tests:
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.remote_env == 'true' && ' (remote)' || '' }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: ${{ matrix.runner == 'windows-arm64' && 35 || 30 }}
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects, except on
# arm64 macOS runners cross-targeting x86_64 where ring/cc-rs can produce
# mixed-architecture archives under sccache.
USE_SCCACHE: ${{ (startsWith(matrix.runner, 'windows') || (matrix.runner == 'macos-15-xlarge' && matrix.target == 'x86_64-apple-darwin')) && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
remote_env: "true"
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
- name: Set up Node.js for js_repl tests
uses: actions/setup-node@v6
with:
node-version-file: codex-rs/node-version.txt
- name: Install Linux build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
fi
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- uses: dtolnay/rust-toolchain@1.93.0
with:
targets: ${{ matrix.target }}
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
shell: bash
run: |
set -euo pipefail
echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
- name: Restore cargo home cache
id: cache_cargo_home_restore
uses: actions/cache/restore@v5
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- name: Install sccache
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
- name: Enable sccache wrapper
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV"
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
uses: actions/cache/restore@v5
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
- uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: nextest
version: 0.9.103
- name: Enable unprivileged user namespaces (Linux)
if: runner.os == 'Linux'
run: |
# Required for bubblewrap to work on Linux CI runners.
sudo sysctl -w kernel.unprivileged_userns_clone=1
# Ubuntu 24.04+ can additionally gate unprivileged user namespaces
# behind AppArmor.
if sudo sysctl -a 2>/dev/null | grep -q '^kernel.apparmor_restrict_unprivileged_userns'; then
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
fi
- name: Set up remote test env (Docker)
if: ${{ runner.os == 'Linux' && matrix.remote_env == 'true' }}
shell: bash
run: |
set -euo pipefail
export CODEX_TEST_REMOTE_ENV_CONTAINER_NAME=codex-remote-test-env
source "${GITHUB_WORKSPACE}/scripts/test-remote-env.sh"
echo "CODEX_TEST_REMOTE_ENV=${CODEX_TEST_REMOTE_ENV}" >> "$GITHUB_ENV"
- name: tests
id: test
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test --timings
# Tests take too long for release builds to run them on every PR.
if: ${{ matrix.profile != 'release' }}
continue-on-error: true
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }}
env:
RUST_BACKTRACE: 1
NEXTEST_STATUS_LEVEL: leak
- name: Upload Cargo timings (nextest)
if: always()
uses: actions/upload-artifact@v7
with:
name: cargo-timings-rust-ci-nextest-${{ matrix.target }}-${{ matrix.profile }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v5
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
uses: actions/cache/save@v5
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
- name: sccache stats
if: always() && env.USE_SCCACHE == 'true'
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always() && env.USE_SCCACHE == 'true'
shell: bash
# Fail the job if any of the previous steps failed.
- name: verify all steps passed
if: |
steps.clippy.outcome == 'failure' ||
steps.cargo_check_all_crates.outcome == 'failure' ||
steps.test.outcome == 'failure'
run: |
{
echo "### sccache stats — ${{ matrix.target }} (tests)";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
- name: Tear down remote test env
if: ${{ always() && runner.os == 'Linux' && matrix.remote_env == 'true' }}
shell: bash
run: |
set +e
if [[ "${{ steps.test.outcome }}" != "success" ]]; then
docker logs codex-remote-test-env || true
fi
docker rm -f codex-remote-test-env >/dev/null 2>&1 || true
- name: verify tests passed
if: steps.test.outcome == 'failure'
run: |
echo "Tests failed. See logs for details."
echo "One or more checks failed (clippy, cargo_check_all_crates, or test). See logs for details."
exit 1
# --- Gatherer job that you mark as the ONLY required status -----------------
results:
name: CI results (required)
needs:
[
changed,
general,
cargo_shear,
argument_comment_lint_package,
argument_comment_lint_prebuilt,
lint_build,
tests,
]
needs: [changed, general, cargo_shear, lint_build_test]
if: always()
runs-on: ubuntu-24.04
steps:
- name: Summarize
shell: bash
run: |
echo "argpkg : ${{ needs.argument_comment_lint_package.result }}"
echo "arglint: ${{ needs.argument_comment_lint_prebuilt.result }}"
echo "general: ${{ needs.general.result }}"
echo "shear : ${{ needs.cargo_shear.result }}"
echo "lint : ${{ needs.lint_build.result }}"
echo "tests : ${{ needs.tests.result }}"
echo "matrix : ${{ needs.lint_build_test.result }}"
# If nothing relevant changed (PR touching only root README, etc.),
# declare success regardless of other jobs.
if [[ '${{ needs.changed.outputs.argument_comment_lint }}' != 'true' && '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
if [[ '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
echo 'No relevant changes -> CI not required.'
exit 0
fi
if [[ '${{ needs.changed.outputs.argument_comment_lint_package }}' == 'true' || '${{ github.event_name }}' == 'push' ]]; then
[[ '${{ needs.argument_comment_lint_package.result }}' == 'success' ]] || { echo 'argument_comment_lint_package failed'; exit 1; }
fi
if [[ '${{ needs.changed.outputs.argument_comment_lint }}' == 'true' || '${{ needs.changed.outputs.workflows }}' == 'true' || '${{ github.event_name }}' == 'push' ]]; then
[[ '${{ needs.argument_comment_lint_prebuilt.result }}' == 'success' ]] || { echo 'argument_comment_lint_prebuilt failed'; exit 1; }
fi
if [[ '${{ needs.changed.outputs.codex }}' == 'true' || '${{ needs.changed.outputs.workflows }}' == 'true' || '${{ github.event_name }}' == 'push' ]]; then
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
[[ '${{ needs.lint_build.result }}' == 'success' ]] || { echo 'lint_build failed'; exit 1; }
[[ '${{ needs.tests.result }}' == 'success' ]] || { echo 'tests failed'; exit 1; }
fi
- name: sccache summary note
if: always()
run: |
echo "Per-job sccache stats are attached to each matrix job's Step Summary."
# Otherwise require the jobs to have succeeded
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
[[ '${{ needs.lint_build_test.result }}' == 'success' ]] || { echo 'matrix failed'; exit 1; }

View File

@@ -1,103 +0,0 @@
name: rust-release-argument-comment-lint
on:
workflow_call:
inputs:
publish:
required: true
type: boolean
jobs:
skip:
if: ${{ !inputs.publish }}
runs-on: ubuntu-latest
steps:
- run: echo "Skipping argument-comment-lint release assets for prerelease tag"
build:
if: ${{ inputs.publish }}
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
archive_name: argument-comment-lint-aarch64-apple-darwin.tar.gz
lib_name: libargument_comment_lint@nightly-2025-09-18-aarch64-apple-darwin.dylib
runner_binary: argument-comment-lint
cargo_dylint_binary: cargo-dylint
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
archive_name: argument-comment-lint-x86_64-unknown-linux-gnu.tar.gz
lib_name: libargument_comment_lint@nightly-2025-09-18-x86_64-unknown-linux-gnu.so
runner_binary: argument-comment-lint
cargo_dylint_binary: cargo-dylint
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
archive_name: argument-comment-lint-aarch64-unknown-linux-gnu.tar.gz
lib_name: libargument_comment_lint@nightly-2025-09-18-aarch64-unknown-linux-gnu.so
runner_binary: argument-comment-lint
cargo_dylint_binary: cargo-dylint
- runner: windows-x64
target: x86_64-pc-windows-msvc
archive_name: argument-comment-lint-x86_64-pc-windows-msvc.zip
lib_name: argument_comment_lint@nightly-2025-09-18-x86_64-pc-windows-msvc.dll
runner_binary: argument-comment-lint.exe
cargo_dylint_binary: cargo-dylint.exe
runs_on:
group: codex-runners
labels: codex-windows-x64
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93.0
with:
toolchain: nightly-2025-09-18
targets: ${{ matrix.target }}
components: llvm-tools-preview, rustc-dev, rust-src
- name: Install tooling
shell: bash
run: |
install_root="${RUNNER_TEMP}/argument-comment-lint-tools"
cargo install --locked cargo-dylint --root "$install_root"
cargo install --locked dylint-link
echo "INSTALL_ROOT=$install_root" >> "$GITHUB_ENV"
- name: Cargo build
working-directory: tools/argument-comment-lint
shell: bash
run: cargo build --release --target ${{ matrix.target }}
- name: Stage artifact
shell: bash
run: |
dest="dist/argument-comment-lint/${{ matrix.target }}"
mkdir -p "$dest"
package_root="${RUNNER_TEMP}/argument-comment-lint"
rm -rf "$package_root"
mkdir -p "$package_root/bin" "$package_root/lib"
cp "tools/argument-comment-lint/target/${{ matrix.target }}/release/${{ matrix.runner_binary }}" \
"$package_root/bin/${{ matrix.runner_binary }}"
cp "${INSTALL_ROOT}/bin/${{ matrix.cargo_dylint_binary }}" \
"$package_root/bin/${{ matrix.cargo_dylint_binary }}"
cp "tools/argument-comment-lint/target/${{ matrix.target }}/release/${{ matrix.lib_name }}" \
"$package_root/lib/${{ matrix.lib_name }}"
archive_path="$dest/${{ matrix.archive_name }}"
if [[ "${{ runner.os }}" == "Windows" ]]; then
(cd "${RUNNER_TEMP}" && 7z a "$GITHUB_WORKSPACE/$archive_path" argument-comment-lint >/dev/null)
else
(cd "${RUNNER_TEMP}" && tar -czf "$GITHUB_WORKSPACE/$archive_path" argument-comment-lint)
fi
- uses: actions/upload-artifact@v7
with:
name: argument-comment-lint-${{ matrix.target }}
path: dist/argument-comment-lint/${{ matrix.target }}/*

View File

@@ -1,53 +0,0 @@
name: rust-release-prepare
on:
workflow_dispatch:
schedule:
- cron: "0 */4 * * *"
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: false
permissions:
contents: write
pull-requests: write
jobs:
prepare:
# Prevent scheduled runs on forks (no secrets, wastes Actions minutes)
if: github.repository == 'openai/codex'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
with:
ref: main
fetch-depth: 0
- name: Update models.json
env:
OPENAI_API_KEY: ${{ secrets.CODEX_OPENAI_API_KEY }}
run: |
set -euo pipefail
client_version="99.99.99"
terminal_info="github-actions"
user_agent="codex_cli_rs/99.99.99 (Linux $(uname -r); $(uname -m)) ${terminal_info}"
base_url="${OPENAI_BASE_URL:-https://chatgpt.com/backend-api/codex}"
headers=(
-H "Authorization: Bearer ${OPENAI_API_KEY}"
-H "User-Agent: ${user_agent}"
)
url="${base_url%/}/models?client_version=${client_version}"
curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/core/models.json
- name: Open pull request (if changed)
uses: peter-evans/create-pull-request@v8
with:
commit-message: "Update models.json"
title: "Update models.json"
body: "Automated update of models.json."
branch: "bot/update-models-json"
reviewers: "pakrym-oai,aibrahim-oai"
delete-branch: true

View File

@@ -1,264 +0,0 @@
name: rust-release-windows
on:
workflow_call:
inputs:
release-lto:
required: true
type: string
secrets:
AZURE_TRUSTED_SIGNING_CLIENT_ID:
required: true
AZURE_TRUSTED_SIGNING_TENANT_ID:
required: true
AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID:
required: true
AZURE_TRUSTED_SIGNING_ENDPOINT:
required: true
AZURE_TRUSTED_SIGNING_ACCOUNT_NAME:
required: true
AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME:
required: true
jobs:
build-windows-binaries:
name: Build Windows binaries - ${{ matrix.runner }} - ${{ matrix.target }} - ${{ matrix.bundle }}
runs-on: ${{ matrix.runs_on }}
timeout-minutes: 60
permissions:
contents: read
defaults:
run:
working-directory: codex-rs
env:
CARGO_PROFILE_RELEASE_LTO: ${{ inputs.release-lto }}
strategy:
fail-fast: false
matrix:
include:
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: primary
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: primary
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: helpers
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: helpers
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
- name: Print runner specs (Windows)
shell: powershell
run: |
$computer = Get-CimInstance Win32_ComputerSystem
$cpu = Get-CimInstance Win32_Processor | Select-Object -First 1
$ramGiB = [math]::Round($computer.TotalPhysicalMemory / 1GB, 1)
Write-Host "Runner: $env:RUNNER_NAME"
Write-Host "OS: $([System.Environment]::OSVersion.VersionString)"
Write-Host "CPU: $($cpu.Name)"
Write-Host "Logical CPUs: $($computer.NumberOfLogicalProcessors)"
Write-Host "Physical CPUs: $($computer.NumberOfProcessors)"
Write-Host "Total RAM: $ramGiB GiB"
Write-Host "Disk usage:"
Get-PSDrive -PSProvider FileSystem | Format-Table -AutoSize Name, @{Name='Size(GB)';Expression={[math]::Round(($_.Used + $_.Free) / 1GB, 1)}}, @{Name='Free(GB)';Expression={[math]::Round($_.Free / 1GB, 1)}}
- uses: dtolnay/rust-toolchain@1.93.0
with:
targets: ${{ matrix.target }}
- name: Cargo build (Windows binaries)
shell: bash
run: |
cargo build --target ${{ matrix.target }} --release --timings ${{ matrix.build_args }}
- name: Upload Cargo timings
uses: actions/upload-artifact@v7
with:
name: cargo-timings-rust-release-windows-${{ matrix.target }}-${{ matrix.bundle }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
- name: Stage Windows binaries
shell: bash
run: |
output_dir="target/${{ matrix.target }}/release/staged-${{ matrix.bundle }}"
mkdir -p "$output_dir"
if [[ "${{ matrix.bundle }}" == "primary" ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$output_dir/codex.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$output_dir/codex-responses-api-proxy.exe"
else
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$output_dir/codex-windows-sandbox-setup.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$output_dir/codex-command-runner.exe"
fi
- name: Upload Windows binaries
uses: actions/upload-artifact@v7
with:
name: windows-binaries-${{ matrix.target }}-${{ matrix.bundle }}
path: |
codex-rs/target/${{ matrix.target }}/release/staged-${{ matrix.bundle }}/*
build-windows:
needs:
- build-windows-binaries
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on }}
timeout-minutes: 60
permissions:
contents: read
id-token: write
defaults:
run:
working-directory: codex-rs
strategy:
fail-fast: false
matrix:
include:
- runner: windows-x64
target: x86_64-pc-windows-msvc
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
- name: Download prebuilt Windows primary binaries
uses: actions/download-artifact@v8
with:
name: windows-binaries-${{ matrix.target }}-primary
path: codex-rs/target/${{ matrix.target }}/release
- name: Download prebuilt Windows helper binaries
uses: actions/download-artifact@v8
with:
name: windows-binaries-${{ matrix.target }}-helpers
path: codex-rs/target/${{ matrix.target }}/release
- name: Verify binaries
shell: bash
run: |
set -euo pipefail
ls -lh target/${{ matrix.target }}/release/codex.exe
ls -lh target/${{ matrix.target }}/release/codex-responses-api-proxy.exe
ls -lh target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe
ls -lh target/${{ matrix.target }}/release/codex-command-runner.exe
- name: Sign Windows binaries with Azure Trusted Signing
uses: ./.github/actions/windows-code-sign
with:
target: ${{ matrix.target }}
client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID }}
endpoint: ${{ secrets.AZURE_TRUSTED_SIGNING_ENDPOINT }}
account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT_NAME }}
certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME }}
- name: Stage artifacts
shell: bash
run: |
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$dest/codex-command-runner-${{ matrix.target }}.exe"
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- name: Compress artifacts
shell: bash
run: |
# Path that contains the uncompressed binaries for the current
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
repo_root=$PWD
# For compatibility with environments that lack the `zstd` tool we
# additionally create a `.tar.gz` and `.zip` for every Windows binary.
# The end result is:
# codex-<target>.zst
# codex-<target>.tar.gz
# codex-<target>.zip
for f in "$dest"/*; do
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz || "$base" == *.zip || "$base" == *.dmg ]]; then
continue
fi
# Don't try to compress signature bundles.
if [[ "$base" == *.sigstore ]]; then
continue
fi
# Create per-binary tar.gz
tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
# Create zip archive for Windows binaries.
# Must run from inside the dest dir so 7z won't embed the
# directory path inside the zip.
if [[ "$base" == "codex-${{ matrix.target }}.exe" ]]; then
# Bundle the sandbox helper binaries into the main codex zip so
# WinGet installs include the required helpers next to codex.exe.
# Fall back to the single-binary zip if the helpers are missing
# to avoid breaking releases.
bundle_dir="$(mktemp -d)"
runner_src="$dest/codex-command-runner-${{ matrix.target }}.exe"
setup_src="$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
if [[ -f "$runner_src" && -f "$setup_src" ]]; then
cp "$dest/$base" "$bundle_dir/$base"
cp "$runner_src" "$bundle_dir/codex-command-runner.exe"
cp "$setup_src" "$bundle_dir/codex-windows-sandbox-setup.exe"
# Use an absolute path so bundle zips land in the real dist
# dir even when 7z runs from a temp directory.
(cd "$bundle_dir" && 7z a "$repo_root/$dest/${base}.zip" .)
else
echo "warning: missing sandbox binaries; falling back to single-binary zip"
echo "warning: expected $runner_src and $setup_src"
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
rm -rf "$bundle_dir"
else
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
# Keep raw executables and produce .zst alongside them.
"${GITHUB_WORKSPACE}/.github/workflows/zstd" -T0 -19 "$dest/$base"
done
- uses: actions/upload-artifact@v7
with:
name: ${{ matrix.target }}
path: |
codex-rs/dist/${{ matrix.target }}/*

View File

@@ -1,95 +0,0 @@
name: rust-release-zsh
on:
workflow_call:
env:
ZSH_COMMIT: 77045ef899e53b9598bebc5a41db93a548a40ca6
ZSH_PATCH: codex-rs/shell-escalation/patches/zsh-exec-wrapper.patch
jobs:
linux:
name: Build zsh (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
container:
image: ${{ matrix.image }}
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: ubuntu-24.04
image: ubuntu:24.04
archive_name: codex-zsh-x86_64-unknown-linux-musl.tar.gz
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-24.04
image: arm64v8/ubuntu:24.04
archive_name: codex-zsh-aarch64-unknown-linux-musl.tar.gz
steps:
- name: Install build prerequisites
shell: bash
run: |
set -euo pipefail
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get install -y \
autoconf \
bison \
build-essential \
ca-certificates \
gettext \
git \
libncursesw5-dev
- uses: actions/checkout@v6
- name: Build, smoke-test, and stage zsh artifact
shell: bash
run: |
"${GITHUB_WORKSPACE}/.github/scripts/build-zsh-release-artifact.sh" \
"dist/zsh/${{ matrix.target }}/${{ matrix.archive_name }}"
- uses: actions/upload-artifact@v7
with:
name: codex-zsh-${{ matrix.target }}
path: dist/zsh/${{ matrix.target }}/*
darwin:
name: Build zsh (macOS) - ${{ matrix.variant }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
variant: macos-15
archive_name: codex-zsh-aarch64-apple-darwin.tar.gz
steps:
- name: Install build prerequisites
shell: bash
run: |
set -euo pipefail
if ! command -v autoconf >/dev/null 2>&1; then
brew install autoconf
fi
- uses: actions/checkout@v6
- name: Build, smoke-test, and stage zsh artifact
shell: bash
run: |
"${GITHUB_WORKSPACE}/.github/scripts/build-zsh-release-artifact.sh" \
"dist/zsh/${{ matrix.target }}/${{ matrix.archive_name }}"
- uses: actions/upload-artifact@v7
with:
name: codex-zsh-${{ matrix.target }}
path: dist/zsh/${{ matrix.target }}/*

View File

@@ -19,8 +19,8 @@ jobs:
tag-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: actions/checkout@v5
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
@@ -47,27 +47,20 @@ jobs:
build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
timeout-minutes: 60
permissions:
contents: read
id-token: write
name: ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
defaults:
run:
working-directory: codex-rs
env:
# 2026-03-04: temporarily change releases to use thin LTO because
# Ubuntu ARM is timing out at 60 minutes.
      CARGO_PROFILE_RELEASE_LTO: thin
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
- runner: macos-15-xlarge
- runner: macos-14
target: x86_64-apple-darwin
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
@@ -77,248 +70,34 @@ jobs:
target: aarch64-unknown-linux-musl
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
- runner: windows-latest
target: x86_64-pc-windows-msvc
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
steps:
- uses: actions/checkout@v6
- name: Print runner specs (Linux)
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
set -euo pipefail
cpu_model="$(lscpu | awk -F: '/Model name/ {gsub(/^[ \t]+/, "", $2); print $2; exit}')"
total_ram="$(awk '/MemTotal/ {printf "%.1f GiB\n", $2 / 1024 / 1024}' /proc/meminfo)"
echo "Runner: ${RUNNER_NAME:-unknown}"
echo "OS: $(uname -a)"
echo "CPU model: ${cpu_model}"
echo "Logical CPUs: $(nproc)"
echo "Total RAM: ${total_ram}"
echo "Disk usage:"
df -h .
- name: Print runner specs (macOS)
if: ${{ runner.os == 'macOS' }}
shell: bash
run: |
set -euo pipefail
total_ram="$(sysctl -n hw.memsize | awk '{printf "%.1f GiB\n", $1 / 1024 / 1024 / 1024}')"
echo "Runner: ${RUNNER_NAME:-unknown}"
echo "OS: $(sw_vers -productName) $(sw_vers -productVersion)"
echo "Hardware model: $(sysctl -n hw.model)"
echo "CPU architecture: $(uname -m)"
echo "Logical CPUs: $(sysctl -n hw.logicalcpu)"
echo "Physical CPUs: $(sysctl -n hw.physicalcpu)"
echo "Total RAM: ${total_ram}"
echo "Disk usage:"
df -h .
- name: Install Linux bwrap build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
set -euo pipefail
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- name: Install UBSan runtime (musl)
if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93.0
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
with:
targets: ${{ matrix.target }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@d1434d08867e3ee9daa34448df10607b98908d29 # v2
- uses: actions/cache@v4
with:
version: 0.14.0
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Avoid problematic aws-lc jitter entropy code path on musl builders.
echo "AWS_LC_SYS_NO_JITTER_ENTROPY=1" >> "$GITHUB_ENV"
target_no_jitter="AWS_LC_SYS_NO_JITTER_ENTROPY_${{ matrix.target }}"
target_no_jitter="${target_no_jitter//-/_}"
echo "${target_no_jitter}=1" >> "$GITHUB_ENV"
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
name: Configure musl rusty_v8 artifact overrides
env:
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)"
release_tag="rusty-v8-v${version}"
base_url="https://github.com/openai/codex/releases/download/${release_tag}"
archive="https://github.com/openai/codex/releases/download/rusty-v8-v${version}/librusty_v8_release_${TARGET}.a.gz"
binding_dir="${RUNNER_TEMP}/rusty_v8"
binding_path="${binding_dir}/src_binding_release_${TARGET}.rs"
mkdir -p "${binding_dir}"
curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}"
echo "RUSTY_V8_ARCHIVE=${archive}" >> "$GITHUB_ENV"
echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "$GITHUB_ENV"
sudo apt install -y musl-tools pkg-config
- name: Cargo build
shell: bash
run: |
echo "CARGO_PROFILE_RELEASE_LTO: ${CARGO_PROFILE_RELEASE_LTO}"
cargo build --target ${{ matrix.target }} --release --timings --bin codex --bin codex-responses-api-proxy
- name: Upload Cargo timings
uses: actions/upload-artifact@v7
with:
name: cargo-timings-rust-release-${{ matrix.target }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn
- if: ${{ contains(matrix.target, 'linux') }}
name: Cosign Linux artifacts
uses: ./.github/actions/linux-code-sign
with:
target: ${{ matrix.target }}
artifacts-dir: ${{ github.workspace }}/codex-rs/target/${{ matrix.target }}/release
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (binaries)
uses: ./.github/actions/macos-code-sign
with:
target: ${{ matrix.target }}
sign-binaries: "true"
sign-dmg: "false"
apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
apple-certificate-password: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
apple-notarization-key-p8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
apple-notarization-key-id: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
apple-notarization-issuer-id: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
- if: ${{ runner.os == 'macOS' }}
name: Build macOS dmg
shell: bash
run: |
set -euo pipefail
target="${{ matrix.target }}"
release_dir="target/${target}/release"
dmg_root="${RUNNER_TEMP}/codex-dmg-root"
volname="Codex (${target})"
dmg_path="${release_dir}/codex-${target}.dmg"
# The previous "MacOS code signing (binaries)" step signs + notarizes the
# built artifacts in `${release_dir}`. This step packages *those same*
# signed binaries into a dmg.
codex_binary_path="${release_dir}/codex"
proxy_binary_path="${release_dir}/codex-responses-api-proxy"
rm -rf "$dmg_root"
mkdir -p "$dmg_root"
if [[ ! -f "$codex_binary_path" ]]; then
echo "Binary $codex_binary_path not found"
exit 1
fi
if [[ ! -f "$proxy_binary_path" ]]; then
echo "Binary $proxy_binary_path not found"
exit 1
fi
ditto "$codex_binary_path" "${dmg_root}/codex"
ditto "$proxy_binary_path" "${dmg_root}/codex-responses-api-proxy"
rm -f "$dmg_path"
hdiutil create \
-volname "$volname" \
-srcfolder "$dmg_root" \
-format UDZO \
-ov \
"$dmg_path"
if [[ ! -f "$dmg_path" ]]; then
echo "dmg $dmg_path not found after build"
exit 1
fi
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (dmg)
uses: ./.github/actions/macos-code-sign
with:
target: ${{ matrix.target }}
sign-binaries: "false"
sign-dmg: "true"
apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
apple-certificate-password: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
apple-notarization-key-p8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
apple-notarization-key-id: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
apple-notarization-issuer-id: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
run: cargo build --target ${{ matrix.target }} --release --bin codex
- name: Stage artifacts
shell: bash
@@ -326,17 +105,16 @@ jobs:
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
if [[ "${{ matrix.target }}" == *linux* ]]; then
cp target/${{ matrix.target }}/release/codex.sigstore "$dest/codex-${{ matrix.target }}.sigstore"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.sigstore "$dest/codex-responses-api-proxy-${{ matrix.target }}.sigstore"
if [[ "${{ matrix.runner }}" == windows* ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
else
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
fi
if [[ "${{ matrix.target }}" == *apple-darwin ]]; then
cp target/${{ matrix.target }}/release/codex-${{ matrix.target }}.dmg "$dest/codex-${{ matrix.target }}.dmg"
fi
- if: ${{ matrix.runner == 'windows-11-arm' }}
name: Install zstd
shell: powershell
run: choco install -y zstandard
- name: Compress artifacts
shell: bash
@@ -346,10 +124,11 @@ jobs:
dest="dist/${{ matrix.target }}"
# For compatibility with environments that lack the `zstd` tool we
# additionally create a `.tar.gz` alongside every binary we publish.
# The end result is:
# additionally create a `.tar.gz` for all platforms and `.zip` for
# Windows alongside every single binary that we publish. The end result is:
# codex-<target>.zst (existing)
# codex-<target>.tar.gz (new)
# codex-<target>.zip (only for Windows)
# 1. Produce a .tar.gz for every file in the directory *before* we
# run `zstd --rm`, because that flag deletes the original files.
@@ -357,24 +136,26 @@ jobs:
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz || "$base" == *.zip || "$base" == *.dmg ]]; then
continue
fi
# Don't try to compress signature bundles.
if [[ "$base" == *.sigstore ]]; then
if [[ "$base" == *.tar.gz || "$base" == *.zip ]]; then
continue
fi
# Create per-binary tar.gz
tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
# Also create .zst and remove the uncompressed binaries to keep
# non-Windows artifact directories small.
# Create zip archive for Windows binaries
# Must run from inside the dest dir so 7z won't
# embed the directory path inside the zip.
if [[ "${{ matrix.runner }}" == windows* ]]; then
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
# Also create .zst (existing behaviour) *and* remove the original
# uncompressed binary to keep the directory small.
zstd -T0 -19 --rm "$dest/$base"
done
- uses: actions/upload-artifact@v7
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target }}
# Upload the per-binary .zst files as well as the new .tar.gz
@@ -382,88 +163,22 @@ jobs:
path: |
codex-rs/dist/${{ matrix.target }}/*
build-windows:
needs: tag-check
uses: ./.github/workflows/rust-release-windows.yml
with:
release-lto: ${{ contains(github.ref_name, '-alpha') && 'thin' || 'fat' }}
secrets: inherit
argument-comment-lint-release-assets:
name: argument-comment-lint release assets
needs: tag-check
uses: ./.github/workflows/rust-release-argument-comment-lint.yml
with:
publish: true
zsh-release-assets:
name: zsh release assets
needs: tag-check
uses: ./.github/workflows/rust-release-zsh.yml
release:
needs:
- build
- build-windows
- argument-comment-lint-release-assets
- zsh-release-assets
needs: build
name: release
runs-on: ubuntu-latest
permissions:
contents: write
actions: read
outputs:
version: ${{ steps.release_name.outputs.name }}
tag: ${{ github.ref_name }}
should_publish_npm: ${{ steps.npm_publish_settings.outputs.should_publish }}
npm_tag: ${{ steps.npm_publish_settings.outputs.npm_tag }}
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Generate release notes from tag commit message
id: release_notes
shell: bash
run: |
set -euo pipefail
# On tag pushes, GITHUB_SHA may be a tag object for annotated tags;
# peel it to the underlying commit.
commit="$(git rev-parse "${GITHUB_SHA}^{commit}")"
notes_path="${RUNNER_TEMP}/release-notes.md"
# Use the commit message for the commit the tag points at (not the
# annotated tag message).
git log -1 --format=%B "${commit}" > "${notes_path}"
# Ensure trailing newline so GitHub's markdown renderer doesn't
# occasionally run the last line into subsequent content.
echo >> "${notes_path}"
echo "path=${notes_path}" >> "${GITHUB_OUTPUT}"
- uses: actions/download-artifact@v8
- uses: actions/download-artifact@v4
with:
path: dist
- name: List
run: ls -R dist/
- name: Delete entries from dist/ that should not go in the release
run: |
rm -rf dist/windows-binaries*
# cargo-timing.html appears under multiple target-specific directories.
# If included in files: dist/**, release upload races on duplicate
# asset names and can fail with 404s.
find dist -type f -name 'cargo-timing.html' -delete
find dist -type d -empty -delete
ls -R dist/
- name: Add config schema release asset
run: |
cp codex-rs/core/config.schema.json dist/config-schema.json
- name: Define release name
id: release_name
run: |
@@ -472,62 +187,27 @@ jobs:
version="${GITHUB_REF_NAME#rust-v}"
echo "name=${version}" >> $GITHUB_OUTPUT
- name: Determine npm publish settings
id: npm_publish_settings
env:
VERSION: ${{ steps.release_name.outputs.name }}
run: |
set -euo pipefail
version="${VERSION}"
if [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "should_publish=true" >> "$GITHUB_OUTPUT"
echo "npm_tag=" >> "$GITHUB_OUTPUT"
elif [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
echo "should_publish=true" >> "$GITHUB_OUTPUT"
echo "npm_tag=alpha" >> "$GITHUB_OUTPUT"
else
echo "should_publish=false" >> "$GITHUB_OUTPUT"
echo "npm_tag=" >> "$GITHUB_OUTPUT"
fi
- name: Setup pnpm
uses: pnpm/action-setup@v5
with:
run_install: false
- name: Setup Node.js for npm packaging
uses: actions/setup-node@v6
with:
node-version: 22
- name: Install dependencies
run: pnpm install --frozen-lockfile
# stage_npm_packages.py requires DotSlash when staging releases.
- uses: facebook/install-dotslash@v2
- name: Stage npm packages
- name: Stage npm package
env:
GH_TOKEN: ${{ github.token }}
RELEASE_VERSION: ${{ steps.release_name.outputs.name }}
run: |
./scripts/stage_npm_packages.py \
--release-version "$RELEASE_VERSION" \
--package codex \
--package codex-responses-api-proxy \
--package codex-sdk
- name: Stage installer scripts
run: |
cp scripts/install/install.sh dist/install.sh
cp scripts/install/install.ps1 dist/install.ps1
set -euo pipefail
TMP_DIR="${RUNNER_TEMP}/npm-stage"
python3 codex-cli/scripts/stage_rust_release.py \
--release-version "${{ steps.release_name.outputs.name }}" \
--tmp "${TMP_DIR}"
mkdir -p dist/npm
# Produce an npm-ready tarball using `npm pack` and store it in dist/npm.
# We then rename it to a stable name used by our publishing script.
(cd "$TMP_DIR" && npm pack --pack-destination "${GITHUB_WORKSPACE}/dist/npm")
mv "${GITHUB_WORKSPACE}"/dist/npm/*.tgz \
"${GITHUB_WORKSPACE}/dist/npm/codex-npm-${{ steps.release_name.outputs.name }}.tgz"
- name: Create GitHub Release
uses: softprops/action-gh-release@v2
with:
name: ${{ steps.release_name.outputs.name }}
tag_name: ${{ github.ref_name }}
body_path: ${{ steps.release_notes.outputs.path }}
files: dist/**
# Mark as prerelease only when the version has a suffix after x.y.z
# (e.g. -alpha, -beta). Otherwise publish a normal release.
@@ -539,184 +219,3 @@ jobs:
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json
- uses: facebook/dotslash-publish-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-zsh-config.json
- uses: facebook/dotslash-publish-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-argument-comment-lint-config.json
- name: Trigger developers.openai.com deploy
# Only trigger the deploy if the release is not a pre-release.
# The deploy is used to update the developers.openai.com website with the new config schema json file.
if: ${{ !contains(steps.release_name.outputs.name, '-') }}
continue-on-error: true
env:
DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL: ${{ secrets.DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL }}
run: |
if ! curl -sS -f -o /dev/null -X POST "$DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL"; then
echo "::warning title=developers.openai.com deploy hook failed::Vercel deploy hook POST failed for ${GITHUB_REF_NAME}"
exit 1
fi
# Publish to npm using OIDC authentication.
# July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
# npm docs: https://docs.npmjs.com/trusted-publishers
publish-npm:
# Publish to npm for stable releases and alpha pre-releases with numeric suffixes.
if: ${{ needs.release.outputs.should_publish_npm == 'true' }}
name: publish-npm
needs: release
runs-on: ubuntu-latest
permissions:
id-token: write # Required for OIDC
contents: read
steps:
- name: Setup Node.js
uses: actions/setup-node@v6
with:
node-version: 22
registry-url: "https://registry.npmjs.org"
scope: "@openai"
# Trusted publishing requires npm CLI version 11.5.1 or later.
- name: Update npm
run: npm install -g npm@latest
- name: Download npm tarballs from release
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
RELEASE_TAG: ${{ needs.release.outputs.tag }}
RELEASE_VERSION: ${{ needs.release.outputs.version }}
run: |
set -euo pipefail
version="$RELEASE_VERSION"
tag="$RELEASE_TAG"
mkdir -p dist/npm
patterns=(
"codex-npm-${version}.tgz"
"codex-npm-linux-*-${version}.tgz"
"codex-npm-darwin-*-${version}.tgz"
"codex-npm-win32-*-${version}.tgz"
"codex-responses-api-proxy-npm-${version}.tgz"
"codex-sdk-npm-${version}.tgz"
)
for pattern in "${patterns[@]}"; do
gh release download "$tag" \
--repo "${GITHUB_REPOSITORY}" \
--pattern "$pattern" \
--dir dist/npm
done
# No NODE_AUTH_TOKEN needed because we use OIDC.
- name: Publish to npm
env:
VERSION: ${{ needs.release.outputs.version }}
NPM_TAG: ${{ needs.release.outputs.npm_tag }}
run: |
set -euo pipefail
prefix=""
if [[ -n "${NPM_TAG}" ]]; then
prefix="${NPM_TAG}-"
fi
shopt -s nullglob
tarballs=(dist/npm/*-"${VERSION}".tgz)
if [[ ${#tarballs[@]} -eq 0 ]]; then
echo "No npm tarballs found in dist/npm for version ${VERSION}"
exit 1
fi
for tarball in "${tarballs[@]}"; do
filename="$(basename "${tarball}")"
tag=""
case "${filename}" in
codex-npm-linux-*-"${VERSION}".tgz|codex-npm-darwin-*-"${VERSION}".tgz|codex-npm-win32-*-"${VERSION}".tgz)
platform="${filename#codex-npm-}"
platform="${platform%-${VERSION}.tgz}"
tag="${prefix}${platform}"
;;
codex-npm-"${VERSION}".tgz|codex-responses-api-proxy-npm-"${VERSION}".tgz|codex-sdk-npm-"${VERSION}".tgz)
tag="${NPM_TAG}"
;;
*)
echo "Unexpected npm tarball: ${filename}"
exit 1
;;
esac
publish_cmd=(npm publish "${GITHUB_WORKSPACE}/${tarball}")
if [[ -n "${tag}" ]]; then
publish_cmd+=(--tag "${tag}")
fi
echo "+ ${publish_cmd[*]}"
set +e
publish_output="$("${publish_cmd[@]}" 2>&1)"
publish_status=$?
set -e
echo "${publish_output}"
if [[ ${publish_status} -eq 0 ]]; then
continue
fi
if grep -qiE "previously published|cannot publish over|version already exists" <<< "${publish_output}"; then
echo "Skipping already-published package version for ${filename}"
continue
fi
exit "${publish_status}"
done
winget:
name: winget
needs: release
# Only publish stable/mainline releases to WinGet; pre-releases include a
# '-' in the semver string (e.g., 1.2.3-alpha.1).
if: ${{ !contains(needs.release.outputs.version, '-') }}
# This job only invokes a GitHub Action to open/update the winget-pkgs PR;
# it does not execute Windows-only tooling, so Linux is sufficient.
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- name: Publish to WinGet
uses: vedantmgoyal9/winget-releaser@7bd472be23763def6e16bd06cc8b1cdfab0e2fd5
with:
identifier: OpenAI.Codex
version: ${{ needs.release.outputs.version }}
release-tag: ${{ needs.release.outputs.tag }}
fork-user: openai-oss-forks
installers-regex: '^codex-(?:x86_64|aarch64)-pc-windows-msvc\.exe\.zip$'
token: ${{ secrets.WINGET_PUBLISH_PAT }}
update-branch:
name: Update latest-alpha-cli branch
permissions:
contents: write
needs: release
runs-on: ubuntu-latest
steps:
- name: Update latest-alpha-cli branch
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euo pipefail
gh api \
repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
-X PATCH \
-f sha="${GITHUB_SHA}" \
-F force=true

View File

@@ -1,188 +0,0 @@
name: rusty-v8-release
on:
workflow_dispatch:
inputs:
release_tag:
description: Optional release tag. Defaults to rusty-v8-v<resolved_v8_version>.
required: false
type: string
publish:
description: Publish the staged musl artifacts to a GitHub release.
required: false
default: true
type: boolean
concurrency:
group: ${{ github.workflow }}::${{ inputs.release_tag || github.run_id }}
cancel-in-progress: false
jobs:
metadata:
runs-on: ubuntu-latest
outputs:
release_tag: ${{ steps.release_tag.outputs.release_tag }}
v8_version: ${{ steps.v8_version.outputs.version }}
steps:
- uses: actions/checkout@v6
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.12"
- name: Resolve exact v8 crate version
id: v8_version
shell: bash
run: |
set -euo pipefail
version="$(python3 .github/scripts/rusty_v8_bazel.py resolved-v8-crate-version)"
echo "version=${version}" >> "$GITHUB_OUTPUT"
- name: Resolve release tag
id: release_tag
env:
RELEASE_TAG_INPUT: ${{ inputs.release_tag }}
V8_VERSION: ${{ steps.v8_version.outputs.version }}
shell: bash
run: |
set -euo pipefail
release_tag="${RELEASE_TAG_INPUT}"
if [[ -z "${release_tag}" ]]; then
release_tag="rusty-v8-v${V8_VERSION}"
fi
echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
build:
name: Build ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
permissions:
contents: read
actions: read
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-24.04
platform: linux_amd64_musl
target: x86_64-unknown-linux-musl
- runner: ubuntu-24.04-arm
platform: linux_arm64_musl
target: aarch64-unknown-linux-musl
steps:
- uses: actions/checkout@v6
- name: Set up Bazel
uses: bazelbuild/setup-bazelisk@v3
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.12"
- name: Build Bazel V8 release pair
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
PLATFORM: ${{ matrix.platform }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
target_suffix="${TARGET//-/_}"
pair_target="//third_party/v8:rusty_v8_release_pair_${target_suffix}"
extra_targets=()
if [[ "${TARGET}" == *-unknown-linux-musl ]]; then
extra_targets=(
"@llvm//runtimes/libcxx:libcxx.static"
"@llvm//runtimes/libcxx:libcxxabi.static"
)
fi
bazel_args=(
build
-c
opt
"--platforms=@llvm//platforms:${PLATFORM}"
"${pair_target}"
"${extra_targets[@]}"
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
)
bazel \
--noexperimental_remote_repo_contents_cache \
--bazelrc=.github/workflows/v8-ci.bazelrc \
"${bazel_args[@]}" \
"--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
- name: Stage release pair
env:
PLATFORM: ${{ matrix.platform }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
python3 .github/scripts/rusty_v8_bazel.py stage-release-pair \
--platform "${PLATFORM}" \
--target "${TARGET}" \
--compilation-mode opt \
--output-dir "dist/${TARGET}"
- name: Upload staged musl artifacts
uses: actions/upload-artifact@v7
with:
name: rusty-v8-${{ needs.metadata.outputs.v8_version }}-${{ matrix.target }}
path: dist/${{ matrix.target }}/*
publish-release:
if: ${{ inputs.publish }}
needs:
- metadata
- build
runs-on: ubuntu-latest
permissions:
contents: write
actions: read
steps:
- name: Ensure publishing from default branch
if: ${{ github.ref_name != github.event.repository.default_branch }}
env:
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
shell: bash
run: |
set -euo pipefail
echo "Publishing is only allowed from ${DEFAULT_BRANCH}; current ref is ${GITHUB_REF_NAME}." >&2
exit 1
- name: Ensure release tag is new
env:
GH_TOKEN: ${{ github.token }}
RELEASE_TAG: ${{ needs.metadata.outputs.release_tag }}
shell: bash
run: |
set -euo pipefail
if gh release view "${RELEASE_TAG}" --repo "${GITHUB_REPOSITORY}" > /dev/null 2>&1; then
echo "Release tag ${RELEASE_TAG} already exists; musl artifact tags are immutable." >&2
exit 1
fi
- uses: actions/download-artifact@v8
with:
path: dist
- name: Create GitHub Release
uses: softprops/action-gh-release@v2
with:
tag_name: ${{ needs.metadata.outputs.release_tag }}
name: ${{ needs.metadata.outputs.release_tag }}
files: dist/**
# Keep V8 artifact releases out of Codex's normal "latest release" channel.
prerelease: true

View File

@@ -1,52 +0,0 @@
name: sdk
on:
push:
branches: [main]
pull_request: {}
jobs:
sdks:
runs-on:
group: codex-runners
labels: codex-linux-x64
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Install Linux bwrap build dependencies
shell: bash
run: |
set -euo pipefail
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- name: Setup pnpm
uses: pnpm/action-setup@v5
with:
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v6
with:
node-version: 22
cache: pnpm
- uses: dtolnay/rust-toolchain@1.93.0
- name: build codex
run: cargo build --bin codex
working-directory: codex-rs
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Build SDK packages
run: pnpm -r --filter ./sdk/typescript run build
- name: Lint SDK packages
run: pnpm -r --filter ./sdk/typescript run lint
- name: Test SDK packages
run: pnpm -r --filter ./sdk/typescript run test

View File

@@ -1,132 +0,0 @@
name: v8-canary
on:
pull_request:
paths:
- ".github/scripts/rusty_v8_bazel.py"
- ".github/workflows/rusty-v8-release.yml"
- ".github/workflows/v8-canary.yml"
- "MODULE.bazel"
- "MODULE.bazel.lock"
- "codex-rs/Cargo.toml"
- "patches/BUILD.bazel"
- "patches/v8_*.patch"
- "third_party/v8/**"
push:
branches:
- main
paths:
- ".github/scripts/rusty_v8_bazel.py"
- ".github/workflows/rusty-v8-release.yml"
- ".github/workflows/v8-canary.yml"
- "MODULE.bazel"
- "MODULE.bazel.lock"
- "codex-rs/Cargo.toml"
- "patches/BUILD.bazel"
- "patches/v8_*.patch"
- "third_party/v8/**"
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}::${{ github.event.pull_request.number > 0 && format('pr-{0}', github.event.pull_request.number) || github.ref_name }}
cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
metadata:
runs-on: ubuntu-latest
outputs:
v8_version: ${{ steps.v8_version.outputs.version }}
steps:
- uses: actions/checkout@v6
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.12"
- name: Resolve exact v8 crate version
id: v8_version
shell: bash
run: |
set -euo pipefail
version="$(python3 .github/scripts/rusty_v8_bazel.py resolved-v8-crate-version)"
echo "version=${version}" >> "$GITHUB_OUTPUT"
build:
name: Build ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
permissions:
contents: read
actions: read
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-24.04
platform: linux_amd64_musl
target: x86_64-unknown-linux-musl
- runner: ubuntu-24.04-arm
platform: linux_arm64_musl
target: aarch64-unknown-linux-musl
steps:
- uses: actions/checkout@v6
- name: Set up Bazel
uses: bazelbuild/setup-bazelisk@v3
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.12"
- name: Build Bazel V8 release pair
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
PLATFORM: ${{ matrix.platform }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
target_suffix="${TARGET//-/_}"
pair_target="//third_party/v8:rusty_v8_release_pair_${target_suffix}"
extra_targets=(
"@llvm//runtimes/libcxx:libcxx.static"
"@llvm//runtimes/libcxx:libcxxabi.static"
)
bazel_args=(
build
"--platforms=@llvm//platforms:${PLATFORM}"
"${pair_target}"
"${extra_targets[@]}"
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
)
bazel \
--noexperimental_remote_repo_contents_cache \
--bazelrc=.github/workflows/v8-ci.bazelrc \
"${bazel_args[@]}" \
"--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
- name: Stage release pair
env:
PLATFORM: ${{ matrix.platform }}
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
python3 .github/scripts/rusty_v8_bazel.py stage-release-pair \
--platform "${PLATFORM}" \
--target "${TARGET}" \
--output-dir "dist/${TARGET}"
- name: Upload staged musl artifacts
uses: actions/upload-artifact@v7
with:
name: v8-canary-${{ needs.metadata.outputs.v8_version }}-${{ matrix.target }}
path: dist/${{ matrix.target }}/*

View File

@@ -1,5 +0,0 @@
import %workspace%/.github/workflows/ci.bazelrc
common --build_metadata=REPO_URL=https://github.com/openai/codex.git
common --build_metadata=ROLE=CI
common --build_metadata=VISIBILITY=PUBLIC

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env dotslash
// This DotSlash file wraps zstd for Windows runners.
// The upstream release provides win32/win64 binaries; for windows-aarch64 we
// use the win64 artifact via Windows x64 emulation.
{
"name": "zstd",
"platforms": {
"windows-x86_64": {
"size": 1747181,
"hash": "sha256",
"digest": "acb4e8111511749dc7a3ebedca9b04190e37a17afeb73f55d4425dbf0b90fad9",
"format": "zip",
"path": "zstd-v1.5.7-win64/zstd.exe",
"providers": [
{
"url": "https://github.com/facebook/zstd/releases/download/v1.5.7/zstd-v1.5.7-win64.zip"
},
{
"type": "github-release",
"repo": "facebook/zstd",
"tag": "v1.5.7",
"name": "zstd-v1.5.7-win64.zip"
}
]
},
"windows-aarch64": {
"size": 1747181,
"hash": "sha256",
"digest": "acb4e8111511749dc7a3ebedca9b04190e37a17afeb73f55d4425dbf0b90fad9",
"format": "zip",
"path": "zstd-v1.5.7-win64/zstd.exe",
"providers": [
{
"url": "https://github.com/facebook/zstd/releases/download/v1.5.7/zstd-v1.5.7-win64.zip"
},
{
"type": "github-release",
"repo": "facebook/zstd",
"tag": "v1.5.7",
"name": "zstd-v1.5.7-win64.zip"
}
]
}
}
}

10
.gitignore vendored
View File

@@ -9,7 +9,6 @@ node_modules
# build
dist/
bazel-*
build/
out/
storybook-static/
@@ -31,7 +30,6 @@ result
# cli tools
CLAUDE.md
.claude/
AGENTS.override.md
# caches
.cache/
@@ -65,9 +63,6 @@ apply_patch/
# coverage
coverage/
# personal files
personal/
# os
.DS_Store
Thumbs.db
@@ -86,8 +81,3 @@ CHANGELOG.ignore.md
# nix related
.direnv
.envrc
# Python bytecode files
__pycache__/
*.pyc

View File

@@ -1,6 +0,0 @@
config:
MD013:
line_length: 100
globs:
- "docs/tui-chat-composer.md"

View File

@@ -1,9 +1,8 @@
{
"rust-analyzer.checkOnSave": true,
"rust-analyzer.check.command": "clippy",
"rust-analyzer.check.extraArgs": ["--tests"],
"rust-analyzer.check.extraArgs": ["--all-features", "--tests"],
"rust-analyzer.rustfmt.extraArgs": ["--config", "imports_granularity=Item"],
"rust-analyzer.cargo.targetDir": "${workspaceFolder}/codex-rs/target/rust-analyzer",
"[rust]": {
"editor.defaultFormatter": "rust-lang.rust-analyzer",
"editor.formatOnSave": true,

139
AGENTS.md
View File

@@ -4,51 +4,14 @@ In the codex-rs folder where the rust code lives:
- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`
- When using format! and you can inline variables into {}, always do that.
- Install any commands the repo relies on (for example `just`, `rg`, or `cargo-insta`) if they aren't already available before running instructions here.
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
- Avoid bool or ambiguous `Option` parameters that force callers to write hard-to-read code such as `foo(false)` or `bar(None)`. Prefer enums, named methods, newtypes, or other idiomatic Rust API shapes when they keep the callsite self-documenting.
- When you cannot make that API change and still need a small positional-literal callsite in Rust, follow the `argument_comment_lint` convention:
- Use an exact `/*param_name*/` comment before opaque literal arguments such as `None`, booleans, and numeric literals when passing them by position.
- Do not add these comments for string or char literals unless the comment adds real clarity; those literals are intentionally exempt from the lint.
- If you add one of these comments, the parameter name must exactly match the callee signature.
- When possible, make `match` statements exhaustive and avoid wildcard arms.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
- If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.
- If you change Rust dependencies (`Cargo.toml` or `Cargo.lock`), run `just bazel-lock-update` from the
repo root to refresh `MODULE.bazel.lock`, and include that lockfile update in the same change.
- After dependency changes, run `just bazel-lock-check` from the repo root so lockfile drift is caught
locally before CI.
- Bazel does not automatically make source-tree files available to compile-time Rust file access. If
you add `include_str!`, `include_bytes!`, `sqlx::migrate!`, or similar build-time file or
directory reads, update the crate's `BUILD.bazel` (`compile_data`, `build_script_data`, or test
data) or Bazel may fail even when Cargo passes.
- Do not create small helper methods that are referenced only once.
- Avoid large modules:
- Prefer adding new modules instead of growing existing ones.
- Target Rust modules under 500 LoC, excluding tests.
- If a file exceeds roughly 800 LoC, add new functionality in a new module instead of extending
the existing file unless there is a strong documented reason not to.
- This rule applies especially to high-touch files that already attract unrelated changes, such
as `codex-rs/tui/src/app.rs`, `codex-rs/tui/src/bottom_pane/chat_composer.rs`,
`codex-rs/tui/src/bottom_pane/footer.rs`, `codex-rs/tui/src/chatwidget.rs`,
`codex-rs/tui/src/bottom_pane/mod.rs`, and similarly central orchestration modules.
- When extracting code from a large module, move the related tests and module/type docs toward
the new implementation so the invariants stay close to the code that owns them.
Run `just fmt` (in `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests:
Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspacewide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test` (or `just test` if `cargo-nextest` is installed). Avoid `--all-features` for routine local runs because it expands the build matrix and can significantly increase `target/` disk usage; use it only when you specifically need full feature coverage. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
Before finalizing a large change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspacewide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Do not re-run tests after running `fix` or `fmt`.
Also run `just argument-comment-lint` to ensure the codebase is clean of comment lint errors.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
## TUI style conventions
@@ -56,8 +19,6 @@ See `codex-rs/tui/styles.md`.
## TUI code conventions
- When a change lands in `codex-rs/tui` and `codex-rs/tui_app_server` has a parallel implementation of the same behavior, reflect the change in `codex-rs/tui_app_server` too unless there is a documented reason not to.
- Use concise styling helpers from ratatuis Stylize trait.
- Basic spans: use "text".into()
- Styled spans: use "text".red(), "text".green(), "text".magenta(), "text".dim(), etc.
@@ -66,7 +27,6 @@ See `codex-rs/tui/styles.md`.
- Desired: vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()]
### TUI Styling (ratatui)
- Prefer Stylize helpers: use "text".dim(), .bold(), .cyan(), .italic(), .underlined() instead of manual Style where possible.
- Prefer simple conversions: use "text".into() for spans and vec![…].into() for lines; when inference is ambiguous (e.g., Paragraph::new/Cell::from), use Line::from(spans) or Span::from(text).
- Computed styles: if the Style is computed at runtime, using `Span::styled` is OK (`Span::from(text).set_style(style)` is also acceptable).
@@ -78,7 +38,6 @@ See `codex-rs/tui/styles.md`.
- Compactness: prefer the form that stays on one line after rustfmt; if only one of Line::from(vec![…]) or vec![…].into() avoids wrapping, choose that. If both wrap, pick the one with fewer wrapped lines.
### Text wrapping
- Always use textwrap::wrap to wrap plain strings.
- If you have a ratatui Line and you want to wrap it, use the helpers in tui/src/wrapping.rs, e.g. word_wrap_lines / word_wrap_line.
- If you need to indent wrapped lines, use the initial_indent / subsequent_indent options from RtOptions if you can, rather than writing custom logic.
@@ -88,14 +47,7 @@ See `codex-rs/tui/styles.md`.
### Snapshot tests
This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to validate rendered output.
**Requirement:** any change that affects user-visible UI (including adding new UI) must include
corresponding `insta` snapshot coverage (add a new snapshot test if one doesn't exist yet, or
update the existing snapshot). Review and accept snapshot updates as part of the PR so UI impact
is easy to review and future diffs stay visual.
When UI or text output changes intentionally, update the snapshots as follows:
This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to validate rendered output. When UI or text output changes intentionally, update the snapshots as follows:
- Run tests to generate any updated snapshots:
- `cargo test -p codex-tui`
@@ -107,91 +59,8 @@ When UI or text output changes intentionally, update the snapshots as follows:
- `cargo insta accept -p codex-tui`
If you dont have the tool:
- `cargo install cargo-insta`
### Test assertions
- Tests should use pretty_assertions::assert_eq for clearer diffs. Import this at the top of the test module if it isn't already.
- Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields.
- Avoid mutating process environment in tests; prefer passing environment-derived flags or dependencies from above.
### Spawning workspace binaries in tests (Cargo vs Bazel)
- Prefer `codex_utils_cargo_bin::cargo_bin("...")` over `assert_cmd::Command::cargo_bin(...)` or `escargot` when tests need to spawn first-party binaries.
- Under Bazel, binaries and resources may live under runfiles; use `codex_utils_cargo_bin::cargo_bin` to resolve absolute paths that remain stable after `chdir`.
- When locating fixture files or test resources under Bazel, avoid `env!("CARGO_MANIFEST_DIR")`. Prefer `codex_utils_cargo_bin::find_resource!` so paths resolve correctly under both Cargo and Bazel runfiles.
### Integration tests (core)
- Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests.
- All `mount_sse*` helpers return a `ResponseMock`; hold onto it so you can assert against outbound `/responses` POST bodies.
- Use `ResponseMock::single_request()` when a test should only issue one POST, or `ResponseMock::requests()` to inspect every captured `ResponsesRequest`.
- `ResponsesRequest` exposes helpers (`body_json`, `input`, `function_call_output`, `custom_tool_call_output`, `call_output`, `header`, `path`, `query_param`) so assertions can target structured payloads instead of manual JSON digging.
- Build SSE payloads with the provided `ev_*` constructors and the `sse(...)`.
- Prefer `wait_for_event` over `wait_for_event_with_timeout`.
- Prefer `mount_sse_once` over `mount_sse_once_match` or `mount_sse_sequence`
- Typical pattern:
```rust
let mock = responses::mount_sse_once(&server, responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
responses::ev_completed("resp-1"),
])).await;
codex.submit(Op::UserTurn { ... }).await?;
// Assert request body if needed.
let request = mock.single_request();
// assert using request.function_call_output(call_id) or request.json_body() or other helpers.
```
## App-server API Development Best Practices
These guidelines apply to app-server protocol work in `codex-rs`, especially:
- `app-server-protocol/src/protocol/common.rs`
- `app-server-protocol/src/protocol/v2.rs`
- `app-server/README.md`
### Core Rules
- All active API development should happen in app-server v2. Do not add new API surface area to v1.
- Follow payload naming consistently:
`*Params` for request payloads, `*Response` for responses, and `*Notification` for notifications.
- Expose RPC methods as `<resource>/<method>` and keep `<resource>` singular (for example, `thread/read`, `app/list`).
- Always expose fields as camelCase on the wire with `#[serde(rename_all = "camelCase")]` unless a tagged union or explicit compatibility requirement needs a targeted rename.
- Exception: config RPC payloads are expected to use snake_case to mirror config.toml keys (see the config read/write/list APIs in `app-server-protocol/src/protocol/v2.rs`).
- Always set `#[ts(export_to = "v2/")]` on v2 request/response/notification types so generated TypeScript lands in the correct namespace.
- Never use `#[serde(skip_serializing_if = "Option::is_none")]` for v2 API payload fields.
Exception: client->server requests that intentionally have no params may use:
`params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>`.
- Keep Rust and TS wire renames aligned. If a field or variant uses `#[serde(rename = "...")]`, add matching `#[ts(rename = "...")]`.
- For discriminated unions, use explicit tagging in both serializers:
`#[serde(tag = "type", ...)]` and `#[ts(tag = "type", ...)]`.
- Prefer plain `String` IDs at the API boundary (do UUID parsing/conversion internally if needed).
- Timestamps should be integer Unix seconds (`i64`) and named `*_at` (for example, `created_at`, `updated_at`, `resets_at`).
- For experimental API surface area:
use `#[experimental("method/or/field")]`, derive `ExperimentalApi` when field-level gating is needed, and use `inspect_params: true` in `common.rs` when only some fields of a method are experimental.
### Client->server request payloads (`*Params`)
- Every optional field must be annotated with `#[ts(optional = nullable)]`. Do not use `#[ts(optional = nullable)]` outside client->server request payloads (`*Params`).
- Optional collection fields (for example `Vec`, `HashMap`) must use `Option<...>` + `#[ts(optional = nullable)]`. Do not use `#[serde(default)]` to model optional collections, and do not use `skip_serializing_if` on v2 payload fields.
- When you want omission to mean `false` for boolean fields, use `#[serde(default, skip_serializing_if = "std::ops::Not::not")] pub field: bool` over `Option<bool>`.
- For new list methods, implement cursor pagination by default:
request fields `pub cursor: Option<String>` and `pub limit: Option<u32>`,
response fields `pub data: Vec<...>` and `pub next_cursor: Option<String>`.
### Development Workflow
- Update docs/examples when API behavior changes (at minimum `app-server/README.md`).
- Regenerate schema fixtures when API shapes change:
`just write-app-server-schema`
(and `just write-app-server-schema --experimental` when experimental API fixtures are affected).
- Validate with `cargo test -p codex-app-server-protocol`.
- Avoid boilerplate tests that only assert experimental field markers for individual
request fields in `common.rs`; rely on schema generation/tests and behavioral coverage instead.

View File

@@ -1,35 +0,0 @@
load("@apple_support//xcode:xcode_config.bzl", "xcode_config")
xcode_config(name = "disable_xcode")
# We mark the local platform as glibc-compatible so that rust can grab a toolchain for us.
# TODO(zbarsky): Upstream a better libc constraint into rules_rust.
# We only enable this on linux though for sanity, and because it breaks remote execution.
platform(
name = "local_linux",
constraint_values = [
# We mark the local platform as glibc-compatible because musl-built rust cannot dlopen proc macros.
"@llvm//constraints/libc:gnu.2.28",
],
parents = ["@platforms//host"],
)
platform(
name = "local_windows",
constraint_values = [
# We just need to pick one of the ABIs. Do the same one we target.
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
parents = ["@platforms//host"],
)
alias(
name = "rbe",
actual = "@rbe_platform",
)
exports_files([
"AGENTS.md",
"workspace_root_test_launcher.bat.tpl",
"workspace_root_test_launcher.sh.tpl",
])

View File

@@ -1 +1 @@
The changelog can be found on the [releases page](https://github.com/openai/codex/releases).
The changelog can be found on the [releases page](https://github.com/openai/codex/releases)

View File

@@ -1,328 +0,0 @@
module(name = "codex")
bazel_dep(name = "bazel_skylib", version = "1.8.2")
bazel_dep(name = "platforms", version = "1.0.0")
bazel_dep(name = "llvm", version = "0.6.8")
register_toolchains("@llvm//toolchain:all")
osx = use_extension("@llvm//extensions:osx.bzl", "osx")
osx.from_archive(
sha256 = "6a4922f89487a96d7054ec6ca5065bfddd9f1d017c74d82f1d79cecf7feb8228",
strip_prefix = "Payload/Library/Developer/CommandLineTools/SDKs/MacOSX26.2.sdk",
type = "pkg",
urls = [
"https://swcdn.apple.com/content/downloads/26/44/047-81934-A_28TPKM5SD1/ps6pk6dk4x02vgfa5qsctq6tgf23t5f0w2/CLTools_macOSNMOS_SDK.pkg",
],
)
osx.frameworks(names = [
"ApplicationServices",
"AppKit",
"ColorSync",
"CoreFoundation",
"CoreGraphics",
"CoreServices",
"CoreText",
"AudioToolbox",
"CFNetwork",
"FontServices",
"AudioUnit",
"CoreAudio",
"CoreAudioTypes",
"Foundation",
"ImageIO",
"IOKit",
"Kernel",
"OSLog",
"Security",
"SystemConfiguration",
])
use_repo(osx, "macos_sdk")
# Needed to disable xcode...
bazel_dep(name = "apple_support", version = "2.1.0")
bazel_dep(name = "rules_cc", version = "0.2.16")
bazel_dep(name = "rules_platform", version = "0.1.0")
bazel_dep(name = "rules_rs", version = "0.0.43")
rules_rust = use_extension("@rules_rs//rs/experimental:rules_rust.bzl", "rules_rust")
use_repo(rules_rust, "rules_rust")
toolchains = use_extension("@rules_rs//rs/experimental/toolchains:module_extension.bzl", "toolchains")
toolchains.toolchain(
edition = "2024",
version = "1.93.0",
)
use_repo(toolchains, "default_rust_toolchains")
register_toolchains("@default_rust_toolchains//:all")
crate = use_extension("@rules_rs//rs:extensions.bzl", "crate")
crate.from_cargo(
cargo_lock = "//codex-rs:Cargo.lock",
cargo_toml = "//codex-rs:Cargo.toml",
platform_triples = [
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"aarch64-apple-darwin",
"aarch64-pc-windows-gnullvm",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-apple-darwin",
"x86_64-pc-windows-gnullvm",
],
use_experimental_platforms = True,
)
bazel_dep(name = "zstd", version = "1.5.7")
crate.annotation(
crate = "zstd-sys",
gen_build_script = "off",
deps = ["@zstd"],
)
crate.annotation(
build_script_env = {
"AWS_LC_SYS_NO_JITTER_ENTROPY": "1",
},
crate = "aws-lc-sys",
patch_args = ["-p1"],
patches = [
"//patches:aws-lc-sys_memcmp_check.patch",
],
)
inject_repo(crate, "zstd")
bazel_dep(name = "bzip2", version = "1.0.8.bcr.3")
crate.annotation(
crate = "bzip2-sys",
gen_build_script = "off",
deps = ["@bzip2//:bz2"],
)
inject_repo(crate, "bzip2")
bazel_dep(name = "zlib", version = "1.3.1.bcr.8")
crate.annotation(
crate = "libz-sys",
gen_build_script = "off",
deps = ["@zlib"],
)
inject_repo(crate, "zlib")
# TODO(zbarsky): Enable annotation after fixing windows arm64 builds.
crate.annotation(
crate = "lzma-sys",
gen_build_script = "on",
)
bazel_dep(name = "openssl", version = "3.5.4.bcr.0")
crate.annotation(
build_script_data = [
"@openssl//:gen_dir",
],
build_script_env = {
"OPENSSL_DIR": "$(execpath @openssl//:gen_dir)",
"OPENSSL_NO_VENDOR": "1",
"OPENSSL_STATIC": "1",
},
crate = "openssl-sys",
data = ["@openssl//:gen_dir"],
gen_build_script = "on",
)
inject_repo(crate, "openssl")
crate.annotation(
crate = "runfiles",
workspace_cargo_toml = "rust/runfiles/Cargo.toml",
)
http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_file = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "new_local_repository")
new_local_repository(
name = "v8_targets",
build_file = "//third_party/v8:BUILD.bazel",
path = "third_party/v8",
)
crate.annotation(
build_script_data = [
"@v8_targets//:rusty_v8_archive_for_target",
"@v8_targets//:rusty_v8_binding_for_target",
],
build_script_env = {
"RUSTY_V8_ARCHIVE": "$(execpath @v8_targets//:rusty_v8_archive_for_target)",
"RUSTY_V8_SRC_BINDING_PATH": "$(execpath @v8_targets//:rusty_v8_binding_for_target)",
},
crate = "v8",
gen_build_script = "on",
patch_args = ["-p1"],
patches = [
"//patches:rusty_v8_prebuilt_out_dir.patch",
],
)
inject_repo(crate, "v8_targets")
llvm = use_extension("@llvm//extensions:llvm.bzl", "llvm")
use_repo(llvm, "llvm-project")
crate.annotation(
# Provide the hermetic SDK path so the build script doesn't try to invoke an unhermetic `xcrun --show-sdk-path`.
build_script_data = [
"@macos_sdk//sysroot",
],
build_script_env = {
"BINDGEN_EXTRA_CLANG_ARGS": "-Xclang -internal-isystem -Xclang $(location @llvm//:builtin_resource_dir)/include",
"COREAUDIO_SDK_PATH": "$(location @macos_sdk//sysroot)",
"LIBCLANG_PATH": "$(location @llvm-project//clang:libclang_interface_output)",
},
build_script_tools = [
"@llvm-project//clang:libclang_interface_output",
"@llvm//:builtin_resource_dir",
],
crate = "coreaudio-sys",
gen_build_script = "on",
)
inject_repo(crate, "llvm", "llvm-project", "macos_sdk")
# Fix readme inclusions
crate.annotation(
crate = "windows-link",
patch_args = ["-p1"],
patches = [
"//patches:windows-link.patch",
],
)
bazel_dep(name = "alsa_lib", version = "1.2.9.bcr.4")
crate.annotation(
crate = "alsa-sys",
gen_build_script = "off",
deps = ["@alsa_lib"],
)
inject_repo(crate, "alsa_lib")
# V8 Bazel module, pinned to an exact tag and patched for this repo's module
# deps, rules, and source portability.
bazel_dep(name = "v8", version = "14.6.202.9")
archive_override(
module_name = "v8",
integrity = "sha256-JphDwLAzsd9KvgRZ7eQvNtPU6qGd3XjFt/a/1QITAJU=",
patch_strip = 3,
patches = [
"//patches:v8_module_deps.patch",
"//patches:v8_bazel_rules.patch",
"//patches:v8_source_portability.patch",
],
strip_prefix = "v8-14.6.202.9",
urls = ["https://github.com/v8/v8/archive/refs/tags/14.6.202.9.tar.gz"],
)
# Source of the Rust `v8` crate, built with a vendored BUILD file; the crate's
# build script consumes the prebuilt static libraries declared below.
http_archive(
name = "v8_crate_146_4_0",
build_file = "//third_party/v8:v8_crate.BUILD.bazel",
sha256 = "d97bcac5cdc5a195a4813f1855a6bc658f240452aac36caa12fd6c6f16026ab1",
strip_prefix = "v8-146.4.0",
type = "tar.gz",
urls = ["https://static.crates.io/crates/v8/v8-146.4.0.crate"],
)
# Prebuilt rusty_v8 static libraries, one per supported target triple, so the
# v8 crate never has to compile V8 from source. These feed
# RUSTY_V8_ARCHIVE / RUSTY_V8_SRC_BINDING_PATH in the v8 crate annotation.
http_file(
name = "rusty_v8_146_4_0_aarch64_apple_darwin_archive",
downloaded_file_path = "librusty_v8_release_aarch64-apple-darwin.a.gz",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_aarch64-apple-darwin.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_aarch64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_aarch64-unknown-linux-gnu.a.gz",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_aarch64-unknown-linux-gnu.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_aarch64_pc_windows_msvc_archive",
downloaded_file_path = "rusty_v8_release_aarch64-pc-windows-msvc.lib.gz",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/rusty_v8_release_aarch64-pc-windows-msvc.lib.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_x86_64_apple_darwin_archive",
downloaded_file_path = "librusty_v8_release_x86_64-apple-darwin.a.gz",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_x86_64-apple-darwin.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_x86_64_pc_windows_msvc_archive",
downloaded_file_path = "rusty_v8_release_x86_64-pc-windows-msvc.lib.gz",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/rusty_v8_release_x86_64-pc-windows-msvc.lib.gz",
],
)
# musl prebuilts (and their src bindings) are hosted on openai/codex releases;
# the upstream denoland releases do not publish musl artifacts.
http_file(
name = "rusty_v8_146_4_0_aarch64_unknown_linux_musl_archive",
downloaded_file_path = "librusty_v8_release_aarch64-unknown-linux-musl.a.gz",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/librusty_v8_release_aarch64-unknown-linux-musl.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_aarch64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_aarch64-unknown-linux-musl.rs",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/src_binding_release_aarch64-unknown-linux-musl.rs",
],
)
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-musl.a.gz",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/librusty_v8_release_x86_64-unknown-linux-musl.a.gz",
],
)
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/src_binding_release_x86_64-unknown-linux-musl.rs",
],
)
# Materialize the crate hub repository defined by the annotations above.
use_repo(crate, "crates")
bazel_dep(name = "libcap", version = "2.27.bcr.1")
# Remote-execution platform, generated by the repo rule in //:rbe.bzl.
rbe_platform_repository = use_repo_rule("//:rbe.bzl", "rbe_platform_repository")
rbe_platform_repository(
name = "rbe_platform",
)

1663
MODULE.bazel.lock generated

File diff suppressed because one or more lines are too long

3
NOTICE
View File

@@ -4,6 +4,3 @@ Copyright 2025 OpenAI
This project includes code derived from [Ratatui](https://github.com/ratatui/ratatui), licensed under the MIT license.
Copyright (c) 2016-2022 Florian Dehau
Copyright (c) 2023-2025 The Ratatui Developers
This project includes Meriyah parser assets from [meriyah](https://github.com/meriyah/meriyah), licensed under the ISC license.
Copyright (c) 2019 and later, KFlash and others.

70
PNPM.md Normal file
View File

@@ -0,0 +1,70 @@
# Migration to pnpm
This project has been migrated from npm to pnpm to improve dependency management and developer experience.
## Why pnpm?
- **Faster installation**: pnpm is significantly faster than npm and yarn
- **Disk space savings**: pnpm uses a content-addressable store to avoid duplication
- **Phantom dependency prevention**: pnpm creates a strict node_modules structure
- **Native workspaces support**: simplified monorepo management
## How to use pnpm
### Installation
```bash
# Global installation of pnpm
npm install -g pnpm@10.8.1
# Or with corepack (available with Node.js 22+)
corepack enable
corepack prepare pnpm@10.8.1 --activate
```
### Common commands
| npm command | pnpm equivalent |
| --------------- | ---------------- |
| `npm install` | `pnpm install` |
| `npm run build` | `pnpm run build` |
| `npm test` | `pnpm test` |
| `npm run lint` | `pnpm run lint` |
### Workspace-specific commands
| Action | Command |
| ------------------------------------------ | ---------------------------------------- |
| Run a command in a specific package | `pnpm --filter @openai/codex run build` |
| Install a dependency in a specific package | `pnpm --filter @openai/codex add lodash` |
| Run a command in all packages | `pnpm -r run test` |
## Monorepo structure
```
codex/
├── pnpm-workspace.yaml # Workspace configuration
├── .npmrc # pnpm configuration
├── package.json # Root dependencies and scripts
├── codex-cli/ # Main package
│ └── package.json # codex-cli specific dependencies
└── docs/ # Documentation (future package)
```
## Configuration files
- **pnpm-workspace.yaml**: Defines the packages included in the monorepo
- **.npmrc**: Configures pnpm behavior
- **Root package.json**: Contains shared scripts and dependencies
## CI/CD
CI/CD workflows have been updated to use pnpm instead of npm. Make sure your CI environments use pnpm 10.8.1 or higher.
## Known issues
If you encounter issues with pnpm, try the following solutions:
1. Remove the `node_modules` folder and `pnpm-lock.yaml` file, then run `pnpm install`
2. Make sure you're using pnpm 10.8.1 or higher
3. Verify that Node.js 22 or higher is installed

View File

@@ -1,12 +1,12 @@
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
<h1 align="center">OpenAI Codex CLI</h1>
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p>
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, see <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>
<p align="center">
<img src="https://github.com/openai/codex/blob/main/.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
</p>
</br>
If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE.</a>
</br>If you want the desktop app experience, run <code>codex app</code> or visit <a href="https://chatgpt.com/codex?app-landing-page=true">the Codex App page</a>.
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>
<img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
</p>
---
@@ -14,19 +14,23 @@ If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="http
### Installing and running Codex CLI
Install globally with your preferred package manager:
Install globally with your preferred package manager. If you use npm:
```shell
# Install using npm
npm install -g @openai/codex
```
Alternatively, if you use Homebrew:
```shell
# Install using Homebrew
brew install --cask codex
brew install codex
```
Then simply run `codex` to get started.
Then simply run `codex` to get started:
```shell
codex
```
<details>
<summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
@@ -46,15 +50,53 @@ Each archive contains a single entry with the platform baked into the name (e.g.
### Using Codex with your ChatGPT plan
<p align="center">
<img src="./.github/codex-cli-login.png" alt="Codex CLI login" width="80%" />
</p>
Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).
You can also use Codex with an API key, but this requires [additional setup](https://developers.openai.com/codex/auth#sign-in-with-an-api-key).
You can also use Codex with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please comment on [this issue](https://github.com/openai/codex/issues/1243).
## Docs
### Model Context Protocol (MCP)
- [**Codex Documentation**](https://developers.openai.com/codex)
Codex CLI supports [MCP servers](./docs/advanced.md#model-context-protocol-mcp). Enable by adding an `mcp_servers` section to your `~/.codex/config.toml`.
### Configuration
Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
---
### Docs & FAQ
- [**Getting started**](./docs/getting-started.md)
- [CLI usage](./docs/getting-started.md#cli-usage)
- [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
- [Example prompts](./docs/getting-started.md#example-prompts)
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [Configuration](./docs/config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)
- [**Authentication**](./docs/authentication.md)
- [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
- [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
- [**Advanced**](./docs/advanced.md)
- [Non-interactive / CI mode](./docs/advanced.md#non-interactive--ci-mode)
- [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
- [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
- [**Zero data retention (ZDR)**](./docs/zdr.md)
- [**Contributing**](./docs/contributing.md)
- [**Installing & building**](./docs/install.md)
- [**Install & build**](./docs/install.md)
- [System Requirements](./docs/install.md#system-requirements)
- [DotSlash](./docs/install.md#dotslash)
- [Build from source](./docs/install.md#build-from-source)
- [**FAQ**](./docs/faq.md)
- [**Open source fund**](./docs/open-source-fund.md)
---
## License
This repository is licensed under the [Apache-2.0 License](LICENSE).

View File

@@ -1,13 +0,0 @@
# Security Policy
Thank you for helping us keep Codex secure!
## Reporting Security Issues
Security is essential to OpenAI's mission. We appreciate the work of security researchers acting in good faith to identify and responsibly report potential vulnerabilities, helping us maintain strong privacy and security standards for our users and technology.
Our security program is managed through Bugcrowd, and we ask that any validated vulnerabilities be reported via the [Bugcrowd program](https://bugcrowd.com/engagements/openai).
## Vulnerability Disclosure Program
Our Vulnerability Program Guidelines are defined on our [Bugcrowd program page](https://bugcrowd.com/engagements/openai).

4
android/.gitignore vendored
View File

@@ -1,4 +0,0 @@
.gradle/
local.properties
**/build/
*.iml

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 791 KiB

View File

@@ -1,122 +0,0 @@
// Android application module that packages the codex CLI binaries (produced
// by the repo's native build task) as per-ABI JNI libraries, and compiles
// against the Agent Platform stub SDK jar extracted from a provided zip.
import org.gradle.api.GradleException
import org.gradle.api.tasks.Sync
plugins {
id("com.android.application")
}
// Host JDK guard: require Java 17+, never target past 21.
val minAndroidJavaVersion = 17
val maxAndroidJavaVersion = 21
val hostJavaMajorVersion = JavaVersion.current().majorVersion.toIntOrNull()
?: throw GradleException("Unable to determine Java version from ${JavaVersion.current()}.")
if (hostJavaMajorVersion < minAndroidJavaVersion) {
throw GradleException(
"Android service build requires Java ${minAndroidJavaVersion}+ (tested through Java ${maxAndroidJavaVersion}). Found Java ${hostJavaMajorVersion}."
)
}
// Target the host JDK version, capped at the max tested version.
val androidJavaTargetVersion = hostJavaMajorVersion.coerceAtMost(maxAndroidJavaVersion)
val androidJavaVersion = JavaVersion.toVersion(androidJavaTargetVersion)
android {
namespace = "com.openai.codex.agent"
compileSdk = 34
defaultConfig {
applicationId = "com.openai.codex.agent"
minSdk = 26
targetSdk = 34
versionCode = 1
versionName = "0.1.0"
}
buildTypes {
release {
isMinifyEnabled = false
proguardFiles(
getDefaultProguardFile("proguard-android-optimize.txt"),
"proguard-rules.pro",
)
}
}
compileOptions {
sourceCompatibility = androidJavaVersion
targetCompatibility = androidJavaVersion
}
packaging {
// NOTE(review): legacy packaging extracts native libs to disk at install
// time — presumably needed because the codex binary is executed as a
// process rather than dlopen'd; confirm.
jniLibs.useLegacyPackaging = true
}
}
val repoRoot = rootProject.projectDir.parentFile
// CODEX_ANDROID_SKIP_LTO=1/true (or -PcodexAndroidSkipLto) selects the
// no-LTO cargo profile output directory.
val skipAndroidLto = providers
.gradleProperty("codexAndroidSkipLto")
.orElse(providers.environmentVariable("CODEX_ANDROID_SKIP_LTO"))
.orNull
?.let { it == "1" || it.equals("true", ignoreCase = true) }
?: false
val codexCargoProfileDir = if (skipAndroidLto) "android-release-no-lto" else "release"
// Location of the Agent Platform stub SDK zip (Gradle property or env var).
val agentPlatformStubSdkZip = providers
.gradleProperty("agentPlatformStubSdkZip")
.orElse(providers.environmentVariable("ANDROID_AGENT_PLATFORM_STUB_SDK_ZIP"))
val extractedAgentPlatformJar = layout.buildDirectory.file(
"generated/agent-platform/android-agent-platform-stub-sdk.jar"
)
// Android ABI -> Rust target triple for the bundled codex binaries.
val codexTargets = mapOf(
"arm64-v8a" to "aarch64-linux-android",
"x86_64" to "x86_64-linux-android",
)
val codexJniDir = layout.buildDirectory.dir("generated/codex-jni")
// Extracts the compile-only stub SDK jar (flattened) out of the provided zip.
val extractAgentPlatformStubSdk = tasks.register<Sync>("extractAgentPlatformStubSdk") {
val sdkZip = agentPlatformStubSdkZip.orNull
?: throw GradleException(
"Set ANDROID_AGENT_PLATFORM_STUB_SDK_ZIP or -PagentPlatformStubSdkZip to the Android Agent Platform stub SDK zip."
)
val outputDir = extractedAgentPlatformJar.get().asFile.parentFile
from(zipTree(sdkZip)) {
include("payloads/compile_only/android-agent-platform-stub-sdk.jar")
eachFile { path = name }
includeEmptyDirs = false
}
into(outputDir)
}
// Copies each target triple's codex binary into the jniLibs layout under its
// ABI directory, renamed to libcodex.so.
val syncCodexCliJniLibs = tasks.register<Sync>("syncCodexCliJniLibs") {
val outputDir = codexJniDir
into(outputDir)
dependsOn(rootProject.tasks.named("buildCodexCliNative"))
codexTargets.forEach { (abi, triple) ->
val binary = file("${repoRoot}/codex-rs/target/android/${triple}/${codexCargoProfileDir}/codex")
from(binary) {
into(abi)
rename { "libcodex.so" }
}
}
// Fail loudly at execution time if the native build produced no binary.
doFirst {
codexTargets.forEach { (abi, triple) ->
val binary = file("${repoRoot}/codex-rs/target/android/${triple}/${codexCargoProfileDir}/codex")
if (!binary.exists()) {
throw GradleException(
"Missing codex binary for ${abi} at ${binary}. The Gradle native build task should have produced it."
)
}
}
}
}
android.sourceSets["main"].jniLibs.srcDir(codexJniDir.get().asFile)
tasks.named("preBuild").configure {
dependsOn(syncCodexCliJniLibs)
dependsOn(extractAgentPlatformStubSdk)
}
dependencies {
implementation(project(":bridge"))
compileOnly(files(extractedAgentPlatformJar))
testImplementation("junit:junit:4.13.2")
testImplementation("org.json:json:20240303")
}

View File

@@ -1 +0,0 @@
# Keep empty for now.

View File

@@ -1,67 +0,0 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android">
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.POST_NOTIFICATIONS" />
<uses-permission android:name="android.permission.DUMP" />
<uses-permission android:name="android.permission.FOREGROUND_SERVICE" />
<uses-permission android:name="android.permission.FOREGROUND_SERVICE_DATA_SYNC" />
<!-- NOTE(review): the agent-related permissions below are not standard AOSP
permissions; presumably declared by the Agent Platform stub SDK — confirm. -->
<uses-permission android:name="android.permission.MANAGE_AGENTS" />
<uses-permission android:name="android.permission.START_AGENT_REQUESTS" />
<uses-permission android:name="android.permission.START_GENIE_EXECUTION" />
<uses-permission android:name="android.permission.OBSERVE_AGENT_SESSIONS" />
<!-- Allow this app to query other apps' launcher activities. -->
<queries>
<intent>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent>
</queries>
<application
android:label="@string/app_name"
android:allowBackup="false"
android:extractNativeLibs="true"
android:icon="@mipmap/ic_launcher"
android:roundIcon="@mipmap/ic_launcher_round">
<!-- Agent service entry point; bound by the framework via the
android.app.agent.AgentService action. -->
<service
android:name=".CodexAgentService"
android:exported="true"
android:permission="android.permission.BIND_AGENT_SERVICE">
<intent-filter>
<action android:name="android.app.agent.AgentService" />
</intent-filter>
</service>
<!-- Launcher activity. -->
<activity
android:name=".MainActivity"
android:exported="true"
android:launchMode="singleTop">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
<!-- Handles session-creation intents from both this app's own action and
the framework's HANDLE_SESSION action; kept out of recents. -->
<activity
android:name=".CreateSessionActivity"
android:exported="true"
android:excludeFromRecents="true"
android:launchMode="singleTop"
android:taskAffinity="com.openai.codex.agent.create"
android:theme="@style/CodexCreateSessionTheme">
<intent-filter>
<action android:name="com.openai.codex.agent.action.CREATE_SESSION" />
<category android:name="android.intent.category.DEFAULT" />
</intent-filter>
<intent-filter>
<action android:name="android.app.agent.action.HANDLE_SESSION" />
<category android:name="android.intent.category.DEFAULT" />
</intent-filter>
</activity>
<!-- Internal-only session detail screen. -->
<activity
android:name=".SessionDetailActivity"
android:exported="false"
android:launchMode="singleTop" />
</application>
</manifest>

View File

@@ -1,781 +0,0 @@
package com.openai.codex.agent
import android.app.agent.AgentManager
import android.content.Context
import android.util.Log
import com.openai.codex.bridge.HostedCodexConfig
import com.openai.codex.bridge.SessionExecutionSettings
import java.io.BufferedWriter
import java.io.File
import java.io.IOException
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.CopyOnWriteArraySet
import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicInteger
import kotlin.concurrent.thread
import org.json.JSONArray
import org.json.JSONObject
/**
 * Singleton client for the bundled `codex app-server` subprocess. Spawns the
 * CLI with a stdio transport, exchanges newline-delimited JSON messages with
 * it, and exposes one-shot prompt execution plus account/model management.
 */
object AgentCodexAppServerClient {
// Log tag used throughout this object.
private const val TAG = "AgentCodexClient"
// Timeout for individual JSON-RPC responses; also the default turn timeout.
private const val REQUEST_TIMEOUT_MS = 30_000L
// NOTE(review): not referenced in the visible portion of this file — confirm it is used elsewhere.
private const val DEFAULT_AGENT_MODEL = "gpt-5.3-codex"
// RUST_LOG value handed to the spawned app-server process.
private const val AGENT_APP_SERVER_RUST_LOG = "warn"
/** Snapshot of the app-server's account and config state, built by parseRuntimeStatus. */
data class RuntimeStatus(
val authenticated: Boolean,
val accountEmail: String?,
val clientCount: Int,
val modelProviderId: String,
val configuredModel: String?,
val effectiveModel: String?,
val upstreamBaseUrl: String,
val frameworkResponsesPath: String,
)
/** Handle for an in-progress ChatGPT login: login id plus the URL the user must open. */
data class ChatGptLoginSession(
val loginId: String,
val authUrl: String,
)
/** Observer for [RuntimeStatus] changes; also invoked once immediately on registration. */
fun interface RuntimeStatusListener {
fun onRuntimeStatusChanged(status: RuntimeStatus?)
}
// Guards process lifecycle and all request entry points (one request at a time).
private val lifecycleLock = Any()
// Monotonic id source for outbound JSON-RPC requests.
private val requestIdSequence = AtomicInteger(1)
// Count of in-flight public calls; mirrored to the server via updateClientCount().
private val activeRequests = AtomicInteger(0)
// Outstanding request id -> single-slot queue the stdout pump delivers the response into.
private val pendingResponses = ConcurrentHashMap<String, LinkedBlockingQueue<JSONObject>>()
// Inbound notifications and server-initiated requests, consumed by waitForTurnCompletion.
private val notifications = LinkedBlockingQueue<JSONObject>()
private val runtimeStatusListeners = CopyOnWriteArraySet<RuntimeStatusListener>()
// Spawned app-server process and its stdin writer; null until ensureStarted().
private var process: Process? = null
private var writer: BufferedWriter? = null
private var stdoutThread: Thread? = null
private var stderrThread: Thread? = null
private var localProxy: AgentLocalCodexProxy? = null
private var initialized = false
@Volatile
private var cachedRuntimeStatus: RuntimeStatus? = null
@Volatile
private var applicationContext: Context? = null
// When non-null, upstream model traffic is routed through the agent framework
// session with this id (see forwardResponsesRequest).
@Volatile
private var activeFrameworkSessionId: String? = null
// Ensures only one background status refresh runs at a time.
private val runtimeStatusRefreshInFlight = AtomicBoolean(false)
/** Returns the most recently cached status without contacting the server. */
fun currentRuntimeStatus(): RuntimeStatus? = cachedRuntimeStatus
/** Registers [listener] and immediately delivers the cached status to it. */
fun registerRuntimeStatusListener(listener: RuntimeStatusListener) {
runtimeStatusListeners += listener
listener.onRuntimeStatusChanged(cachedRuntimeStatus)
}
fun unregisterRuntimeStatusListener(listener: RuntimeStatusListener) {
runtimeStatusListeners -= listener
}
/**
 * Kicks off a background refresh of the runtime status. At most one refresh
 * runs at a time; overlapping calls are dropped silently. On any failure the
 * cached status is cleared instead of propagating the error.
 */
fun refreshRuntimeStatusAsync(
    context: Context,
    refreshToken: Boolean = false,
) {
    // Drop this call if another refresh is already in flight.
    if (!runtimeStatusRefreshInFlight.compareAndSet(false, true)) {
        return
    }
    thread(name = "AgentRuntimeStatusRefresh") {
        try {
            // readRuntimeStatus caches the fresh status itself on success.
            readRuntimeStatus(context, refreshToken)
        } catch (_: Throwable) {
            updateCachedRuntimeStatus(null)
        } finally {
            runtimeStatusRefreshInFlight.set(false)
        }
    }
}
/**
 * Runs a single prompt against the app-server and returns the final assistant
 * message text. Serialized on [lifecycleLock]; each call starts a fresh
 * thread on the server (see [startThread]) and waits for the turn to finish.
 *
 * When [frameworkSessionId] is non-blank it is installed as the active
 * framework session for the duration of the call (routing upstream traffic
 * through the agent framework) and the previous value is restored on exit.
 *
 * @throws IOException on timeout, interruption, or turn failure.
 */
fun requestText(
context: Context,
instructions: String,
prompt: String,
outputSchema: JSONObject? = null,
dynamicTools: JSONArray? = null,
toolCallHandler: ((String, JSONObject) -> JSONObject)? = null,
requestUserInputHandler: ((JSONArray) -> JSONObject)? = null,
executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
requestTimeoutMs: Long = REQUEST_TIMEOUT_MS,
frameworkSessionId: String? = null,
): String = synchronized(lifecycleLock) {
ensureStarted(context.applicationContext)
// Save/restore the framework session id so nested state is not clobbered.
val previousFrameworkSessionId = activeFrameworkSessionId
activeFrameworkSessionId = frameworkSessionId?.trim()?.ifEmpty { null }
activeRequests.incrementAndGet()
updateClientCount()
try {
Log.i(
TAG,
"requestText start tools=${dynamicTools?.length() ?: 0} prompt=${prompt.take(160)}",
)
// Drop any stale notifications from previous turns before starting.
notifications.clear()
val threadId = startThread(
context = context.applicationContext,
instructions = instructions,
dynamicTools = dynamicTools,
executionSettings = executionSettings,
)
startTurn(
threadId = threadId,
prompt = prompt,
outputSchema = outputSchema,
executionSettings = executionSettings,
)
waitForTurnCompletion(toolCallHandler, requestUserInputHandler, requestTimeoutMs).also { response ->
Log.i(TAG, "requestText completed response=${response.take(160)}")
}
} finally {
activeRequests.decrementAndGet()
updateClientCount()
activeFrameworkSessionId = previousFrameworkSessionId
}
}
/**
 * Synchronously queries the app-server for account and config state, caches
 * the combined [RuntimeStatus], and returns it.
 */
fun readRuntimeStatus(
    context: Context,
    refreshToken: Boolean = false,
): RuntimeStatus = synchronized(lifecycleLock) {
    ensureStarted(context.applicationContext)
    activeRequests.incrementAndGet()
    updateClientCount()
    try {
        val accountResponse = request(
            method = "account/read",
            params = JSONObject().put("refreshToken", refreshToken),
        )
        val configResponse = request(
            method = "config/read",
            params = JSONObject().put("includeLayers", false),
        )
        val status = parseRuntimeStatus(context.applicationContext, accountResponse, configResponse)
        updateCachedRuntimeStatus(status)
        status
    } finally {
        activeRequests.decrementAndGet()
        updateClientCount()
    }
}
/**
 * Begins a ChatGPT login flow on the app-server and returns the login id plus
 * the URL the user must open to authenticate.
 *
 * @throws IOException if the server replies with an unexpected login type.
 */
fun startChatGptLogin(context: Context): ChatGptLoginSession = synchronized(lifecycleLock) {
    ensureStarted(context.applicationContext)
    val response = request(
        method = "account/login/start",
        params = JSONObject().put("type", "chatgpt"),
    )
    val responseType = response.optString("type")
    if (responseType != "chatgpt") {
        throw IOException("Unexpected login response type: $responseType")
    }
    ChatGptLoginSession(
        loginId = response.optString("loginId"),
        authUrl = response.optString("authUrl"),
    )
}
/** Logs the account out on the app-server and refreshes the cached status. */
fun logoutAccount(context: Context) = synchronized(lifecycleLock) {
    ensureStarted(context.applicationContext)
    request(method = "account/logout", params = null)
    refreshRuntimeStatusAsync(context.applicationContext)
}
/**
 * Fetches the full (non-hidden) model catalog from the app-server, following
 * `nextCursor` pagination until exhausted.
 */
fun listModels(context: Context): List<AgentModelOption> = synchronized(lifecycleLock) {
ensureStarted(context.applicationContext)
val models = mutableListOf<AgentModelOption>()
var cursor: String? = null
do {
val result = request(
method = "model/list",
params = JSONObject().apply {
put("includeHidden", false)
cursor?.let { put("cursor", it) }
},
)
val data = result.optJSONArray("data") ?: JSONArray()
for (index in 0 until data.length()) {
// Skip non-object entries rather than failing the whole listing.
val item = data.optJSONObject(index) ?: continue
models += AgentModelOption(
id = item.optString("id"),
model = item.optString("model"),
// Fall back to the raw model id when no display name is provided.
displayName = item.optString("displayName").ifBlank { item.optString("model") },
description = item.optString("description"),
supportedReasoningEfforts = buildList {
val efforts = item.optJSONArray("supportedReasoningEfforts") ?: JSONArray()
for (effortIndex in 0 until efforts.length()) {
val effort = efforts.optJSONObject(effortIndex) ?: continue
add(
AgentReasoningEffortOption(
reasoningEffort = effort.optString("reasoningEffort"),
description = effort.optString("description"),
),
)
}
},
defaultReasoningEffort = item.optString("defaultReasoningEffort"),
isDefault = item.optBoolean("isDefault"),
)
}
// optNullableString: presumably an extension returning null for absent/JSON-null — TODO confirm.
cursor = result.optNullableString("nextCursor")
} while (cursor != null)
models
}
/**
 * Starts (or restarts) the app-server process if it is not already running
 * and initialized. Must be called with [lifecycleLock] held.
 *
 * Sequence: tear down any stale state, start the local HTTP proxy that
 * bridges model traffic, write the hosted config pointing at the proxy,
 * spawn `codex app-server` over stdio, start the stdout/stderr pump threads,
 * then perform the initialize handshake.
 */
private fun ensureStarted(context: Context) {
// Fast path: healthy process that already completed the handshake.
if (process?.isAlive == true && writer != null && initialized) {
return
}
closeProcess()
applicationContext = context
notifications.clear()
pendingResponses.clear()
val codexHome = File(context.filesDir, "codex-home").apply(File::mkdirs)
// The proxy forwards upstream Responses requests issued by the app-server.
localProxy = AgentLocalCodexProxy { requestBody ->
forwardResponsesRequest(context, requestBody)
}.also(AgentLocalCodexProxy::start)
val proxyBaseUrl = localProxy?.baseUrl
?: throw IOException("local Agent proxy did not start")
HostedCodexConfig.write(context, codexHome, proxyBaseUrl)
val startedProcess = ProcessBuilder(
listOf(
CodexCliBinaryLocator.resolve(context).absolutePath,
"-c",
"enable_request_compression=false",
"app-server",
"--listen",
"stdio://",
),
).apply {
environment()["CODEX_HOME"] = codexHome.absolutePath
environment()["RUST_LOG"] = AGENT_APP_SERVER_RUST_LOG
}.start()
process = startedProcess
writer = startedProcess.outputStream.bufferedWriter()
startStdoutPump(startedProcess)
startStderrPump(startedProcess)
initialize()
// Only mark initialized after the handshake succeeds.
initialized = true
}
/**
 * Tears down the app-server process, pump threads, and local proxy, and
 * clears the cached status.
 *
 * NOTE(review): interrupt() does not unblock a thread parked in a blocking
 * stream read; the later destroy() is presumably what actually ends the
 * pumps — confirm.
 */
private fun closeProcess() {
stdoutThread?.interrupt()
stderrThread?.interrupt()
runCatching { writer?.close() }
writer = null
localProxy?.close()
localProxy = null
process?.destroy()
process = null
initialized = false
updateCachedRuntimeStatus(null)
}
/**
 * Routes an upstream Responses request from the local proxy either directly
 * or — when a framework session is active — through the Android agent
 * framework's transport.
 *
 * @throws IOException if a framework session is active but AgentManager is unavailable.
 */
private fun forwardResponsesRequest(
    context: Context,
    requestBody: String,
): AgentResponsesProxy.HttpResponse {
    // No (or blank) framework session: send the request directly.
    val sessionId = activeFrameworkSessionId?.takeUnless(String::isBlank)
        ?: return AgentResponsesProxy.sendResponsesRequest(context, requestBody)
    val agentManager = context.getSystemService(AgentManager::class.java)
        ?: throw IOException("AgentManager unavailable for framework session transport")
    return AgentResponsesProxy.sendResponsesRequestThroughFramework(
        agentManager = agentManager,
        sessionId = sessionId,
        context = context,
        requestBody = requestBody,
    )
}
/** Performs the initialize handshake, then signals readiness via `initialized`. */
private fun initialize() {
    val clientInfo = JSONObject()
        .put("name", "android_agent")
        .put("title", "Android Agent")
        .put("version", "0.1.0")
    val params = JSONObject()
        .put("clientInfo", clientInfo)
        .put("capabilities", JSONObject().put("experimentalApi", true))
    request(method = "initialize", params = params)
    notify("initialized", JSONObject())
}
/**
 * Starts an ephemeral, read-only app-server thread with the given base
 * instructions (and optional dynamic tools) and returns its id.
 */
private fun startThread(
    context: Context,
    instructions: String,
    dynamicTools: JSONArray?,
    executionSettings: SessionExecutionSettings,
): String {
    val params = JSONObject().apply {
        put("approvalPolicy", "never")
        put("sandbox", "read-only")
        put("ephemeral", true)
        put("cwd", context.filesDir.absolutePath)
        put("serviceName", "android_agent")
        put("baseInstructions", instructions)
        val model = executionSettings.model
        if (!model.isNullOrBlank()) {
            put("model", model)
        }
        if (dynamicTools != null) {
            put("dynamicTools", dynamicTools)
        }
    }
    return request(method = "thread/start", params = params)
        .getJSONObject("thread")
        .getString("id")
}
/** Submits [prompt] as a new turn on [threadId], applying optional model/effort/schema overrides. */
private fun startTurn(
    threadId: String,
    prompt: String,
    outputSchema: JSONObject?,
    executionSettings: SessionExecutionSettings,
) {
    val input = JSONArray().put(
        JSONObject().put("type", "text").put("text", prompt),
    )
    val turnParams = JSONObject().apply {
        put("threadId", threadId)
        put("input", input)
        val model = executionSettings.model
        if (!model.isNullOrBlank()) {
            put("model", model)
        }
        val effort = executionSettings.reasoningEffort
        if (!effort.isNullOrBlank()) {
            put("effort", effort)
        }
        if (outputSchema != null) {
            put("outputSchema", outputSchema)
        }
    }
    request(method = "turn/start", params = turnParams)
}
/**
 * Consumes the [notifications] queue until `turn/completed` arrives and
 * returns the final assistant message text.
 *
 * Along the way it dispatches server-initiated requests (tool calls /
 * user-input requests) and buffers streamed agentMessage deltas per itemId
 * as a fallback for completed items that carry no `text`.
 *
 * @throws IOException on overall timeout, process death, or a non-completed
 *   turn status.
 */
private fun waitForTurnCompletion(
toolCallHandler: ((String, JSONObject) -> JSONObject)?,
requestUserInputHandler: ((JSONArray) -> JSONObject)?,
requestTimeoutMs: Long,
): String {
val streamedAgentMessages = mutableMapOf<String, StringBuilder>()
var finalAgentMessage: String? = null
// One absolute deadline covers the whole turn, not each notification.
val deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(requestTimeoutMs)
while (true) {
val remainingNanos = deadline - System.nanoTime()
if (remainingNanos <= 0L) {
throw IOException("Timed out waiting for Agent turn completion")
}
val notification = notifications.poll(remainingNanos, TimeUnit.NANOSECONDS)
if (notification == null) {
// Poll returned empty: surface a dead process before re-checking the deadline.
checkProcessAlive()
continue
}
// Messages carrying both "id" and "method" are server->client requests.
if (notification.has("id") && notification.has("method")) {
handleServerRequest(notification, toolCallHandler, requestUserInputHandler)
continue
}
val params = notification.optJSONObject("params") ?: JSONObject()
when (notification.optString("method")) {
"item/agentMessage/delta" -> {
val itemId = params.optString("itemId")
if (itemId.isNotBlank()) {
streamedAgentMessages.getOrPut(itemId, ::StringBuilder)
.append(params.optString("delta"))
}
}
"item/commandExecution/outputDelta" -> {
// Command output is logged only; it does not feed the returned text.
val itemId = params.optString("itemId")
val delta = params.optString("delta")
if (delta.isNotBlank()) {
Log.i(
TAG,
"commandExecution/outputDelta itemId=$itemId delta=${delta.take(400)}",
)
}
}
"item/started" -> {
val item = params.optJSONObject("item")
Log.i(
TAG,
"item/started type=${item?.optString("type")} tool=${item?.optString("tool")}",
)
}
"item/completed" -> {
val item = params.optJSONObject("item") ?: continue
Log.i(
TAG,
"item/completed type=${item.optString("type")} status=${item.optString("status")} tool=${item.optString("tool")}",
)
if (item.optString("type") == "commandExecution") {
Log.i(TAG, "commandExecution/completed item=$item")
}
if (item.optString("type") == "agentMessage") {
val itemId = item.optString("id")
// Prefer the item's own text; fall back to accumulated deltas.
val text = item.optString("text").ifBlank {
streamedAgentMessages[itemId]?.toString().orEmpty()
}
if (text.isNotBlank()) {
finalAgentMessage = text
}
}
}
"turn/completed" -> {
val turn = params.optJSONObject("turn") ?: JSONObject()
Log.i(
TAG,
"turn/completed status=${turn.optString("status")} error=${turn.opt("error")} finalMessage=${finalAgentMessage?.take(160)}",
)
return when (turn.optString("status")) {
"completed" -> finalAgentMessage?.takeIf(String::isNotBlank)
?: throw IOException("Agent turn completed without an assistant message")
"interrupted" -> throw IOException("Agent turn interrupted")
else -> throw IOException(
turn.opt("error")?.toString()
?: "Agent turn failed with status ${turn.optString("status", "unknown")}",
)
}
}
}
}
}
/**
 * Dispatches a server-initiated request (tool call or user-input request) to
 * the registered handler and writes back either a result or a JSON-RPC error.
 *
 * Error codes: -32601 when no handler exists / method unsupported, -32000
 * when the handler itself throws.
 */
private fun handleServerRequest(
message: JSONObject,
toolCallHandler: ((String, JSONObject) -> JSONObject)?,
requestUserInputHandler: ((JSONArray) -> JSONObject)?,
) {
// A request without an id cannot be answered; ignore it.
val requestId = message.opt("id") ?: return
val method = message.optString("method", "unknown")
val params = message.optJSONObject("params") ?: JSONObject()
Log.i(TAG, "handleServerRequest method=$method")
when (method) {
"item/tool/call" -> {
if (toolCallHandler == null) {
sendError(
requestId = requestId,
code = -32601,
message = "No Agent tool handler registered for $method",
)
return
}
val toolName = params.optString("tool").trim()
val arguments = params.optJSONObject("arguments") ?: JSONObject()
Log.i(TAG, "tool/call tool=$toolName arguments=$arguments")
// Handler failures are converted to an error response, not rethrown.
val result = runCatching { toolCallHandler(toolName, arguments) }
.getOrElse { err ->
sendError(
requestId = requestId,
code = -32000,
message = err.message ?: "Agent tool call failed",
)
return
}
Log.i(TAG, "tool/call completed tool=$toolName result=$result")
sendResult(requestId, result)
}
"item/tool/requestUserInput" -> {
if (requestUserInputHandler == null) {
sendError(
requestId = requestId,
code = -32601,
message = "No Agent user-input handler registered for $method",
)
return
}
val questions = params.optJSONArray("questions") ?: JSONArray()
Log.i(TAG, "requestUserInput questions=$questions")
val result = runCatching { requestUserInputHandler(questions) }
.getOrElse { err ->
sendError(
requestId = requestId,
code = -32000,
message = err.message ?: "Agent user input request failed",
)
return
}
Log.i(TAG, "requestUserInput completed result=$result")
sendResult(requestId, result)
}
else -> {
sendError(
requestId = requestId,
code = -32601,
message = "Unsupported Agent app-server request: $method",
)
return
}
}
}
/** Sends a successful JSON-RPC reply carrying [result] under the request id. */
private fun sendResult(
    requestId: Any,
    result: JSONObject,
) {
    val reply = JSONObject()
    reply.put("id", requestId)
    reply.put("result", result)
    sendMessage(reply)
}
/** Sends a JSON-RPC error reply: {"id": ..., "error": {"code": ..., "message": ...}}. */
private fun sendError(
    requestId: Any,
    code: Int,
    message: String,
) {
    val errorBody = JSONObject()
    errorBody.put("code", code)
    errorBody.put("message", message)
    val reply = JSONObject()
    reply.put("id", requestId)
    reply.put("error", errorBody)
    sendMessage(reply)
}
/**
 * Sends a JSON-RPC request and blocks for its response.
 *
 * A fresh string id is allocated and registered in [pendingResponses] so the
 * stdout pump can route the reply into this thread's single-slot queue.
 *
 * @return the response's "result" object (empty object when absent).
 * @throws IOException on timeout ([REQUEST_TIMEOUT_MS]) or when the server
 *   replies with an "error" object.
 */
private fun request(
    method: String,
    params: JSONObject?,
): JSONObject {
    val requestId = requestIdSequence.getAndIncrement().toString()
    val responseQueue = LinkedBlockingQueue<JSONObject>(1)
    // Register before sending so the reply can never race past us.
    pendingResponses[requestId] = responseQueue
    try {
        val message = JSONObject()
            .put("id", requestId)
            .put("method", method)
        if (params != null) {
            message.put("params", params)
        }
        sendMessage(message)
        val response = responseQueue.poll(REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)
            ?: throw IOException("Timed out waiting for $method response")
        val error = response.optJSONObject("error")
        if (error != null) {
            throw IOException("$method failed: ${error.optString("message", error.toString())}")
        }
        return response.optJSONObject("result") ?: JSONObject()
    } finally {
        // Always unregister, including on timeout/error, to avoid leaking queues.
        pendingResponses.remove(requestId)
    }
}
/** Sends a JSON-RPC notification (no id, so no reply is expected). */
private fun notify(
    method: String,
    params: JSONObject,
) {
    val message = JSONObject()
    message.put("method", method)
    message.put("params", params)
    sendMessage(message)
}
/** Writes one newline-delimited JSON message to the app-server's stdin and flushes. */
private fun sendMessage(message: JSONObject) {
    val activeWriter = writer ?: throw IOException("Agent app-server writer unavailable")
    with(activeWriter) {
        write(message.toString())
        newLine()
        flush()
    }
}
/**
 * Starts the background thread that reads newline-delimited JSON from the
 * process's stdout and routes each parsed message via [routeInbound].
 * Blank lines are skipped; unparseable lines are logged and dropped.
 */
private fun startStdoutPump(process: Process) {
    val pump = Thread {
        process.inputStream.bufferedReader().useLines { lines ->
            for (line in lines) {
                if (line.isBlank()) {
                    continue
                }
                runCatching { JSONObject(line) }
                    .onSuccess(::routeInbound)
                    .onFailure { err ->
                        Log.w(TAG, "Failed to parse Agent app-server stdout line", err)
                    }
            }
        }
    }
    pump.name = "AgentCodexStdout"
    stdoutThread = pump
    pump.start()
}
/** Starts the background thread that mirrors the process's stderr into logcat. */
private fun startStderrPump(process: Process) {
    val pump = Thread {
        process.errorStream.bufferedReader().useLines { lines ->
            for (line in lines) {
                logAgentStderrLine(line)
            }
        }
    }
    pump.name = "AgentCodexStderr"
    stderrThread = pump
    pump.start()
}
/**
 * Routes one inbound message: replies (id present, no method) go to the
 * matching pending-response queue; everything else is treated as a
 * notification/server request and queued after side effects run.
 */
private fun routeInbound(message: JSONObject) {
    val isResponse = message.has("id") && !message.has("method")
    if (isResponse) {
        pendingResponses[message.get("id").toString()]?.offer(message)
        return
    }
    handleInboundSideEffects(message)
    notifications.offer(message)
}
/**
 * Reacts to account lifecycle notifications by refreshing the cached runtime
 * status; login completion additionally forces a token refresh.
 */
private fun handleInboundSideEffects(message: JSONObject) {
    when (message.optString("method")) {
        "account/updated" ->
            applicationContext?.also { refreshRuntimeStatusAsync(it) }
        "account/login/completed" ->
            applicationContext?.also { refreshRuntimeStatusAsync(it, refreshToken = true) }
    }
}
/**
 * Fails fast when the app-server process is missing or has exited; on exit
 * the cached runtime status is cleared before throwing.
 */
private fun checkProcessAlive() {
    val activeProcess = process ?: throw IOException("Agent app-server unavailable")
    if (activeProcess.isAlive) {
        return
    }
    initialized = false
    updateCachedRuntimeStatus(null)
    throw IOException("Agent app-server exited with code ${activeProcess.exitValue()}")
}
/**
 * Mirrors one stderr line from the app-server into logcat at a severity
 * inferred from Rust-style log markers ("ERROR"/"WARN").
 */
private fun logAgentStderrLine(line: String) {
    if (line.isBlank()) {
        return
    }
    when {
        line.contains(" ERROR ") || line.startsWith("ERROR") -> Log.e(TAG, line)
        line.contains(" WARN ") || line.startsWith("WARN") -> Log.w(TAG, line)
        // NOTE(review): lines matching neither marker (e.g. stack-trace
        // continuations) are silently discarded — confirm this is intentional.
    }
}
/** Refreshes the client count on the cached status; no-op when no status is cached. */
private fun updateClientCount() {
    val currentStatus = cachedRuntimeStatus ?: return
    updateCachedRuntimeStatus(currentStatus.copy(clientCount = activeRequests.get()))
}
/**
 * Stores the new status and fans it out to all listeners; listener failures
 * are logged without interrupting delivery to the rest.
 */
private fun updateCachedRuntimeStatus(status: RuntimeStatus?) {
    if (cachedRuntimeStatus == status) {
        return
    }
    cachedRuntimeStatus = status
    for (listener in runtimeStatusListeners) {
        try {
            listener.onRuntimeStatusChanged(status)
        } catch (err: Throwable) {
            Log.w(TAG, "Runtime status listener failed", err)
        }
    }
}
/**
 * Combines the app-server's account and config responses into a
 * [RuntimeStatus] snapshot.
 *
 * The auth mode is read from the on-disk auth.json snapshot when possible;
 * when that fails it falls back to inferring from the account type. The
 * upstream base URL is resolved from the config's provider settings.
 */
private fun parseRuntimeStatus(
    context: Context,
    accountResponse: JSONObject,
    configResponse: JSONObject,
): RuntimeStatus {
    val account = accountResponse.optJSONObject("account")
    val config = configResponse.optJSONObject("config") ?: JSONObject()
    val configuredModel = config.optNullableString("model")
    val effectiveModel = configuredModel ?: DEFAULT_AGENT_MODEL
    val configuredProvider = config.optNullableString("model_provider")
    val accountType = account?.optNullableString("type").orEmpty()
    val authMode = runCatching {
        AgentResponsesProxy.loadAuthSnapshot(File(context.filesDir, "codex-home/auth.json")).authMode
    }.getOrElse {
        // Snapshot unavailable: infer auth mode from the account type.
        if (accountType == "apiKey") {
            "apiKey"
        } else {
            "chatgpt"
        }
    }
    val upstreamBaseUrl = AgentResponsesProxy.buildResponsesBaseUrl(
        upstreamBaseUrl = resolveUpstreamBaseUrl(
            config = config,
            accountType = accountType,
            configuredProvider = configuredProvider,
        ),
        authMode = authMode,
    )
    return RuntimeStatus(
        // A present account object is treated as "authenticated".
        authenticated = account != null,
        accountEmail = account?.optNullableString("email"),
        clientCount = activeRequests.get(),
        modelProviderId = configuredProvider ?: inferModelProviderId(accountType),
        configuredModel = configuredModel,
        effectiveModel = effectiveModel,
        upstreamBaseUrl = upstreamBaseUrl,
        frameworkResponsesPath = AgentResponsesProxy.buildFrameworkResponsesPath(upstreamBaseUrl),
    )
}
/** Maps an account type to its default model-provider id ("unknown" otherwise). */
private fun inferModelProviderId(accountType: String): String = when (accountType) {
    "chatgpt" -> "chatgpt"
    "apiKey" -> "openai"
    else -> "unknown"
}
/** Reads a string field, mapping JSON null, absence, and blank values to Kotlin null. */
private fun JSONObject.optNullableString(name: String): String? {
    if (isNull(name)) {
        return null
    }
    return optString(name).takeIf { it.isNotBlank() }
}
/**
 * Resolves the upstream base URL for forwarded model requests.
 *
 * Resolution order: an explicitly configured provider's base_url (unless the
 * provider is the Android loopback provider, which must not be forwarded to
 * itself), then account-type defaults, then a generic fallback chain ending
 * in the "provider-default" sentinel.
 */
private fun resolveUpstreamBaseUrl(
    config: JSONObject,
    accountType: String,
    configuredProvider: String?,
): String {
    val modelProviders = config.optJSONObject("model_providers")
    val configuredProviderBaseUrl = configuredProvider?.let { providerId ->
        modelProviders
            ?.optJSONObject(providerId)
            ?.optString("base_url")
            ?.ifBlank { null }
    }
    // Never forward to the local Android HTTP provider's own base URL.
    if (
        configuredProviderBaseUrl != null &&
        configuredProvider != HostedCodexConfig.ANDROID_HTTP_PROVIDER_ID
    ) {
        return configuredProviderBaseUrl
    }
    return when (accountType) {
        "chatgpt" -> config.optString("chatgpt_base_url")
            .ifBlank { "https://chatgpt.com/backend-api/codex" }
        "apiKey" -> config.optString("openai_base_url")
            .ifBlank { "https://api.openai.com/v1" }
        else -> config.optString("openai_base_url")
            .ifBlank {
                config.optString("chatgpt_base_url")
                    .ifBlank { "provider-default" }
            }
    }
}
}

View File

@@ -1,347 +0,0 @@
package com.openai.codex.agent
import android.content.Context
import android.util.Log
import java.io.IOException
import org.json.JSONArray
import org.json.JSONObject
/**
 * Exposes Android framework session management to the Agent as app-server
 * tool calls: starting direct parent/child sessions, listing sessions,
 * answering queued questions, re-attaching detached targets, and cancelling.
 *
 * The build*ToolSpec methods produce JSON-schema-style tool descriptors for
 * the model; [handleToolCall] executes the matching operation against
 * [sessionController].
 */
class AgentFrameworkToolBridge(
    private val context: Context,
    private val sessionController: AgentSessionController,
) {
    companion object {
        private const val TAG = "AgentFrameworkTool"
        // Packages the Agent must never delegate to: system shell/UI and the
        // Agent/Genie apps themselves.
        private val DISALLOWED_TARGET_PACKAGES = setOf(
            "com.android.shell",
            "com.android.systemui",
            "com.openai.codex.agent",
            "com.openai.codex.genie",
        )
        const val START_DIRECT_SESSION_TOOL = "android_framework_sessions_start_direct"
        const val LIST_SESSIONS_TOOL = "android_framework_sessions_list"
        const val ANSWER_QUESTION_TOOL = "android_framework_sessions_answer_question"
        const val ATTACH_TARGET_TOOL = "android_framework_sessions_attach_target"
        const val CANCEL_SESSION_TOOL = "android_framework_sessions_cancel"

        /**
         * Validates and converts raw tool-call arguments into a
         * [StartDirectSessionRequest].
         *
         * Duplicate packages are collapsed; per-target objectives fall back
         * to [userObjective]; per-target presentation policies fall back to
         * the arguments-level policy and finally to AGENT_CHOICE.
         *
         * @throws IOException when "targets" is missing, when every selected
         *   package is missing/ineligible, or when a target requires detached
         *   presentation while allowDetachedMode is false.
         */
        internal fun parseStartDirectSessionArguments(
            arguments: JSONObject,
            userObjective: String,
            isEligibleTargetPackage: (String) -> Boolean,
        ): StartDirectSessionRequest {
            val targetsJson = arguments.optJSONArray("targets")
                ?: throw IOException("Framework session tool arguments missing targets")
            val rejectedPackages = mutableListOf<String>()
            val targets = buildList {
                for (index in 0 until targetsJson.length()) {
                    val target = targetsJson.optJSONObject(index) ?: continue
                    val packageName = target.optString("packageName").trim()
                    if (packageName.isEmpty()) {
                        continue
                    }
                    if (!isEligibleTargetPackage(packageName)) {
                        // Remember rejections so the error can name them.
                        rejectedPackages += packageName
                        continue
                    }
                    val objective = target.optString("objective").trim().ifEmpty { userObjective }
                    val finalPresentationPolicy = target.optString("finalPresentationPolicy").trim()
                    val defaultFinalPresentationPolicy = arguments.optString("finalPresentationPolicy").trim()
                    add(
                        AgentDelegationTarget(
                            packageName = packageName,
                            objective = objective,
                            finalPresentationPolicy =
                                SessionFinalPresentationPolicy.fromWireValue(finalPresentationPolicy)
                                    ?: SessionFinalPresentationPolicy.fromWireValue(defaultFinalPresentationPolicy)
                                    ?: SessionFinalPresentationPolicy.AGENT_CHOICE,
                        ),
                    )
                }
            }.distinctBy(AgentDelegationTarget::packageName)
            if (targets.isEmpty()) {
                if (rejectedPackages.isNotEmpty()) {
                    throw IOException(
                        "Framework session tool selected missing or disallowed package(s): ${rejectedPackages.joinToString(", ")}",
                    )
                }
                throw IOException("Framework session tool did not select an eligible target package")
            }
            val allowDetachedMode = arguments.optBoolean("allowDetachedMode", true)
            val detachedPolicyTargets = targets.filter { it.finalPresentationPolicy.requiresDetachedMode() }
            if (!allowDetachedMode && detachedPolicyTargets.isNotEmpty()) {
                throw IOException(
                    "Framework session tool selected detached final presentation without allowDetachedMode: ${detachedPolicyTargets.joinToString(", ") { it.packageName }}",
                )
            }
            return StartDirectSessionRequest(
                plan = AgentDelegationPlan(
                    originalObjective = userObjective,
                    targets = targets,
                    rationale = arguments.optString("reason").trim().ifEmpty { null },
                    usedOverride = false,
                ),
                allowDetachedMode = allowDetachedMode,
            )
        }
    }

    /** Validated request to start direct framework sessions. */
    data class StartDirectSessionRequest(
        val plan: AgentDelegationPlan,
        val allowDetachedMode: Boolean,
    )

    /** Tool specs exposed during the planning phase (session start only). */
    fun buildPlanningToolSpecs(): JSONArray {
        return JSONArray().put(buildStartDirectSessionToolSpec())
    }

    /** Tool specs for resolving pending session questions (list + answer). */
    fun buildQuestionResolutionToolSpecs(): JSONArray {
        return JSONArray()
            .put(buildListSessionsToolSpec())
            .put(buildAnswerQuestionToolSpec())
    }

    /** Full management tool set: question resolution plus attach and cancel. */
    fun buildSessionManagementToolSpecs(): JSONArray {
        return buildQuestionResolutionToolSpecs()
            .put(buildAttachTargetToolSpec())
            .put(buildCancelSessionToolSpec())
    }

    /**
     * Executes one framework-session tool call and returns a success payload
     * for the app-server.
     *
     * @param onSessionStarted invoked with the start result when the start
     *   tool succeeds.
     * @param focusedSessionId optional session id to focus in list snapshots.
     * @throws IOException for unknown tools or invalid arguments.
     */
    fun handleToolCall(
        toolName: String,
        arguments: JSONObject,
        userObjective: String,
        onSessionStarted: ((SessionStartResult) -> Unit)? = null,
        focusedSessionId: String? = null,
    ): JSONObject {
        Log.i(TAG, "handleToolCall tool=$toolName arguments=$arguments")
        return when (toolName) {
            START_DIRECT_SESSION_TOOL -> {
                val request = parseStartDirectSessionArguments(
                    arguments = arguments,
                    userObjective = userObjective,
                    isEligibleTargetPackage = ::isEligibleTargetPackage,
                )
                val startedSession = sessionController.startDirectSession(
                    plan = request.plan,
                    allowDetachedMode = request.allowDetachedMode,
                )
                Log.i(
                    TAG,
                    "Started framework sessions parent=${startedSession.parentSessionId} children=${startedSession.childSessionIds}",
                )
                onSessionStarted?.invoke(startedSession)
                successText(
                    JSONObject()
                        .put("parentSessionId", startedSession.parentSessionId)
                        .put("childSessionIds", JSONArray(startedSession.childSessionIds))
                        .put("plannedTargets", JSONArray(startedSession.plannedTargets))
                        .put("geniePackage", startedSession.geniePackage)
                        .toString(),
                )
            }
            LIST_SESSIONS_TOOL -> {
                val snapshot = sessionController.loadSnapshot(focusedSessionId)
                successText(renderSessionSnapshot(snapshot).toString())
            }
            ANSWER_QUESTION_TOOL -> {
                val sessionId = requireString(arguments, "sessionId")
                val answer = requireString(arguments, "answer")
                val parentSessionId = arguments.optString("parentSessionId").trim().ifEmpty { null }
                sessionController.answerQuestion(sessionId, answer, parentSessionId)
                successText("Answered framework session $sessionId.")
            }
            ATTACH_TARGET_TOOL -> {
                val sessionId = requireString(arguments, "sessionId")
                sessionController.attachTarget(sessionId)
                successText("Requested target attach for framework session $sessionId.")
            }
            CANCEL_SESSION_TOOL -> {
                val sessionId = requireString(arguments, "sessionId")
                sessionController.cancelSession(sessionId)
                successText("Cancelled framework session $sessionId.")
            }
            else -> throw IOException("Unsupported framework session tool: $toolName")
        }
    }

    /** Builds the JSON-schema descriptor for the session-start tool. */
    private fun buildStartDirectSessionToolSpec(): JSONObject {
        return JSONObject()
            .put("name", START_DIRECT_SESSION_TOOL)
            .put(
                "description",
                "Start direct parent and child framework sessions for one or more target Android packages.",
            )
            .put(
                "inputSchema",
                JSONObject()
                    .put("type", "object")
                    .put(
                        "properties",
                        JSONObject()
                            .put(
                                "targets",
                                JSONObject()
                                    .put("type", "array")
                                    .put(
                                        "items",
                                        JSONObject()
                                            .put("type", "object")
                                            .put(
                                                "properties",
                                                JSONObject()
                                                    .put("packageName", stringSchema("Installed target Android package name."))
                                                    .put("objective", stringSchema("Delegated free-form objective for the child Genie."))
                                                    .put(
                                                        "finalPresentationPolicy",
                                                        stringSchema(
                                                            "Required final target presentation: ATTACHED, DETACHED_HIDDEN, DETACHED_SHOWN, or AGENT_CHOICE.",
                                                        ),
                                                    ),
                                            )
                                            .put(
                                                "required",
                                                JSONArray()
                                                    .put("packageName")
                                                    .put("finalPresentationPolicy"),
                                            )
                                            .put("additionalProperties", false),
                                    ),
                            )
                            .put("reason", stringSchema("Short explanation for why these target packages were selected."))
                            .put(
                                "allowDetachedMode",
                                JSONObject()
                                    .put("type", "boolean")
                                    .put("description", "Whether Genie child sessions may use detached target mode."),
                            ),
                    )
                    .put("required", JSONArray().put("targets"))
                    .put("additionalProperties", false),
            )
    }

    /** Builds the descriptor for the session-listing tool (no arguments). */
    private fun buildListSessionsToolSpec(): JSONObject {
        return JSONObject()
            .put("name", LIST_SESSIONS_TOOL)
            .put("description", "List the current Android framework sessions visible to the Agent.")
            .put(
                "inputSchema",
                JSONObject()
                    .put("type", "object")
                    .put("properties", JSONObject())
                    .put("additionalProperties", false),
            )
    }

    /** Builds the descriptor for the question-answer tool. */
    private fun buildAnswerQuestionToolSpec(): JSONObject {
        return JSONObject()
            .put("name", ANSWER_QUESTION_TOOL)
            .put("description", "Answer a waiting Android framework session question.")
            .put(
                "inputSchema",
                JSONObject()
                    .put("type", "object")
                    .put(
                        "properties",
                        JSONObject()
                            .put("sessionId", stringSchema("Framework session id to answer."))
                            .put("answer", stringSchema("Free-form answer text."))
                            .put("parentSessionId", stringSchema("Optional parent framework session id for trace publication.")),
                    )
                    .put("required", JSONArray().put("sessionId").put("answer"))
                    .put("additionalProperties", false),
            )
    }

    /** Builds the descriptor for the target-attach tool. */
    private fun buildAttachTargetToolSpec(): JSONObject {
        return JSONObject()
            .put("name", ATTACH_TARGET_TOOL)
            .put("description", "Request the framework to attach the detached target back to the current display.")
            .put(
                "inputSchema",
                JSONObject()
                    .put("type", "object")
                    .put(
                        "properties",
                        JSONObject().put("sessionId", stringSchema("Framework session id whose target should be attached.")),
                    )
                    .put("required", JSONArray().put("sessionId"))
                    .put("additionalProperties", false),
            )
    }

    /** Builds the descriptor for the session-cancel tool. */
    private fun buildCancelSessionToolSpec(): JSONObject {
        return JSONObject()
            .put("name", CANCEL_SESSION_TOOL)
            .put("description", "Cancel an Android framework session.")
            .put(
                "inputSchema",
                JSONObject()
                    .put("type", "object")
                    .put(
                        "properties",
                        JSONObject().put("sessionId", stringSchema("Framework session id to cancel.")),
                    )
                    .put("required", JSONArray().put("sessionId"))
                    .put("additionalProperties", false),
            )
    }

    /** Serializes a session snapshot into the JSON shape returned to the model. */
    private fun renderSessionSnapshot(snapshot: AgentSnapshot): JSONObject {
        val sessions = JSONArray()
        snapshot.sessions.forEach { session ->
            sessions.put(
                JSONObject()
                    .put("sessionId", session.sessionId)
                    .put("parentSessionId", session.parentSessionId)
                    .put("targetPackage", session.targetPackage)
                    .put("state", session.stateLabel)
                    .put("targetDetached", session.targetDetached)
                    .put("targetPresentation", session.targetPresentationLabel)
                    .put("targetRuntime", session.targetRuntimeLabel)
                    .put(
                        "requiredFinalPresentation",
                        session.requiredFinalPresentationPolicy?.wireValue,
                    ),
            )
        }
        return JSONObject()
            .put("available", snapshot.available)
            .put("selectedGeniePackage", snapshot.selectedGeniePackage)
            .put("selectedSessionId", snapshot.selectedSession?.sessionId)
            .put("parentSessionId", snapshot.parentSession?.sessionId)
            .put("sessions", sessions)
    }

    /** True when the package is not blocklisted and the controller allows it. */
    private fun isEligibleTargetPackage(packageName: String): Boolean {
        if (packageName in DISALLOWED_TARGET_PACKAGES) {
            return false
        }
        return sessionController.canStartSessionForTarget(packageName)
    }

    /** Reads a required non-blank string argument or throws [IOException]. */
    private fun requireString(arguments: JSONObject, fieldName: String): String {
        return arguments.optString(fieldName).trim().ifEmpty {
            throw IOException("Framework session tool requires non-empty $fieldName")
        }
    }

    /** Wraps text in the app-server's success/contentItems result shape. */
    private fun successText(text: String): JSONObject {
        return JSONObject()
            .put("success", true)
            .put(
                "contentItems",
                JSONArray().put(
                    JSONObject()
                        .put("type", "inputText")
                        .put("text", text),
                ),
            )
    }

    /** Convenience builder for a {"type": "string", "description": ...} schema node. */
    private fun stringSchema(description: String): JSONObject {
        return JSONObject()
            .put("type", "string")
            .put("description", description)
    }
}

View File

@@ -1,247 +0,0 @@
package com.openai.codex.agent
import android.util.Log
import java.io.ByteArrayOutputStream
import java.io.Closeable
import java.io.EOFException
import java.io.IOException
import java.net.InetAddress
import java.net.ServerSocket
import java.net.Socket
import java.nio.charset.StandardCharsets
import java.util.Collections
import java.util.UUID
import java.util.concurrent.atomic.AtomicBoolean
/**
 * Minimal loopback-only HTTP server that accepts Codex "responses" POSTs from
 * the local CLI process and forwards the request body through
 * [requestForwarder].
 *
 * The listen path embeds a random secret, so only callers handed [baseUrl]
 * can reach the proxy. Only "POST /v1/responses" is forwarded; other
 * methods/paths get 405/404.
 */
class AgentLocalCodexProxy(
    private val requestForwarder: (String) -> AgentResponsesProxy.HttpResponse,
) : Closeable {
    companion object {
        private const val TAG = "AgentLocalProxy"
    }

    // Random per-instance path prefix acting as a shared secret.
    private val pathSecret = UUID.randomUUID().toString().replace("-", "")
    private val loopbackAddress = InetAddress.getByName("127.0.0.1")
    // Port 0: OS picks a free ephemeral port; bound to loopback only.
    private val serverSocket = ServerSocket(0, 50, loopbackAddress)
    private val closed = AtomicBoolean(false)
    private val clientSockets = Collections.synchronizedSet(mutableSetOf<Socket>())
    private val acceptThread = Thread(::acceptLoop, "AgentLocalProxy")

    val baseUrl: String = "http://${loopbackAddress.hostAddress}:${serverSocket.localPort}/${pathSecret}/v1"

    /** Starts accepting connections on a background thread. */
    fun start() {
        acceptThread.start()
        logInfo("Listening on $baseUrl")
    }

    /** Idempotent shutdown: closes the listener and all active client sockets. */
    override fun close() {
        if (!closed.compareAndSet(false, true)) {
            return
        }
        runCatching { serverSocket.close() }
        synchronized(clientSockets) {
            clientSockets.forEach { socket -> runCatching { socket.close() } }
            clientSockets.clear()
        }
        acceptThread.interrupt()
    }

    // Accepts connections until closed; each client is served on its own thread.
    // NOTE(review): any accept() IOException terminates this loop (and thus the
    // proxy) rather than retrying — confirm that is intentional.
    private fun acceptLoop() {
        while (!closed.get()) {
            val socket = try {
                serverSocket.accept()
            } catch (err: IOException) {
                if (!closed.get()) {
                    logWarn("Failed to accept local proxy connection", err)
                }
                return
            }
            clientSockets += socket
            Thread(
                { handleClient(socket) },
                "AgentLocalProxyClient",
            ).start()
        }
    }

    // Serves one connection: parse, forward, reply; failures become a 502.
    private fun handleClient(socket: Socket) {
        socket.use { client ->
            try {
                val request = readRequest(client)
                logInfo("Forwarding ${request.method} ${request.forwardPath}")
                val response = forwardResponsesRequest(request)
                writeResponse(
                    socket = client,
                    statusCode = response.statusCode,
                    body = response.body,
                    path = request.forwardPath,
                )
            } catch (err: Exception) {
                if (!closed.get()) {
                    logWarn("Local proxy request failed", err)
                    runCatching {
                        writeResponse(
                            socket = client,
                            statusCode = 502,
                            body = err.message ?: err::class.java.simpleName,
                            path = "/error",
                        )
                    }
                }
            } finally {
                clientSockets -= client
            }
        }
    }

    // Validates method/path, then delegates the body to the forwarder.
    private fun forwardResponsesRequest(request: ParsedRequest): AgentResponsesProxy.HttpResponse {
        if (request.method != "POST") {
            return AgentResponsesProxy.HttpResponse(
                statusCode = 405,
                body = "Unsupported local proxy method: ${request.method}",
            )
        }
        if (request.forwardPath != "/v1/responses") {
            return AgentResponsesProxy.HttpResponse(
                statusCode = 404,
                body = "Unsupported local proxy path: ${request.forwardPath}",
            )
        }
        return requestForwarder(request.body.orEmpty())
    }

    /**
     * Reads and parses one HTTP/1.1 request from the socket.
     *
     * Headers are scanned byte-by-byte until the CRLFCRLF terminator via a
     * small state machine; the body length comes from Content-Length
     * (chunked transfer encoding is rejected).
     *
     * @throws EOFException on premature stream end.
     * @throws IOException on malformed request lines, chunked bodies, or a
     *   path missing the secret prefix.
     */
    private fun readRequest(socket: Socket): ParsedRequest {
        val input = socket.getInputStream()
        val headerBuffer = ByteArrayOutputStream()
        // matched counts consecutive bytes of the \r\n\r\n terminator seen so far.
        var matched = 0
        while (matched < 4) {
            val next = input.read()
            if (next == -1) {
                throw EOFException("unexpected EOF while reading local proxy request headers")
            }
            headerBuffer.write(next)
            matched = when {
                matched == 0 && next == '\r'.code -> 1
                matched == 1 && next == '\n'.code -> 2
                matched == 2 && next == '\r'.code -> 3
                matched == 3 && next == '\n'.code -> 4
                // A stray \r restarts the terminator match at state 1.
                next == '\r'.code -> 1
                else -> 0
            }
        }
        val headerBytes = headerBuffer.toByteArray()
        // Strip the trailing \r\n\r\n before splitting header lines.
        val headerText = headerBytes
            .copyOfRange(0, headerBytes.size - 4)
            .toString(StandardCharsets.US_ASCII)
        val lines = headerText.split("\r\n")
        val requestLine = lines.firstOrNull()
            ?: throw IOException("local proxy request line missing")
        val requestParts = requestLine.split(" ", limit = 3)
        if (requestParts.size < 2) {
            throw IOException("invalid local proxy request line: $requestLine")
        }
        val headers = mutableMapOf<String, String>()
        lines.drop(1).forEach { line ->
            val separatorIndex = line.indexOf(':')
            if (separatorIndex <= 0) {
                return@forEach
            }
            // Header names are case-insensitive; normalize to lowercase keys.
            val name = line.substring(0, separatorIndex).trim().lowercase()
            val value = line.substring(separatorIndex + 1).trim()
            headers[name] = value
        }
        if (headers["transfer-encoding"]?.contains("chunked", ignoreCase = true) == true) {
            throw IOException("chunked local proxy requests are unsupported")
        }
        val contentLength = headers["content-length"]?.toIntOrNull() ?: 0
        val bodyBytes = ByteArray(contentLength)
        var offset = 0
        while (offset < bodyBytes.size) {
            val read = input.read(bodyBytes, offset, bodyBytes.size - offset)
            if (read == -1) {
                throw EOFException("unexpected EOF while reading local proxy request body")
            }
            offset += read
        }
        val rawPath = requestParts[1]
        val forwardPath = normalizeForwardPath(rawPath)
        return ParsedRequest(
            method = requestParts[0],
            forwardPath = forwardPath,
            body = if (bodyBytes.isEmpty()) null else bodyBytes.toString(StandardCharsets.UTF_8),
        )
    }

    // Verifies the secret path prefix and strips it, yielding the forward path.
    private fun normalizeForwardPath(rawPath: String): String {
        val expectedPrefix = "/$pathSecret"
        if (!rawPath.startsWith(expectedPrefix)) {
            throw IOException("unexpected local proxy path: $rawPath")
        }
        val strippedPath = rawPath.removePrefix(expectedPrefix)
        return if (strippedPath.isBlank()) "/" else strippedPath
    }

    // Writes a complete HTTP/1.1 response with Connection: close.
    // Content-Type is inferred: SSE for /v1/responses, JSON when the body
    // looks like JSON, plain text otherwise.
    private fun writeResponse(
        socket: Socket,
        statusCode: Int,
        body: String,
        path: String,
    ) {
        val bodyBytes = body.toByteArray(StandardCharsets.UTF_8)
        val contentType = when {
            path.startsWith("/v1/responses") -> "text/event-stream; charset=utf-8"
            body.trimStart().startsWith("{") || body.trimStart().startsWith("[") -> {
                "application/json; charset=utf-8"
            }
            else -> "text/plain; charset=utf-8"
        }
        val responseHeaders = buildString {
            append("HTTP/1.1 $statusCode ${reasonPhrase(statusCode)}\r\n")
            append("Content-Type: $contentType\r\n")
            append("Content-Length: ${bodyBytes.size}\r\n")
            append("Connection: close\r\n")
            append("\r\n")
        }
        val output = socket.getOutputStream()
        output.write(responseHeaders.toByteArray(StandardCharsets.US_ASCII))
        output.write(bodyBytes)
        output.flush()
    }

    // Standard reason phrases for the subset of status codes the proxy emits.
    private fun reasonPhrase(statusCode: Int): String {
        return when (statusCode) {
            200 -> "OK"
            400 -> "Bad Request"
            401 -> "Unauthorized"
            403 -> "Forbidden"
            404 -> "Not Found"
            500 -> "Internal Server Error"
            502 -> "Bad Gateway"
            503 -> "Service Unavailable"
            else -> "Response"
        }
    }

    // Logging is wrapped in runCatching: android.util.Log is unavailable in
    // plain JVM unit tests.
    private fun logInfo(message: String) {
        runCatching { Log.i(TAG, message) }
    }

    private fun logWarn(
        message: String,
        err: Throwable,
    ) {
        runCatching { Log.w(TAG, message, err) }
    }

    // One parsed inbound HTTP request (body is null when Content-Length is 0).
    private data class ParsedRequest(
        val method: String,
        val forwardPath: String,
        val body: String?,
    )
}

View File

@@ -1,16 +0,0 @@
package com.openai.codex.agent
/**
 * One selectable model option, including its supported reasoning-effort
 * levels and the default effort.
 */
data class AgentModelOption(
    val id: String,
    val model: String,
    val displayName: String,
    val description: String,
    val supportedReasoningEfforts: List<AgentReasoningEffortOption>,
    val defaultReasoningEffort: String,
    // True for the option preselected when the user has not chosen one.
    val isDefault: Boolean,
)
/** A single reasoning-effort level and its human-readable description. */
data class AgentReasoningEffortOption(
    val reasoningEffort: String,
    val description: String,
)

View File

@@ -1,198 +0,0 @@
package com.openai.codex.agent
import android.app.agent.AgentSessionInfo
/**
 * Convenience aliases for the framework's [AgentSessionInfo] state constants,
 * so callers need not reference the framework class directly.
 */
object AgentSessionStateValues {
    const val CREATED = AgentSessionInfo.STATE_CREATED
    const val RUNNING = AgentSessionInfo.STATE_RUNNING
    const val WAITING_FOR_USER = AgentSessionInfo.STATE_WAITING_FOR_USER
    const val COMPLETED = AgentSessionInfo.STATE_COMPLETED
    const val CANCELLED = AgentSessionInfo.STATE_CANCELLED
    const val FAILED = AgentSessionInfo.STATE_FAILED
    const val QUEUED = AgentSessionInfo.STATE_QUEUED
}
/** Snapshot of one child session, as consumed by the parent-session rollup. */
data class ParentSessionChildSummary(
    val sessionId: String,
    val targetPackage: String?,
    // AgentSessionInfo.STATE_* value.
    val state: Int,
    val targetPresentation: Int,
    val requiredFinalPresentationPolicy: SessionFinalPresentationPolicy?,
    val latestResult: String?,
    val latestError: String?,
)
/**
 * Aggregated parent-session view: derived state, terminal messages, and any
 * child sessions whose targets still need to be re-attached.
 */
data class ParentSessionRollup(
    val state: Int,
    val resultMessage: String?,
    val errorMessage: String?,
    val sessionsToAttach: List<String>,
)
/**
 * Derives a parent session's state and user-facing messages from its child
 * sessions.
 *
 * State precedence (see [computeParentState]): waiting > running/queued >
 * failed > completed > cancelled > created. Terminal presentation-policy
 * mismatches either keep the parent RUNNING (ATTACHED policy still pending
 * re-attach) or fail it (detached policies not met).
 */
object AgentParentSessionAggregator {
    /** Computes the rollup for the given child summaries. */
    fun rollup(childSessions: List<ParentSessionChildSummary>): ParentSessionRollup {
        val baseState = computeParentState(childSessions.map(ParentSessionChildSummary::state))
        // Non-terminal parent: no messages, nothing to attach yet.
        if (
            baseState == AgentSessionInfo.STATE_CREATED ||
            baseState == AgentSessionInfo.STATE_RUNNING ||
            baseState == AgentSessionInfo.STATE_WAITING_FOR_USER ||
            baseState == AgentSessionInfo.STATE_QUEUED
        ) {
            return ParentSessionRollup(
                state = baseState,
                resultMessage = null,
                errorMessage = null,
                sessionsToAttach = emptyList(),
            )
        }
        val terminalPresentationMismatches = childSessions.mapNotNull { childSession ->
            childSession.presentationMismatch()
        }
        // ATTACHED-policy mismatches are recoverable: request a re-attach.
        val sessionsToAttach = terminalPresentationMismatches
            .filter { it.requiredPolicy == SessionFinalPresentationPolicy.ATTACHED }
            .map(PresentationMismatch::sessionId)
        // Mismatches against detached policies cannot be recovered here.
        val blockingMismatches = terminalPresentationMismatches
            .filterNot { it.requiredPolicy == SessionFinalPresentationPolicy.ATTACHED }
        if (sessionsToAttach.isNotEmpty() && baseState == AgentSessionInfo.STATE_COMPLETED) {
            // Keep the parent RUNNING until the pending attaches happen.
            return ParentSessionRollup(
                state = AgentSessionInfo.STATE_RUNNING,
                resultMessage = null,
                errorMessage = null,
                sessionsToAttach = sessionsToAttach,
            )
        }
        if (blockingMismatches.isNotEmpty()) {
            return ParentSessionRollup(
                state = AgentSessionInfo.STATE_FAILED,
                resultMessage = null,
                errorMessage = buildPresentationMismatchError(blockingMismatches),
                sessionsToAttach = emptyList(),
            )
        }
        return when (baseState) {
            AgentSessionInfo.STATE_COMPLETED -> ParentSessionRollup(
                state = baseState,
                resultMessage = buildParentResult(childSessions),
                errorMessage = null,
                sessionsToAttach = emptyList(),
            )
            AgentSessionInfo.STATE_FAILED -> ParentSessionRollup(
                state = baseState,
                resultMessage = null,
                errorMessage = buildParentError(childSessions),
                sessionsToAttach = emptyList(),
            )
            else -> ParentSessionRollup(
                state = baseState,
                resultMessage = null,
                errorMessage = null,
                sessionsToAttach = emptyList(),
            )
        }
    }

    // Folds child states into one parent state by precedence:
    // waiting > running/queued > failed > completed > cancelled > created.
    private fun computeParentState(childStates: List<Int>): Int {
        var anyWaiting = false
        var anyRunning = false
        var anyQueued = false
        var anyFailed = false
        var anyCancelled = false
        var anyCompleted = false
        childStates.forEach { state ->
            when (state) {
                AgentSessionInfo.STATE_WAITING_FOR_USER -> anyWaiting = true
                AgentSessionInfo.STATE_RUNNING -> anyRunning = true
                AgentSessionInfo.STATE_QUEUED -> anyQueued = true
                AgentSessionInfo.STATE_FAILED -> anyFailed = true
                AgentSessionInfo.STATE_CANCELLED -> anyCancelled = true
                AgentSessionInfo.STATE_COMPLETED -> anyCompleted = true
            }
        }
        return when {
            anyWaiting -> AgentSessionInfo.STATE_WAITING_FOR_USER
            anyRunning || anyQueued -> AgentSessionInfo.STATE_RUNNING
            anyFailed -> AgentSessionInfo.STATE_FAILED
            anyCompleted -> AgentSessionInfo.STATE_COMPLETED
            anyCancelled -> AgentSessionInfo.STATE_CANCELLED
            else -> AgentSessionInfo.STATE_CREATED
        }
    }

    // Builds the success summary, listing each child's result/error/state.
    private fun buildParentResult(childSessions: List<ParentSessionChildSummary>): String {
        return buildString {
            append("Completed delegated session")
            childSessions.forEach { childSession ->
                append("; ")
                append(childSession.targetPackage ?: childSession.sessionId)
                append(": ")
                append(
                    childSession.latestResult
                        ?: childSession.latestError
                        ?: stateToString(childSession.state),
                )
            }
        }
    }

    // Builds the failure summary, listing only FAILED children.
    private fun buildParentError(childSessions: List<ParentSessionChildSummary>): String {
        return buildString {
            append("Delegated session failed")
            childSessions.forEach { childSession ->
                if (childSession.state != AgentSessionInfo.STATE_FAILED) {
                    return@forEach
                }
                append("; ")
                append(childSession.targetPackage ?: childSession.sessionId)
                append(": ")
                append(childSession.latestError ?: stateToString(childSession.state))
            }
        }
    }

    // Builds the error message for unrecoverable presentation mismatches.
    private fun buildPresentationMismatchError(mismatches: List<PresentationMismatch>): String {
        return buildString {
            append("Delegated session completed without the required final presentation")
            mismatches.forEach { mismatch ->
                append("; ")
                append(mismatch.targetPackage ?: mismatch.sessionId)
                append(": required ")
                append(mismatch.requiredPolicy.wireValue)
                append(", actual ")
                append(targetPresentationToString(mismatch.actualPresentation))
            }
        }
    }

    // Human-readable name for an AgentSessionInfo state constant.
    private fun stateToString(state: Int): String {
        return when (state) {
            AgentSessionInfo.STATE_CREATED -> "CREATED"
            AgentSessionInfo.STATE_RUNNING -> "RUNNING"
            AgentSessionInfo.STATE_WAITING_FOR_USER -> "WAITING_FOR_USER"
            AgentSessionInfo.STATE_QUEUED -> "QUEUED"
            AgentSessionInfo.STATE_COMPLETED -> "COMPLETED"
            AgentSessionInfo.STATE_CANCELLED -> "CANCELLED"
            AgentSessionInfo.STATE_FAILED -> "FAILED"
            else -> state.toString()
        }
    }

    // Non-null only when the child COMPLETED but its final presentation does
    // not satisfy its required policy.
    private fun ParentSessionChildSummary.presentationMismatch(): PresentationMismatch? {
        val requiredPolicy = requiredFinalPresentationPolicy ?: return null
        if (state != AgentSessionInfo.STATE_COMPLETED || requiredPolicy.matches(targetPresentation)) {
            return null
        }
        return PresentationMismatch(
            sessionId = sessionId,
            targetPackage = targetPackage,
            requiredPolicy = requiredPolicy,
            actualPresentation = targetPresentation,
        )
    }
}
/** A completed child whose final target presentation violates its required policy. */
private data class PresentationMismatch(
    val sessionId: String,
    val targetPackage: String?,
    val requiredPolicy: SessionFinalPresentationPolicy,
    val actualPresentation: Int,
)

View File

@@ -1,471 +0,0 @@
package com.openai.codex.agent
import android.app.agent.AgentManager
import android.content.Context
import android.util.Log
import com.openai.codex.bridge.HostedCodexConfig
import com.openai.codex.bridge.SessionExecutionSettings
import java.io.BufferedWriter
import java.io.Closeable
import java.io.File
import java.io.IOException
import java.io.InterruptedIOException
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.atomic.AtomicBoolean
import kotlin.concurrent.thread
import org.json.JSONArray
import org.json.JSONObject
object AgentPlannerRuntimeManager {
private const val TAG = "AgentPlannerRuntime"
private val activePlannerSessions = ConcurrentHashMap<String, Boolean>()
/**
 * Runs a one-shot planner turn against a dedicated, short-lived Codex
 * app-server process and returns the assistant's final text.
 *
 * At most one planner runtime may be active per parent framework session id;
 * a concurrent second call for the same id fails fast with
 * IllegalStateException. The runtime (process, local proxy, temp home) is
 * torn down via use {} when the call returns or throws.
 *
 * @throws IOException when [frameworkSessionId] is null/blank.
 */
fun requestText(
    context: Context,
    instructions: String,
    prompt: String,
    outputSchema: JSONObject? = null,
    requestUserInputHandler: ((JSONArray) -> JSONObject)? = null,
    executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
    requestTimeoutMs: Long = 90_000L,
    frameworkSessionId: String? = null,
): String {
    val applicationContext = context.applicationContext
    // Normalize blank ids to "missing" before validating.
    val plannerSessionId = frameworkSessionId?.trim()?.ifEmpty { null }
        ?: throw IOException("Planner runtime requires a parent session id")
    // putIfAbsent returns null only for the winner of the reservation race.
    check(activePlannerSessions.putIfAbsent(plannerSessionId, true) == null) {
        "Planner runtime already active for parent session $plannerSessionId"
    }
    try {
        AgentPlannerRuntime(
            context = applicationContext,
            frameworkSessionId = plannerSessionId,
        ).use { runtime ->
            return runtime.requestText(
                instructions = instructions,
                prompt = prompt,
                outputSchema = outputSchema,
                requestUserInputHandler = requestUserInputHandler,
                executionSettings = executionSettings,
                requestTimeoutMs = requestTimeoutMs,
            )
        }
    } finally {
        // Always release the reservation, even when the runtime throws.
        activePlannerSessions.remove(plannerSessionId)
    }
}
private class AgentPlannerRuntime(
private val context: Context,
private val frameworkSessionId: String?,
) : Closeable {
companion object {
    // Timeout for control-plane RPCs (initialize, thread/turn setup).
    private const val REQUEST_TIMEOUT_MS = 30_000L
    // Keep the Rust app-server's stderr quiet by default.
    private const val AGENT_APP_SERVER_RUST_LOG = "warn"
}

private val requestIdSequence = AtomicInteger(1)
// request id -> single-slot queue the stdout pump offers the reply into.
private val pendingResponses = ConcurrentHashMap<String, LinkedBlockingQueue<JSONObject>>()
private val notifications = LinkedBlockingQueue<JSONObject>()
private lateinit var process: Process
private lateinit var writer: BufferedWriter
// Per-session throwaway CODEX_HOME directory; deleted on close().
private lateinit var codexHome: File
private val closing = AtomicBoolean(false)
private var stdoutThread: Thread? = null
private var stderrThread: Thread? = null
private var localProxy: AgentLocalCodexProxy? = null
fun requestText(
instructions: String,
prompt: String,
outputSchema: JSONObject?,
requestUserInputHandler: ((JSONArray) -> JSONObject)?,
executionSettings: SessionExecutionSettings,
requestTimeoutMs: Long,
): String {
startProcess()
initialize()
val threadId = startThread(
instructions = instructions,
executionSettings = executionSettings,
)
startTurn(
threadId = threadId,
prompt = prompt,
outputSchema = outputSchema,
executionSettings = executionSettings,
)
return waitForTurnCompletion(requestUserInputHandler, requestTimeoutMs)
}
override fun close() {
closing.set(true)
stdoutThread?.interrupt()
stderrThread?.interrupt()
if (::writer.isInitialized) {
runCatching { writer.close() }
}
localProxy?.close()
if (::codexHome.isInitialized) {
runCatching { codexHome.deleteRecursively() }
}
if (::process.isInitialized) {
runCatching { process.destroy() }
}
}
private fun startProcess() {
codexHome = File(context.cacheDir, "planner-codex-home/$frameworkSessionId").apply {
deleteRecursively()
mkdirs()
}
localProxy = AgentLocalCodexProxy { requestBody ->
forwardResponsesRequest(requestBody)
}.also(AgentLocalCodexProxy::start)
HostedCodexConfig.write(
context,
codexHome,
localProxy?.baseUrl
?: throw IOException("planner local proxy did not start"),
)
process = ProcessBuilder(
listOf(
CodexCliBinaryLocator.resolve(context).absolutePath,
"-c",
"enable_request_compression=false",
"app-server",
"--listen",
"stdio://",
),
).apply {
environment()["CODEX_HOME"] = codexHome.absolutePath
environment()["RUST_LOG"] = AGENT_APP_SERVER_RUST_LOG
}.start()
writer = process.outputStream.bufferedWriter()
startStdoutPump()
startStderrPump()
}
private fun initialize() {
request(
method = "initialize",
params = JSONObject()
.put(
"clientInfo",
JSONObject()
.put("name", "android_agent_planner")
.put("title", "Android Agent Planner")
.put("version", "0.1.0"),
)
.put("capabilities", JSONObject().put("experimentalApi", true)),
)
notify("initialized", JSONObject())
}
private fun startThread(
instructions: String,
executionSettings: SessionExecutionSettings,
): String {
val params = JSONObject()
.put("approvalPolicy", "never")
.put("sandbox", "read-only")
.put("ephemeral", true)
.put("cwd", context.filesDir.absolutePath)
.put("serviceName", "android_agent_planner")
.put("baseInstructions", instructions)
executionSettings.model
?.takeIf(String::isNotBlank)
?.let { params.put("model", it) }
val result = request(
method = "thread/start",
params = params,
)
return result.getJSONObject("thread").getString("id")
}
private fun startTurn(
threadId: String,
prompt: String,
outputSchema: JSONObject?,
executionSettings: SessionExecutionSettings,
) {
val turnParams = JSONObject()
.put("threadId", threadId)
.put(
"input",
JSONArray().put(
JSONObject()
.put("type", "text")
.put("text", prompt),
),
)
executionSettings.model
?.takeIf(String::isNotBlank)
?.let { turnParams.put("model", it) }
executionSettings.reasoningEffort
?.takeIf(String::isNotBlank)
?.let { turnParams.put("effort", it) }
if (outputSchema != null) {
turnParams.put("outputSchema", outputSchema)
}
request(
method = "turn/start",
params = turnParams,
)
}
private fun waitForTurnCompletion(
requestUserInputHandler: ((JSONArray) -> JSONObject)?,
requestTimeoutMs: Long,
): String {
val streamedAgentMessages = mutableMapOf<String, StringBuilder>()
var finalAgentMessage: String? = null
val deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(requestTimeoutMs)
while (true) {
val remainingNanos = deadline - System.nanoTime()
if (remainingNanos <= 0L) {
throw IOException("Timed out waiting for planner turn completion")
}
val notification = notifications.poll(remainingNanos, TimeUnit.NANOSECONDS)
if (notification == null) {
checkProcessAlive()
continue
}
if (notification.has("id") && notification.has("method")) {
handleServerRequest(notification, requestUserInputHandler)
continue
}
val params = notification.optJSONObject("params") ?: JSONObject()
when (notification.optString("method")) {
"item/agentMessage/delta" -> {
val itemId = params.optString("itemId")
if (itemId.isNotBlank()) {
streamedAgentMessages.getOrPut(itemId, ::StringBuilder)
.append(params.optString("delta"))
}
}
"item/completed" -> {
val item = params.optJSONObject("item") ?: continue
if (item.optString("type") == "agentMessage") {
val itemId = item.optString("id")
val text = item.optString("text").ifBlank {
streamedAgentMessages[itemId]?.toString().orEmpty()
}
if (text.isNotBlank()) {
finalAgentMessage = text
}
}
}
"turn/completed" -> {
val turn = params.optJSONObject("turn") ?: JSONObject()
return when (turn.optString("status")) {
"completed" -> finalAgentMessage?.takeIf(String::isNotBlank)
?: throw IOException("Planner turn completed without an assistant message")
"interrupted" -> throw IOException("Planner turn interrupted")
else -> throw IOException(
turn.opt("error")?.toString()
?: "Planner turn failed with status ${turn.optString("status", "unknown")}",
)
}
}
}
}
}
private fun handleServerRequest(
message: JSONObject,
requestUserInputHandler: ((JSONArray) -> JSONObject)?,
) {
val requestId = message.opt("id") ?: return
val method = message.optString("method", "unknown")
val params = message.optJSONObject("params") ?: JSONObject()
when (method) {
"item/tool/requestUserInput" -> {
if (requestUserInputHandler == null) {
sendError(
requestId = requestId,
code = -32601,
message = "No Agent user-input handler registered for $method",
)
return
}
val questions = params.optJSONArray("questions") ?: JSONArray()
val result = runCatching { requestUserInputHandler(questions) }
.getOrElse { err ->
sendError(
requestId = requestId,
code = -32000,
message = err.message ?: "Agent user input request failed",
)
return
}
sendResult(requestId, result)
}
else -> {
sendError(
requestId = requestId,
code = -32601,
message = "Unsupported planner app-server request: $method",
)
}
}
}
private fun forwardResponsesRequest(requestBody: String): AgentResponsesProxy.HttpResponse {
val activeFrameworkSessionId = frameworkSessionId
check(!activeFrameworkSessionId.isNullOrBlank()) {
"Planner runtime requires a framework session id for /responses transport"
}
val agentManager = context.getSystemService(AgentManager::class.java)
?: throw IOException("AgentManager unavailable for framework session transport")
return AgentResponsesProxy.sendResponsesRequestThroughFramework(
agentManager = agentManager,
sessionId = activeFrameworkSessionId,
context = context,
requestBody = requestBody,
)
}
private fun request(
method: String,
params: JSONObject?,
): JSONObject {
val requestId = requestIdSequence.getAndIncrement().toString()
val responseQueue = LinkedBlockingQueue<JSONObject>(1)
pendingResponses[requestId] = responseQueue
try {
val message = JSONObject()
.put("id", requestId)
.put("method", method)
if (params != null) {
message.put("params", params)
}
sendMessage(message)
val response = responseQueue.poll(REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)
?: throw IOException("Timed out waiting for $method response")
val error = response.optJSONObject("error")
if (error != null) {
throw IOException("$method failed: ${error.optString("message", error.toString())}")
}
return response.optJSONObject("result") ?: JSONObject()
} finally {
pendingResponses.remove(requestId)
}
}
private fun notify(
method: String,
params: JSONObject,
) {
sendMessage(
JSONObject()
.put("method", method)
.put("params", params),
)
}
private fun sendResult(
requestId: Any,
result: JSONObject,
) {
sendMessage(
JSONObject()
.put("id", requestId)
.put("result", result),
)
}
private fun sendError(
requestId: Any,
code: Int,
message: String,
) {
sendMessage(
JSONObject()
.put("id", requestId)
.put(
"error",
JSONObject()
.put("code", code)
.put("message", message),
),
)
}
private fun sendMessage(message: JSONObject) {
writer.write(message.toString())
writer.newLine()
writer.flush()
}
private fun startStdoutPump() {
stdoutThread = thread(name = "AgentPlannerStdout-$frameworkSessionId") {
try {
process.inputStream.bufferedReader().useLines { lines ->
lines.forEach { line ->
if (line.isBlank()) {
return@forEach
}
val message = runCatching { JSONObject(line) }
.getOrElse { err ->
Log.w(TAG, "Failed to parse planner app-server stdout line", err)
return@forEach
}
if (message.has("id") && !message.has("method")) {
pendingResponses[message.get("id").toString()]?.offer(message)
} else {
notifications.offer(message)
}
}
}
} catch (err: InterruptedIOException) {
if (!closing.get()) {
Log.w(TAG, "Planner stdout pump interrupted unexpectedly", err)
}
} catch (err: IOException) {
if (!closing.get()) {
Log.w(TAG, "Planner stdout pump failed", err)
}
}
}
}
private fun startStderrPump() {
stderrThread = thread(name = "AgentPlannerStderr-$frameworkSessionId") {
try {
process.errorStream.bufferedReader().useLines { lines ->
lines.forEach { line ->
if (line.contains(" ERROR ") || line.startsWith("ERROR")) {
Log.e(TAG, line)
} else if (line.contains(" WARN ") || line.startsWith("WARN")) {
Log.w(TAG, line)
}
}
}
} catch (err: InterruptedIOException) {
if (!closing.get()) {
Log.w(TAG, "Planner stderr pump interrupted unexpectedly", err)
}
} catch (err: IOException) {
if (!closing.get()) {
Log.w(TAG, "Planner stderr pump failed", err)
}
}
}
}
private fun checkProcessAlive() {
if (!process.isAlive) {
throw IOException("Planner app-server exited with code ${process.exitValue()}")
}
}
}
}

View File

@@ -1,79 +0,0 @@
package com.openai.codex.agent
import android.app.Notification
import android.app.NotificationChannel
import android.app.NotificationManager
import android.app.PendingIntent
import android.content.Context
import android.content.Intent
import android.os.Build
/**
 * Posts and clears high-priority notifications for questions that a Codex
 * Agent session needs the user to answer. Tapping the notification opens
 * [SessionDetailActivity] for the owning session. One notification per
 * session id (derived from the id's hash code).
 */
object AgentQuestionNotifier {
private const val CHANNEL_ID = "codex_agent_questions"
private const val CHANNEL_NAME = "Codex Agent Questions"
/** Shows (or replaces) the question notification for [sessionId]. */
fun showQuestion(
context: Context,
sessionId: String,
targetPackage: String?,
question: String,
) {
// No NotificationManager (e.g. restricted context) -> silently no-op.
val notificationManager = context.getSystemService(NotificationManager::class.java) ?: return
ensureChannel(notificationManager)
val notification = buildNotification(context, sessionId, targetPackage, question)
notificationManager.notify(notificationId(sessionId), notification)
}
/** Removes the question notification for [sessionId], if any. */
fun cancel(context: Context, sessionId: String) {
context.getSystemService(NotificationManager::class.java)
?.cancel(notificationId(sessionId))
}
// Builds the sticky, big-text question notification that deep-links into
// the session detail screen.
private fun buildNotification(
context: Context,
sessionId: String,
targetPackage: String?,
question: String,
): Notification {
val title = if (targetPackage != null) "Question for $targetPackage" else "Question for Codex Agent"
val launchIntent = Intent(context, SessionDetailActivity::class.java)
launchIntent.putExtra(SessionDetailActivity.EXTRA_SESSION_ID, sessionId)
launchIntent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK or Intent.FLAG_ACTIVITY_SINGLE_TOP)
// Per-session request code keeps PendingIntents for different sessions distinct.
val contentIntent = PendingIntent.getActivity(
context,
notificationId(sessionId),
launchIntent,
PendingIntent.FLAG_UPDATE_CURRENT or PendingIntent.FLAG_IMMUTABLE,
)
val builder = Notification.Builder(context, CHANNEL_ID)
builder.setSmallIcon(android.R.drawable.ic_dialog_info)
builder.setContentTitle(title)
builder.setContentText(question)
builder.setStyle(Notification.BigTextStyle().bigText(question))
builder.setContentIntent(contentIntent)
// Ongoing + non-auto-cancel: the question stays until explicitly cancelled.
builder.setAutoCancel(false)
builder.setOngoing(true)
return builder.build()
}
// Lazily creates the notification channel (API 26+ only); idempotent.
private fun ensureChannel(manager: NotificationManager) {
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.O) return
if (manager.getNotificationChannel(CHANNEL_ID) != null) return
val channel = NotificationChannel(
CHANNEL_ID,
CHANNEL_NAME,
NotificationManager.IMPORTANCE_HIGH,
)
channel.description = "Questions that need user input for Codex Agent sessions"
channel.setShowBadge(true)
manager.createNotificationChannel(channel)
}
// Stable per-session notification id. Hash collisions between session ids
// would share a notification slot — acceptable for this use case.
private fun notificationId(sessionId: String): Int = sessionId.hashCode()
}

View File

@@ -1,328 +0,0 @@
package com.openai.codex.agent
import android.app.agent.AgentManager
import android.content.Context
import android.os.Bundle
import android.util.Log
import com.openai.codex.bridge.FrameworkSessionTransportCompat
import java.io.File
import java.io.IOException
import java.net.HttpURLConnection
import java.net.SocketException
import java.net.URL
import java.nio.charset.StandardCharsets
import org.json.JSONObject
/**
 * Proxies Codex "/responses" API calls to the upstream OpenAI/ChatGPT backend,
 * either directly over HttpURLConnection or tunnelled through a framework
 * agent session's streaming transport.
 *
 * Auth material is read from `codex-home/auth.json` under the app's files dir
 * on every call (no caching).
 */
object AgentResponsesProxy {
private const val TAG = "AgentResponsesProxy"
private const val CONNECT_TIMEOUT_MS = 30_000
// 0 disables the read timeout: responses are server-sent event streams of
// unbounded duration.
private const val READ_TIMEOUT_MS = 0
private const val DEFAULT_OPENAI_BASE_URL = "https://api.openai.com/v1"
private const val DEFAULT_CHATGPT_BASE_URL = "https://chatgpt.com/backend-api/codex"
private const val DEFAULT_ORIGINATOR = "codex_cli_rs"
private const val DEFAULT_USER_AGENT = "codex_cli_rs/android_agent_bridge"
private const val HEADER_AUTHORIZATION = "Authorization"
private const val HEADER_CONTENT_TYPE = "Content-Type"
private const val HEADER_ACCEPT = "Accept"
private const val HEADER_ACCEPT_ENCODING = "Accept-Encoding"
private const val HEADER_CHATGPT_ACCOUNT_ID = "ChatGPT-Account-ID"
private const val HEADER_ORIGINATOR = "originator"
private const val HEADER_USER_AGENT = "User-Agent"
private const val HEADER_VALUE_BEARER_PREFIX = "Bearer "
private const val HEADER_VALUE_APPLICATION_JSON = "application/json"
private const val HEADER_VALUE_TEXT_EVENT_STREAM = "text/event-stream"
private const val HEADER_VALUE_IDENTITY = "identity"
// Point-in-time view of auth.json: authMode is "apiKey" or "chatgpt";
// accountId is only populated in chatgpt mode (and may still be null).
internal data class AuthSnapshot(
val authMode: String,
val bearerToken: String,
val accountId: String?,
)
// Minimal HTTP result surfaced to callers: status plus body decoded as text.
data class HttpResponse(
val statusCode: Int,
val body: String,
)
// Split of a responses base URL into the scheme://host[:port] part and the
// path suffix, as required by the framework transport API.
internal data class FrameworkTransportTarget(
val baseUrl: String,
val responsesPath: String,
)
/**
 * Sends [requestBody] directly to the provider's /responses endpoint using
 * the auth material on disk. Blocking; returns status + body.
 * @throws IOException if auth.json is missing/invalid or any network phase fails.
 */
fun sendResponsesRequest(
context: Context,
requestBody: String,
): HttpResponse {
val authSnapshot = loadAuthSnapshot(File(context.filesDir, "codex-home/auth.json"))
// "provider-default" is a sentinel that buildResponsesBaseUrl resolves to
// the OpenAI or ChatGPT default depending on auth mode.
val upstreamUrl = buildResponsesUrl(upstreamBaseUrl = "provider-default", authMode = authSnapshot.authMode)
val requestBodyBytes = requestBody.toByteArray(StandardCharsets.UTF_8)
Log.i(
TAG,
"Proxying /v1/responses -> $upstreamUrl (auth_mode=${authSnapshot.authMode}, bytes=${requestBodyBytes.size})",
)
return executeRequest(upstreamUrl, requestBodyBytes, authSnapshot)
}
/**
 * Sends [requestBody] to /responses via the framework session's streaming
 * transport instead of a direct socket: first pushes the session network
 * config (base URL + default headers), then executes the streaming POST.
 * @throws IOException if auth.json is missing/invalid.
 */
fun sendResponsesRequestThroughFramework(
agentManager: AgentManager,
sessionId: String,
context: Context,
requestBody: String,
): HttpResponse {
val authSnapshot = loadAuthSnapshot(File(context.filesDir, "codex-home/auth.json"))
val requestBodyBytes = requestBody.toByteArray(StandardCharsets.UTF_8)
val transportTarget = buildFrameworkTransportTarget(
buildResponsesBaseUrl(upstreamBaseUrl = "provider-default", authMode = authSnapshot.authMode),
)
Log.i(
TAG,
"Proxying /v1/responses via framework session $sessionId -> ${transportTarget.baseUrl}${transportTarget.responsesPath} (auth_mode=${authSnapshot.authMode}, bytes=${requestBodyBytes.size})",
)
// NOTE: this re-reads auth.json inside buildFrameworkSessionNetworkConfig,
// so the snapshot used for headers could in principle differ from the one
// read above — acceptable because both reads happen back-to-back.
FrameworkSessionTransportCompat.setSessionNetworkConfig(
agentManager = agentManager,
sessionId = sessionId,
config = buildFrameworkSessionNetworkConfig(
context = context,
upstreamBaseUrl = "provider-default",
),
)
val response = FrameworkSessionTransportCompat.executeStreamingRequest(
agentManager = agentManager,
sessionId = sessionId,
request = FrameworkSessionTransportCompat.HttpRequest(
method = "POST",
path = transportTarget.responsesPath,
headers = buildResponsesRequestHeaders(),
body = requestBodyBytes,
),
)
Log.i(
TAG,
"Framework responses proxy completed status=${response.statusCode} response_bytes=${response.body.size}",
)
return HttpResponse(
statusCode = response.statusCode,
body = response.bodyString,
)
}
// Assembles the per-session network config (resolved base URL, auth/default
// headers, timeouts) that the framework transport applies to its requests.
internal fun buildFrameworkSessionNetworkConfig(
context: Context,
upstreamBaseUrl: String,
): FrameworkSessionTransportCompat.SessionNetworkConfig {
val authSnapshot = loadAuthSnapshot(File(context.filesDir, "codex-home/auth.json"))
val transportTarget = buildFrameworkTransportTarget(
buildResponsesBaseUrl(upstreamBaseUrl, authSnapshot.authMode),
)
return FrameworkSessionTransportCompat.SessionNetworkConfig(
baseUrl = transportTarget.baseUrl,
defaultHeaders = buildDefaultHeaders(authSnapshot),
connectTimeoutMillis = CONNECT_TIMEOUT_MS,
readTimeoutMillis = READ_TIMEOUT_MS,
)
}
// Convenience: just the path component ("/responses" suffix) for a base URL.
internal fun buildFrameworkResponsesPath(responsesBaseUrl: String): String {
return buildFrameworkTransportTarget(responsesBaseUrl).responsesPath
}
/**
 * Resolves the effective base URL: blank, "provider-default", or the literal
 * string "null" all fall back to the ChatGPT backend (chatgpt auth) or the
 * OpenAI API (api-key auth); anything else is used as-is. Trailing slashes
 * are stripped.
 */
internal fun buildResponsesBaseUrl(
upstreamBaseUrl: String,
authMode: String,
): String {
val normalizedUpstreamBaseUrl = upstreamBaseUrl.trim()
return when {
normalizedUpstreamBaseUrl.isBlank() ||
normalizedUpstreamBaseUrl == "provider-default" ||
normalizedUpstreamBaseUrl == "null" -> {
if (authMode == "chatgpt") {
DEFAULT_CHATGPT_BASE_URL
} else {
DEFAULT_OPENAI_BASE_URL
}
}
else -> normalizedUpstreamBaseUrl
}.trimEnd('/')
}
// Full /responses endpoint URL for the resolved base.
internal fun buildResponsesUrl(
upstreamBaseUrl: String,
authMode: String,
): String {
return "${buildResponsesBaseUrl(upstreamBaseUrl, authMode)}/responses"
}
// Splits a base URL into scheme://host[:port] and a "/responses"-suffixed
// path, preserving any existing path prefix (e.g. "/backend-api/codex").
internal fun buildFrameworkTransportTarget(responsesBaseUrl: String): FrameworkTransportTarget {
val upstreamUrl = URL(responsesBaseUrl)
val baseUrl = buildString {
append(upstreamUrl.protocol)
append("://")
append(upstreamUrl.host)
if (upstreamUrl.port != -1) {
append(":")
append(upstreamUrl.port)
}
}
val normalizedPath = upstreamUrl.path.trimEnd('/').ifBlank { "/" }
val responsesPath = if (normalizedPath == "/") {
"/responses"
} else {
"$normalizedPath/responses"
}
return FrameworkTransportTarget(
baseUrl = baseUrl,
responsesPath = responsesPath,
)
}
/**
 * Parses auth.json into an [AuthSnapshot]. Mode selection: an explicit
 * "auth_mode" wins; otherwise presence of OPENAI_API_KEY implies apiKey,
 * else chatgpt. In chatgpt mode the bearer token is tokens.access_token.
 * @throws IOException if the file or required fields are missing.
 */
internal fun loadAuthSnapshot(authFile: File): AuthSnapshot {
if (!authFile.isFile) {
throw IOException("Missing Agent auth file at ${authFile.absolutePath}")
}
val json = JSONObject(authFile.readText())
val openAiApiKey = json.stringOrNull("OPENAI_API_KEY")
// NOTE(review): the `null ->` and `else ->` branches are identical; an
// unrecognized auth_mode is treated the same as an absent one.
val authMode = when (json.stringOrNull("auth_mode")) {
"apiKey", "apikey", "api_key" -> "apiKey"
"chatgpt", "chatgptAuthTokens", "chatgpt_auth_tokens" -> "chatgpt"
null -> if (openAiApiKey != null) "apiKey" else "chatgpt"
else -> if (openAiApiKey != null) "apiKey" else "chatgpt"
}
return if (authMode == "apiKey") {
val apiKey = openAiApiKey
?: throw IOException("Agent auth file is missing OPENAI_API_KEY")
AuthSnapshot(
authMode = authMode,
bearerToken = apiKey,
accountId = null,
)
} else {
val tokens = json.optJSONObject("tokens")
?: throw IOException("Agent auth file is missing chatgpt tokens")
val accessToken = tokens.stringOrNull("access_token")
?: throw IOException("Agent auth file is missing access_token")
AuthSnapshot(
authMode = "chatgpt",
bearerToken = accessToken,
accountId = tokens.stringOrNull("account_id"),
)
}
}
// Performs the direct HTTP POST. Each phase (write body, read status, read
// body) wraps IOExceptions with phase + URL context via wrapRequestFailure.
// The connection is always disconnected, even on failure.
private fun executeRequest(
upstreamUrl: String,
requestBodyBytes: ByteArray,
authSnapshot: AuthSnapshot,
): HttpResponse {
val connection = openConnection(upstreamUrl, authSnapshot)
return try {
try {
connection.outputStream.use { output ->
output.write(requestBodyBytes)
output.flush()
}
} catch (err: IOException) {
throw wrapRequestFailure("write request body", upstreamUrl, err)
}
val statusCode = try {
connection.responseCode
} catch (err: IOException) {
throw wrapRequestFailure("read response status", upstreamUrl, err)
}
val responseBody = try {
// HttpURLConnection exposes 4xx/5xx bodies on errorStream, not inputStream.
val stream = if (statusCode >= 400) connection.errorStream else connection.inputStream
stream?.bufferedReader(StandardCharsets.UTF_8)?.use { it.readText() }.orEmpty()
} catch (err: IOException) {
throw wrapRequestFailure("read response body", upstreamUrl, err)
}
Log.i(
TAG,
"Responses proxy completed status=$statusCode response_bytes=${responseBody.toByteArray(StandardCharsets.UTF_8).size}",
)
HttpResponse(
statusCode = statusCode,
body = responseBody,
)
} finally {
connection.disconnect()
}
}
// Opens and configures the POST connection: timeouts, redirects, then both
// the auth/default headers and the per-request content/accept headers.
private fun openConnection(
upstreamUrl: String,
authSnapshot: AuthSnapshot,
): HttpURLConnection {
return try {
(URL(upstreamUrl).openConnection() as HttpURLConnection).apply {
requestMethod = "POST"
connectTimeout = CONNECT_TIMEOUT_MS
readTimeout = READ_TIMEOUT_MS
doInput = true
doOutput = true
instanceFollowRedirects = true
val defaultHeaders = buildDefaultHeaders(authSnapshot)
defaultHeaders.keySet().forEach { key ->
defaultHeaders.getString(key)?.let { value ->
setRequestProperty(key, value)
}
}
val requestHeaders = buildResponsesRequestHeaders()
requestHeaders.keySet().forEach { key ->
requestHeaders.getString(key)?.let { value ->
setRequestProperty(key, value)
}
}
}
} catch (err: IOException) {
throw wrapRequestFailure("open connection", upstreamUrl, err)
}
}
// Auth + identification headers shared by both transports. The ChatGPT
// account-id header is only attached in chatgpt mode when present.
internal fun buildDefaultHeaders(authSnapshot: AuthSnapshot): Bundle {
return Bundle().apply {
putString(HEADER_AUTHORIZATION, "$HEADER_VALUE_BEARER_PREFIX${authSnapshot.bearerToken}")
putString(HEADER_ORIGINATOR, DEFAULT_ORIGINATOR)
putString(HEADER_USER_AGENT, DEFAULT_USER_AGENT)
if (authSnapshot.authMode == "chatgpt" && !authSnapshot.accountId.isNullOrBlank()) {
putString(HEADER_CHATGPT_ACCOUNT_ID, authSnapshot.accountId)
}
}
}
// Per-request headers: JSON in, SSE out, no compression (identity encoding).
internal fun buildResponsesRequestHeaders(): Bundle {
return Bundle().apply {
putString(HEADER_CONTENT_TYPE, HEADER_VALUE_APPLICATION_JSON)
putString(HEADER_ACCEPT, HEADER_VALUE_TEXT_EVENT_STREAM)
putString(HEADER_ACCEPT_ENCODING, HEADER_VALUE_IDENTITY)
}
}
// Human-readable failure description including phase, URL, and the original
// exception class/message (class name used when the message is blank/null).
internal fun describeRequestFailure(
phase: String,
upstreamUrl: String,
err: IOException,
): String {
val reason = err.message?.ifBlank { err::class.java.simpleName } ?: err::class.java.simpleName
return "Responses proxy failed during $phase for $upstreamUrl: ${err::class.java.simpleName}: $reason"
}
// Wraps an IOException with context and logs it — warning level for socket
// errors (common, often transient), error level otherwise.
private fun wrapRequestFailure(
phase: String,
upstreamUrl: String,
err: IOException,
): IOException {
val wrapped = IOException(describeRequestFailure(phase, upstreamUrl, err), err)
if (err is SocketException) {
Log.w(TAG, wrapped.message, err)
} else {
Log.e(TAG, wrapped.message, err)
}
return wrapped
}
// Reads a string field, treating JSON null, absent, and blank all as null.
private fun JSONObject.stringOrNull(key: String): String? {
if (!has(key) || isNull(key)) {
return null
}
return optString(key).ifBlank { null }
}
}

View File

@@ -1,192 +0,0 @@
package com.openai.codex.agent
import android.app.agent.AgentManager
import android.content.Context
import android.os.ParcelFileDescriptor
import android.util.Log
import com.openai.codex.bridge.HostedCodexConfig
import java.io.BufferedInputStream
import java.io.BufferedOutputStream
import java.io.Closeable
import java.io.DataInputStream
import java.io.DataOutputStream
import java.io.EOFException
import java.io.FileInputStream
import java.io.FileOutputStream
import java.io.IOException
import java.io.File
import java.nio.charset.StandardCharsets
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicBoolean
import kotlin.concurrent.thread
import org.json.JSONObject
/**
 * Serves a simple length-prefixed JSON request/response protocol over the
 * framework's per-session bridge file descriptor, letting the framework query
 * runtime status, the installed AGENTS file, and session execution settings.
 *
 * One [RunningBridge] (and its dedicated thread) per session id.
 */
object AgentSessionBridgeServer {
private val runningBridges = ConcurrentHashMap<String, RunningBridge>()
/** Starts a bridge for [sessionId] if one is not already running; idempotent. */
fun ensureStarted(
context: Context,
agentManager: AgentManager,
sessionId: String,
) {
// computeIfAbsent makes start-once atomic across concurrent callers.
runningBridges.computeIfAbsent(sessionId) {
RunningBridge(
context = context.applicationContext,
agentManager = agentManager,
sessionId = sessionId,
).also(RunningBridge::start)
}
}
/** Stops and unregisters the bridge for [sessionId], if any. */
fun closeSession(sessionId: String) {
runningBridges.remove(sessionId)?.close()
}
// Owns the bridge fd, its streams, and the serve-loop thread for one session.
private class RunningBridge(
private val context: Context,
private val agentManager: AgentManager,
private val sessionId: String,
) : Closeable {
companion object {
private const val TAG = "AgentSessionBridge"
private const val METHOD_GET_RUNTIME_STATUS = "getRuntimeStatus"
private const val METHOD_READ_INSTALLED_AGENTS_FILE = "readInstalledAgentsFile"
private const val METHOD_READ_SESSION_EXECUTION_SETTINGS = "readSessionExecutionSettings"
private const val WRITE_CHUNK_BYTES = 4096
}
private val closed = AtomicBoolean(false)
private var bridgeFd: ParcelFileDescriptor? = null
private var input: DataInputStream? = null
private var output: DataOutputStream? = null
private val executionSettingsStore = SessionExecutionSettingsStore(context)
// Created unstarted so `this` is fully constructed before the loop runs;
// start() launches it.
private val serveThread = thread(
start = false,
name = "AgentSessionBridge-$sessionId",
) {
serveLoop()
}
fun start() {
serveThread.start()
}
// Idempotent teardown: close streams/fd (unblocking any pending read) and
// interrupt the serve thread.
override fun close() {
if (!closed.compareAndSet(false, true)) {
return
}
runCatching { input?.close() }
runCatching { output?.close() }
runCatching { bridgeFd?.close() }
serveThread.interrupt()
}
// Opens the bridge fd, then serves request/response pairs until EOF, close,
// or failure. Always unregisters itself from runningBridges on exit.
private fun serveLoop() {
try {
val fd = agentManager.openSessionBridge(sessionId)
bridgeFd = fd
input = DataInputStream(BufferedInputStream(FileInputStream(fd.fileDescriptor)))
output = DataOutputStream(BufferedOutputStream(FileOutputStream(fd.fileDescriptor)))
Log.i(TAG, "Opened framework session bridge for $sessionId")
while (!closed.get()) {
val request = try {
readMessage(input ?: break)
} catch (_: EOFException) {
// Peer closed the bridge; normal shutdown.
return
}
val response = handleRequest(request)
writeMessage(output ?: break, response)
}
} catch (err: Exception) {
// Suppress the noise for deliberate close() and known session-teardown
// races; everything else is logged.
if (!closed.get() && !isExpectedSessionShutdown(err)) {
Log.w(TAG, "Session bridge failed for $sessionId", err)
}
} finally {
// remove(sessionId, this): only unregister if we are still the
// registered bridge for this session.
runningBridges.remove(sessionId, this)
close()
}
}
// Matches the framework's "session already gone" error so it is treated as
// a normal shutdown rather than a failure.
private fun isExpectedSessionShutdown(err: Exception): Boolean {
return err is IllegalStateException
&& err.message?.contains("No active Genie runtime for session") == true
}
// Dispatches one bridge request to its handler. All responses echo the
// requestId and carry ok=true/false; handler exceptions are converted into
// ok=false error responses instead of killing the serve loop.
private fun handleRequest(request: JSONObject): JSONObject {
val requestId = request.optString("requestId")
return runCatching {
when (request.optString("method")) {
METHOD_GET_RUNTIME_STATUS -> {
val status = AgentCodexAppServerClient.readRuntimeStatus(context)
JSONObject()
.put("requestId", requestId)
.put("ok", true)
.put(
"runtimeStatus",
JSONObject()
.put("authenticated", status.authenticated)
.put("accountEmail", status.accountEmail)
.put("clientCount", status.clientCount)
.put("modelProviderId", status.modelProviderId)
.put("configuredModel", status.configuredModel)
.put("effectiveModel", status.effectiveModel)
.put("upstreamBaseUrl", status.upstreamBaseUrl)
.put("frameworkResponsesPath", status.frameworkResponsesPath),
)
}
METHOD_READ_INSTALLED_AGENTS_FILE -> {
val codexHome = File(context.filesDir, "codex-home")
// Ensure the bundled AGENTS file exists before reading it back.
HostedCodexConfig.installBundledAgentsFile(context, codexHome)
JSONObject()
.put("requestId", requestId)
.put("ok", true)
.put("agentsMarkdown", HostedCodexConfig.readInstalledAgentsMarkdown(codexHome))
}
METHOD_READ_SESSION_EXECUTION_SETTINGS -> {
JSONObject()
.put("requestId", requestId)
.put("ok", true)
.put("executionSettings", executionSettingsStore.toJson(sessionId))
}
else -> {
JSONObject()
.put("requestId", requestId)
.put("ok", false)
.put("error", "Unsupported bridge method: ${request.optString("method")}")
}
}
}.getOrElse { err ->
JSONObject()
.put("requestId", requestId)
.put("ok", false)
.put("error", err.message ?: err::class.java.simpleName)
}
}
// Wire format: big-endian int length prefix followed by a UTF-8 JSON payload.
private fun readMessage(input: DataInputStream): JSONObject {
val size = input.readInt()
if (size <= 0) {
throw IOException("Invalid session bridge message length: $size")
}
val payload = ByteArray(size)
input.readFully(payload)
return JSONObject(payload.toString(StandardCharsets.UTF_8))
}
// Writes length prefix then the payload in 4 KiB chunks, flushing after
// each write — presumably to keep the peer's reads unblocked on the pipe;
// TODO(review) confirm the chunk+flush pattern is required by the fd transport.
private fun writeMessage(
output: DataOutputStream,
message: JSONObject,
) {
val payload = message.toString().toByteArray(StandardCharsets.UTF_8)
output.writeInt(payload.size)
output.flush()
var offset = 0
while (offset < payload.size) {
val chunkSize = minOf(WRITE_CHUNK_BYTES, payload.size - offset)
output.write(payload, offset, chunkSize)
output.flush()
offset += chunkSize
}
}
}
}

View File

@@ -1,797 +0,0 @@
package com.openai.codex.agent
import android.app.agent.AgentManager
import android.app.agent.AgentSessionEvent
import android.app.agent.AgentSessionInfo
import android.content.Context
import android.os.Binder
import android.os.Process
import android.util.Log
import com.openai.codex.bridge.DetachedTargetCompat
import com.openai.codex.bridge.FrameworkSessionTransportCompat
import com.openai.codex.bridge.SessionExecutionSettings
import java.util.concurrent.Executor
class AgentSessionController(context: Context) {
companion object {
private const val TAG = "AgentSessionController"
// Sentinels prefixing bridge request/response payloads embedded in messages.
private const val BRIDGE_REQUEST_PREFIX = "__codex_bridge__ "
private const val BRIDGE_RESPONSE_PREFIX = "__codex_bridge_result__ "
// Placeholder timeline text used before diagnostics are fetched for a session.
private const val DIAGNOSTIC_NOT_LOADED = "Diagnostics not loaded."
private const val MAX_TIMELINE_EVENTS = 12
// Genie role holder preferred when multiple packages hold the role.
private const val PREFERRED_GENIE_PACKAGE = "com.openai.codex.genie"
// Retry policy for delivering question answers — TODO(review) confirm the
// retry consumer; the usage site is outside this view.
private const val QUESTION_ANSWER_RETRY_COUNT = 10
private const val QUESTION_ANSWER_RETRY_DELAY_MS = 50L
}
// Application context avoids retaining an Activity/Service beyond its lifetime.
private val appContext = context.applicationContext
// Null when the framework does not provide the agent service (see isAvailable()).
private val agentManager = appContext.getSystemService(AgentManager::class.java)
private val presentationPolicyStore = SessionPresentationPolicyStore(context)
private val executionSettingsStore = SessionExecutionSettingsStore(context)
fun isAvailable(): Boolean = agentManager != null
/**
 * Whether a session may be started targeting [packageName] for the current
 * user. Returns false when the agent service is unavailable.
 */
fun canStartSessionForTarget(packageName: String): Boolean {
val manager = agentManager ?: return false
return manager.canStartSessionForTarget(packageName, currentUserId())
}
/**
 * Registers [listener] for session events on [executor].
 * @return true if registered; false when the agent service is unavailable.
 */
fun registerSessionListener(
executor: Executor,
listener: AgentManager.SessionListener,
): Boolean {
val manager = agentManager ?: return false
manager.registerSessionListener(currentUserId(), executor, listener)
return true
}
/** Unregisters a previously registered session listener; no-op if unavailable. */
fun unregisterSessionListener(listener: AgentManager.SessionListener) {
agentManager?.unregisterSessionListener(listener)
}
/** Registers a UI lease [token] against [parentSessionId]; no-op if unavailable. */
fun registerSessionUiLease(parentSessionId: String, token: Binder) {
agentManager?.registerSessionUiLease(parentSessionId, token)
}
/** Releases a UI lease [token] for [parentSessionId]; no-op if unavailable. */
fun unregisterSessionUiLease(parentSessionId: String, token: Binder) {
agentManager?.unregisterSessionUiLease(parentSessionId, token)
}
/**
 * Registers and immediately releases a throwaway UI lease for
 * [parentSessionId] — presumably signalling the framework that the session's
 * UI has been surfaced at least once (TODO(review): confirm framework
 * semantics of a transient lease). Both calls are best-effort.
 */
fun acknowledgeSessionUi(parentSessionId: String) {
val manager = agentManager ?: return
val token = Binder()
runCatching {
manager.registerSessionUiLease(parentSessionId, token)
}
runCatching {
manager.unregisterSessionUiLease(parentSessionId, token)
}
}
/**
 * Builds a complete [AgentSnapshot] for the UI.
 *
 * Pipeline, in order:
 *  1. prune persisted policies/settings for sessions that no longer exist;
 *  2. map every live session to a details row without diagnostics;
 *  3. load event diagnostics only for the focused session and its parent
 *     (at most two event queries, keeping this call cheap);
 *  4. roll child state up into direct parent rows;
 *  5. re-resolve selection on the enriched rows and gather related sessions.
 *
 * @param focusedSessionId session the UI wants focused, or null for a default.
 */
fun loadSnapshot(focusedSessionId: String?): AgentSnapshot {
    val manager = agentManager ?: return AgentSnapshot.unavailable
    val roleHolders = manager.getGenieRoleHolders(currentUserId())
    val selectedGeniePackage = selectGeniePackage(roleHolders)
    val sessions = manager.getSessions(currentUserId())
    // Drop persisted state for sessions the framework no longer reports.
    presentationPolicyStore.prunePolicies(sessions.map { it.sessionId }.toSet())
    executionSettingsStore.pruneSettings(sessions.map { it.sessionId }.toSet())
    var sessionDetails = sessions.map { session ->
        val targetRuntime = DetachedTargetCompat.getTargetRuntime(session)
        AgentSessionDetails(
            sessionId = session.sessionId,
            parentSessionId = session.parentSessionId,
            targetPackage = session.targetPackage,
            anchor = session.anchor,
            state = session.state,
            stateLabel = stateToString(session.state),
            targetPresentation = session.targetPresentation,
            targetPresentationLabel = targetPresentationToString(session.targetPresentation),
            targetRuntime = targetRuntime.value,
            targetRuntimeLabel = targetRuntime.label,
            targetDetached = session.isTargetDetached,
            requiredFinalPresentationPolicy = presentationPolicyStore.getPolicy(session.sessionId),
            // Diagnostics are filled in below for the selected/parent pair only.
            latestQuestion = null,
            latestResult = null,
            latestError = null,
            latestTrace = null,
            timeline = DIAGNOSTIC_NOT_LOADED,
        )
    }
    // First selection pass (pre-diagnostics) just decides which sessions need
    // their event history loaded.
    val selectedSessionId = chooseSelectedSession(sessionDetails, focusedSessionId)?.sessionId
    val parentSessionId = selectedSessionId?.let { selectedId ->
        findParentSession(sessionDetails, sessionDetails.firstOrNull { it.sessionId == selectedId })?.sessionId
    }
    // linkedSetOf keeps parent-before-child order and dedupes parent==selected.
    val diagnosticSessionIds = linkedSetOf<String>().apply {
        parentSessionId?.let(::add)
        selectedSessionId?.let(::add)
    }
    val diagnosticsBySessionId = diagnosticSessionIds.associateWith { sessionId ->
        loadSessionDiagnostics(manager, sessionId)
    }
    sessionDetails = sessionDetails.map { session ->
        diagnosticsBySessionId[session.sessionId]?.let(session::withDiagnostics) ?: session
    }
    sessionDetails = deriveDirectParentUiState(sessionDetails)
    // Second selection pass runs on the rolled-up rows so labels/states match
    // what the UI will actually render.
    val selectedSession = chooseSelectedSession(sessionDetails, focusedSessionId)
    val parentSession = findParentSession(sessionDetails, selectedSession)
    val relatedSessions = if (parentSession == null) {
        selectedSession?.let(::listOf) ?: emptyList()
    } else {
        // Parent row first, then its children ordered by session id.
        sessionDetails.filter { session ->
            session.sessionId == parentSession.sessionId ||
                session.parentSessionId == parentSession.sessionId
        }.sortedWith(compareBy<AgentSessionDetails> { it.parentSessionId != null }.thenBy { it.sessionId })
    }
    return AgentSnapshot(
        available = true,
        roleHolders = roleHolders,
        selectedGeniePackage = selectedGeniePackage,
        sessions = sessionDetails,
        selectedSession = selectedSession,
        parentSession = parentSession,
        relatedSessions = relatedSessions,
    )
}
/**
 * Creates a direct (AGENT-anchored) parent session and immediately starts one
 * child Genie session per planned target. The parent is cancelled if starting
 * the children fails.
 */
fun startDirectSession(
    plan: AgentDelegationPlan,
    allowDetachedMode: Boolean,
    executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
): SessionStartResult {
    val pending = createPendingDirectSession(plan.originalObjective, executionSettings)
    return startDirectSessionChildren(
        parentSessionId = pending.parentSessionId,
        geniePackage = pending.geniePackage,
        plan = plan,
        allowDetachedMode = allowDetachedMode,
        executionSettings = executionSettings,
        cancelParentOnFailure = true,
    )
}
/**
 * Creates a direct parent session in RUNNING state with no children yet, so a
 * planner can attach children later via [startDirectSessionChildren].
 *
 * On any failure after creation, the parent is best-effort cancelled and its
 * persisted settings removed before the exception is rethrown.
 *
 * @throws IllegalStateException when no GENIE role holder is configured or the
 *   AgentManager service is unavailable.
 */
fun createPendingDirectSession(
    objective: String,
    executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
): PendingDirectSessionStart {
    val manager = requireAgentManager()
    val geniePackage = selectGeniePackage(manager.getGenieRoleHolders(currentUserId()))
        ?: throw IllegalStateException("No GENIE role holder configured")
    val parentSession = manager.createDirectSession(currentUserId())
    try {
        executionSettingsStore.saveSettings(parentSession.sessionId, executionSettings)
        manager.publishTrace(
            parentSession.sessionId,
            "Planning Codex direct session for objective: $objective",
        )
        manager.updateSessionState(parentSession.sessionId, AgentSessionInfo.STATE_RUNNING)
        return PendingDirectSessionStart(
            parentSessionId = parentSession.sessionId,
            geniePackage = geniePackage,
        )
    } catch (err: RuntimeException) {
        // Roll back: cancel is best-effort, settings removal must still run.
        runCatching { manager.cancelSession(parentSession.sessionId) }
        executionSettingsStore.removeSettings(parentSession.sessionId)
        throw err
    }
}
/**
 * Creates and starts one child Genie session per target under an existing,
 * still-active direct parent session.
 *
 * The parent's liveness is re-checked before each child creation and each
 * Genie start, since the user may cancel the parent mid-loop. On failure,
 * every already-created child is best-effort cancelled and its persisted
 * state removed; the parent is additionally cancelled when
 * [cancelParentOnFailure] is set (used by [startDirectSession]).
 *
 * @throws IllegalStateException if the parent is gone/terminal, or a target
 *   requires detached presentation while [allowDetachedMode] is false.
 */
fun startDirectSessionChildren(
    parentSessionId: String,
    geniePackage: String,
    plan: AgentDelegationPlan,
    allowDetachedMode: Boolean,
    executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
    cancelParentOnFailure: Boolean = false,
): SessionStartResult {
    val manager = requireAgentManager()
    requireActiveDirectParentSession(manager, parentSessionId)
    // Validate presentation policies up front, before any child is created.
    val detachedPolicyTargets = plan.targets.filter { it.finalPresentationPolicy.requiresDetachedMode() }
    check(allowDetachedMode || detachedPolicyTargets.isEmpty()) {
        "Detached final presentation requires detached mode for ${detachedPolicyTargets.joinToString(", ") { it.packageName }}"
    }
    val childSessionIds = mutableListOf<String>()
    try {
        manager.publishTrace(
            parentSessionId,
            "Starting Codex direct session for objective: ${plan.originalObjective}",
        )
        plan.rationale?.let { rationale ->
            manager.publishTrace(parentSessionId, "Planning rationale: $rationale")
        }
        plan.targets.forEach { target ->
            requireActiveDirectParentSession(manager, parentSessionId)
            val childSession = manager.createChildSession(parentSessionId, target.packageName)
            // Track the id immediately so the catch block can clean it up.
            childSessionIds += childSession.sessionId
            presentationPolicyStore.savePolicy(childSession.sessionId, target.finalPresentationPolicy)
            executionSettingsStore.saveSettings(childSession.sessionId, executionSettings)
            provisionSessionNetworkConfig(childSession.sessionId)
            manager.publishTrace(
                parentSessionId,
                "Created child session ${childSession.sessionId} for ${target.packageName} with required final presentation ${target.finalPresentationPolicy.wireValue}.",
            )
            requireActiveDirectParentSession(manager, parentSessionId)
            manager.startGenieSession(
                childSession.sessionId,
                geniePackage,
                buildDelegatedPrompt(target),
                allowDetachedMode,
            )
        }
        return SessionStartResult(
            parentSessionId = parentSessionId,
            childSessionIds = childSessionIds,
            plannedTargets = plan.targets.map(AgentDelegationTarget::packageName),
            geniePackage = geniePackage,
            anchor = AgentSessionInfo.ANCHOR_AGENT,
        )
    } catch (err: RuntimeException) {
        // Undo partial progress; each cancel is best-effort so one failure
        // does not block cleanup of the remaining children.
        childSessionIds.forEach { childSessionId ->
            runCatching { manager.cancelSession(childSessionId) }
            presentationPolicyStore.removePolicy(childSessionId)
            executionSettingsStore.removeSettings(childSessionId)
        }
        if (cancelParentOnFailure) {
            runCatching { manager.cancelSession(parentSessionId) }
            executionSettingsStore.removeSettings(parentSessionId)
        }
        throw err
    }
}
/**
 * Creates and starts a fresh HOME-anchored, app-scoped session against a
 * single target package.
 *
 * On failure after creation, persisted policy/settings are removed and the
 * new session is best-effort cancelled before rethrowing.
 *
 * @throws IllegalStateException when the target is ineligible, no GENIE role
 *   holder is configured, or the AgentManager service is unavailable.
 */
fun startHomeSession(
    targetPackage: String,
    prompt: String,
    allowDetachedMode: Boolean,
    finalPresentationPolicy: SessionFinalPresentationPolicy,
    executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
): SessionStartResult {
    val manager = requireAgentManager()
    check(canStartSessionForTarget(targetPackage)) {
        "Target package $targetPackage is not eligible for session start"
    }
    val geniePackage = selectGeniePackage(manager.getGenieRoleHolders(currentUserId()))
        ?: throw IllegalStateException("No GENIE role holder configured")
    val session = manager.createAppScopedSession(targetPackage, currentUserId())
    // Persist before the try so the catch block can remove unconditionally.
    presentationPolicyStore.savePolicy(session.sessionId, finalPresentationPolicy)
    executionSettingsStore.saveSettings(session.sessionId, executionSettings)
    try {
        provisionSessionNetworkConfig(session.sessionId)
        manager.publishTrace(
            session.sessionId,
            "Starting Codex app-scoped session for $targetPackage with required final presentation ${finalPresentationPolicy.wireValue}.",
        )
        manager.startGenieSession(
            session.sessionId,
            geniePackage,
            buildDelegatedPrompt(
                AgentDelegationTarget(
                    packageName = targetPackage,
                    objective = prompt,
                    finalPresentationPolicy = finalPresentationPolicy,
                ),
            ),
            allowDetachedMode,
        )
        // HOME sessions have no separate parent; the session is its own child.
        return SessionStartResult(
            parentSessionId = session.sessionId,
            childSessionIds = listOf(session.sessionId),
            plannedTargets = listOf(targetPackage),
            geniePackage = geniePackage,
            anchor = AgentSessionInfo.ANCHOR_HOME,
        )
    } catch (err: RuntimeException) {
        presentationPolicyStore.removePolicy(session.sessionId)
        executionSettingsStore.removeSettings(session.sessionId)
        runCatching { manager.cancelSession(session.sessionId) }
        throw err
    }
}
/**
 * Starts a Genie run on an already-existing HOME session identified by
 * [sessionId] (e.g. a session created elsewhere in the framework).
 *
 * Unlike [startHomeSession], failure cleanup removes only the persisted
 * policy/settings; the session itself is NOT cancelled because this method
 * did not create it and its owner may still need it.
 *
 * @throws IllegalStateException when the target is ineligible, no GENIE role
 *   holder is configured, or the AgentManager service is unavailable.
 */
fun startExistingHomeSession(
    sessionId: String,
    targetPackage: String,
    prompt: String,
    allowDetachedMode: Boolean,
    finalPresentationPolicy: SessionFinalPresentationPolicy,
    executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
): SessionStartResult {
    val manager = requireAgentManager()
    check(canStartSessionForTarget(targetPackage)) {
        "Target package $targetPackage is not eligible for session start"
    }
    val geniePackage = selectGeniePackage(manager.getGenieRoleHolders(currentUserId()))
        ?: throw IllegalStateException("No GENIE role holder configured")
    presentationPolicyStore.savePolicy(sessionId, finalPresentationPolicy)
    executionSettingsStore.saveSettings(sessionId, executionSettings)
    try {
        provisionSessionNetworkConfig(sessionId)
        manager.publishTrace(
            sessionId,
            "Starting Codex app-scoped session for $targetPackage with required final presentation ${finalPresentationPolicy.wireValue}.",
        )
        manager.startGenieSession(
            sessionId,
            geniePackage,
            buildDelegatedPrompt(
                AgentDelegationTarget(
                    packageName = targetPackage,
                    objective = prompt,
                    finalPresentationPolicy = finalPresentationPolicy,
                ),
            ),
            allowDetachedMode,
        )
        return SessionStartResult(
            parentSessionId = sessionId,
            childSessionIds = listOf(sessionId),
            plannedTargets = listOf(targetPackage),
            geniePackage = geniePackage,
            anchor = AgentSessionInfo.ANCHOR_HOME,
        )
    } catch (err: RuntimeException) {
        presentationPolicyStore.removePolicy(sessionId)
        executionSettingsStore.removeSettings(sessionId)
        throw err
    }
}
/**
 * Continues an existing direct (AGENT-anchored) session by creating and
 * starting one more child Genie session under [parentSessionId].
 *
 * Fix: previously this method performed no cleanup when any step after child
 * creation threw, leaking an orphaned child session plus its persisted
 * policy/settings — unlike every other start path in this controller. The
 * child is now rolled back on failure, mirroring [startDirectSessionChildren].
 *
 * @throws IllegalStateException when the target is ineligible, no GENIE role
 *   holder is configured, or the AgentManager service is unavailable.
 */
fun continueDirectSessionInPlace(
    parentSessionId: String,
    target: AgentDelegationTarget,
    executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
): SessionStartResult {
    val manager = requireAgentManager()
    check(canStartSessionForTarget(target.packageName)) {
        "Target package ${target.packageName} is not eligible for session continuation"
    }
    val geniePackage = selectGeniePackage(manager.getGenieRoleHolders(currentUserId()))
        ?: throw IllegalStateException("No GENIE role holder configured")
    executionSettingsStore.saveSettings(parentSessionId, executionSettings)
    Log.i(TAG, "Continuing AGENT session $parentSessionId with target ${target.packageName}")
    manager.publishTrace(
        parentSessionId,
        "Continuing Codex direct session for ${target.packageName} with required final presentation ${target.finalPresentationPolicy.wireValue}.",
    )
    val childSession = manager.createChildSession(parentSessionId, target.packageName)
    try {
        AgentSessionBridgeServer.ensureStarted(appContext, manager, childSession.sessionId)
        presentationPolicyStore.savePolicy(childSession.sessionId, target.finalPresentationPolicy)
        executionSettingsStore.saveSettings(childSession.sessionId, executionSettings)
        provisionSessionNetworkConfig(childSession.sessionId)
        manager.startGenieSession(
            childSession.sessionId,
            geniePackage,
            buildDelegatedPrompt(target),
            /* allowDetachedMode = */ true,
        )
        return SessionStartResult(
            parentSessionId = parentSessionId,
            childSessionIds = listOf(childSession.sessionId),
            plannedTargets = listOf(target.packageName),
            geniePackage = geniePackage,
            anchor = AgentSessionInfo.ANCHOR_AGENT,
        )
    } catch (err: RuntimeException) {
        // Roll back the orphaned child; cancel is best-effort so persisted
        // state is removed even if the framework call fails.
        runCatching { manager.cancelSession(childSession.sessionId) }
        presentationPolicyStore.removePolicy(childSession.sessionId)
        executionSettingsStore.removeSettings(childSession.sessionId)
        throw err
    }
}
/** Returns the persisted execution settings (model, reasoning effort) for [sessionId]. */
fun executionSettingsForSession(sessionId: String): SessionExecutionSettings =
    executionSettingsStore.getSettings(sessionId)
/**
 * Delivers [answer] to a session that asked a question, retrying briefly to
 * ride out the race where the session has not yet (re)entered
 * WAITING_FOR_USER when the answer arrives.
 *
 * Retries up to QUESTION_ANSWER_RETRY_COUNT times with a fixed
 * QUESTION_ANSWER_RETRY_DELAY_MS sleep between attempts; the last failure is
 * rethrown. On success, optionally publishes a trace to [parentSessionId].
 */
fun answerQuestion(sessionId: String, answer: String, parentSessionId: String?) {
    val manager = requireAgentManager()
    repeat(QUESTION_ANSWER_RETRY_COUNT) { attempt ->
        runCatching {
            manager.answerQuestion(sessionId, answer)
        }.onSuccess {
            if (parentSessionId != null) {
                manager.publishTrace(parentSessionId, "Answered question for $sessionId: $answer")
            }
            // Non-local return exits answerQuestion, ending the retry loop.
            return
        }.onFailure { err ->
            // Give up on the final attempt or when the failure is not the
            // known "not waiting yet" race.
            if (attempt == QUESTION_ANSWER_RETRY_COUNT - 1 || !shouldRetryAnswerQuestion(sessionId, err)) {
                throw err
            }
            Thread.sleep(QUESTION_ANSWER_RETRY_DELAY_MS)
        }
    }
}
/** True when [sessionId] currently exists and is in WAITING_FOR_USER state. */
fun isSessionWaitingForUser(sessionId: String): Boolean {
    val sessions = agentManager?.getSessions(currentUserId()) ?: return false
    return sessions.any {
        it.sessionId == sessionId && it.state == AgentSessionInfo.STATE_WAITING_FOR_USER
    }
}
/** Re-attaches the detached target of [sessionId]; throws if the service is absent. */
fun attachTarget(sessionId: String) {
    val manager = requireAgentManager()
    manager.attachTarget(sessionId)
}
/** Cancels [sessionId]; throws if the service is absent. */
fun cancelSession(sessionId: String) {
    val manager = requireAgentManager()
    manager.cancelSession(sessionId)
}
/**
 * Marks [sessionId] as FAILED after publishing [message] as its error, so the
 * failure reason is visible in diagnostics before the state flips.
 */
fun failDirectSession(
    sessionId: String,
    message: String,
) {
    with(requireAgentManager()) {
        publishError(sessionId, message)
        updateSessionState(sessionId, AgentSessionInfo.STATE_FAILED)
    }
}
/**
 * True when [sessionId] is completed/cancelled/failed. An unknown session or
 * a missing AgentManager service also counts as terminal.
 */
fun isTerminalSession(sessionId: String): Boolean {
    val manager = agentManager ?: return true
    val match = manager.getSessions(currentUserId()).firstOrNull { it.sessionId == sessionId }
    return match == null || isTerminalState(match.state)
}
/**
 * Cancels every non-terminal session for the current user, children before
 * parents (ids ascending within each group), continuing past individual
 * failures.
 *
 * @return which session ids were cancelled and, per failed id, a short reason.
 */
fun cancelActiveSessions(): CancelActiveSessionsResult {
    val manager = requireAgentManager()
    val childrenFirst = compareByDescending<AgentSessionInfo> { it.parentSessionId != null }
        .thenBy { it.sessionId }
    val pending = manager.getSessions(currentUserId())
        .filterNot { isTerminalState(it.state) }
        .sortedWith(childrenFirst)
    val cancelled = mutableListOf<String>()
    val failures = mutableMapOf<String, String>()
    for (session in pending) {
        try {
            manager.cancelSession(session.sessionId)
            cancelled += session.sessionId
        } catch (err: Throwable) {
            failures[session.sessionId] = err.message ?: err::class.java.simpleName
        }
    }
    return CancelActiveSessionsResult(
        cancelledSessionIds = cancelled,
        failedSessionIds = failures,
    )
}
/** Returns the AgentManager, throwing IllegalStateException when absent. */
private fun requireAgentManager(): AgentManager =
    agentManager ?: error("AgentManager unavailable")
/**
 * Installs the framework-owned /responses transport configuration for
 * [sessionId] so the child Genie's model traffic is proxied through the
 * framework. "provider-default" leaves upstream base-URL selection to
 * AgentResponsesProxy.
 */
private fun provisionSessionNetworkConfig(sessionId: String) {
    val manager = requireAgentManager()
    FrameworkSessionTransportCompat.setSessionNetworkConfig(
        agentManager = manager,
        sessionId = sessionId,
        config = AgentResponsesProxy.buildFrameworkSessionNetworkConfig(
            context = appContext,
            upstreamBaseUrl = "provider-default",
        ),
    )
    Log.i(TAG, "Configured framework-owned /responses transport for $sessionId")
}
/**
 * Verifies that [parentSessionId] still exists, is a direct (AGENT-anchored)
 * parent session, and has not reached a terminal state.
 *
 * @throws IllegalStateException with a specific message for each violation.
 */
private fun requireActiveDirectParentSession(
    manager: AgentManager,
    parentSessionId: String,
) {
    val match = manager.getSessions(currentUserId())
        .firstOrNull { it.sessionId == parentSessionId }
        ?: throw IllegalStateException("Parent session $parentSessionId is no longer available")
    if (!isDirectParentSession(match)) {
        throw IllegalStateException("Session $parentSessionId is not an active direct parent session")
    }
    if (isTerminalState(match.state)) {
        throw IllegalStateException("Parent session $parentSessionId is no longer active")
    }
}
/**
 * Decides whether an answerQuestion failure is the transient "session not
 * waiting yet" race: either the error message says so, or the session is
 * observably not in WAITING_FOR_USER right now.
 */
private fun shouldRetryAnswerQuestion(
    sessionId: String,
    err: Throwable,
): Boolean {
    val mentionsNotWaiting =
        err.message?.contains("not waiting for user input", ignoreCase = true) == true
    return mentionsNotWaiting || !isSessionWaitingForUser(sessionId)
}
/**
 * Picks the session the UI should focus.
 *
 * When [focusedSessionId] resolves to a child session, it wins outright. When
 * it resolves to a parent, the best of its children is chosen instead:
 * a child waiting for user input, else an active child, else the last child
 * in list order, else the parent itself. Without a usable focus, the global
 * fallback chain is: any child waiting for input, any active child, any
 * direct parent session, then simply the first session.
 */
private fun chooseSelectedSession(
    sessions: List<AgentSessionDetails>,
    focusedSessionId: String?,
): AgentSessionDetails? {
    val sessionsById = sessions.associateBy(AgentSessionDetails::sessionId)
    val focusedSession = focusedSessionId?.let(sessionsById::get)
    if (focusedSession != null) {
        if (focusedSession.parentSessionId != null) {
            // Focus is already a child; keep it.
            return focusedSession
        }
        // Focus is a parent: prefer a child needing attention, then any
        // active child.
        val childCandidate = sessions.firstOrNull { session ->
            session.parentSessionId == focusedSession.sessionId &&
                session.state == AgentSessionInfo.STATE_WAITING_FOR_USER
        } ?: sessions.firstOrNull { session ->
            session.parentSessionId == focusedSession.sessionId &&
                !isTerminalState(session.state)
        }
        val latestChild = sessions.lastOrNull { session ->
            session.parentSessionId == focusedSession.sessionId
        }
        return childCandidate ?: latestChild ?: focusedSession
    }
    return sessions.firstOrNull { session ->
        session.parentSessionId != null &&
            session.state == AgentSessionInfo.STATE_WAITING_FOR_USER
    } ?: sessions.firstOrNull { session ->
        session.parentSessionId != null && !isTerminalState(session.state)
    } ?: sessions.firstOrNull(::isDirectParentSession) ?: sessions.firstOrNull()
}
/**
 * Resolves the parent row for [selectedSession]: the session whose id matches
 * its parentSessionId; a direct parent session acts as its own parent; null
 * for parentless non-direct (e.g. HOME) sessions or when nothing is selected.
 */
private fun findParentSession(
    sessions: List<AgentSessionDetails>,
    selectedSession: AgentSessionDetails?,
): AgentSessionDetails? {
    selectedSession ?: return null
    val parentId = selectedSession.parentSessionId
        ?: return selectedSession.takeIf(::isDirectParentSession)
    return sessions.firstOrNull { it.sessionId == parentId }
}
/** Picks the Codex Genie package when it holds the role, else the first role holder. */
private fun selectGeniePackage(roleHolders: List<String>): String? =
    if (PREFERRED_GENIE_PACKAGE in roleHolders) {
        PREFERRED_GENIE_PACKAGE
    } else {
        roleHolders.firstOrNull()
    }
/**
 * Rewrites each direct parent row so its state/result/error/trace reflect an
 * aggregate of its children (via [AgentParentSessionAggregator.rollup]).
 * Parents without children, and all non-parent rows, pass through unchanged.
 */
private fun deriveDirectParentUiState(sessions: List<AgentSessionDetails>): List<AgentSessionDetails> {
    val childrenByParent = sessions
        .filter { it.parentSessionId != null }
        .groupBy { it.parentSessionId }
    return sessions.map { session ->
        if (!isDirectParentSession(session)) {
            return@map session
        }
        val childSessions = childrenByParent[session.sessionId].orEmpty()
        if (childSessions.isEmpty()) {
            return@map session
        }
        val rollup = AgentParentSessionAggregator.rollup(
            childSessions.map { childSession ->
                ParentSessionChildSummary(
                    sessionId = childSession.sessionId,
                    targetPackage = childSession.targetPackage,
                    state = childSession.state,
                    targetPresentation = childSession.targetPresentation,
                    requiredFinalPresentationPolicy = childSession.requiredFinalPresentationPolicy,
                    latestResult = childSession.latestResult,
                    latestError = childSession.latestError,
                )
            },
        )
        val isRollupTerminal = isTerminalState(rollup.state)
        session.copy(
            state = rollup.state,
            stateLabel = stateToString(rollup.state),
            // Keep the parent's own result/error only once the rollup is
            // terminal; otherwise stale messages would show while running.
            latestResult = rollup.resultMessage ?: session.latestResult.takeIf { isRollupTerminal },
            latestError = rollup.errorMessage ?: session.latestError.takeIf { isRollupTerminal },
            latestTrace = when (rollup.state) {
                AgentSessionInfo.STATE_RUNNING -> "Child session running."
                AgentSessionInfo.STATE_WAITING_FOR_USER -> "Child session waiting for user input."
                AgentSessionInfo.STATE_QUEUED -> "Child session queued."
                else -> session.latestTrace
            },
        )
    }
}
/**
 * Assembles the prompt given to a child Genie: the delegated objective, a
 * blank line, the required final presentation line, and the policy's guidance
 * text, with surrounding whitespace trimmed.
 */
private fun buildDelegatedPrompt(target: AgentDelegationTarget): String {
    val prompt = StringBuilder()
        .appendLine(target.objective)
        .appendLine()
        .appendLine("Required final target presentation: ${target.finalPresentationPolicy.wireValue}")
        .append(target.finalPresentationPolicy.promptGuidance())
    return prompt.toString().trim()
}
/**
 * Returns the normalized message of the most recent event of [type] that has
 * a non-null message, or null when no such event exists. Note the normalized
 * form of that message may itself be null (e.g. blank text).
 */
private fun findLastEventMessage(events: List<AgentSessionEvent>, type: Int): String? {
    val match = events.lastOrNull { it.type == type && it.message != null }
        ?: return null
    return normalizeEventMessage(match.message)
}
/**
 * Fetches the event history for [sessionId] once and distills it into the
 * latest question/result/error/trace plus a rendered timeline.
 */
private fun loadSessionDiagnostics(manager: AgentManager, sessionId: String): SessionDiagnostics {
    val history = manager.getSessionEvents(sessionId)
    return SessionDiagnostics(
        latestQuestion = findLastEventMessage(history, AgentSessionEvent.TYPE_QUESTION),
        latestResult = findLastEventMessage(history, AgentSessionEvent.TYPE_RESULT),
        latestError = findLastEventMessage(history, AgentSessionEvent.TYPE_ERROR),
        latestTrace = findLastEventMessage(history, AgentSessionEvent.TYPE_TRACE),
        timeline = renderTimeline(history),
    )
}
/**
 * Renders the last MAX_TIMELINE_EVENTS events as "Type: message" lines, one
 * per line, or a placeholder string when there are no events yet.
 */
private fun renderTimeline(events: List<AgentSessionEvent>): String {
    if (events.isEmpty()) return "No framework events yet."
    val recent = events.takeLast(MAX_TIMELINE_EVENTS)
    return recent.joinToString(separator = "\n") { event ->
        val label = eventTypeToString(event.type)
        val body = normalizeEventMessage(event.message) ?: ""
        "$label: $body"
    }
}
/**
 * Normalizes a raw event message for display: null for null/blank input,
 * condensed summaries for bridge request/response payloads, otherwise the
 * trimmed text.
 */
private fun normalizeEventMessage(message: String?): String? {
    val trimmed = message?.trim()
    if (trimmed.isNullOrEmpty()) return null
    return when {
        trimmed.startsWith(BRIDGE_REQUEST_PREFIX) -> summarizeBridgeRequest(trimmed)
        trimmed.startsWith(BRIDGE_RESPONSE_PREFIX) -> summarizeBridgeResponse(trimmed)
        else -> trimmed
    }
}
/**
 * Condenses a bridge request payload to "Bridge request: METHOD (#id)".
 * Unparseable JSON falls back to method "unknown" with no id.
 */
private fun summarizeBridgeRequest(message: String): String {
    val payload = try {
        org.json.JSONObject(message.removePrefix(BRIDGE_REQUEST_PREFIX))
    } catch (_: Throwable) {
        null
    }
    val method = payload?.optString("method")?.ifEmpty { "unknown" } ?: "unknown"
    val requestId = payload?.optString("requestId")?.takeIf(String::isNotBlank)
    val suffix = requestId?.let { " (#$it)" } ?: ""
    return "Bridge request: $method$suffix"
}
/**
 * Condenses a bridge response payload to "Bridge response (#id): HTTP nnn"
 * or "... : ok" / "... : error".
 *
 * Fix: JSONObject.optInt/optBoolean return 0/false for ABSENT keys, so a
 * response whose httpResponse lacked "statusCode" rendered ": HTTP 0", and
 * one lacking "ok" entirely rendered ": error". Both fields are now read only
 * when actually present.
 */
private fun summarizeBridgeResponse(message: String): String {
    val response = runCatching {
        org.json.JSONObject(message.removePrefix(BRIDGE_RESPONSE_PREFIX))
    }.getOrNull()
    val requestId = response?.optString("requestId")?.takeIf(String::isNotBlank)
    val statusCode = response?.optJSONObject("httpResponse")
        ?.takeIf { it.has("statusCode") }
        ?.optInt("statusCode")
    val ok = response?.takeIf { it.has("ok") }?.optBoolean("ok")
    return buildString {
        append("Bridge response")
        requestId?.let { append(" (#$it)") }
        if (statusCode != null) {
            append(": HTTP $statusCode")
        } else if (ok != null) {
            append(": ")
            append(if (ok) "ok" else "error")
        }
    }
}
/** Maps a framework event type constant to its display label; unknown types render as "Event(n)". */
private fun eventTypeToString(type: Int): String = when (type) {
    AgentSessionEvent.TYPE_TRACE -> "Trace"
    AgentSessionEvent.TYPE_QUESTION -> "Question"
    AgentSessionEvent.TYPE_RESULT -> "Result"
    AgentSessionEvent.TYPE_ERROR -> "Error"
    AgentSessionEvent.TYPE_POLICY -> "Policy"
    AgentSessionEvent.TYPE_DETACHED_ACTION -> "DetachedAction"
    AgentSessionEvent.TYPE_ANSWER -> "Answer"
    else -> "Event($type)"
}
/** A direct parent row: AGENT-anchored with neither a parent nor a target package. */
private fun isDirectParentSession(session: AgentSessionDetails): Boolean {
    if (session.anchor != AgentSessionInfo.ANCHOR_AGENT) return false
    return session.parentSessionId == null && session.targetPackage == null
}
/** Framework-object overload of the direct-parent test used on raw [AgentSessionInfo]. */
private fun isDirectParentSession(session: AgentSessionInfo): Boolean {
    if (session.anchor != AgentSessionInfo.ANCHOR_AGENT) return false
    return session.parentSessionId == null && session.targetPackage == null
}
/** True for the three end states: COMPLETED, CANCELLED, FAILED. */
private fun isTerminalState(state: Int): Boolean = when (state) {
    AgentSessionInfo.STATE_COMPLETED,
    AgentSessionInfo.STATE_CANCELLED,
    AgentSessionInfo.STATE_FAILED,
    -> true
    else -> false
}
/** Maps a session state constant to its display label; unknown states render as the raw number. */
private fun stateToString(state: Int): String = when (state) {
    AgentSessionInfo.STATE_CREATED -> "CREATED"
    AgentSessionInfo.STATE_RUNNING -> "RUNNING"
    AgentSessionInfo.STATE_WAITING_FOR_USER -> "WAITING_FOR_USER"
    AgentSessionInfo.STATE_QUEUED -> "QUEUED"
    AgentSessionInfo.STATE_COMPLETED -> "COMPLETED"
    AgentSessionInfo.STATE_CANCELLED -> "CANCELLED"
    AgentSessionInfo.STATE_FAILED -> "FAILED"
    else -> state.toString()
}
// Derives the Android user id from this process's uid; 100000 appears to be
// UserHandle.PER_USER_RANGE (uids per user) — mirrors UserHandle.getUserId().
private fun currentUserId(): Int = Process.myUid() / 100000
}
/**
 * Immutable view of agent framework state for the UI: role holders, all
 * sessions, and the focused session plus its parent and related siblings.
 */
data class AgentSnapshot(
    val available: Boolean, // false when the AgentManager service is missing
    val roleHolders: List<String>, // packages holding the GENIE role
    val selectedGeniePackage: String?, // preferred/first role holder, if any
    val sessions: List<AgentSessionDetails>, // every session for the current user
    val selectedSession: AgentSessionDetails?, // session the UI should focus
    val parentSession: AgentSessionDetails?, // parent of the selection, if any
    val relatedSessions: List<AgentSessionDetails>, // parent + its children, else just the selection
) {
    companion object {
        /** Snapshot returned when the AgentManager service is unavailable. */
        val unavailable = AgentSnapshot(
            available = false,
            roleHolders = emptyList(),
            selectedGeniePackage = null,
            sessions = emptyList(),
            selectedSession = null,
            parentSession = null,
            relatedSessions = emptyList(),
        )
    }
}
/**
 * UI row for one framework session, combining live framework fields with
 * locally persisted policy and lazily loaded event diagnostics.
 */
data class AgentSessionDetails(
    val sessionId: String,
    val parentSessionId: String?, // null for top-level (parent/HOME) sessions
    val targetPackage: String?, // null for direct parent sessions
    val anchor: Int, // AgentSessionInfo.ANCHOR_* constant
    val state: Int, // AgentSessionInfo.STATE_* constant
    val stateLabel: String, // human-readable form of [state]
    val targetPresentation: Int,
    val targetPresentationLabel: String,
    val targetRuntime: Int?,
    val targetRuntimeLabel: String,
    val targetDetached: Boolean,
    val requiredFinalPresentationPolicy: SessionFinalPresentationPolicy?, // locally persisted, may be absent
    // Diagnostics below are null / DIAGNOSTIC_NOT_LOADED until withDiagnostics() is applied.
    val latestQuestion: String?,
    val latestResult: String?,
    val latestError: String?,
    val latestTrace: String?,
    val timeline: String,
) {
    /** Returns a copy with all diagnostic fields replaced from [diagnostics]. */
    fun withDiagnostics(diagnostics: SessionDiagnostics): AgentSessionDetails {
        return copy(
            latestQuestion = diagnostics.latestQuestion,
            latestResult = diagnostics.latestResult,
            latestError = diagnostics.latestError,
            latestTrace = diagnostics.latestTrace,
            timeline = diagnostics.timeline,
        )
    }
}
/** Distilled event history for one session: latest message per event type plus a rendered timeline. */
data class SessionDiagnostics(
    val latestQuestion: String?,
    val latestResult: String?,
    val latestError: String?,
    val latestTrace: String?,
    val timeline: String,
)
/**
 * Outcome of a session start: the parent id, the child ids actually created
 * (a HOME session lists itself; async planning may return none yet), the
 * planned target packages, the Genie package used, and the anchor kind.
 */
data class SessionStartResult(
    val parentSessionId: String,
    val childSessionIds: List<String>,
    val plannedTargets: List<String>,
    val geniePackage: String,
    val anchor: Int, // AgentSessionInfo.ANCHOR_* constant
)
/** A created-but-childless direct parent session awaiting planned children. */
data class PendingDirectSessionStart(
    val parentSessionId: String,
    val geniePackage: String, // GENIE role holder resolved at creation time
)
/** Result of a bulk cancel: ids cancelled, and per failed id a short failure reason. */
data class CancelActiveSessionsResult(
    val cancelledSessionIds: List<String>,
    val failedSessionIds: Map<String, String>, // sessionId -> error message or exception class name
)

View File

@@ -1,173 +0,0 @@
package com.openai.codex.agent
import android.app.agent.AgentSessionInfo
import android.content.Context
import com.openai.codex.bridge.SessionExecutionSettings
import kotlin.concurrent.thread
import org.json.JSONArray
import org.json.JSONObject
/**
 * Parameters for launching a session. A null [targetPackage] triggers
 * planner-driven target selection; [existingSessionId] reuses a pre-created
 * HOME session (requires a target package).
 */
data class LaunchSessionRequest(
    val prompt: String,
    val targetPackage: String?, // null -> planner chooses the target(s)
    val model: String?, // optional model override, blank treated as unset
    val reasoningEffort: String?, // optional effort override, blank treated as unset
    val existingSessionId: String? = null,
)
/**
 * Entry points for launching and continuing agent sessions, bridging
 * [LaunchSessionRequest]s to [AgentSessionController] operations.
 */
object AgentSessionLauncher {
    /**
     * Starts a session, moving the (potentially slow) planning phase onto a
     * background thread when no explicit target or existing session is given.
     *
     * In the async path the returned [SessionStartResult] has empty
     * childSessionIds/plannedTargets: children are attached later by the
     * planner thread. On planner or start failure the pending parent is
     * marked FAILED unless it already reached a terminal state.
     *
     * NOTE(review): [requestUserInputHandler] is forwarded only on the
     * synchronous path; the async planner is invoked with null — confirm
     * this is intentional.
     */
    fun startSessionAsync(
        context: Context,
        request: LaunchSessionRequest,
        sessionController: AgentSessionController,
        requestUserInputHandler: ((JSONArray) -> JSONObject)? = null,
    ): SessionStartResult {
        val executionSettings = SessionExecutionSettings(
            model = request.model?.trim()?.ifEmpty { null },
            reasoningEffort = request.reasoningEffort?.trim()?.ifEmpty { null },
        )
        val targetPackage = request.targetPackage?.trim()?.ifEmpty { null }
        val existingSessionId = request.existingSessionId?.trim()?.ifEmpty { null }
        // Explicit target or existing session: no planning needed, run inline.
        if (targetPackage != null || existingSessionId != null) {
            return startSession(
                context = context,
                request = request,
                sessionController = sessionController,
                requestUserInputHandler = requestUserInputHandler,
            )
        }
        // No target: create the parent now so the caller gets an id, then
        // plan + start children on a background thread.
        val pendingSession = sessionController.createPendingDirectSession(
            objective = request.prompt,
            executionSettings = executionSettings,
        )
        // Capture the application context; the caller's context must not
        // outlive its lifecycle on the worker thread.
        val applicationContext = context.applicationContext
        thread(name = "CodexAgentPlanner-${pendingSession.parentSessionId}") {
            runCatching {
                AgentTaskPlanner.planSession(
                    context = applicationContext,
                    userObjective = request.prompt,
                    executionSettings = executionSettings,
                    sessionController = sessionController,
                    requestUserInputHandler = null,
                    frameworkSessionId = pendingSession.parentSessionId,
                )
            }.onFailure { err ->
                // Skip failure reporting if the user already cancelled.
                if (!sessionController.isTerminalSession(pendingSession.parentSessionId)) {
                    sessionController.failDirectSession(
                        pendingSession.parentSessionId,
                        "Planning failed: ${err.message ?: err::class.java.simpleName}",
                    )
                }
            }.onSuccess { plannedRequest ->
                if (!sessionController.isTerminalSession(pendingSession.parentSessionId)) {
                    runCatching {
                        sessionController.startDirectSessionChildren(
                            parentSessionId = pendingSession.parentSessionId,
                            geniePackage = pendingSession.geniePackage,
                            plan = plannedRequest.plan,
                            allowDetachedMode = plannedRequest.allowDetachedMode,
                            executionSettings = executionSettings,
                        )
                    }.onFailure { err ->
                        // Re-check: the child-start path may have already
                        // cancelled/failed the parent.
                        if (!sessionController.isTerminalSession(pendingSession.parentSessionId)) {
                            sessionController.failDirectSession(
                                pendingSession.parentSessionId,
                                "Failed to start planned child session: ${err.message ?: err::class.java.simpleName}",
                            )
                        }
                    }
                }
            }
        }
        // Children are not known yet; the planner thread will attach them.
        return SessionStartResult(
            parentSessionId = pendingSession.parentSessionId,
            childSessionIds = emptyList(),
            plannedTargets = emptyList(),
            geniePackage = pendingSession.geniePackage,
            anchor = AgentSessionInfo.ANCHOR_AGENT,
        )
    }

    /**
     * Synchronous start. Routing:
     *  - no target package -> planner-driven direct session (blocks on planning);
     *  - target + existing session id -> reuse that HOME session;
     *  - target only -> create a fresh HOME session.
     *
     * @throws IllegalStateException when an existing session id is supplied
     *   without a target package.
     */
    fun startSession(
        context: Context,
        request: LaunchSessionRequest,
        sessionController: AgentSessionController,
        requestUserInputHandler: ((JSONArray) -> JSONObject)? = null,
    ): SessionStartResult {
        val executionSettings = SessionExecutionSettings(
            model = request.model?.trim()?.ifEmpty { null },
            reasoningEffort = request.reasoningEffort?.trim()?.ifEmpty { null },
        )
        val targetPackage = request.targetPackage?.trim()?.ifEmpty { null }
        val existingSessionId = request.existingSessionId?.trim()?.ifEmpty { null }
        return if (targetPackage == null) {
            check(existingSessionId == null) {
                "Existing HOME sessions require a target package"
            }
            AgentTaskPlanner.startSession(
                context = context,
                userObjective = request.prompt,
                targetPackageOverride = null,
                allowDetachedMode = true,
                executionSettings = executionSettings,
                sessionController = sessionController,
                requestUserInputHandler = requestUserInputHandler,
            )
        } else {
            if (existingSessionId != null) {
                sessionController.startExistingHomeSession(
                    sessionId = existingSessionId,
                    targetPackage = targetPackage,
                    prompt = request.prompt,
                    allowDetachedMode = true,
                    finalPresentationPolicy = SessionFinalPresentationPolicy.AGENT_CHOICE,
                    executionSettings = executionSettings,
                )
            } else {
                sessionController.startHomeSession(
                    targetPackage = targetPackage,
                    prompt = request.prompt,
                    allowDetachedMode = true,
                    finalPresentationPolicy = SessionFinalPresentationPolicy.AGENT_CHOICE,
                    executionSettings = executionSettings,
                )
            }
        }
    }

    /**
     * Continues an existing top-level direct session in place using
     * [selectedSession]'s target package, inheriting the top-level session's
     * execution settings and presentation policy (falling back to
     * AGENT_CHOICE).
     *
     * @throws UnsupportedOperationException for HOME-anchored sessions.
     * @throws IllegalStateException when the selected session has no target package.
     */
    fun continueSessionInPlace(
        sourceTopLevelSession: AgentSessionDetails,
        selectedSession: AgentSessionDetails,
        prompt: String,
        sessionController: AgentSessionController,
    ): SessionStartResult {
        val executionSettings = sessionController.executionSettingsForSession(sourceTopLevelSession.sessionId)
        return when (sourceTopLevelSession.anchor) {
            AgentSessionInfo.ANCHOR_HOME -> {
                throw UnsupportedOperationException(
                    "In-place continuation is not supported for app-scoped HOME sessions on the current framework",
                )
            }
            else -> {
                val targetPackage = checkNotNull(selectedSession.targetPackage) {
                    "Select a target child session to continue"
                }
                sessionController.continueDirectSessionInPlace(
                    parentSessionId = sourceTopLevelSession.sessionId,
                    target = AgentDelegationTarget(
                        packageName = targetPackage,
                        objective = SessionContinuationPromptBuilder.build(
                            sourceTopLevelSession = sourceTopLevelSession,
                            selectedSession = selectedSession,
                            prompt = prompt,
                        ),
                        finalPresentationPolicy = selectedSession.requiredFinalPresentationPolicy
                            ?: SessionFinalPresentationPolicy.AGENT_CHOICE,
                    ),
                    executionSettings = executionSettings,
                )
            }
        }
    }
}

View File

@@ -1,298 +0,0 @@
package com.openai.codex.agent
import android.content.Context
import android.util.Log
import com.openai.codex.bridge.SessionExecutionSettings
import java.io.IOException
import org.json.JSONArray
import org.json.JSONObject
import org.json.JSONTokener
/** One planned delegation: the target app, the objective written for the child Genie, and its required final presentation. */
data class AgentDelegationTarget(
    val packageName: String, // installed package that will receive the child Genie session
    val objective: String, // delegated objective phrased for the child Genie, not the user
    val finalPresentationPolicy: SessionFinalPresentationPolicy,
)
/**
 * Output of task planning: the original user objective plus the chosen
 * delegation targets. [targets] is expected to be non-empty (enforced by the
 * planner's output schema); [primaryTargetPackage] throws on an empty list.
 */
data class AgentDelegationPlan(
    val originalObjective: String,
    val targets: List<AgentDelegationTarget>, // planner contract: non-empty
    val rationale: String?, // planner's short reasoning, if provided
    val usedOverride: Boolean, // true when an explicit target override bypassed planning
) {
    // Convenience accessor; throws NoSuchElementException when targets is empty.
    val primaryTargetPackage: String
        get() = targets.first().packageName
}
object AgentTaskPlanner {
private const val TAG = "AgentTaskPlanner"
private const val PLANNER_ATTEMPTS = 2
private const val PLANNER_REQUEST_TIMEOUT_MS = 90_000L
private val PLANNER_INSTRUCTIONS =
"""
You are Codex acting as the Android Agent orchestrator.
The user interacts only with the Agent. Decide which installed Android packages should receive delegated Genie sessions.
Use the standard Android shell tools already available in this runtime, such as `cmd package`, `pm`, and `am`, to inspect installed packages and resolve the correct targets.
Return exactly one JSON object and nothing else. Do not wrap it in markdown fences.
JSON schema:
{
"targets": [
{
"packageName": "installed.package",
"objective": "free-form delegated objective for the child Genie",
"finalPresentationPolicy": "ATTACHED | DETACHED_HIDDEN | DETACHED_SHOWN | AGENT_CHOICE"
}
],
"reason": "short rationale",
"allowDetachedMode": true
}
Rules:
- Choose the fewest packages needed to complete the request.
- `targets` must be non-empty.
- Each delegated `objective` should be written for the child Genie, not the user.
- Each target must include `finalPresentationPolicy`.
- Use `ATTACHED` when the user wants the target left on the main screen or explicitly visible to them.
- Use `DETACHED_SHOWN` when the target should remain visible but stay detached.
- Use `DETACHED_HIDDEN` when the target should complete in the background without remaining visible.
- Use `AGENT_CHOICE` only when the final presentation state does not matter.
- Stop after at most 6 shell commands.
- Start from the installed package list, then narrow to the most likely candidates.
- Prefer direct package-manager commands over broad shell pipelines.
- Verify each chosen package by inspecting focused query-activities or resolve-activity output before returning it.
- Only choose packages that directly own the requested app behavior. Never choose helper packages such as `com.android.shell`, `com.android.systemui`, or the Codex Agent/Genie packages unless the user explicitly asked for them.
- If the user objective already names a specific installed package, use it directly after verification.
- `pm list packages PACKAGE_NAME` alone is not sufficient verification.
- Prefer focused verification commands such as `pm list packages clock`, `cmd package query-activities --brief -p PACKAGE -a android.intent.action.MAIN`, and `cmd package resolve-activity --brief -a RELEVANT_ACTION PACKAGE`.
- Do not enumerate every launcher activity on the device. Query specific candidate packages instead.
""".trimIndent()
// Structured-output JSON schema handed to the planner runtime. Mirrors the JSON
// shape spelled out in PLANNER_INSTRUCTIONS: a non-empty `targets` array (each
// entry requiring packageName, objective and a finalPresentationPolicy drawn
// from the SessionFinalPresentationPolicy wire values), plus `reason` and
// `allowDetachedMode`. All three top-level keys are required and extra
// properties are rejected at every level (`additionalProperties: false`).
private val PLANNER_OUTPUT_SCHEMA =
    JSONObject()
        .put("type", "object")
        .put(
            "properties",
            JSONObject()
                .put(
                    "targets",
                    JSONObject()
                        .put("type", "array")
                        // At least one delegation target must be returned.
                        .put("minItems", 1)
                        .put(
                            "items",
                            JSONObject()
                                .put("type", "object")
                                .put(
                                    "properties",
                                    JSONObject()
                                        .put("packageName", JSONObject().put("type", "string"))
                                        .put("objective", JSONObject().put("type", "string"))
                                        .put(
                                            "finalPresentationPolicy",
                                            JSONObject()
                                                .put("type", "string")
                                                .put(
                                                    "enum",
                                                    JSONArray()
                                                        .put(SessionFinalPresentationPolicy.ATTACHED.wireValue)
                                                        .put(SessionFinalPresentationPolicy.DETACHED_HIDDEN.wireValue)
                                                        .put(SessionFinalPresentationPolicy.DETACHED_SHOWN.wireValue)
                                                        .put(SessionFinalPresentationPolicy.AGENT_CHOICE.wireValue),
                                                ),
                                        ),
                                )
                                .put(
                                    "required",
                                    JSONArray()
                                        .put("packageName")
                                        .put("objective")
                                        .put("finalPresentationPolicy"),
                                )
                                .put("additionalProperties", false),
                        ),
                )
                .put("reason", JSONObject().put("type", "string"))
                .put("allowDetachedMode", JSONObject().put("type", "boolean")),
        )
        .put("required", JSONArray().put("targets").put("reason").put("allowDetachedMode"))
        .put("additionalProperties", false)
/**
 * Starts an Agent session for [userObjective].
 *
 * When [targetPackageOverride] is supplied the planner is bypassed and a direct
 * session is started against that package. Otherwise a pending parent session is
 * created, the planner is consulted, and the planned child sessions are started;
 * on planner failure the pending parent session is cancelled (best effort) and
 * the failure is rethrown.
 */
fun startSession(
    context: Context,
    userObjective: String,
    targetPackageOverride: String?,
    allowDetachedMode: Boolean,
    finalPresentationPolicyOverride: SessionFinalPresentationPolicy? = null,
    executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
    sessionController: AgentSessionController,
    requestUserInputHandler: ((JSONArray) -> JSONObject)? = null,
): SessionStartResult {
    // Explicit override: skip planning entirely and delegate straight to the target.
    if (!targetPackageOverride.isNullOrBlank()) {
        Log.i(TAG, "Using explicit target override $targetPackageOverride")
        val overrideTarget = AgentDelegationTarget(
            packageName = targetPackageOverride,
            objective = userObjective,
            finalPresentationPolicy =
                finalPresentationPolicyOverride ?: SessionFinalPresentationPolicy.AGENT_CHOICE,
        )
        return sessionController.startDirectSession(
            plan = AgentDelegationPlan(
                originalObjective = userObjective,
                targets = listOf(overrideTarget),
                rationale = "Using explicit target package override.",
                usedOverride = true,
            ),
            allowDetachedMode = allowDetachedMode,
        )
    }
    val pendingSession = sessionController.createPendingDirectSession(
        objective = userObjective,
        executionSettings = executionSettings,
    )
    // Best-effort cleanup of the placeholder parent session when planning fails.
    fun cancelPendingSession() {
        runCatching { sessionController.cancelSession(pendingSession.parentSessionId) }
    }
    val startResult = try {
        val plannedRequest = planSession(
            context = context,
            userObjective = userObjective,
            executionSettings = executionSettings,
            sessionController = sessionController,
            requestUserInputHandler = requestUserInputHandler,
            frameworkSessionId = pendingSession.parentSessionId,
        )
        sessionController.startDirectSessionChildren(
            parentSessionId = pendingSession.parentSessionId,
            geniePackage = pendingSession.geniePackage,
            plan = plannedRequest.plan,
            // Detached mode must be allowed by both the caller and the planner.
            allowDetachedMode = allowDetachedMode && plannedRequest.allowDetachedMode,
            executionSettings = executionSettings,
            cancelParentOnFailure = true,
        )
    } catch (failure: IOException) {
        cancelPendingSession()
        throw failure
    } catch (failure: RuntimeException) {
        cancelPendingSession()
        throw failure
    }
    Log.i(TAG, "Planner sessionStartResult=$startResult")
    return startResult
}
/**
 * Asks the planner runtime to produce a delegation plan for [userObjective].
 *
 * Retries up to PLANNER_ATTEMPTS times when the planner output fails host
 * validation (surfaced as [IOException]); each retry feeds the previous invalid
 * response and rejection reason back into the prompt. Non-IOException failures
 * propagate immediately.
 *
 * @throws IOException when no attempt yields a valid plan.
 */
fun planSession(
    context: Context,
    userObjective: String,
    executionSettings: SessionExecutionSettings = SessionExecutionSettings.default,
    sessionController: AgentSessionController,
    requestUserInputHandler: ((JSONArray) -> JSONObject)? = null,
    frameworkSessionId: String? = null,
): AgentFrameworkToolBridge.StartDirectSessionRequest {
    Log.i(TAG, "Planning Agent session for objective=${userObjective.take(160)}")
    // Helper/system packages are never valid delegation targets.
    val blockedPackages = setOf(
        "com.android.shell",
        "com.android.systemui",
        "com.openai.codex.agent",
        "com.openai.codex.genie",
    )
    val isEligibleTargetPackage = { packageName: String ->
        sessionController.canStartSessionForTarget(packageName) &&
            packageName !in blockedPackages
    }
    var previousResponse: String? = null
    var lastError: IOException? = null
    repeat(PLANNER_ATTEMPTS) { attempt ->
        val response = AgentPlannerRuntimeManager.requestText(
            context = context,
            instructions = PLANNER_INSTRUCTIONS,
            prompt = buildPlannerPrompt(
                userObjective = userObjective,
                previousPlannerResponse = previousResponse,
                previousPlannerError = lastError?.message,
            ),
            outputSchema = PLANNER_OUTPUT_SCHEMA,
            requestUserInputHandler = requestUserInputHandler,
            executionSettings = executionSettings,
            requestTimeoutMs = PLANNER_REQUEST_TIMEOUT_MS,
            frameworkSessionId = frameworkSessionId,
        )
        Log.i(TAG, "Planner response=${response.take(400)}")
        previousResponse = response
        try {
            // Non-local return: a successfully validated plan ends the retry loop.
            return parsePlannerResponse(
                responseText = response,
                userObjective = userObjective,
                isEligibleTargetPackage = isEligibleTargetPackage,
            )
        } catch (err: IOException) {
            // Out of retries: surface the rejection to the caller.
            if (attempt >= PLANNER_ATTEMPTS - 1) throw err
            Log.w(TAG, "Planner response rejected: ${err.message}")
            lastError = err
        }
    }
    throw lastError ?: IOException("Planner did not return a valid session plan")
}
/**
 * Builds the planner prompt: the user objective, optionally followed by the
 * previous rejection reason and the previous invalid planner response so a
 * retry can correct itself. Sections are separated by blank lines.
 */
private fun buildPlannerPrompt(
    userObjective: String,
    previousPlannerResponse: String?,
    previousPlannerError: String?,
): String {
    val sections = mutableListOf("User objective:\n$userObjective")
    if (!previousPlannerError.isNullOrBlank()) {
        sections += "Previous candidate plan was rejected by host validation:\n" +
            "$previousPlannerError\n" +
            "Choose a different installed target package and verify it with focused package commands."
    }
    if (!previousPlannerResponse.isNullOrBlank()) {
        sections += "Previous invalid planner response:\n$previousPlannerResponse"
    }
    return sections.joinToString("\n\n").trim()
}
/**
 * Extracts the JSON object from a raw planner response and validates it into a
 * StartDirectSessionRequest via the framework tool bridge.
 *
 * @throws java.io.IOException if no JSON object can be extracted or validation fails.
 */
internal fun parsePlannerResponse(
    responseText: String,
    userObjective: String,
    isEligibleTargetPackage: (String) -> Boolean,
): AgentFrameworkToolBridge.StartDirectSessionRequest =
    AgentFrameworkToolBridge.parseStartDirectSessionArguments(
        arguments = extractPlannerJson(responseText),
        userObjective = userObjective,
        isEligibleTargetPackage = isEligibleTargetPackage,
    )
/**
 * Best-effort extraction of a JSON object from the planner's free-text reply.
 * Tries, in order: the trimmed text as-is, the text with Markdown code fences
 * stripped, and the substring between the first '{' and the last '}'.
 *
 * @throws IOException when none of the candidates parse as a JSON object.
 */
private fun extractPlannerJson(responseText: String): JSONObject {
    val trimmed = responseText.trim()
    val candidates = buildList {
        add(trimmed)
        add(
            trimmed
                .removePrefix("```json")
                .removePrefix("```")
                .removeSuffix("```")
                .trim(),
        )
        val firstBrace = trimmed.indexOf('{')
        val lastBrace = trimmed.lastIndexOf('}')
        if (firstBrace >= 0 && lastBrace > firstBrace) {
            add(trimmed.substring(firstBrace, lastBrace + 1))
        }
    }
    for (candidate in candidates) {
        parseJsonObject(candidate)?.let { return it }
    }
    throw IOException("Planner did not return a valid JSON object")
}
/**
 * Parses [text] as JSON, returning the result only when it is a JSON object;
 * returns null on any parse failure or non-object value.
 */
private fun parseJsonObject(text: String): JSONObject? =
    runCatching { JSONTokener(text).nextValue() }.getOrNull() as? JSONObject
}

View File

@@ -1,116 +0,0 @@
package com.openai.codex.agent
import android.app.Activity
import android.app.AlertDialog
import android.widget.EditText
import java.io.IOException
import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicReference
import org.json.JSONArray
import org.json.JSONObject
/**
 * Collects free-form user answers to Agent/Genie questions by showing a modal
 * dialog and blocking the calling thread until the user submits or cancels.
 */
object AgentUserInputPrompter {
    /**
     * Renders [questions] into a single dialog with one multi-line text field,
     * then blocks until the user responds.
     *
     * Returns a JSON object of the form {"answers": {questionId: {"answers": [text]}}}.
     *
     * @throws IOException if the user taps Cancel.
     */
    fun promptForAnswers(
        activity: Activity,
        questions: JSONArray,
    ): JSONObject {
        // One-shot gate released when the dialog is dismissed either way;
        // results cross the UI-thread boundary via the AtomicReferences.
        val latch = CountDownLatch(1)
        val answerText = AtomicReference("")
        val error = AtomicReference<IOException?>(null)
        activity.runOnUiThread {
            val input = EditText(activity).apply {
                minLines = 4
                maxLines = 8
                setSingleLine(false)
                setText("")
                hint = "Type your answer here"
            }
            AlertDialog.Builder(activity)
                .setTitle("Codex needs input")
                .setMessage(renderQuestions(questions))
                .setView(input)
                // Force an explicit Submit/Cancel choice; tapping outside does nothing.
                .setCancelable(false)
                .setPositiveButton("Submit") { dialog, _ ->
                    answerText.set(input.text?.toString().orEmpty())
                    dialog.dismiss()
                    latch.countDown()
                }
                .setNegativeButton("Cancel") { dialog, _ ->
                    error.set(IOException("User cancelled Agent input"))
                    dialog.dismiss()
                    latch.countDown()
                }
                .show()
        }
        // NOTE(review): blocks indefinitely with no timeout; assumes this is never
        // called on the UI thread (that would deadlock the dialog) — confirm callers.
        latch.await()
        error.get()?.let { throw it }
        return JSONObject().put("answers", buildQuestionAnswers(questions, answerText.get()))
    }
    /**
     * Formats [questions] as the dialog message: each question (optionally
     * prefixed by its header and followed by its options) separated by blank
     * lines, plus answering instructions when there is more than one question.
     */
    internal fun renderQuestions(questions: JSONArray): String {
        if (questions.length() == 0) {
            return "Codex requested input but did not provide a question."
        }
        val rendered = buildString {
            for (index in 0 until questions.length()) {
                // Non-object entries are silently skipped.
                val question = questions.optJSONObject(index) ?: continue
                if (length > 0) {
                    append("\n\n")
                }
                val header = question.optString("header").takeIf(String::isNotBlank)
                if (header != null) {
                    append(header)
                    append(":\n")
                }
                append(question.optString("question"))
                val options = question.optJSONArray("options")
                if (options != null && options.length() > 0) {
                    append("\nOptions:")
                    for (optionIndex in 0 until options.length()) {
                        val option = options.optJSONObject(optionIndex) ?: continue
                        append("\n- ")
                        append(option.optString("label"))
                        val description = option.optString("description")
                        if (description.isNotBlank()) {
                            append(": ")
                            append(description)
                        }
                    }
                }
            }
        }
        return if (questions.length() == 1) {
            rendered
        } else {
            "$rendered\n\nReply with one answer per question, separated by a blank line."
        }
    }
    /**
     * Maps the single free-form [answer] back onto [questions]: the text is
     * split on blank lines and assigned positionally by question index.
     * Questions without an "id" are skipped. If the user did not separate
     * answers, the first question receives the full trimmed text and later
     * questions receive empty strings.
     */
    internal fun buildQuestionAnswers(
        questions: JSONArray,
        answer: String,
    ): JSONObject {
        val splitAnswers = answer
            .split(Regex("\\n\\s*\\n"))
            .map(String::trim)
            .filter(String::isNotEmpty)
        val answersJson = JSONObject()
        for (index in 0 until questions.length()) {
            val question = questions.optJSONObject(index) ?: continue
            val questionId = question.optString("id")
            if (questionId.isBlank()) {
                continue
            }
            val responseText = splitAnswers.getOrNull(index)
                ?: if (index == 0) answer.trim() else ""
            answersJson.put(
                questionId,
                JSONObject().put(
                    "answers",
                    JSONArray().put(responseText),
                ),
            )
        }
        return answersJson
    }
}

View File

@@ -1,19 +0,0 @@
package com.openai.codex.agent
import android.content.Context
/**
 * Resolves a human-readable label for a package via PackageManager.
 */
object AppLabelResolver {
    /**
     * Returns the application label for [packageName].
     *
     * Falls back to "Agent" when [packageName] is null/blank, and to the
     * package name itself when the label is blank or the lookup fails
     * (e.g. the package is not installed).
     */
    fun loadAppLabel(
        context: Context,
        packageName: String?,
    ): String {
        if (packageName.isNullOrBlank()) {
            return "Agent"
        }
        val packageManager = context.packageManager
        return runCatching {
            val appInfo = packageManager.getApplicationInfo(packageName, 0)
            val label = packageManager.getApplicationLabel(appInfo)?.toString().orEmpty()
            label.ifBlank { packageName }
        }.getOrDefault(packageName)
    }
}

View File

@@ -1,473 +0,0 @@
package com.openai.codex.agent
import android.app.agent.AgentManager
import android.app.agent.AgentService
import android.app.agent.AgentSessionEvent
import android.app.agent.AgentSessionInfo
import android.os.Process
import android.util.Log
import java.io.IOException
import kotlin.concurrent.thread
import org.json.JSONObject
/**
 * System AgentService implementation that supervises Codex Agent sessions.
 *
 * Responsibilities visible in this class:
 *  - rolls child-session results/errors up into their direct parent session;
 *  - starts/stops the per-session bridge server for active targeted sessions;
 *  - answers in-band "bridge" questions (prefixed protocol messages) itself;
 *  - attempts to auto-answer Genie questions via the Codex runtime, escalating
 *    to a user-facing notification when the runtime asks for user input.
 *
 * Concurrency: session callbacks fan work out to short-lived named threads;
 * duplicate work is suppressed with the concurrent key sets in the companion.
 */
class CodexAgentService : AgentService() {
    companion object {
        private const val TAG = "CodexAgentService"
        // In-band bridge protocol: requests/responses travel through the normal
        // question/answer channel, tagged by these prefixes (note trailing space).
        private const val BRIDGE_REQUEST_PREFIX = "__codex_bridge__ "
        private const val BRIDGE_RESPONSE_PREFIX = "__codex_bridge_result__ "
        private const val BRIDGE_METHOD_GET_RUNTIME_STATUS = "getRuntimeStatus"
        // Auto-answer replies starting with this prefix mean "ask the user instead".
        private const val AUTO_ANSWER_ESCALATE_PREFIX = "ESCALATE:"
        private const val AUTO_ANSWER_INSTRUCTIONS =
            "You are Codex acting as the Android Agent supervising a Genie execution. If you can answer the current Genie question from the available session context, call the framework session tool `android.framework.sessions.answer_question` exactly once with a short free-form answer. You may inspect current framework state with `android.framework.sessions.list`. If user input is required, do not call any framework tool. Instead reply with `ESCALATE: ` followed by the exact question the Agent should ask the user."
        private const val MAX_AUTO_ANSWER_CONTEXT_CHARS = 800
        // Dedup/ in-flight guards keyed by "sessionId:..." strings; shared across
        // service instances for the process lifetime.
        private val handledGenieQuestions = java.util.concurrent.ConcurrentHashMap.newKeySet<String>()
        private val pendingGenieQuestions = java.util.concurrent.ConcurrentHashMap.newKeySet<String>()
        private val pendingQuestionLoads = java.util.concurrent.ConcurrentHashMap.newKeySet<String>()
        private val handledBridgeRequests = java.util.concurrent.ConcurrentHashMap.newKeySet<String>()
        private val pendingParentRollups = java.util.concurrent.ConcurrentHashMap.newKeySet<String>()
    }
    // Outcome of one auto-answer attempt: either the question was answered via
    // the framework, or it must be escalated to the user with the given text.
    private sealed class AutoAnswerResult {
        data object Answered : AutoAnswerResult()
        data class Escalate(
            val question: String,
        ) : AutoAnswerResult()
    }
    private val agentManager by lazy { getSystemService(AgentManager::class.java) }
    private val sessionController by lazy { AgentSessionController(this) }
    private val presentationPolicyStore by lazy { SessionPresentationPolicyStore(this) }
    override fun onCreate() {
        super.onCreate()
    }
    /**
     * Reacts to every session state change: triggers parent roll-up, manages the
     * bridge server lifecycle, and — for sessions waiting on user input — loads
     * the pending question on a background thread (at most one load per session).
     */
    override fun onSessionChanged(session: AgentSessionInfo) {
        Log.i(TAG, "onSessionChanged $session")
        maybeRollUpParentSession(session)
        agentManager?.let { manager ->
            if (shouldServeSessionBridge(session)) {
                AgentSessionBridgeServer.ensureStarted(this, manager, session.sessionId)
            } else if (isTerminalSessionState(session.state)) {
                AgentSessionBridgeServer.closeSession(session.sessionId)
            }
        }
        if (session.state != AgentSessionInfo.STATE_WAITING_FOR_USER) {
            // No longer waiting: clear any stale question notification.
            AgentQuestionNotifier.cancel(this, session.sessionId)
            return
        }
        // Guard: only one question-load thread per session at a time.
        if (!pendingQuestionLoads.add(session.sessionId)) {
            return
        }
        thread(name = "CodexAgentQuestionLoad-${session.sessionId}") {
            try {
                handleWaitingSession(session)
            } finally {
                pendingQuestionLoads.remove(session.sessionId)
            }
        }
    }
    /** Tears down all per-session state when the framework removes a session. */
    override fun onSessionRemoved(sessionId: String) {
        Log.i(TAG, "onSessionRemoved sessionId=$sessionId")
        AgentSessionBridgeServer.closeSession(sessionId)
        AgentQuestionNotifier.cancel(this, sessionId)
        presentationPolicyStore.removePolicy(sessionId)
        handledGenieQuestions.removeIf { it.startsWith("$sessionId:") }
        handledBridgeRequests.removeIf { it.startsWith("$sessionId:") }
        pendingGenieQuestions.removeIf { it.startsWith("$sessionId:") }
    }
    /**
     * Schedules a roll-up for the parent of [session] (or for [session] itself
     * when it is a direct parent), at most one roll-up thread per parent at a time.
     */
    private fun maybeRollUpParentSession(session: AgentSessionInfo) {
        val parentSessionId = when {
            !session.parentSessionId.isNullOrBlank() -> session.parentSessionId
            isDirectParentSession(session) -> session.sessionId
            else -> null
        } ?: return
        if (!pendingParentRollups.add(parentSessionId)) {
            return
        }
        thread(name = "CodexAgentParentRollup-$parentSessionId") {
            try {
                runCatching {
                    rollUpParentSession(parentSessionId)
                }.onFailure { err ->
                    Log.w(TAG, "Parent session roll-up failed for $parentSessionId", err)
                }
            } finally {
                pendingParentRollups.remove(parentSessionId)
            }
        }
    }
    /**
     * Aggregates all child sessions of [parentSessionId] into a roll-up and
     * applies it: requests attaches demanded by presentation policy, advances
     * the parent state, and publishes the aggregated result/error — each step
     * best-effort and deduplicated against what the parent already shows.
     */
    private fun rollUpParentSession(parentSessionId: String) {
        val manager = agentManager ?: return
        val sessions = manager.getSessions(currentUserId())
        val parentSession = sessions.firstOrNull { it.sessionId == parentSessionId } ?: return
        if (!isDirectParentSession(parentSession)) {
            return
        }
        val childSessions = sessions.filter { it.parentSessionId == parentSessionId }
        if (childSessions.isEmpty()) {
            return
        }
        val rollup = AgentParentSessionAggregator.rollup(
            childSessions.map { childSession ->
                val events = manager.getSessionEvents(childSession.sessionId)
                ParentSessionChildSummary(
                    sessionId = childSession.sessionId,
                    targetPackage = childSession.targetPackage,
                    state = childSession.state,
                    targetPresentation = childSession.targetPresentation,
                    requiredFinalPresentationPolicy = presentationPolicyStore.getPolicy(childSession.sessionId),
                    latestResult = findLastEventMessage(events, AgentSessionEvent.TYPE_RESULT),
                    latestError = findLastEventMessage(events, AgentSessionEvent.TYPE_ERROR),
                )
            },
        )
        rollup.sessionsToAttach.forEach { childSessionId ->
            runCatching {
                manager.attachTarget(childSessionId)
                manager.publishTrace(
                    parentSessionId,
                    "Requested attach for $childSessionId to satisfy the required final presentation policy.",
                )
            }.onFailure { err ->
                Log.w(TAG, "Failed to attach target for $childSessionId", err)
            }
        }
        if (shouldUpdateParentSessionState(parentSession.state, rollup.state)) {
            runCatching {
                manager.updateSessionState(parentSessionId, rollup.state)
            }.onFailure { err ->
                Log.w(TAG, "Failed to update parent session state for $parentSessionId", err)
            }
        }
        // Only fetch parent events when there is something to potentially publish.
        val parentEvents = if (rollup.resultMessage != null || rollup.errorMessage != null) {
            manager.getSessionEvents(parentSessionId)
        } else {
            emptyList()
        }
        if (rollup.resultMessage != null && findLastEventMessage(parentEvents, AgentSessionEvent.TYPE_RESULT) != rollup.resultMessage) {
            runCatching {
                manager.publishResult(parentSessionId, rollup.resultMessage)
            }.onFailure { err ->
                Log.w(TAG, "Failed to publish parent result for $parentSessionId", err)
            }
        }
        if (rollup.errorMessage != null && findLastEventMessage(parentEvents, AgentSessionEvent.TYPE_ERROR) != rollup.errorMessage) {
            runCatching {
                manager.publishError(parentSessionId, rollup.errorMessage)
            }.onFailure { err ->
                Log.w(TAG, "Failed to publish parent error for $parentSessionId", err)
            }
        }
    }
    // A bridge server is served only for non-terminal sessions that have a target package.
    private fun shouldServeSessionBridge(session: AgentSessionInfo): Boolean {
        if (session.targetPackage.isNullOrBlank()) {
            return false
        }
        return !isTerminalSessionState(session.state)
    }
    /**
     * Decides whether a roll-up state may overwrite the parent's current state.
     * Never overwrites terminal states, and never moves an active parent
     * (RUNNING/WAITING_FOR_USER) backwards to CREATED/QUEUED.
     */
    private fun shouldUpdateParentSessionState(
        currentState: Int,
        proposedState: Int,
    ): Boolean {
        if (currentState == proposedState || isTerminalSessionState(currentState)) {
            return false
        }
        if (
            (currentState == AgentSessionInfo.STATE_RUNNING || currentState == AgentSessionInfo.STATE_WAITING_FOR_USER) &&
            (proposedState == AgentSessionInfo.STATE_CREATED || proposedState == AgentSessionInfo.STATE_QUEUED)
        ) {
            return false
        }
        return true
    }
    private fun isTerminalSessionState(state: Int): Boolean {
        return when (state) {
            AgentSessionInfo.STATE_COMPLETED,
            AgentSessionInfo.STATE_CANCELLED,
            AgentSessionInfo.STATE_FAILED,
            -> true
            else -> false
        }
    }
    /** Loads the latest question for a waiting session, notifies, and tries to auto-answer it. */
    private fun handleWaitingSession(session: AgentSessionInfo) {
        val manager = agentManager ?: return
        val events = manager.getSessionEvents(session.sessionId)
        val question = findLatestQuestion(events) ?: return
        updateQuestionNotification(session, question)
        maybeAutoAnswerGenieQuestion(session, question, events)
    }
    /**
     * Attempts to answer [question] without user involvement on a background
     * thread, at most once per (session, question) key. Bridge-protocol
     * questions are answered directly; others go through the Codex runtime,
     * which may answer, escalate (re-notify with a refined question), or fail
     * (fall back to notifying the user with the original question).
     */
    private fun maybeAutoAnswerGenieQuestion(
        session: AgentSessionInfo,
        question: String,
        events: List<AgentSessionEvent>,
    ) {
        val questionKey = genieQuestionKey(session.sessionId, question)
        // Skip if already answered or an attempt for this key is in flight.
        if (handledGenieQuestions.contains(questionKey) || !pendingGenieQuestions.add(questionKey)) {
            return
        }
        thread(name = "CodexAgentAutoAnswer-${session.sessionId}") {
            Log.i(TAG, "Attempting Agent auto-answer for ${session.sessionId}")
            runCatching {
                if (isBridgeQuestion(question)) {
                    answerBridgeQuestion(session, question)
                    handledGenieQuestions.add(questionKey)
                    AgentQuestionNotifier.cancel(this, session.sessionId)
                    Log.i(TAG, "Answered bridge question for ${session.sessionId}")
                } else {
                    when (val result = requestGenieAutoAnswer(session, question, events)) {
                        AutoAnswerResult.Answered -> {
                            handledGenieQuestions.add(questionKey)
                            AgentQuestionNotifier.cancel(this, session.sessionId)
                            Log.i(TAG, "Auto-answered Genie question for ${session.sessionId}")
                        }
                        is AutoAnswerResult.Escalate -> {
                            // Only bother the user if the session is still blocked.
                            if (sessionController.isSessionWaitingForUser(session.sessionId)) {
                                AgentQuestionNotifier.showQuestion(
                                    context = this,
                                    sessionId = session.sessionId,
                                    targetPackage = session.targetPackage,
                                    question = result.question,
                                )
                            }
                        }
                    }
                }
            }.onFailure { err ->
                Log.i(TAG, "Agent auto-answer unavailable for ${session.sessionId}: ${err.message}")
                if (!isBridgeQuestion(question) && sessionController.isSessionWaitingForUser(session.sessionId)) {
                    AgentQuestionNotifier.showQuestion(
                        context = this,
                        sessionId = session.sessionId,
                        targetPackage = session.targetPackage,
                        question = question,
                    )
                }
            }
            pendingGenieQuestions.remove(questionKey)
        }
    }
    /**
     * Shows or clears the user-facing question notification. Bridge questions
     * and questions currently being auto-answered are never shown.
     */
    private fun updateQuestionNotification(session: AgentSessionInfo, question: String) {
        if (question.isBlank()) {
            AgentQuestionNotifier.cancel(this, session.sessionId)
            return
        }
        if (isBridgeQuestion(question)) {
            AgentQuestionNotifier.cancel(this, session.sessionId)
            return
        }
        if (pendingGenieQuestions.contains(genieQuestionKey(session.sessionId, question))) {
            return
        }
        AgentQuestionNotifier.showQuestion(
            context = this,
            sessionId = session.sessionId,
            targetPackage = session.targetPackage,
            question = question,
        )
    }
    /**
     * Asks the Codex runtime to answer [question] for [session].
     *
     * The runtime can answer via the `answer_question` framework tool (session
     * ids are injected into the tool call when missing), reply with an
     * `ESCALATE:` prefix to hand the question to the user, or return plain text
     * which is forwarded to the session as the answer.
     *
     * @throws IOException when the runtime is unauthenticated or returns nothing usable.
     */
    private fun requestGenieAutoAnswer(
        session: AgentSessionInfo,
        question: String,
        events: List<AgentSessionEvent>,
    ): AutoAnswerResult {
        val runtimeStatus = AgentCodexAppServerClient.readRuntimeStatus(this)
        if (!runtimeStatus.authenticated) {
            throw IOException("Agent runtime is not authenticated")
        }
        val frameworkToolBridge = AgentFrameworkToolBridge(this, sessionController)
        var answered = false
        val response = AgentCodexAppServerClient.requestText(
            context = this,
            instructions = AUTO_ANSWER_INSTRUCTIONS,
            prompt = buildAutoAnswerPrompt(session, question, events),
            dynamicTools = frameworkToolBridge.buildQuestionResolutionToolSpecs(),
            toolCallHandler = { toolName, arguments ->
                // Default the answer_question tool call to the current session/parent
                // when the model leaves those arguments blank.
                if (
                    toolName == AgentFrameworkToolBridge.ANSWER_QUESTION_TOOL &&
                    arguments.optString("sessionId").trim().isEmpty()
                ) {
                    arguments.put("sessionId", session.sessionId)
                }
                if (
                    toolName == AgentFrameworkToolBridge.ANSWER_QUESTION_TOOL &&
                    arguments.optString("parentSessionId").trim().isEmpty() &&
                    !session.parentSessionId.isNullOrBlank()
                ) {
                    arguments.put("parentSessionId", session.parentSessionId)
                }
                val toolResult = frameworkToolBridge.handleToolCall(
                    toolName = toolName,
                    arguments = arguments,
                    userObjective = question,
                    focusedSessionId = session.sessionId,
                )
                if (toolName == AgentFrameworkToolBridge.ANSWER_QUESTION_TOOL) {
                    answered = true
                }
                toolResult
            },
            frameworkSessionId = session.sessionId,
        ).trim()
        if (answered) {
            return AutoAnswerResult.Answered
        }
        if (response.startsWith(AUTO_ANSWER_ESCALATE_PREFIX, ignoreCase = true)) {
            val escalateQuestion = response.substringAfter(':').trim().ifEmpty { question }
            return AutoAnswerResult.Escalate(escalateQuestion)
        }
        if (response.isNotBlank()) {
            sessionController.answerQuestion(session.sessionId, response, session.parentSessionId)
            return AutoAnswerResult.Answered
        }
        throw IOException("Agent runtime did not return an answer")
    }
    /** Builds the prompt fed to the auto-answer runtime for one question. */
    private fun buildAutoAnswerPrompt(
        session: AgentSessionInfo,
        question: String,
        events: List<AgentSessionEvent>,
    ): String {
        val recentContext = renderRecentContext(events)
        // Raw-string lines are kept flush-left on purpose: trimIndent() runs on the
        // interpolated result, so indenting them could change the final text when
        // the interpolated context itself is unindented.
        return """
Target package: ${session.targetPackage ?: "unknown"}
Current Genie question: $question
Recent session context:
$recentContext
""".trimIndent()
    }
    // Renders up to the last 6 events, truncated from the front to the char budget.
    private fun renderRecentContext(events: List<AgentSessionEvent>): String {
        val context = events
            .takeLast(6)
            .joinToString("\n") { event ->
                "${eventTypeToString(event.type)}: ${event.message ?: ""}"
            }
        if (context.length <= MAX_AUTO_ANSWER_CONTEXT_CHARS) {
            return context.ifBlank { "No prior Genie context." }
        }
        return context.takeLast(MAX_AUTO_ANSWER_CONTEXT_CHARS)
    }
    // Most recent non-blank QUESTION event, or null when none exists.
    private fun findLatestQuestion(events: List<AgentSessionEvent>): String? {
        return events.lastOrNull { event ->
            event.type == AgentSessionEvent.TYPE_QUESTION &&
                !event.message.isNullOrBlank()
        }?.message
    }
    // Most recent non-blank message of the given event type, or null.
    private fun findLastEventMessage(events: List<AgentSessionEvent>, type: Int): String? {
        return events.lastOrNull { event ->
            event.type == type && !event.message.isNullOrBlank()
        }?.message
    }
    private fun isBridgeQuestion(question: String): Boolean {
        return question.startsWith(BRIDGE_REQUEST_PREFIX)
    }
    /**
     * Handles an in-band bridge request encoded in [question] and answers the
     * session with a prefixed JSON response. Requests carrying a requestId are
     * deduplicated; failures are reported as {ok:false, error:...} rather than thrown.
     */
    private fun answerBridgeQuestion(
        session: AgentSessionInfo,
        question: String,
    ) {
        val request = JSONObject(question.removePrefix(BRIDGE_REQUEST_PREFIX))
        val requestId = request.optString("requestId")
        if (requestId.isNotBlank()) {
            val bridgeRequestKey = "${session.sessionId}:$requestId"
            if (!handledBridgeRequests.add(bridgeRequestKey)) {
                Log.i(
                    TAG,
                    "Skipping duplicate bridge question method=${request.optString("method")} requestId=$requestId session=${session.sessionId}",
                )
                return
            }
        }
        Log.i(
            TAG,
            "Answering bridge question method=${request.optString("method")} requestId=$requestId session=${session.sessionId}",
        )
        val response: JSONObject = runCatching {
            when (request.optString("method")) {
                BRIDGE_METHOD_GET_RUNTIME_STATUS -> {
                    val status = AgentCodexAppServerClient.readRuntimeStatus(this)
                    JSONObject()
                        .put("requestId", requestId)
                        .put("ok", true)
                        .put(
                            "runtimeStatus",
                            JSONObject()
                                .put("authenticated", status.authenticated)
                                .put("accountEmail", status.accountEmail)
                                .put("clientCount", status.clientCount)
                                .put("modelProviderId", status.modelProviderId)
                                .put("configuredModel", status.configuredModel)
                                .put("effectiveModel", status.effectiveModel)
                                .put("upstreamBaseUrl", status.upstreamBaseUrl)
                                .put("frameworkResponsesPath", status.frameworkResponsesPath),
                        )
                }
                else -> JSONObject()
                    .put("requestId", requestId)
                    .put("ok", false)
                    .put("error", "Unsupported bridge method: ${request.optString("method")}")
            }
        }.getOrElse { err ->
            JSONObject()
                .put("requestId", requestId)
                .put("ok", false)
                .put("error", err.message ?: err::class.java.simpleName)
        }
        sessionController.answerQuestion(
            session.sessionId,
            BRIDGE_RESPONSE_PREFIX + response.toString(),
            session.parentSessionId,
        )
    }
    // Human-readable event-type name for context rendering.
    private fun eventTypeToString(type: Int): String {
        return when (type) {
            AgentSessionEvent.TYPE_TRACE -> "Trace"
            AgentSessionEvent.TYPE_QUESTION -> "Question"
            AgentSessionEvent.TYPE_RESULT -> "Result"
            AgentSessionEvent.TYPE_ERROR -> "Error"
            AgentSessionEvent.TYPE_POLICY -> "Policy"
            AgentSessionEvent.TYPE_DETACHED_ACTION -> "DetachedAction"
            AgentSessionEvent.TYPE_ANSWER -> "Answer"
            else -> "Event($type)"
        }
    }
    /**
     * Dedup key for a (session, question) pair. Bridge questions key on their
     * requestId (stable across re-deliveries); others key on the question text.
     */
    private fun genieQuestionKey(sessionId: String, question: String): String {
        if (isBridgeQuestion(question)) {
            val requestId = runCatching {
                JSONObject(question.removePrefix(BRIDGE_REQUEST_PREFIX)).optString("requestId").trim()
            }.getOrNull()
            if (!requestId.isNullOrEmpty()) {
                return "$sessionId:bridge:$requestId"
            }
        }
        return "$sessionId:$question"
    }
    // A direct parent session is Agent-anchored with no parent and no target package.
    private fun isDirectParentSession(session: AgentSessionInfo): Boolean {
        return session.anchor == AgentSessionInfo.ANCHOR_AGENT &&
            session.parentSessionId == null &&
            session.targetPackage == null
    }
    // Derives the Android user id from the app uid (100000 app uids per user —
    // UserHandle.PER_USER_RANGE).
    private fun currentUserId(): Int {
        return Process.myUid() / 100000
    }
}

View File

@@ -1,15 +0,0 @@
package com.openai.codex.agent
import android.content.Context
import java.io.File
import java.io.IOException
/**
 * Locates the bundled codex CLI binary on disk.
 */
object CodexCliBinaryLocator {
    /**
     * Returns the codex executable, expected at "libcodex.so" inside the app's
     * native library directory (presumably named like a shared library so the
     * installer extracts it there — confirm packaging).
     *
     * @throws IOException if the file does not exist.
     */
    fun resolve(context: Context): File {
        val candidate = File(context.applicationInfo.nativeLibraryDir, "libcodex.so")
        return candidate.takeIf(File::exists)
            ?: throw IOException("codex binary missing at ${candidate.absolutePath}")
    }
}

View File

@@ -1,599 +0,0 @@
package com.openai.codex.agent
import android.app.Activity
import android.app.agent.AgentManager
import android.app.agent.AgentSessionInfo
import android.content.Context
import android.content.Intent
import android.graphics.drawable.Drawable
import android.os.Bundle
import android.os.Binder
import android.util.Log
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.ArrayAdapter
import android.widget.Button
import android.widget.EditText
import android.widget.ImageView
import android.widget.Spinner
import android.widget.TextView
import android.widget.Toast
import com.openai.codex.bridge.SessionExecutionSettings
import kotlin.concurrent.thread
class CreateSessionActivity : Activity() {
companion object {
private const val TAG = "CodexCreateSession"
const val ACTION_CREATE_SESSION = "com.openai.codex.agent.action.CREATE_SESSION"
const val EXTRA_INITIAL_PROMPT = "com.openai.codex.agent.extra.INITIAL_PROMPT"
private const val EXTRA_EXISTING_SESSION_ID = "existingSessionId"
private const val EXTRA_TARGET_PACKAGE = "targetPackage"
private const val EXTRA_LOCK_TARGET = "lockTarget"
private const val EXTRA_INITIAL_MODEL = "initialModel"
private const val EXTRA_INITIAL_REASONING_EFFORT = "initialReasoningEffort"
private const val DEFAULT_MODEL = "gpt-5.3-codex-spark"
private const val DEFAULT_REASONING_EFFORT = "low"
fun preferredInitialSettings(): SessionExecutionSettings {
return SessionExecutionSettings(
model = DEFAULT_MODEL,
reasoningEffort = DEFAULT_REASONING_EFFORT,
)
}
private fun mergedWithPreferredDefaults(settings: SessionExecutionSettings): SessionExecutionSettings {
val defaults = preferredInitialSettings()
return SessionExecutionSettings(
model = settings.model ?: defaults.model,
reasoningEffort = settings.reasoningEffort ?: defaults.reasoningEffort,
)
}
fun externalCreateSessionIntent(initialPrompt: String): Intent {
return Intent(ACTION_CREATE_SESSION).apply {
addCategory(Intent.CATEGORY_DEFAULT)
putExtra(EXTRA_INITIAL_PROMPT, initialPrompt)
}
}
fun newSessionIntent(
context: Context,
initialSettings: SessionExecutionSettings,
): Intent {
return Intent(context, CreateSessionActivity::class.java).apply {
putExtra(EXTRA_INITIAL_MODEL, initialSettings.model)
putExtra(EXTRA_INITIAL_REASONING_EFFORT, initialSettings.reasoningEffort)
addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
}
}
fun existingHomeSessionIntent(
context: Context,
sessionId: String,
targetPackage: String,
initialSettings: SessionExecutionSettings,
): Intent {
return newSessionIntent(context, initialSettings).apply {
putExtra(EXTRA_EXISTING_SESSION_ID, sessionId)
putExtra(EXTRA_TARGET_PACKAGE, targetPackage)
putExtra(EXTRA_LOCK_TARGET, true)
}
}
}
private val sessionController by lazy { AgentSessionController(this) }
private val sessionUiLeaseToken = Binder()
private var availableModels: List<AgentModelOption> = emptyList()
@Volatile
private var modelsRefreshInFlight = false
private val pendingModelCallbacks = mutableListOf<() -> Unit>()
private var existingSessionId: String? = null
private var leasedSessionId: String? = null
private var uiActive = false
private var selectedPackage: InstalledApp? = null
private var targetLocked = false
private lateinit var promptInput: EditText
private lateinit var packageSummary: TextView
private lateinit var packageButton: Button
private lateinit var clearPackageButton: Button
private lateinit var modelSpinner: Spinner
private lateinit var effortSpinner: Spinner
private lateinit var titleView: TextView
private lateinit var statusView: TextView
private lateinit var startButton: Button
private var selectedReasoningOptions = emptyList<AgentReasoningEffortOption>()
private var pendingEffortOverride: String? = null
private lateinit var effortLabelAdapter: ArrayAdapter<String>
private var initialSettings = preferredInitialSettings()
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_create_session)
setFinishOnTouchOutside(true)
bindViews()
loadInitialState()
refreshModelsIfNeeded(force = true)
}
override fun onNewIntent(intent: Intent) {
super.onNewIntent(intent)
setIntent(intent)
loadInitialState()
if (availableModels.isNotEmpty()) {
applyModelOptions()
}
}
override fun onResume() {
super.onResume()
uiActive = true
updateSessionUiLease(existingSessionId)
}
override fun onPause() {
uiActive = false
updateSessionUiLease(null)
super.onPause()
}
private fun bindViews() {
titleView = findViewById(R.id.create_session_title)
statusView = findViewById(R.id.create_session_status)
promptInput = findViewById(R.id.create_session_prompt)
packageSummary = findViewById(R.id.create_session_target_summary)
packageButton = findViewById(R.id.create_session_pick_target_button)
clearPackageButton = findViewById(R.id.create_session_clear_target_button)
modelSpinner = findViewById(R.id.create_session_model_spinner)
effortSpinner = findViewById(R.id.create_session_effort_spinner)
startButton = findViewById(R.id.create_session_start_button)
effortLabelAdapter = ArrayAdapter(
this,
android.R.layout.simple_spinner_item,
mutableListOf<String>(),
).also {
it.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item)
effortSpinner.adapter = it
}
modelSpinner.adapter = ArrayAdapter(
this,
android.R.layout.simple_spinner_item,
mutableListOf<String>(),
).also { it.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item) }
modelSpinner.onItemSelectedListener = SimpleItemSelectedListener {
updateEffortOptions(pendingEffortOverride)
pendingEffortOverride = null
}
packageButton.setOnClickListener {
showInstalledAppPicker { app ->
selectedPackage = app
updatePackageSummary()
}
}
clearPackageButton.setOnClickListener {
selectedPackage = null
updatePackageSummary()
}
findViewById<Button>(R.id.create_session_cancel_button).setOnClickListener {
cancelAndFinish()
}
startButton.setOnClickListener {
startSession()
}
updatePackageSummary()
}
/**
 * Resets the screen to match the current intent's extras.
 *
 * Handles three entry modes:
 *  1. Plain "new session" (no target extras) — editable target picker.
 *  2. Explicit target package (EXTRA_TARGET_PACKAGE), optionally locked.
 *  3. An incoming framework session id — inspected off the UI thread; draft
 *     sessions are edited here, anything else is forwarded to
 *     [SessionDetailActivity].
 */
private fun loadInitialState() {
    // Drop any lease held for a previous intent before re-reading state.
    updateSessionUiLease(null)
    existingSessionId = null
    selectedPackage = null
    targetLocked = false
    titleView.text = "New Session"
    statusView.visibility = View.GONE
    statusView.text = "Loading session…"
    startButton.isEnabled = true
    unlockTargetSelection()
    updatePackageSummary()
    existingSessionId = intent.getStringExtra(EXTRA_EXISTING_SESSION_ID)?.trim()?.ifEmpty { null }
    // Seed model/effort from extras, falling back to defaults, then merge with stored preferences.
    initialSettings = mergedWithPreferredDefaults(
        SessionExecutionSettings(
            model = intent.getStringExtra(EXTRA_INITIAL_MODEL)?.trim()?.ifEmpty { null } ?: DEFAULT_MODEL,
            reasoningEffort = intent.getStringExtra(EXTRA_INITIAL_REASONING_EFFORT)?.trim()?.ifEmpty { null }
                ?: DEFAULT_REASONING_EFFORT,
        ),
    )
    promptInput.setText(intent.getStringExtra(EXTRA_INITIAL_PROMPT).orEmpty())
    promptInput.setSelection(promptInput.text.length)
    val explicitTarget = intent.getStringExtra(EXTRA_TARGET_PACKAGE)?.trim()?.ifEmpty { null }
    targetLocked = intent.getBooleanExtra(EXTRA_LOCK_TARGET, false)
    if (explicitTarget != null) {
        selectedPackage = InstalledAppCatalog.resolveInstalledApp(this, sessionController, explicitTarget)
        titleView.text = "New Session"
        updatePackageSummary()
        if (targetLocked) {
            lockTargetSelection()
        }
        if (uiActive) {
            updateSessionUiLease(existingSessionId)
        }
        return
    }
    val incomingSessionId = intent.getStringExtra(AgentManager.EXTRA_SESSION_ID)?.trim()?.ifEmpty { null }
    if (incomingSessionId != null) {
        // Disable input while the session is inspected on a worker thread.
        statusView.visibility = View.VISIBLE
        statusView.text = "Loading session…"
        startButton.isEnabled = false
        thread {
            val draftSession = runCatching {
                findStandaloneHomeDraftSession(incomingSessionId)
            }.getOrElse { err ->
                Log.w(TAG, "Failed to inspect incoming session $incomingSessionId", err)
                null
            }
            runOnUiThread {
                // Non-draft sessions are viewed, not edited: hand off to the detail screen.
                if (draftSession == null) {
                    startActivity(
                        Intent(this, SessionDetailActivity::class.java)
                            .putExtra(SessionDetailActivity.EXTRA_SESSION_ID, incomingSessionId),
                    )
                    finish()
                    return@runOnUiThread
                }
                existingSessionId = draftSession.sessionId
                selectedPackage = InstalledAppCatalog.resolveInstalledApp(
                    this,
                    sessionController,
                    // Draft filter guarantees a non-blank target package.
                    checkNotNull(draftSession.targetPackage),
                )
                initialSettings = mergedWithPreferredDefaults(
                    sessionController.executionSettingsForSession(draftSession.sessionId),
                )
                targetLocked = true
                titleView.text = "New Session"
                updatePackageSummary()
                lockTargetSelection()
                statusView.visibility = View.GONE
                startButton.isEnabled = true
                if (uiActive) {
                    updateSessionUiLease(existingSessionId)
                }
                if (availableModels.isNotEmpty()) {
                    applyModelOptions()
                }
            }
        }
    }
}
/**
 * Cancels the draft session (if one exists) on a worker thread, then closes
 * the screen; on failure the Start button is re-enabled and an error is shown.
 */
private fun cancelAndFinish() {
    // Nothing to cancel when the screen was opened for a brand-new session.
    val sessionId = existingSessionId ?: run {
        finish()
        return
    }
    startButton.isEnabled = false
    thread {
        val outcome = runCatching { sessionController.cancelSession(sessionId) }
        runOnUiThread {
            outcome.fold(
                onSuccess = { finish() },
                onFailure = { err ->
                    startButton.isEnabled = true
                    showToast("Failed to cancel session: ${err.message}")
                },
            )
        }
    }
}
/** Hides the picker controls so a pre-bound target cannot be changed. */
private fun lockTargetSelection() {
    listOf(packageButton, clearPackageButton).forEach { it.visibility = View.GONE }
}
/** Restores the picker controls so the user may choose or clear a target. */
private fun unlockTargetSelection() {
    listOf(packageButton, clearPackageButton).forEach { it.visibility = View.VISIBLE }
}
/**
 * Validates the form and launches (or resumes) the session on a worker
 * thread. On success the session id is returned to the caller and the screen
 * closes; on failure the form is re-enabled.
 */
private fun startSession() {
    val prompt = promptInput.text.toString().trim()
    if (prompt.isEmpty()) {
        promptInput.error = "Enter a prompt"
        return
    }
    val targetPackage = selectedPackage?.packageName
    // A draft session was created against a concrete target; refuse to lose it.
    if (existingSessionId != null && targetPackage == null) {
        showToast("Missing target app for existing session")
        return
    }
    startButton.isEnabled = false
    thread {
        runCatching {
            AgentSessionLauncher.startSessionAsync(
                context = this,
                request = LaunchSessionRequest(
                    prompt = prompt,
                    targetPackage = targetPackage,
                    model = selectedModel().model,
                    reasoningEffort = selectedEffort(),
                    existingSessionId = existingSessionId,
                ),
                sessionController = sessionController,
                requestUserInputHandler = { questions ->
                    AgentUserInputPrompter.promptForAnswers(this, questions)
                },
            )
        }.fold(
            onSuccess = { result ->
                runOnUiThread {
                    showToast("Started session")
                    setResult(RESULT_OK, Intent().putExtra(SessionDetailActivity.EXTRA_SESSION_ID, result.parentSessionId))
                    finish()
                }
            },
            onFailure = { err ->
                runOnUiThread {
                    startButton.isEnabled = true
                    showToast("Failed to start session: ${err.message}")
                }
            },
        )
    }
}
/**
 * Ensures the model catalog is loaded, then runs [onComplete].
 *
 * When the catalog is already present (and [force] is false) the callback runs
 * immediately on the caller's thread. Otherwise the callback is queued and a
 * single background fetch is started; concurrent callers share that fetch.
 *
 * Fix: completion handling (clearing [modelsRefreshInFlight] and draining
 * [pendingModelCallbacks]) now happens inside the runOnUiThread block. The
 * original cleared the flag and invoked callbacks directly on the worker
 * thread, so queued callbacks ran off the UI thread — unlike the fast path
 * above — and the flag was checked on the UI thread but cleared on a worker.
 */
private fun refreshModelsIfNeeded(
    force: Boolean,
    onComplete: (() -> Unit)? = null,
) {
    // Fast path: catalog already loaded and no forced refresh requested.
    if (!force && availableModels.isNotEmpty()) {
        onComplete?.invoke()
        return
    }
    if (onComplete != null) {
        synchronized(pendingModelCallbacks) {
            pendingModelCallbacks += onComplete
        }
    }
    // Another refresh is running; it will drain the queued callbacks when done.
    if (modelsRefreshInFlight) {
        return
    }
    modelsRefreshInFlight = true
    thread {
        runCatching { AgentCodexAppServerClient.listModels(this) }
            .onFailure { err ->
                Log.w(TAG, "Failed to load model catalog", err)
            }
            .onSuccess { models ->
                availableModels = models
            }
        runOnUiThread {
            if (availableModels.isNotEmpty()) {
                applyModelOptions()
            } else {
                statusView.visibility = View.VISIBLE
                statusView.text = "Failed to load model catalog."
            }
            // Clear the flag and deliver callbacks on the UI thread so callers
            // observe consistent state and callbacks may safely touch views.
            modelsRefreshInFlight = false
            val callbacks = synchronized(pendingModelCallbacks) {
                pendingModelCallbacks.toList().also { pendingModelCallbacks.clear() }
            }
            callbacks.forEach { callback -> callback.invoke() }
        }
    }
}
/**
 * Populates the model spinner from the loaded catalog (or the built-in
 * fallback), restores the previously chosen model, and refreshes the
 * dependent effort options.
 */
private fun applyModelOptions() {
    val models = availableModels.ifEmpty(::fallbackModels)
    if (availableModels.isEmpty()) {
        availableModels = models
    }
    // Label format: "Name" or "Name (description)" when a description exists.
    val labels = models.map { option ->
        when {
            option.description.isBlank() -> option.displayName
            else -> "${option.displayName} (${option.description})"
        }
    }
    val adapter = ArrayAdapter(this, android.R.layout.simple_spinner_item, labels).apply {
        setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item)
    }
    // The selection listener fires once after setSelection; carry the initial
    // effort through so it is not clobbered by the model's default.
    pendingEffortOverride = initialSettings.reasoningEffort
    modelSpinner.adapter = adapter
    // Prefer the configured model, then the catalog default, then index 0.
    val preferredIndex = models.indexOfFirst { it.model == initialSettings.model }
    val defaultIndex = models.indexOfFirst(AgentModelOption::isDefault)
    val modelIndex = sequenceOf(preferredIndex, defaultIndex, 0).first { it >= 0 }
    modelSpinner.setSelection(modelIndex, false)
    updateEffortOptions(initialSettings.reasoningEffort)
    statusView.visibility = View.GONE
}
/**
 * Returns the model currently highlighted in the model spinner.
 *
 * Fix: fall back to [fallbackModels] when the catalog is empty. The original
 * indexed `availableModels` with `coerceIn(0, availableModels.lastIndex)`,
 * which throws IllegalArgumentException on an empty list (coerceIn(0, -1))
 * — e.g. if Start is tapped before the catalog has loaded.
 */
private fun selectedModel(): AgentModelOption {
    val models = availableModels.ifEmpty(::fallbackModels)
    return models[modelSpinner.selectedItemPosition.coerceIn(0, models.lastIndex)]
}
/** Returns the reasoning effort chosen in the effort spinner, or null when none is selected. */
private fun selectedEffort(): String? {
    val position = effortSpinner.selectedItemPosition
    return selectedReasoningOptions.getOrNull(position)?.reasoningEffort
}
/**
 * Rebuilds the effort spinner for the currently selected model and selects
 * [requestedEffort] (or the model's default) when available.
 *
 * Fix: the original label was `"${effort}${description}"`, which rendered
 * e.g. "mediumBalanced" with no separator. Labels now follow the same
 * "value (description)" convention used for model labels in
 * applyModelOptions, skipping blank descriptions.
 */
private fun updateEffortOptions(requestedEffort: String?) {
    if (availableModels.isEmpty()) {
        return
    }
    selectedReasoningOptions = selectedModel().supportedReasoningEfforts
    val labels = selectedReasoningOptions.map { option ->
        if (option.description.isBlank()) {
            option.reasoningEffort
        } else {
            "${option.reasoningEffort} (${option.description})"
        }
    }
    effortLabelAdapter.clear()
    effortLabelAdapter.addAll(labels)
    effortLabelAdapter.notifyDataSetChanged()
    val desiredEffort = requestedEffort ?: selectedModel().defaultReasoningEffort
    val selectedIndex = selectedReasoningOptions.indexOfFirst { it.reasoningEffort == desiredEffort }
        .takeIf { it >= 0 } ?: 0
    effortSpinner.setSelection(selectedIndex, false)
}
/** Renders the selected target app (label, package, icon) or a "none selected" hint. */
private fun updatePackageSummary() {
    val app = selectedPackage ?: run {
        packageSummary.text = "No target app selected. This will start an Agent-anchored session."
        packageSummary.setCompoundDrawablesRelativeWithIntrinsicBounds(null, null, null, null)
        return
    }
    packageSummary.text = "${app.label} (${app.packageName})"
    packageSummary.setCompoundDrawablesRelativeWithIntrinsicBounds(resizeIcon(app.icon), null, null, null)
    // Pad the icon by a quarter of the standard app-icon size.
    packageSummary.compoundDrawablePadding =
        resources.getDimensionPixelSize(android.R.dimen.app_icon_size) / 4
}
/**
 * Shows a dialog listing launchable apps and invokes [onSelected] with the
 * chosen one. Apps the session controller rejects are rendered greyed out and
 * tapping them shows an explanatory dialog instead of selecting.
 */
private fun showInstalledAppPicker(onSelected: (InstalledApp) -> Unit) {
    val apps = InstalledAppCatalog.listInstalledApps(this, sessionController)
    if (apps.isEmpty()) {
        android.app.AlertDialog.Builder(this)
            .setMessage("No launchable target apps are available.")
            .setPositiveButton(android.R.string.ok, null)
            .show()
        return
    }
    val adapter = object : ArrayAdapter<InstalledApp>(
        this,
        R.layout.list_item_installed_app,
        apps,
    ) {
        override fun getView(position: Int, convertView: View?, parent: ViewGroup): View {
            return bindAppRow(position, convertView, parent)
        }
        override fun getDropDownView(position: Int, convertView: View?, parent: ViewGroup): View {
            return bindAppRow(position, convertView, parent)
        }
        // Shared row binding for list and dropdown presentations; recycles convertView.
        private fun bindAppRow(position: Int, convertView: View?, parent: ViewGroup): View {
            val row = convertView ?: LayoutInflater.from(context)
                .inflate(R.layout.list_item_installed_app, parent, false)
            val app = getItem(position) ?: return row
            val iconView = row.findViewById<ImageView>(R.id.installed_app_icon)
            val titleView = row.findViewById<TextView>(R.id.installed_app_title)
            val subtitleView = row.findViewById<TextView>(R.id.installed_app_subtitle)
            iconView.setImageDrawable(app.icon ?: getDrawable(android.R.drawable.sym_def_app_icon))
            titleView.text = app.label
            subtitleView.text = if (app.eligibleTarget) {
                app.packageName
            } else {
                "${app.packageName} — unavailable"
            }
            // Dim and disable rows that cannot be chosen as targets.
            row.isEnabled = app.eligibleTarget
            titleView.isEnabled = app.eligibleTarget
            subtitleView.isEnabled = app.eligibleTarget
            iconView.alpha = if (app.eligibleTarget) 1f else 0.5f
            row.alpha = if (app.eligibleTarget) 1f else 0.6f
            return row
        }
    }
    val dialog = android.app.AlertDialog.Builder(this)
        .setTitle("Choose app")
        .setAdapter(adapter) { _, which ->
            val app = apps[which]
            if (!app.eligibleTarget) {
                android.app.AlertDialog.Builder(this)
                    .setMessage(
                        "The current framework rejected ${app.packageName} as a target for Genie sessions on this device.",
                    )
                    .setPositiveButton(android.R.string.ok, null)
                    .show()
                return@setAdapter
            }
            onSelected(app)
        }
        .setNegativeButton(android.R.string.cancel, null)
        .create()
    dialog.setOnShowListener {
        // Keep scrollbars permanently visible so long lists read as scrollable.
        dialog.listView?.isVerticalScrollBarEnabled = true
        dialog.listView?.isScrollbarFadingEnabled = false
        dialog.listView?.isFastScrollEnabled = true
        dialog.listView?.scrollBarStyle = View.SCROLLBARS_INSIDE_INSET
    }
    dialog.show()
}
/**
 * Returns the session for [sessionId] only if it is an editable home-anchored
 * draft: state CREATED, no child sessions, and a non-blank target package.
 * Returns null for anything else (or when the session does not exist).
 */
private fun findStandaloneHomeDraftSession(sessionId: String): AgentSessionDetails? {
    val snapshot = sessionController.loadSnapshot(sessionId)
    val session = snapshot.sessions.firstOrNull { it.sessionId == sessionId } ?: return null
    // A session with children has already started running; it is not a draft.
    if (snapshot.sessions.any { it.parentSessionId == sessionId }) return null
    val isHomeDraft = session.anchor == AgentSessionInfo.ANCHOR_HOME &&
        session.state == AgentSessionInfo.STATE_CREATED &&
        !session.targetPackage.isNullOrBlank()
    return if (isHomeDraft) session else null
}
/**
 * Transfers the session UI lease to [sessionId] (or releases it when null).
 * The previous lease is always released first; the new one is recorded only
 * if registration succeeds.
 */
private fun updateSessionUiLease(sessionId: String?) {
    if (leasedSessionId == sessionId) return
    val previous = leasedSessionId
    if (previous != null) {
        // Best effort: the framework may already have dropped the lease.
        runCatching { sessionController.unregisterSessionUiLease(previous, sessionUiLeaseToken) }
        leasedSessionId = null
    }
    if (sessionId != null) {
        val registration = runCatching {
            sessionController.registerSessionUiLease(sessionId, sessionUiLeaseToken)
        }
        if (registration.isSuccess) {
            leasedSessionId = sessionId
        }
    }
}
/** Returns a standalone copy of [icon] bounded to the standard app-icon size, or null. */
private fun resizeIcon(icon: Drawable?): Drawable? {
    // Clone via constantState so mutating bounds does not affect the shared drawable.
    val sized = icon?.constantState?.newDrawable()?.mutate() ?: return null
    val edge = resources.getDimensionPixelSize(android.R.dimen.app_icon_size)
    return sized.apply { setBounds(0, 0, edge, edge) }
}
/**
 * Synthesizes a one-entry model catalog from the initial settings, used when
 * the real catalog cannot be fetched.
 */
private fun fallbackModels(): List<AgentModelOption> {
    val modelName = initialSettings.model ?: DEFAULT_MODEL
    val efforts = listOf(
        AgentReasoningEffortOption("minimal", "Fastest"),
        AgentReasoningEffortOption("low", "Low"),
        AgentReasoningEffortOption("medium", "Balanced"),
        AgentReasoningEffortOption("high", "Deep"),
        AgentReasoningEffortOption("xhigh", "Max"),
    )
    return listOf(
        AgentModelOption(
            id = modelName,
            model = modelName,
            displayName = modelName,
            description = "Current Agent runtime default",
            supportedReasoningEfforts = efforts,
            defaultReasoningEffort = initialSettings.reasoningEffort ?: DEFAULT_REASONING_EFFORT,
            isDefault = true,
        ),
    )
}
/** Shows a short toast; safe to call from any thread. */
private fun showToast(message: String) {
    runOnUiThread { Toast.makeText(this, message, Toast.LENGTH_SHORT).show() }
}
}

View File

@@ -1,33 +0,0 @@
package com.openai.codex.agent
import android.content.Context
/**
 * Persists which sessions the user has dismissed from the session list,
 * backed by a private SharedPreferences file keyed by session id.
 */
class DismissedSessionStore(context: Context) {
    private val prefs = context.applicationContext.getSharedPreferences(PREFS_NAME, Context.MODE_PRIVATE)

    /** Records that the user dismissed [sessionId]. */
    fun dismiss(sessionId: String) {
        prefs.edit().putBoolean(sessionId, true).apply()
    }

    /** True when [sessionId] was previously dismissed. */
    fun isDismissed(sessionId: String): Boolean = prefs.getBoolean(sessionId, false)

    /** Forgets a previous dismissal for [sessionId]. */
    fun clearDismissed(sessionId: String) {
        prefs.edit().remove(sessionId).apply()
    }

    /** Drops stored entries whose session ids are not in [activeSessionIds]. */
    fun prune(activeSessionIds: Set<String>) {
        val stale = prefs.all.keys.filterNot { it in activeSessionIds }
        if (stale.isEmpty()) return
        val editor = prefs.edit()
        stale.forEach { editor.remove(it) }
        editor.apply()
    }

    companion object {
        private const val PREFS_NAME = "dismissed_sessions"
    }
}

View File

@@ -1,68 +0,0 @@
package com.openai.codex.agent
import android.content.Context
import android.content.Intent
import android.content.pm.PackageManager
import android.graphics.drawable.Drawable
/**
 * A launchable application offered in the target-app picker.
 *
 * @property packageName Android package name of the app.
 * @property label Human-readable label shown in the picker.
 * @property icon App icon drawable, if one could be loaded.
 * @property eligibleTarget Whether the session controller accepts this package as a session target.
 */
data class InstalledApp(
    val packageName: String,
    val label: String,
    val icon: Drawable?,
    val eligibleTarget: Boolean,
)
/** Enumerates and resolves installed apps that can act as session targets. */
object InstalledAppCatalog {
    // Our own packages never appear as session targets.
    private val excludedPackages = setOf(
        "com.openai.codex.agent",
        "com.openai.codex.genie",
    )

    /**
     * Lists launcher-visible apps (excluding our own packages), de-duplicated
     * by package and sorted by lowercase label, then package name.
     */
    fun listInstalledApps(
        context: Context,
        sessionController: AgentSessionController,
    ): List<InstalledApp> {
        val pm = context.packageManager
        val launcherIntent = Intent(Intent.ACTION_MAIN)
            .addCategory(Intent.CATEGORY_LAUNCHER)
        val appsByPackage = linkedMapOf<String, InstalledApp>()
        for (resolveInfo in pm.queryIntentActivities(launcherIntent, 0)) {
            val applicationInfo = resolveInfo.activityInfo?.applicationInfo ?: continue
            val packageName = applicationInfo.packageName.takeIf(String::isNotBlank) ?: continue
            // Skip excluded packages and keep only the first launcher entry per package.
            if (packageName in excludedPackages || packageName in appsByPackage) continue
            appsByPackage[packageName] = InstalledApp(
                packageName = packageName,
                label = resolveInfo.loadLabel(pm)?.toString().orEmpty().ifBlank { packageName },
                icon = resolveInfo.loadIcon(pm),
                eligibleTarget = sessionController.canStartSessionForTarget(packageName),
            )
        }
        return appsByPackage.values.sortedWith(
            compareBy<InstalledApp>({ it.label.lowercase() }).thenBy { it.packageName },
        )
    }

    /**
     * Resolves a single [packageName], preferring the launcher catalog and
     * falling back to direct PackageManager metadata for non-launcher packages.
     */
    fun resolveInstalledApp(
        context: Context,
        sessionController: AgentSessionController,
        packageName: String,
    ): InstalledApp {
        listInstalledApps(context, sessionController)
            .firstOrNull { it.packageName == packageName }
            ?.let { return it }
        val pm = context.packageManager
        val applicationInfo = pm.getApplicationInfo(packageName, 0)
        return InstalledApp(
            packageName = packageName,
            label = pm.getApplicationLabel(applicationInfo)?.toString().orEmpty().ifBlank { packageName },
            icon = pm.getApplicationIcon(applicationInfo),
            eligibleTarget = sessionController.canStartSessionForTarget(packageName),
        )
    }
}

View File

@@ -1,473 +0,0 @@
package com.openai.codex.agent
import android.Manifest
import android.app.Activity
import android.app.agent.AgentManager
import android.app.agent.AgentSessionInfo
import android.content.Intent
import android.content.pm.PackageManager
import android.net.Uri
import android.os.Build
import android.os.Bundle
import android.util.Base64
import android.util.Log
import android.view.View
import android.widget.Button
import android.widget.ListView
import android.widget.TextView
import android.widget.Toast
import com.openai.codex.bridge.SessionExecutionSettings
import kotlin.concurrent.thread
/**
 * Home screen of the Codex Agent app: shows auth/runtime status and the list
 * of top-level Agent sessions, and routes incoming intents (session deep
 * links, launcher taps, debug actions) to the appropriate screen.
 */
class MainActivity : Activity() {
    companion object {
        private const val TAG = "CodexMainActivity"
        // Debug-only intent actions (e.g. fired via adb) and their extras.
        private const val ACTION_DEBUG_START_AGENT_SESSION =
            "com.openai.codex.agent.action.DEBUG_START_AGENT_SESSION"
        private const val ACTION_DEBUG_CANCEL_ALL_AGENT_SESSIONS =
            "com.openai.codex.agent.action.DEBUG_CANCEL_ALL_AGENT_SESSIONS"
        private const val EXTRA_DEBUG_PROMPT = "prompt"
        private const val EXTRA_DEBUG_PROMPT_BASE64 = "promptBase64"
        private const val EXTRA_DEBUG_TARGET_PACKAGE = "targetPackage"
        private const val EXTRA_DEBUG_FINAL_PRESENTATION_POLICY = "finalPresentationPolicy"
    }

    // The following flags are written from background threads and read on the UI thread.
    @Volatile
    private var isAuthenticated = false
    @Volatile
    private var agentRefreshInFlight = false
    @Volatile
    private var latestAgentRuntimeStatus: AgentCodexAppServerClient.RuntimeStatus? = null
    // A transient auth message (e.g. "opening browser...") shown instead of runtime status.
    @Volatile
    private var pendingAuthMessage: String? = null

    private val agentSessionController by lazy { AgentSessionController(this) }
    private val dismissedSessionStore by lazy { DismissedSessionStore(this) }
    private val sessionListAdapter by lazy { TopLevelSessionListAdapter(this) }
    // NOTE(review): written from the refresh worker thread but not @Volatile like
    // the flags above — confirm whether cross-thread visibility matters here.
    private var latestSnapshot: AgentSnapshot = AgentSnapshot.unavailable

    // Pushes runtime status changes into the auth/runtime status views.
    private val runtimeStatusListener = AgentCodexAppServerClient.RuntimeStatusListener { status ->
        latestAgentRuntimeStatus = status
        if (status != null) {
            pendingAuthMessage = null
        }
        runOnUiThread {
            updateAuthUi(renderAuthStatus(), status?.authenticated == true)
            updateRuntimeStatusUi()
        }
    }

    // Any framework session change triggers a full session-list refresh.
    private val sessionListener = object : AgentManager.SessionListener {
        override fun onSessionChanged(session: AgentSessionInfo) {
            refreshAgentSessions()
        }
        override fun onSessionRemoved(sessionId: String, userId: Int) {
            refreshAgentSessions()
        }
    }
    private var sessionListenerRegistered = false

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)
        setupViews()
        requestNotificationPermissionIfNeeded()
        handleIncomingIntent(intent)
    }

    override fun onNewIntent(intent: Intent) {
        super.onNewIntent(intent)
        Log.i(TAG, "onNewIntent action=${intent.action}")
        setIntent(intent)
        handleIncomingIntent(intent)
    }

    /** Registers listeners and refreshes auth status and sessions on foreground. */
    override fun onResume() {
        super.onResume()
        registerSessionListenerIfNeeded()
        AgentCodexAppServerClient.registerRuntimeStatusListener(runtimeStatusListener)
        AgentCodexAppServerClient.refreshRuntimeStatusAsync(this, refreshToken = true)
        refreshAgentSessions(force = true)
    }

    /** Unregisters listeners before the activity leaves the foreground. */
    override fun onPause() {
        AgentCodexAppServerClient.unregisterRuntimeStatusListener(runtimeStatusListener)
        unregisterSessionListenerIfNeeded()
        super.onPause()
    }

    /** Wires list, buttons, and initial placeholder status/empty-state text. */
    private fun setupViews() {
        findViewById<ListView>(R.id.session_list).adapter = sessionListAdapter
        findViewById<ListView>(R.id.session_list).setOnItemClickListener { _, _, position, _ ->
            sessionListAdapter.getItem(position)?.let { session ->
                openSessionDetail(session.sessionId)
            }
        }
        findViewById<Button>(R.id.create_session_button).setOnClickListener {
            launchCreateSessionActivity()
        }
        findViewById<Button>(R.id.auth_action).setOnClickListener {
            authAction()
        }
        findViewById<Button>(R.id.refresh_sessions_button).setOnClickListener {
            refreshAgentSessions(force = true)
        }
        updateAuthUi("Agent auth: probing...", false)
        updateRuntimeStatusUi()
        updateSessionList(emptyList())
    }

    /**
     * Routes an incoming intent: explicit session id → detail screen; plain
     * launcher tap → possibly the single active session; otherwise debug actions.
     */
    private fun handleIncomingIntent(intent: Intent?) {
        val sessionId = intent?.getStringExtra(AgentManager.EXTRA_SESSION_ID)
        if (!sessionId.isNullOrBlank()) {
            openSessionDetail(sessionId)
            return
        }
        if (shouldRouteLauncherIntentToActiveSession(intent)) {
            routeLauncherIntentToActiveSession()
            return
        }
        maybeHandleDebugIntent(intent)
    }

    /** True for a plain launcher MAIN/LAUNCHER intent with no session id or debug action. */
    private fun shouldRouteLauncherIntentToActiveSession(intent: Intent?): Boolean {
        if (intent == null) {
            return false
        }
        if (
            intent.action == ACTION_DEBUG_CANCEL_ALL_AGENT_SESSIONS ||
            intent.action == ACTION_DEBUG_START_AGENT_SESSION
        ) {
            return false
        }
        return intent.action == Intent.ACTION_MAIN &&
            intent.hasCategory(Intent.CATEGORY_LAUNCHER) &&
            intent.getStringExtra(AgentManager.EXTRA_SESSION_ID).isNullOrBlank()
    }

    /** Opens the detail screen when exactly one non-terminal top-level session exists. */
    private fun routeLauncherIntentToActiveSession() {
        thread {
            val snapshot = runCatching { agentSessionController.loadSnapshot(null) }.getOrNull() ?: return@thread
            val activeTopLevelSessions = SessionUiFormatter.topLevelSessions(snapshot)
                .filterNot { isTerminalState(it.state) }
            if (activeTopLevelSessions.size != 1) {
                return@thread
            }
            val activeSessionId = activeTopLevelSessions.single().sessionId
            runOnUiThread {
                openSessionDetail(activeSessionId)
            }
        }
    }

    /**
     * Handles the two debug intent actions (cancel-all and start-session).
     * The action is nulled out afterwards so it is not re-handled on resume.
     */
    private fun maybeHandleDebugIntent(intent: Intent?) {
        when (intent?.action) {
            ACTION_DEBUG_CANCEL_ALL_AGENT_SESSIONS -> {
                thread {
                    runCatching { agentSessionController.cancelActiveSessions() }
                        .onFailure { err ->
                            Log.w(TAG, "Failed to cancel Agent sessions from debug intent", err)
                            showToast("Failed to cancel active sessions: ${err.message}")
                        }
                        .onSuccess { result ->
                            showToast(
                                "Cancelled ${result.cancelledSessionIds.size} sessions, ${result.failedSessionIds.size} failed",
                            )
                            refreshAgentSessions(force = true)
                        }
                }
                intent.action = null
            }
            ACTION_DEBUG_START_AGENT_SESSION -> {
                val prompt = extractDebugPrompt(intent)
                if (prompt.isEmpty()) {
                    intent.action = null
                    return
                }
                val targetPackage = intent.getStringExtra(EXTRA_DEBUG_TARGET_PACKAGE)?.trim()?.ifEmpty { null }
                val finalPresentationPolicy = SessionFinalPresentationPolicy.fromWireValue(
                    intent.getStringExtra(EXTRA_DEBUG_FINAL_PRESENTATION_POLICY),
                )
                startDebugSession(
                    prompt = prompt,
                    targetPackage = targetPackage,
                    finalPresentationPolicy = finalPresentationPolicy,
                )
                intent.action = null
            }
        }
    }

    /** Reads the debug prompt, preferring the base64 extra (shell-quoting safe) over plain text. */
    private fun extractDebugPrompt(intent: Intent): String {
        intent.getStringExtra(EXTRA_DEBUG_PROMPT_BASE64)
            ?.trim()
            ?.takeIf(String::isNotEmpty)
            ?.let { encoded ->
                runCatching {
                    String(Base64.decode(encoded, Base64.DEFAULT), Charsets.UTF_8).trim()
                }.onFailure { err ->
                    Log.w(TAG, "Failed to decode debug promptBase64", err)
                }.getOrNull()
                    ?.takeIf(String::isNotEmpty)
                    ?.let { return it }
            }
        return intent.getStringExtra(EXTRA_DEBUG_PROMPT)?.trim().orEmpty()
    }

    /**
     * Starts a session from a debug intent: a targeted home session when a
     * package is given, otherwise a planner-driven session from the objective.
     */
    private fun startDebugSession(
        prompt: String,
        targetPackage: String?,
        finalPresentationPolicy: SessionFinalPresentationPolicy?,
    ) {
        thread {
            val result = runCatching {
                if (targetPackage != null) {
                    agentSessionController.startHomeSession(
                        targetPackage = targetPackage,
                        prompt = prompt,
                        allowDetachedMode = true,
                        finalPresentationPolicy = finalPresentationPolicy
                            ?: SessionFinalPresentationPolicy.AGENT_CHOICE,
                        executionSettings = SessionExecutionSettings.default,
                    )
                } else {
                    AgentTaskPlanner.startSession(
                        context = this,
                        userObjective = prompt,
                        targetPackageOverride = null,
                        allowDetachedMode = true,
                        finalPresentationPolicyOverride = finalPresentationPolicy,
                        executionSettings = SessionExecutionSettings.default,
                        sessionController = agentSessionController,
                        requestUserInputHandler = { questions ->
                            AgentUserInputPrompter.promptForAnswers(this, questions)
                        },
                    )
                }
            }
            result.onFailure { err ->
                Log.w(TAG, "Failed to start debug Agent session", err)
                showToast("Failed to start Agent session: ${err.message}")
            }
            result.onSuccess { started ->
                showToast("Started session ${started.parentSessionId}")
                refreshAgentSessions(force = true)
            }
        }
    }

    /**
     * Loads the session snapshot on a worker thread and updates the list UI.
     * Terminal sessions the user dismissed stay hidden; active ones are
     * un-dismissed automatically.
     */
    private fun refreshAgentSessions(force: Boolean = false) {
        // Coalesce overlapping refreshes unless explicitly forced.
        if (!force && agentRefreshInFlight) {
            return
        }
        agentRefreshInFlight = true
        thread {
            try {
                val result = runCatching { agentSessionController.loadSnapshot(null) }
                result.onFailure { err ->
                    latestSnapshot = AgentSnapshot.unavailable
                    runOnUiThread {
                        findViewById<TextView>(R.id.agent_status).text =
                            "Agent framework unavailable (${err.message})"
                        updateSessionList(emptyList())
                    }
                }
                result.onSuccess { snapshot ->
                    latestSnapshot = snapshot
                    // Drop dismissal records for sessions that no longer exist.
                    dismissedSessionStore.prune(snapshot.sessions.map(AgentSessionDetails::sessionId).toSet())
                    val topLevelSessions = SessionUiFormatter.topLevelSessions(snapshot)
                        .filter { session ->
                            if (!isTerminalState(session.state)) {
                                dismissedSessionStore.clearDismissed(session.sessionId)
                                true
                            } else {
                                !dismissedSessionStore.isDismissed(session.sessionId)
                            }
                        }
                    runOnUiThread {
                        updateFrameworkStatus(snapshot)
                        updateSessionList(topLevelSessions)
                    }
                }
            } finally {
                agentRefreshInFlight = false
            }
        }
    }

    /** Renders the framework status line including the Genie role holders. */
    private fun updateFrameworkStatus(snapshot: AgentSnapshot) {
        val roleHolders = if (snapshot.roleHolders.isEmpty()) {
            "none"
        } else {
            snapshot.roleHolders.joinToString(", ")
        }
        findViewById<TextView>(R.id.agent_status).text =
            "Agent framework active. Genie role holders: $roleHolders"
    }

    /** Replaces the list contents and toggles the empty-state label. */
    private fun updateSessionList(sessions: List<AgentSessionDetails>) {
        sessionListAdapter.replaceItems(sessions)
        findViewById<TextView>(R.id.session_list_empty).visibility =
            if (sessions.isEmpty()) View.VISIBLE else View.GONE
    }

    private fun registerSessionListenerIfNeeded() {
        if (sessionListenerRegistered || !agentSessionController.isAvailable()) {
            return
        }
        // Registration may fail (e.g. framework restarting); treat failure as not registered.
        sessionListenerRegistered = runCatching {
            agentSessionController.registerSessionListener(mainExecutor, sessionListener)
        }.getOrDefault(false)
    }

    private fun unregisterSessionListenerIfNeeded() {
        if (!sessionListenerRegistered) {
            return
        }
        runCatching { agentSessionController.unregisterSessionListener(sessionListener) }
        sessionListenerRegistered = false
    }

    /** Requests POST_NOTIFICATIONS on Android 13+ if not already granted. */
    private fun requestNotificationPermissionIfNeeded() {
        if (Build.VERSION.SDK_INT < 33) {
            return
        }
        if (checkSelfPermission(Manifest.permission.POST_NOTIFICATIONS)
            == PackageManager.PERMISSION_GRANTED
        ) {
            return
        }
        requestPermissions(arrayOf(Manifest.permission.POST_NOTIFICATIONS), 1001)
    }

    /** The auth button toggles between sign-in and sign-out. */
    private fun authAction() {
        if (isAuthenticated) {
            signOutAgent()
        } else {
            startAgentSignIn()
        }
    }

    /** Starts the ChatGPT browser sign-in flow and surfaces progress/errors in the auth line. */
    private fun startAgentSignIn() {
        pendingAuthMessage = "Agent auth: opening browser for sign-in..."
        updateAuthUi(pendingAuthMessage.orEmpty(), false)
        thread {
            runCatching { AgentCodexAppServerClient.startChatGptLogin(this) }
                .onFailure { err ->
                    pendingAuthMessage = null
                    updateAuthUi("Agent auth: sign-in failed (${err.message})", false)
                }
                .onSuccess { loginSession ->
                    pendingAuthMessage = "Agent auth: complete sign-in in the browser"
                    updateAuthUi(pendingAuthMessage.orEmpty(), false)
                    runOnUiThread {
                        runCatching {
                            startActivity(Intent(Intent.ACTION_VIEW, Uri.parse(loginSession.authUrl)))
                        }.onFailure { err ->
                            // No browser available: show the URL so the user can open it manually.
                            pendingAuthMessage = "Agent auth: open ${loginSession.authUrl}"
                            updateAuthUi(pendingAuthMessage.orEmpty(), false)
                            showToast("Failed to open browser: ${err.message}")
                        }.onSuccess {
                            showToast("Complete sign-in in the browser")
                        }
                    }
                }
        }
    }

    /** Signs out of the Agent account and refreshes the runtime status afterwards. */
    private fun signOutAgent() {
        pendingAuthMessage = "Agent auth: signing out..."
        updateAuthUi(pendingAuthMessage.orEmpty(), false)
        thread {
            runCatching { AgentCodexAppServerClient.logoutAccount(this) }
                .onFailure { err ->
                    pendingAuthMessage = null
                    updateAuthUi("Agent auth: sign out failed (${err.message})", isAuthenticated)
                }
                .onSuccess {
                    pendingAuthMessage = null
                    AgentCodexAppServerClient.refreshRuntimeStatusAsync(this)
                    showToast("Signed out")
                }
        }
    }

    private fun updateRuntimeStatusUi() {
        findViewById<TextView>(R.id.agent_runtime_status).text = renderAgentRuntimeStatus()
    }

    /** Formats the one-line runtime summary (auth, provider, models, client count, base URL). */
    private fun renderAgentRuntimeStatus(): String {
        val runtimeStatus = latestAgentRuntimeStatus
        if (runtimeStatus == null) {
            return "Agent runtime: probing..."
        }
        val authSummary = if (runtimeStatus.authenticated) {
            runtimeStatus.accountEmail?.let { "signed in ($it)" } ?: "signed in"
        } else {
            "not signed in"
        }
        // Only mention the configured model when it differs from the effective one.
        val configuredModelSuffix = runtimeStatus.configuredModel
            ?.takeIf { it != runtimeStatus.effectiveModel }
            ?.let { ", configured=$it" }
            ?: ""
        val effectiveModel = runtimeStatus.effectiveModel ?: "unknown"
        return "Agent runtime: $authSummary, provider=${runtimeStatus.modelProviderId}, effective=$effectiveModel$configuredModelSuffix, clients=${runtimeStatus.clientCount}, base=${runtimeStatus.upstreamBaseUrl}"
    }

    /** Formats the auth line; a pending transient message takes precedence. */
    private fun renderAuthStatus(): String {
        pendingAuthMessage?.let { return it }
        val runtimeStatus = latestAgentRuntimeStatus
        if (runtimeStatus == null) {
            return "Agent auth: probing..."
        }
        if (!runtimeStatus.authenticated) {
            return "Agent auth: not signed in"
        }
        return runtimeStatus.accountEmail?.let { email ->
            "Agent auth: signed in ($email)"
        } ?: "Agent auth: signed in"
    }

    /** Updates the auth status text and button label; safe to call from any thread. */
    private fun updateAuthUi(
        message: String,
        authenticated: Boolean,
    ) {
        isAuthenticated = authenticated
        runOnUiThread {
            findViewById<TextView>(R.id.auth_status).text = message
            findViewById<Button>(R.id.auth_action).text =
                if (authenticated) "Sign out" else "Start sign-in"
        }
    }

    /** True for states that will never transition again. */
    private fun isTerminalState(state: Int): Boolean {
        return state == AgentSessionInfo.STATE_COMPLETED ||
            state == AgentSessionInfo.STATE_CANCELLED ||
            state == AgentSessionInfo.STATE_FAILED
    }

    private fun openSessionDetail(sessionId: String) {
        startActivity(
            Intent(this, SessionDetailActivity::class.java)
                .putExtra(SessionDetailActivity.EXTRA_SESSION_ID, sessionId),
        )
    }

    /** Opens the create-session screen and backgrounds this task. */
    private fun launchCreateSessionActivity() {
        startActivity(
            CreateSessionActivity.newSessionIntent(
                context = this,
                initialSettings = CreateSessionActivity.preferredInitialSettings(),
            ),
        )
        moveTaskToBack(true)
    }

    /** Shows a short toast; safe to call from any thread. */
    private fun showToast(message: String) {
        runOnUiThread {
            Toast.makeText(this, message, Toast.LENGTH_SHORT).show()
        }
    }
}

View File

@@ -1,52 +0,0 @@
package com.openai.codex.agent
/**
 * Builds the prompt used when continuing an earlier attempt within the same
 * top-level Agent session: the user's new prompt followed by truncated
 * context (state, results, errors, timeline) from the previous child session.
 */
object SessionContinuationPromptBuilder {
    // Truncation limits keep the continuation prompt bounded in size.
    private const val MAX_TIMELINE_CHARS = 1200
    private const val MAX_DETAIL_CHARS = 600

    /**
     * @param sourceTopLevelSession the top-level session being continued
     * @param selectedSession the previous child session whose context is included
     * @param prompt the user's new follow-up prompt (placed first)
     */
    fun build(
        sourceTopLevelSession: AgentSessionDetails,
        selectedSession: AgentSessionDetails,
        prompt: String,
    ): String {
        return buildString {
            appendLine(prompt.trim())
            appendLine()
            appendLine("This is a follow-up continuation of an earlier attempt in the same top-level Agent session.")
            appendLine("Reuse facts learned previously instead of starting over from scratch.")
            appendLine()
            appendLine("Previous session context:")
            appendLine("- Top-level session: ${sourceTopLevelSession.sessionId}")
            appendLine("- Previous child session: ${selectedSession.sessionId}")
            selectedSession.targetPackage?.let { appendLine("- Target package: $it") }
            appendLine("- Previous state: ${selectedSession.stateLabel}")
            appendLine("- Previous presentation: ${selectedSession.targetPresentationLabel}")
            appendLine("- Previous runtime: ${selectedSession.targetRuntimeLabel}")
            // Optional detail lines are emitted only when non-blank, each truncated.
            selectedSession.latestResult
                ?.takeIf(String::isNotBlank)
                ?.let { appendLine("- Previous result: ${it.take(MAX_DETAIL_CHARS)}") }
            selectedSession.latestError
                ?.takeIf(String::isNotBlank)
                ?.let { appendLine("- Previous error: ${it.take(MAX_DETAIL_CHARS)}") }
            selectedSession.latestTrace
                ?.takeIf(String::isNotBlank)
                ?.let { appendLine("- Previous trace: ${it.take(MAX_DETAIL_CHARS)}") }
            val timeline = selectedSession.timeline.trim()
            // Skip the placeholder text used when diagnostics were never fetched.
            if (timeline.isNotEmpty() && timeline != "Diagnostics not loaded.") {
                appendLine()
                appendLine("Recent timeline from the previous child session:")
                appendLine(timeline.take(MAX_TIMELINE_CHARS))
            }
            // First available of result/error/trace summarizes the parent session.
            val parentSummary = sourceTopLevelSession.latestResult
                ?: sourceTopLevelSession.latestError
                ?: sourceTopLevelSession.latestTrace
            parentSummary
                ?.takeIf(String::isNotBlank)
                ?.let {
                    appendLine()
                    appendLine("Top-level session summary:")
                    appendLine(it.take(MAX_DETAIL_CHARS))
                }
        }.trim()
    }
}

View File

@@ -1,777 +0,0 @@
package com.openai.codex.agent
import android.app.Activity
import android.app.agent.AgentManager
import android.app.agent.AgentSessionInfo
import android.content.Intent
import android.graphics.Typeface
import android.os.Binder
import android.os.Bundle
import android.text.SpannableStringBuilder
import android.text.Spanned
import android.text.style.StyleSpan
import android.util.Log
import android.view.View
import android.widget.Button
import android.widget.EditText
import android.widget.LinearLayout
import android.widget.ScrollView
import android.widget.TextView
import android.widget.Toast
import kotlin.concurrent.thread
class SessionDetailActivity : Activity() {
companion object {
    private const val TAG = "CodexSessionDetail"
    /** Intent extra naming the session to display. */
    const val EXTRA_SESSION_ID = "sessionId"
    // Debug-only action for continuing a session with a prompt supplied via extras.
    private const val ACTION_DEBUG_CONTINUE_SESSION =
        "com.openai.codex.agent.action.DEBUG_CONTINUE_SESSION"
    private const val EXTRA_DEBUG_PROMPT = "prompt"
}
/**
 * Immutable snapshot of what the detail screen renders: the top-level
 * session, its child sessions, and the child currently selected (if any).
 */
private data class SessionViewState(
    val topLevelSession: AgentSessionDetails,
    val childSessions: List<AgentSessionDetails>,
    val selectedChildSession: AgentSessionDetails?,
)
private val sessionController by lazy { AgentSessionController(this) }
private val dismissedSessionStore by lazy { DismissedSessionStore(this) }
// Binder token identifying this activity as holder of the session UI lease.
private val sessionUiLeaseToken = Binder()
// Session id currently holding the UI lease, if any.
private var leasedSessionId: String? = null
// Session id requested via the launching intent (may be a child session).
private var requestedSessionId: String? = null
private var topLevelSessionId: String? = null
private var selectedChildSessionId: String? = null
private var latestSnapshot: AgentSnapshot = AgentSnapshot.unavailable
private var refreshInFlight = false
// Framework callback: any session change triggers a snapshot refresh.
private val sessionListener = object : AgentManager.SessionListener {
    override fun onSessionChanged(session: AgentSessionInfo) {
        refreshSnapshot()
    }
    override fun onSessionRemoved(sessionId: String, userId: Int) {
        refreshSnapshot()
    }
}
private var sessionListenerRegistered = false
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_session_detail)
requestedSessionId = intent.getStringExtra(EXTRA_SESSION_ID)
setupViews()
maybeHandleDebugIntent(intent)
}
override fun onResume() {
super.onResume()
registerSessionListenerIfNeeded()
refreshSnapshot(force = true)
}
override fun onNewIntent(intent: Intent) {
super.onNewIntent(intent)
setIntent(intent)
requestedSessionId = intent.getStringExtra(EXTRA_SESSION_ID)
topLevelSessionId = null
selectedChildSessionId = null
maybeHandleDebugIntent(intent)
refreshSnapshot(force = true)
}
override fun onPause() {
unregisterSessionListenerIfNeeded()
updateSessionUiLease(null)
super.onPause()
}
private fun setupViews() {
findViewById<Button>(R.id.session_detail_cancel_button).setOnClickListener {
cancelSession()
}
findViewById<Button>(R.id.session_detail_delete_button).setOnClickListener {
deleteSession()
}
findViewById<Button>(R.id.session_detail_child_cancel_button).setOnClickListener {
cancelSelectedChildSession()
}
findViewById<Button>(R.id.session_detail_child_delete_button).setOnClickListener {
deleteSelectedChildSession()
}
findViewById<Button>(R.id.session_detail_attach_button).setOnClickListener {
attachTarget()
}
findViewById<Button>(R.id.session_detail_answer_button).setOnClickListener {
answerQuestion()
}
findViewById<Button>(R.id.session_detail_follow_up_button).setOnClickListener {
sendFollowUpPrompt()
}
}
private fun maybeHandleDebugIntent(intent: Intent?) {
if (intent?.action != ACTION_DEBUG_CONTINUE_SESSION) {
return
}
val prompt = intent.getStringExtra(EXTRA_DEBUG_PROMPT)?.trim().orEmpty()
val sessionId = intent.getStringExtra(EXTRA_SESSION_ID)?.trim().orEmpty()
if (prompt.isEmpty()) {
intent.action = null
return
}
Log.i(TAG, "Handling debug continuation for sessionId=$sessionId")
thread {
runCatching {
val snapshot = sessionController.loadSnapshot(sessionId.ifEmpty { requestedSessionId })
val viewState = resolveViewState(snapshot) ?: error("Session not found")
Log.i(TAG, "Loaded snapshot for continuation topLevel=${viewState.topLevelSession.sessionId} child=${viewState.selectedChildSession?.sessionId}")
continueSessionInPlaceOnce(prompt, snapshot, viewState)
}.onFailure { err ->
Log.w(TAG, "Debug continuation failed", err)
showToast("Failed to continue session: ${err.message}")
}.onSuccess { result ->
Log.i(TAG, "Debug continuation reused topLevel=${result.parentSessionId}")
showToast("Continued session in place")
runOnUiThread {
moveTaskToBack(true)
}
}
}
intent.action = null
}
private fun registerSessionListenerIfNeeded() {
if (sessionListenerRegistered || !sessionController.isAvailable()) {
return
}
sessionListenerRegistered = runCatching {
sessionController.registerSessionListener(mainExecutor, sessionListener)
}.getOrDefault(false)
}
private fun unregisterSessionListenerIfNeeded() {
if (!sessionListenerRegistered) {
return
}
runCatching { sessionController.unregisterSessionListener(sessionListener) }
sessionListenerRegistered = false
}
private fun refreshSnapshot(force: Boolean = false) {
if (!force && refreshInFlight) {
return
}
refreshInFlight = true
thread {
try {
val snapshot = runCatching {
sessionController.loadSnapshot(requestedSessionId ?: selectedChildSessionId ?: topLevelSessionId)
}
.getOrElse {
runOnUiThread {
findViewById<TextView>(R.id.session_detail_summary).text =
"Failed to load session: ${it.message}"
}
return@thread
}
latestSnapshot = snapshot
runOnUiThread {
updateUi(snapshot)
}
} finally {
refreshInFlight = false
}
}
}
private fun updateUi(snapshot: AgentSnapshot) {
val viewState = resolveViewState(snapshot)
if (viewState == null) {
findViewById<TextView>(R.id.session_detail_summary).text = "Session not found"
findViewById<TextView>(R.id.session_detail_child_summary).text = "Session not found"
updateSessionUiLease(null)
return
}
val topLevelSession = viewState.topLevelSession
val selectedChildSession = viewState.selectedChildSession
val actionableSession = selectedChildSession ?: topLevelSession
val canStartStandaloneHomeSession = canStartStandaloneHomeSession(viewState)
val executionSettings = sessionController.executionSettingsForSession(topLevelSession.sessionId)
val summary = buildString {
append(
SessionUiFormatter.detailSummary(
context = this@SessionDetailActivity,
session = topLevelSession,
parentSession = null,
),
)
if (!executionSettings.model.isNullOrBlank()) {
append("\nModel: ${executionSettings.model}")
}
if (!executionSettings.reasoningEffort.isNullOrBlank()) {
append("\nThinking depth: ${executionSettings.reasoningEffort}")
}
}
findViewById<TextView>(R.id.session_detail_summary).text = formatDetailSummary(summary.trimEnd())
renderChildSessions(viewState.childSessions, selectedChildSession?.sessionId)
val childSummaryText = selectedChildSession?.let { child ->
SessionUiFormatter.detailSummary(
context = this,
session = child,
parentSession = topLevelSession,
)
} ?: if (topLevelSession.anchor == AgentSessionInfo.ANCHOR_AGENT && viewState.childSessions.isEmpty()) {
"No child sessions yet. The Agent is still planning targets or waiting to start them."
} else {
"Select a child session to inspect it. Tap the same child again to collapse it."
}
findViewById<TextView>(R.id.session_detail_child_summary).text = formatDetailSummary(childSummaryText)
findViewById<ScrollView>(R.id.session_detail_child_summary_container).scrollTo(0, 0)
findViewById<TextView>(R.id.session_detail_timeline).text = formatTimeline(
topLevelSession,
selectedChildSession,
)
findViewById<ScrollView>(R.id.session_detail_timeline_container).scrollTo(0, 0)
val isWaitingForUser = actionableSession.state == AgentSessionInfo.STATE_WAITING_FOR_USER &&
!actionableSession.latestQuestion.isNullOrBlank()
findViewById<TextView>(R.id.session_detail_question_label).visibility =
if (isWaitingForUser) View.VISIBLE else View.GONE
findViewById<TextView>(R.id.session_detail_question).visibility =
if (isWaitingForUser) View.VISIBLE else View.GONE
findViewById<EditText>(R.id.session_detail_answer_input).visibility =
if (isWaitingForUser) View.VISIBLE else View.GONE
findViewById<Button>(R.id.session_detail_answer_button).visibility =
if (isWaitingForUser) View.VISIBLE else View.GONE
findViewById<TextView>(R.id.session_detail_question).text =
actionableSession.latestQuestion.orEmpty()
val isTopLevelActive = !isTerminalState(topLevelSession.state)
val topLevelActionNote = findViewById<TextView>(R.id.session_detail_top_level_action_note)
findViewById<Button>(R.id.session_detail_cancel_button).apply {
visibility = if (isTopLevelActive) View.VISIBLE else View.GONE
text = if (topLevelSession.anchor == AgentSessionInfo.ANCHOR_AGENT && viewState.childSessions.isNotEmpty()) {
"Cancel Child Sessions"
} else {
"Cancel Session"
}
}
findViewById<Button>(R.id.session_detail_delete_button).visibility =
if (isTopLevelActive) View.GONE else View.VISIBLE
findViewById<Button>(R.id.session_detail_delete_button).text = "Delete Session"
topLevelActionNote.visibility = View.VISIBLE
topLevelActionNote.text = if (topLevelSession.anchor == AgentSessionInfo.ANCHOR_AGENT) {
if (isTopLevelActive && viewState.childSessions.isEmpty()) {
"This Agent-anchored session is still planning targets."
} else if (isTopLevelActive) {
"Cancelling the top-level session cancels all active child sessions."
} else {
"Deleting the top-level session removes it and its child sessions from the Agent UI."
}
} else {
if (canStartStandaloneHomeSession) {
"This app-scoped session is ready to start. Use the Start dialog below."
} else if (isTopLevelActive) {
"This app-scoped session is still active."
} else {
"Deleting this app-scoped session dismisses it from framework and removes it from the Agent UI."
}
}
val childIsSelected = selectedChildSession != null
val isSelectedChildActive = selectedChildSession?.let { !isTerminalState(it.state) } == true
findViewById<LinearLayout>(R.id.session_detail_child_actions).visibility =
if (childIsSelected) View.VISIBLE else View.GONE
findViewById<Button>(R.id.session_detail_child_cancel_button).visibility =
if (isSelectedChildActive) View.VISIBLE else View.GONE
findViewById<Button>(R.id.session_detail_child_delete_button).visibility =
if (childIsSelected && !isSelectedChildActive) View.VISIBLE else View.GONE
val canAttach = childIsSelected &&
actionableSession.targetPresentation != AgentSessionInfo.TARGET_PRESENTATION_ATTACHED
findViewById<Button>(R.id.session_detail_attach_button).visibility =
if (canAttach) View.VISIBLE else View.GONE
val supportsInPlaceContinuation = topLevelSession.anchor == AgentSessionInfo.ANCHOR_AGENT
val continueVisibility = if (canStartStandaloneHomeSession || (!isTopLevelActive && supportsInPlaceContinuation)) {
View.VISIBLE
} else {
View.GONE
}
findViewById<TextView>(R.id.session_detail_follow_up_label).apply {
visibility = continueVisibility
text = if (canStartStandaloneHomeSession) {
"Start Session"
} else {
"Continue Same Session"
}
}
findViewById<EditText>(R.id.session_detail_follow_up_prompt).visibility =
if (canStartStandaloneHomeSession) View.GONE else continueVisibility
findViewById<Button>(R.id.session_detail_follow_up_button).apply {
visibility = continueVisibility
text = if (canStartStandaloneHomeSession) {
"Start Session"
} else {
"Send Continuation Prompt"
}
}
findViewById<TextView>(R.id.session_detail_follow_up_note).visibility =
if (!isTopLevelActive && !supportsInPlaceContinuation) View.VISIBLE else View.GONE
updateSessionUiLease(topLevelSession.sessionId)
}
private fun renderChildSessions(
sessions: List<AgentSessionDetails>,
selectedSessionId: String?,
) {
val container = findViewById<LinearLayout>(R.id.session_detail_children_container)
val emptyView = findViewById<TextView>(R.id.session_detail_children_empty)
container.removeAllViews()
emptyView.visibility = if (sessions.isEmpty()) View.VISIBLE else View.GONE
sessions.forEach { session ->
val isSelected = session.sessionId == selectedSessionId
val row = LinearLayout(this).apply {
orientation = LinearLayout.VERTICAL
setPadding(dp(12), dp(12), dp(12), dp(12))
isClickable = true
isFocusable = true
background = getDrawable(
if (isSelected) {
R.drawable.session_child_card_selected_background
} else {
R.drawable.session_child_card_background
},
)
val layoutParams = LinearLayout.LayoutParams(
LinearLayout.LayoutParams.MATCH_PARENT,
LinearLayout.LayoutParams.WRAP_CONTENT,
).apply {
bottomMargin = dp(8)
}
this.layoutParams = layoutParams
setOnClickListener {
selectedChildSessionId = if (session.sessionId == selectedChildSessionId) {
null
} else {
session.sessionId
}
requestedSessionId = topLevelSessionId
updateUi(latestSnapshot)
}
}
val title = TextView(this).apply {
text = SessionUiFormatter.relatedSessionTitle(this@SessionDetailActivity, session)
setTypeface(typeface, if (isSelected) Typeface.BOLD else Typeface.NORMAL)
}
val subtitle = TextView(this).apply {
text = SessionUiFormatter.relatedSessionSubtitle(session)
}
row.addView(title)
row.addView(subtitle)
container.addView(row)
}
}
private fun renderTimeline(
topLevelSession: AgentSessionDetails,
selectedChildSession: AgentSessionDetails?,
): String {
return if (selectedChildSession == null) {
topLevelSession.timeline
} else {
buildString {
append("Top-level ${topLevelSession.sessionId}\n")
append(topLevelSession.timeline)
append("\n\nSelected child ${selectedChildSession.sessionId}\n")
append(selectedChildSession.timeline)
}
}
}
private fun formatDetailSummary(summary: String): CharSequence {
val trimmed = summary.trim()
if (trimmed.isEmpty()) {
return ""
}
val builder = SpannableStringBuilder()
trimmed.lines().forEachIndexed { index, line ->
if (index > 0) {
builder.append("\n\n")
}
val separatorIndex = line.indexOf(':')
if (separatorIndex <= 0) {
builder.append(line)
return@forEachIndexed
}
val label = line.substring(0, separatorIndex)
val value = line.substring(separatorIndex + 1).trim()
appendBoldLine(builder, label)
if (value.isNotEmpty()) {
builder.append('\n')
builder.append(value)
}
}
return builder
}
private fun formatTimeline(
topLevelSession: AgentSessionDetails,
selectedChildSession: AgentSessionDetails?,
): CharSequence {
val builder = SpannableStringBuilder()
appendBoldLine(builder, "Top-level session ${topLevelSession.sessionId}")
builder.append('\n')
builder.append(topLevelSession.timeline.ifBlank { "No framework events yet." })
selectedChildSession?.let { child ->
builder.append("\n\n")
appendBoldLine(builder, "Selected child ${child.sessionId}")
builder.append('\n')
builder.append(child.timeline.ifBlank { "No framework events yet." })
}
return builder
}
private fun appendBoldLine(
builder: SpannableStringBuilder,
text: String,
) {
val start = builder.length
builder.append(text)
builder.setSpan(
StyleSpan(Typeface.BOLD),
start,
builder.length,
Spanned.SPAN_EXCLUSIVE_EXCLUSIVE,
)
}
private fun answerQuestion() {
val selectedSession = currentActionableSession(latestSnapshot) ?: return
val answerInput = findViewById<EditText>(R.id.session_detail_answer_input)
val answer = answerInput.text.toString().trim()
if (answer.isEmpty()) {
answerInput.error = "Enter an answer"
return
}
thread {
runCatching {
sessionController.answerQuestion(
selectedSession.sessionId,
answer,
topLevelSession(latestSnapshot)?.sessionId,
)
}.onFailure { err ->
showToast("Failed to answer question: ${err.message}")
}.onSuccess {
answerInput.post { answerInput.text.clear() }
topLevelSession(latestSnapshot)?.let { topLevelSession ->
SessionNotificationCoordinator.acknowledgeSessionTree(
context = this,
sessionController = sessionController,
topLevelSessionId = topLevelSession.sessionId,
sessionIds = listOf(topLevelSession.sessionId, selectedSession.sessionId),
)
}
showToast("Answered ${selectedSession.sessionId}")
refreshSnapshot(force = true)
}
}
}
private fun attachTarget() {
val selectedSession = selectedChildSession(latestSnapshot) ?: return
thread {
runCatching {
sessionController.attachTarget(selectedSession.sessionId)
}.onFailure { err ->
showToast("Failed to attach target: ${err.message}")
}.onSuccess {
showToast("Attached target for ${selectedSession.sessionId}")
refreshSnapshot(force = true)
}
}
}
private fun cancelSession() {
val topLevelSession = topLevelSession(latestSnapshot) ?: return
thread {
runCatching {
if (topLevelSession.anchor == AgentSessionInfo.ANCHOR_AGENT) {
val activeChildren = childSessions(latestSnapshot)
.filterNot { isTerminalState(it.state) }
if (activeChildren.isEmpty()) {
sessionController.cancelSession(topLevelSession.sessionId)
} else {
activeChildren.forEach { childSession ->
sessionController.cancelSession(childSession.sessionId)
}
}
} else {
sessionController.cancelSession(topLevelSession.sessionId)
}
}.onFailure { err ->
showToast("Failed to cancel session: ${err.message}")
}.onSuccess {
SessionNotificationCoordinator.acknowledgeSessionTree(
context = this,
sessionController = sessionController,
topLevelSessionId = topLevelSession.sessionId,
sessionIds = listOf(topLevelSession.sessionId) + childSessions(latestSnapshot).map(AgentSessionDetails::sessionId),
)
showToast(
if (topLevelSession.anchor == AgentSessionInfo.ANCHOR_AGENT) {
"Cancelled active child sessions"
} else {
"Cancelled ${topLevelSession.sessionId}"
},
)
refreshSnapshot(force = true)
}
}
}
private fun deleteSession() {
val topLevelSession = topLevelSession(latestSnapshot) ?: return
thread {
runCatching {
if (topLevelSession.anchor == AgentSessionInfo.ANCHOR_HOME) {
sessionController.cancelSession(topLevelSession.sessionId)
}
dismissedSessionStore.dismiss(topLevelSession.sessionId)
childSessions(latestSnapshot).forEach { childSession ->
dismissedSessionStore.dismiss(childSession.sessionId)
}
SessionNotificationCoordinator.acknowledgeSessionTree(
context = this,
sessionController = sessionController,
topLevelSessionId = topLevelSession.sessionId,
sessionIds = listOf(topLevelSession.sessionId) + childSessions(latestSnapshot).map(AgentSessionDetails::sessionId),
)
}.onFailure { err ->
showToast("Failed to delete session: ${err.message}")
}.onSuccess {
showToast("Deleted session")
finish()
}
}
}
private fun cancelSelectedChildSession() {
val selectedChildSession = selectedChildSession(latestSnapshot) ?: return
thread {
runCatching {
sessionController.cancelSession(selectedChildSession.sessionId)
}.onFailure { err ->
showToast("Failed to cancel child session: ${err.message}")
}.onSuccess {
topLevelSession(latestSnapshot)?.let { topLevelSession ->
SessionNotificationCoordinator.acknowledgeSessionTree(
context = this,
sessionController = sessionController,
topLevelSessionId = topLevelSession.sessionId,
sessionIds = listOf(selectedChildSession.sessionId),
)
}
showToast("Cancelled ${selectedChildSession.sessionId}")
refreshSnapshot(force = true)
}
}
}
private fun deleteSelectedChildSession() {
val selectedChildSession = selectedChildSession(latestSnapshot) ?: return
thread {
runCatching {
dismissedSessionStore.dismiss(selectedChildSession.sessionId)
}.onFailure { err ->
showToast("Failed to delete child session: ${err.message}")
}.onSuccess {
topLevelSession(latestSnapshot)?.let { topLevelSession ->
SessionNotificationCoordinator.acknowledgeSessionTree(
context = this,
sessionController = sessionController,
topLevelSessionId = topLevelSession.sessionId,
sessionIds = listOf(selectedChildSession.sessionId),
)
}
selectedChildSessionId = null
showToast("Deleted child session")
refreshSnapshot(force = true)
}
}
}
private fun sendFollowUpPrompt() {
val viewState = resolveViewState(latestSnapshot) ?: return
val isStandaloneHomeStart = canStartStandaloneHomeSession(viewState)
if (isStandaloneHomeStart) {
showStandaloneHomeSessionDialog(viewState)
} else {
val promptInput = findViewById<EditText>(R.id.session_detail_follow_up_prompt)
val prompt = promptInput.text.toString().trim()
if (prompt.isEmpty()) {
promptInput.error = "Enter a follow-up prompt"
return
}
promptInput.text.clear()
continueSessionInPlaceAsync(prompt, latestSnapshot)
}
}
private fun showStandaloneHomeSessionDialog(
viewState: SessionViewState,
) {
val topLevelSession = viewState.topLevelSession
val targetPackage = checkNotNull(topLevelSession.targetPackage) {
"No target package available for this session"
}
startActivity(
CreateSessionActivity.existingHomeSessionIntent(
context = this,
sessionId = topLevelSession.sessionId,
targetPackage = targetPackage,
initialSettings = sessionController.executionSettingsForSession(topLevelSession.sessionId),
),
)
moveTaskToBack(true)
}
private fun continueSessionInPlaceAsync(
prompt: String,
snapshot: AgentSnapshot,
) {
thread {
runCatching {
continueSessionInPlaceOnce(prompt, snapshot)
}.onFailure { err ->
showToast("Failed to continue session: ${err.message}")
}.onSuccess { result ->
showToast("Continued session in place")
runOnUiThread {
moveTaskToBack(true)
}
}
}
}
private fun continueSessionInPlaceOnce(
prompt: String,
snapshot: AgentSnapshot,
viewState: SessionViewState = resolveViewState(snapshot) ?: error("Session not found"),
): SessionStartResult {
val topLevelSession = viewState.topLevelSession
val selectedSession = viewState.selectedChildSession
?: viewState.childSessions.lastOrNull()
?: topLevelSession
Log.i(
TAG,
"Continuing session topLevel=${topLevelSession.sessionId} selected=${selectedSession.sessionId} anchor=${topLevelSession.anchor}",
)
return AgentSessionLauncher.continueSessionInPlace(
sourceTopLevelSession = topLevelSession,
selectedSession = selectedSession,
prompt = prompt,
sessionController = sessionController,
)
}
private fun topLevelSession(snapshot: AgentSnapshot): AgentSessionDetails? {
return resolveViewState(snapshot)?.topLevelSession
}
private fun childSessions(snapshot: AgentSnapshot): List<AgentSessionDetails> {
return resolveViewState(snapshot)?.childSessions.orEmpty()
}
private fun selectedChildSession(snapshot: AgentSnapshot): AgentSessionDetails? {
return resolveViewState(snapshot)?.selectedChildSession
}
private fun currentActionableSession(snapshot: AgentSnapshot): AgentSessionDetails? {
val viewState = resolveViewState(snapshot) ?: return null
return viewState.selectedChildSession ?: viewState.topLevelSession
}
private fun resolveViewState(snapshot: AgentSnapshot): SessionViewState? {
val sessionsById = snapshot.sessions.associateBy(AgentSessionDetails::sessionId)
val requestedSession = requestedSessionId?.let(sessionsById::get)
val resolvedTopLevelSession = topLevelSessionId?.let(sessionsById::get)
?: requestedSession?.let { session ->
if (session.parentSessionId == null) {
session
} else {
sessionsById[session.parentSessionId]
}
}
?: snapshot.parentSession
?: snapshot.selectedSession?.takeIf { it.parentSessionId == null }
?: SessionUiFormatter.topLevelSessions(snapshot).firstOrNull()
?: return null
topLevelSessionId = resolvedTopLevelSession.sessionId
requestedSessionId = resolvedTopLevelSession.sessionId
val visibleChildSessions = snapshot.sessions
.filter { session ->
session.parentSessionId == resolvedTopLevelSession.sessionId &&
!dismissedSessionStore.isDismissed(session.sessionId)
}
.sortedBy(AgentSessionDetails::sessionId)
val requestedChildSession = requestedSession?.takeIf { session ->
session.parentSessionId == resolvedTopLevelSession.sessionId &&
!dismissedSessionStore.isDismissed(session.sessionId)
}
val resolvedSelectedChildSession = selectedChildSessionId?.let(sessionsById::get)?.takeIf { session ->
session.parentSessionId == resolvedTopLevelSession.sessionId &&
!dismissedSessionStore.isDismissed(session.sessionId)
} ?: requestedChildSession
selectedChildSessionId = resolvedSelectedChildSession?.sessionId
return SessionViewState(
topLevelSession = resolvedTopLevelSession,
childSessions = visibleChildSessions,
selectedChildSession = resolvedSelectedChildSession,
)
}
private fun canStartStandaloneHomeSession(viewState: SessionViewState): Boolean {
val topLevelSession = viewState.topLevelSession
return topLevelSession.anchor == AgentSessionInfo.ANCHOR_HOME &&
topLevelSession.state == AgentSessionInfo.STATE_CREATED &&
viewState.childSessions.isEmpty()
}
private fun updateSessionUiLease(sessionId: String?) {
if (leasedSessionId == sessionId) {
return
}
leasedSessionId?.let { previous ->
runCatching {
sessionController.unregisterSessionUiLease(previous, sessionUiLeaseToken)
}
leasedSessionId = null
}
sessionId?.let { current ->
val registered = runCatching {
sessionController.registerSessionUiLease(current, sessionUiLeaseToken)
}
if (registered.isSuccess) {
leasedSessionId = current
}
}
}
private fun isTerminalState(state: Int): Boolean {
return state == AgentSessionInfo.STATE_COMPLETED ||
state == AgentSessionInfo.STATE_CANCELLED ||
state == AgentSessionInfo.STATE_FAILED
}
private fun showToast(message: String) {
runOnUiThread {
Toast.makeText(this, message, Toast.LENGTH_SHORT).show()
}
}
private fun dp(value: Int): Int {
return (value * resources.displayMetrics.density).toInt()
}
}

Some files were not shown because too many files have changed in this diff Show More