Compare commits

14 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Tiffany Citra | 980778f529 | Fix sandbox retry approvals | 2025-10-15 17:04:10 -07:00 |
| Kevin Alwell | 6e7d4a4b45 | Should fix #751 by adding verbose error message. | 2025-04-30 14:55:15 -04:00 |
| Kevin Alwell | 841e19b05d | fix typescript error | 2025-04-29 12:43:52 -04:00 |
| Kevin Alwell | f466a73428 | Omit default to fix type errors | 2025-04-29 12:30:19 -04:00 |
| Kevin Alwell | 1a868d35f3 | fix typecheck error | 2025-04-29 12:22:41 -04:00 |
| Kevin Alwell | 9c563054e0 | TypeScript bug bashing | 2025-04-29 12:17:31 -04:00 |
| Kevin Alwell | ca7204537c | TypeScript bug bashing | 2025-04-29 12:14:46 -04:00 |
| Kevin Alwell | 132e87cc8c | TypeScript bug bashing | 2025-04-29 12:13:38 -04:00 |
| Kevin Alwell | 8e80716169 | fix tests | 2025-04-29 12:10:54 -04:00 |
| Kevin Alwell | d28aedb07b | fix pipeline errors | 2025-04-29 12:05:16 -04:00 |
| Kevin Alwell | 586ee0ec71 | Merge branch 'main' into issue-726 | 2025-04-29 11:54:34 -04:00 |
| Kevin Alwell | 9065e61455 | Ensure disableResponseStorage flag is respected | 2025-04-29 11:48:47 -04:00 |
| Kevin Alwell | 6686f28338 | Ensure disableResponseStorage flag is respected | 2025-04-29 11:25:07 -04:00 |
| Kevin Alwell | 3818df7ba4 | Fixes issue #726 by adding config to configToSave object | 2025-04-29 10:12:55 -04:00 |
348 changed files with 5978 additions and 43299 deletions

View File

@@ -1 +0,0 @@
iTerm

View File

@@ -1,6 +0,0 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser

View File

@@ -1,27 +0,0 @@
FROM ubuntu:24.04
ARG DEBIAN_FRONTEND=noninteractive
# enable 'universe' because musl-tools & clang live there
RUN apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common && \
add-apt-repository --yes universe
# now install build deps
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential curl git ca-certificates \
pkg-config clang musl-tools libssl-dev just && \
rm -rf /var/lib/apt/lists/*
# Ubuntu 24.04 ships with user 'ubuntu' already created with UID 1000.
USER ubuntu
# install Rust + musl target as dev user
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal && \
~/.cargo/bin/rustup target add aarch64-unknown-linux-musl && \
~/.cargo/bin/rustup component add clippy rustfmt
ENV PATH="/home/ubuntu/.cargo/bin:${PATH}"
WORKDIR /workspace

View File

@@ -1,30 +0,0 @@
# Containerized Development
We provide the following options to facilitate Codex development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.
## Docker
To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:
```shell
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
docker build --platform=linux/amd64 -t "$CODEX_DOCKER_IMAGE_NAME" ./.devcontainer
docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64 -v "$PWD":/workspace -w /workspace/codex-rs "$CODEX_DOCKER_IMAGE_NAME"
```
Note that `/workspace/target` would otherwise contain the binaries built for your host platform, so we pass `-e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64` to `docker run` to ensure the binaries built inside the container are written to a separate directory.
For arm64, specify `--platform=linux/arm64` instead for both `docker build` and `docker run`.
Currently, the `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
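For example, from a shell inside the x64 flavor of the container (the `cargo build` invocation here mirrors the arm64 commands shown below):
```shell
rustup target add x86_64-unknown-linux-musl
cargo build --target x86_64-unknown-linux-musl
```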
## VS Code
VS Code recognizes the `devcontainer.json` file and gives you the option to develop Codex in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.
From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):
```shell
cargo build --target aarch64-unknown-linux-musl
cargo build --target aarch64-unknown-linux-gnu
```

View File

@@ -1,27 +0,0 @@
{
"name": "Codex",
"build": {
"dockerfile": "Dockerfile",
"context": "..",
"platform": "linux/arm64"
},
/* Force VS Code to run the container as arm64 in
case your host is x86 (or vice-versa). */
"runArgs": ["--platform=linux/arm64"],
"containerEnv": {
"RUST_BACKTRACE": "1",
"CARGO_TARGET_DIR": "${containerWorkspaceFolder}/codex-rs/target-arm64"
},
"remoteUser": "ubuntu",
"customizations": {
"vscode": {
"settings": {
"terminal.integrated.defaultProfile.linux": "bash"
},
"extensions": ["rust-lang.rust-analyzer", "tamasfe.even-better-toml"]
}
}
}

View File

@@ -1 +0,0 @@
/node_modules/

View File

@@ -1,8 +0,0 @@
printWidth = 80
quoteProps = "consistent"
semi = true
tabWidth = 2
trailingComma = "all"
# Preserve existing behavior for markdown/text wrapping.
proseWrap = "preserve"

View File

@@ -1,140 +0,0 @@
# openai/codex-action
`openai/codex-action` is a GitHub Action that facilitates the use of [Codex](https://github.com/openai/codex) on GitHub issues and pull requests. Using the action, you associate **labels** with prompts so that Codex runs with the appropriate prompt for the given context. Codex will respond by posting comments or creating PRs, whichever you specify!
Here is a sample workflow that uses `openai/codex-action`:
```yaml
name: Codex
on:
issues:
types: [opened, labeled]
pull_request:
branches: [main]
types: [labeled]
jobs:
codex:
if: ... # optional, but can be effective in conserving CI resources
runs-on: ubuntu-latest
# TODO(mbolin): Need to verify if/when `write` is necessary.
permissions:
contents: write
issues: write
pull-requests: write
steps:
# By default, Codex runs network disabled using --full-auto, so perform
# any setup that requires network (such as installing dependencies)
# before openai/codex-action.
- name: Checkout repository
uses: actions/checkout@v4
- name: Run Codex
uses: openai/codex-action@latest
with:
openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
github_token: ${{ secrets.GITHUB_TOKEN }}
```
See sample usage in [`codex.yml`](../../workflows/codex.yml).
## Triggering the Action
Using the sample workflow above, we have:
```yaml
on:
issues:
types: [opened, labeled]
pull_request:
branches: [main]
types: [labeled]
```
which means our workflow will be triggered when any of the following events occur:
- a label is added to an issue
- a label is added to a pull request against the `main` branch
### Label-Based Triggers
To define a GitHub label that should trigger Codex, create a file named `.github/codex/labels/LABEL-NAME.md` in your repository, where `LABEL-NAME` is the name of the label. The content of the file is the prompt template to use when the label is added (see more on [Prompt Template Variables](#prompt-template-variables) below).
For example, if the file `.github/codex/labels/codex-review.md` exists, then:
- Adding the `codex-review` label will trigger the workflow containing the `openai/codex-action` GitHub Action.
- When `openai/codex-action` starts, it will replace the `codex-review` label with `codex-review-in-progress`.
- When `openai/codex-action` is finished, it will replace the `codex-review-in-progress` label with `codex-review-completed`.
If Codex sees that either `codex-review-in-progress` or `codex-review-completed` is already present, it will not perform the action.
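For example, a minimal `.github/codex/labels/codex-review.md` might look like the following (the exact prompt wording is up to you):
```markdown
Review this PR and respond with a concise summary and review as a comment.

### {CODEX_ACTION_ISSUE_TITLE}

{CODEX_ACTION_ISSUE_BODY}
```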
As determined by the [default config](./src/default-label-config.ts), Codex will act on the following labels by default:
- Adding the `codex-review` label to a pull request will have Codex review the PR and post its review as a comment.
- Adding the `codex-triage` label to an issue will have Codex investigate the issue and report its findings as a comment.
- Adding the `codex-issue-fix` label to an issue will have Codex attempt to fix the issue and create a PR with the fix, if any.
## Action Inputs
The `openai/codex-action` GitHub Action takes the following inputs:
### `openai_api_key` (required)
Set your `OPENAI_API_KEY` as a [repository secret](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions). See **Secrets and variables**, then **Actions**, in the settings for your GitHub repo.
Note that the secret name does not have to be `OPENAI_API_KEY`. For example, you might want to name it `CODEX_OPENAI_API_KEY` and then configure it on `openai/codex-action` as follows:
```yaml
openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
```
### `github_token` (required)
This is required so that Codex can post a comment or create a PR. Set this value on the action as follows:
```yaml
github_token: ${{ secrets.GITHUB_TOKEN }}
```
### `codex_args`
A whitespace-delimited list of arguments to pass to Codex. Defaults to `--config hide_agent_reasoning=true --full-auto` (see `action.yml`). For example, to override the default model to use `o3`:
```yaml
codex_args: "--full-auto --model o3"
```
For more complex configurations, use the `codex_home` input.
### `codex_home`
If set, the value to use for the `$CODEX_HOME` environment variable when running Codex. As explained [in the docs](https://github.com/openai/codex/tree/main/codex-rs#readme), this folder can contain the `config.toml` to configure Codex, custom instructions, and log files.
This should be a relative path within your repo.
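For example, assuming your repository keeps a Codex configuration directory (a hypothetical path) containing a `config.toml` at `.github/codex/home`:
```yaml
codex_home: .github/codex/home
```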
## Prompt Template Variables
Prompt templates, whether supplied as label template files (described above) or via the `prompt`/`promptPath` keys of a label config, are populated and passed to Codex in response to certain events. All template variables are of the form `{CODEX_ACTION_...}`, and the supported values are defined below.
### `CODEX_ACTION_ISSUE_TITLE`
If the action was triggered on a GitHub issue, this is the issue title.
Specifically, it is read as `.issue.title` from `$GITHUB_EVENT_PATH`.
### `CODEX_ACTION_ISSUE_BODY`
If the action was triggered on a GitHub issue, this is the issue body.
Specifically, it is read as `.issue.body` from `$GITHUB_EVENT_PATH`.
### `CODEX_ACTION_GITHUB_EVENT_PATH`
The value of the `$GITHUB_EVENT_PATH` environment variable, which is the path to the file that contains the JSON payload for the event that triggered the workflow. Codex can use `jq` to read only the fields of interest from this file.
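For example, a prompt template might instruct Codex to run something like the following (illustrative only; `{CODEX_ACTION_GITHUB_EVENT_PATH}` is substituted with the actual path before the prompt is sent):
```shell
jq -r '.issue.title' "{CODEX_ACTION_GITHUB_EVENT_PATH}"
```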
### `CODEX_ACTION_PR_DIFF`
If the action was triggered on a pull request, this is the diff between the base and head commits of the PR. It is the output from `git diff`.
Note that the content of the diff could be quite large, so it is generally safer to point Codex at `CODEX_ACTION_GITHUB_EVENT_PATH` and let it decide how it wants to explore the change.
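That said, for small PRs a review template could embed the diff directly (illustrative only):
```markdown
Review the following diff and respond with a concise comment.

{CODEX_ACTION_PR_DIFF}
```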

View File

@@ -1,127 +0,0 @@
name: "Codex [reusable action]"
description: "A reusable action that runs a Codex model."
inputs:
openai_api_key:
description: "The value to use as the OPENAI_API_KEY environment variable when running Codex."
required: true
trigger_phrase:
description: "Text to trigger Codex from a PR/issue body or comment."
required: false
default: ""
github_token:
description: "Token so Codex can comment on the PR or issue."
required: true
codex_args:
description: "A whitespace-delimited list of arguments to pass to Codex. Due to limitations in YAML, arguments with spaces are not supported. For more complex configurations, use the `codex_home` input."
required: false
default: "--config hide_agent_reasoning=true --full-auto"
codex_home:
description: "Value to use as the CODEX_HOME environment variable when running Codex."
required: false
codex_release_tag:
description: "The release tag of the Codex model to run, e.g., 'rust-v0.3.0'. Defaults to the latest release."
required: false
default: ""
runs:
using: "composite"
steps:
# Do this in Bash so we do not even bother to install Bun if the sender does
# not have write access to the repo.
- name: Verify user has write access to the repo.
env:
GH_TOKEN: ${{ github.token }}
shell: bash
run: |
set -euo pipefail
PERMISSION=$(gh api \
"/repos/${GITHUB_REPOSITORY}/collaborators/${{ github.event.sender.login }}/permission" \
| jq -r '.permission')
if [[ "$PERMISSION" != "admin" && "$PERMISSION" != "write" ]]; then
exit 1
fi
- name: Download Codex
env:
GH_TOKEN: ${{ github.token }}
shell: bash
run: |
set -euo pipefail
# Determine OS/arch and corresponding Codex artifact name.
uname_s=$(uname -s)
uname_m=$(uname -m)
case "$uname_s" in
Linux*) os="linux" ;;
Darwin*) os="apple-darwin" ;;
*) echo "Unsupported operating system: $uname_s"; exit 1 ;;
esac
case "$uname_m" in
x86_64*) arch="x86_64" ;;
arm64*|aarch64*) arch="aarch64" ;;
*) echo "Unsupported architecture: $uname_m"; exit 1 ;;
esac
# linux builds differentiate between musl and gnu.
if [[ "$os" == "linux" ]]; then
if [[ "$arch" == "x86_64" ]]; then
triple="${arch}-unknown-linux-musl"
else
# Only other supported linux build is aarch64 gnu.
triple="${arch}-unknown-linux-gnu"
fi
else
# macOS
triple="${arch}-apple-darwin"
fi
# Note that if we start baking version numbers into the artifact name,
# we will need to update this action.yml file to match.
artifact="codex-exec-${triple}.tar.gz"
TAG_ARG="${{ inputs.codex_release_tag }}"
# The usage is `gh release download [<tag>] [flags]`, so if TAG_ARG
# is empty, we do not pass it so we can default to the latest release.
gh release download ${TAG_ARG:+$TAG_ARG} --repo openai/codex \
--pattern "$artifact" --output - \
| tar xzO > /usr/local/bin/codex-exec
chmod +x /usr/local/bin/codex-exec
# Display Codex version to confirm binary integrity; ensure we point it
# at the checked-out repository via --cd so that any subsequent commands
# use the correct working directory.
codex-exec --cd "$GITHUB_WORKSPACE" --version
- name: Install Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: 1.2.11
- name: Install dependencies
shell: bash
run: |
cd ${{ github.action_path }}
bun install --production
- name: Run Codex
shell: bash
run: bun run ${{ github.action_path }}/src/main.ts
# Process args plus environment variables are typically limited to 128 KiB
# in total, so the values below should fit within that limit.
env:
INPUT_CODEX_ARGS: ${{ inputs.codex_args || '' }}
INPUT_CODEX_HOME: ${{ inputs.codex_home || ''}}
INPUT_TRIGGER_PHRASE: ${{ inputs.trigger_phrase || '' }}
OPENAI_API_KEY: ${{ inputs.openai_api_key }}
GITHUB_TOKEN: ${{ inputs.github_token }}
GITHUB_EVENT_ACTION: ${{ github.event.action || '' }}
GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name || '' }}
GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number || '' }}
GITHUB_EVENT_ISSUE_BODY: ${{ github.event.issue.body || '' }}
GITHUB_EVENT_REVIEW_BODY: ${{ github.event.review.body || '' }}
GITHUB_EVENT_COMMENT_BODY: ${{ github.event.comment.body || '' }}

View File

@@ -1,91 +0,0 @@
{
"lockfileVersion": 1,
"workspaces": {
"": {
"name": "codex-action",
"dependencies": {
"@actions/core": "^1.11.1",
"@actions/github": "^6.0.1",
},
"devDependencies": {
"@types/bun": "^1.2.19",
"@types/node": "^24.1.0",
"prettier": "^3.6.2",
"typescript": "^5.8.3",
},
},
},
"packages": {
"@actions/core": ["@actions/core@1.11.1", "", { "dependencies": { "@actions/exec": "^1.1.1", "@actions/http-client": "^2.0.1" } }, "sha512-hXJCSrkwfA46Vd9Z3q4cpEpHB1rL5NG04+/rbqW9d3+CSvtB1tYe8UTpAlixa1vj0m/ULglfEK2UKxMGxCxv5A=="],
"@actions/exec": ["@actions/exec@1.1.1", "", { "dependencies": { "@actions/io": "^1.0.1" } }, "sha512-+sCcHHbVdk93a0XT19ECtO/gIXoxvdsgQLzb2fE2/5sIZmWQuluYyjPQtrtTHdU1YzTZ7bAPN4sITq2xi1679w=="],
"@actions/github": ["@actions/github@6.0.1", "", { "dependencies": { "@actions/http-client": "^2.2.0", "@octokit/core": "^5.0.1", "@octokit/plugin-paginate-rest": "^9.2.2", "@octokit/plugin-rest-endpoint-methods": "^10.4.0", "@octokit/request": "^8.4.1", "@octokit/request-error": "^5.1.1", "undici": "^5.28.5" } }, "sha512-xbZVcaqD4XnQAe35qSQqskb3SqIAfRyLBrHMd/8TuL7hJSz2QtbDwnNM8zWx4zO5l2fnGtseNE3MbEvD7BxVMw=="],
"@actions/http-client": ["@actions/http-client@2.2.3", "", { "dependencies": { "tunnel": "^0.0.6", "undici": "^5.25.4" } }, "sha512-mx8hyJi/hjFvbPokCg4uRd4ZX78t+YyRPtnKWwIl+RzNaVuFpQHfmlGVfsKEJN8LwTCvL+DfVgAM04XaHkm6bA=="],
"@actions/io": ["@actions/io@1.1.3", "", {}, "sha512-wi9JjgKLYS7U/z8PPbco+PvTb/nRWjeoFlJ1Qer83k/3C5PHQi28hiVdeE2kHXmIL99mQFawx8qt/JPjZilJ8Q=="],
"@fastify/busboy": ["@fastify/busboy@2.1.1", "", {}, "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA=="],
"@octokit/auth-token": ["@octokit/auth-token@4.0.0", "", {}, "sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA=="],
"@octokit/core": ["@octokit/core@5.2.1", "", { "dependencies": { "@octokit/auth-token": "^4.0.0", "@octokit/graphql": "^7.1.0", "@octokit/request": "^8.4.1", "@octokit/request-error": "^5.1.1", "@octokit/types": "^13.0.0", "before-after-hook": "^2.2.0", "universal-user-agent": "^6.0.0" } }, "sha512-dKYCMuPO1bmrpuogcjQ8z7ICCH3FP6WmxpwC03yjzGfZhj9fTJg6+bS1+UAplekbN2C+M61UNllGOOoAfGCrdQ=="],
"@octokit/endpoint": ["@octokit/endpoint@9.0.6", "", { "dependencies": { "@octokit/types": "^13.1.0", "universal-user-agent": "^6.0.0" } }, "sha512-H1fNTMA57HbkFESSt3Y9+FBICv+0jFceJFPWDePYlR/iMGrwM5ph+Dd4XRQs+8X+PUFURLQgX9ChPfhJ/1uNQw=="],
"@octokit/graphql": ["@octokit/graphql@7.1.1", "", { "dependencies": { "@octokit/request": "^8.4.1", "@octokit/types": "^13.0.0", "universal-user-agent": "^6.0.0" } }, "sha512-3mkDltSfcDUoa176nlGoA32RGjeWjl3K7F/BwHwRMJUW/IteSa4bnSV8p2ThNkcIcZU2umkZWxwETSSCJf2Q7g=="],
"@octokit/openapi-types": ["@octokit/openapi-types@24.2.0", "", {}, "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg=="],
"@octokit/plugin-paginate-rest": ["@octokit/plugin-paginate-rest@9.2.2", "", { "dependencies": { "@octokit/types": "^12.6.0" }, "peerDependencies": { "@octokit/core": "5" } }, "sha512-u3KYkGF7GcZnSD/3UP0S7K5XUFT2FkOQdcfXZGZQPGv3lm4F2Xbf71lvjldr8c1H3nNbF+33cLEkWYbokGWqiQ=="],
"@octokit/plugin-rest-endpoint-methods": ["@octokit/plugin-rest-endpoint-methods@10.4.1", "", { "dependencies": { "@octokit/types": "^12.6.0" }, "peerDependencies": { "@octokit/core": "5" } }, "sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg=="],
"@octokit/request": ["@octokit/request@8.4.1", "", { "dependencies": { "@octokit/endpoint": "^9.0.6", "@octokit/request-error": "^5.1.1", "@octokit/types": "^13.1.0", "universal-user-agent": "^6.0.0" } }, "sha512-qnB2+SY3hkCmBxZsR/MPCybNmbJe4KAlfWErXq+rBKkQJlbjdJeS85VI9r8UqeLYLvnAenU8Q1okM/0MBsAGXw=="],
"@octokit/request-error": ["@octokit/request-error@5.1.1", "", { "dependencies": { "@octokit/types": "^13.1.0", "deprecation": "^2.0.0", "once": "^1.4.0" } }, "sha512-v9iyEQJH6ZntoENr9/yXxjuezh4My67CBSu9r6Ve/05Iu5gNgnisNWOsoJHTP6k0Rr0+HQIpnH+kyammu90q/g=="],
"@octokit/types": ["@octokit/types@13.10.0", "", { "dependencies": { "@octokit/openapi-types": "^24.2.0" } }, "sha512-ifLaO34EbbPj0Xgro4G5lP5asESjwHracYJvVaPIyXMuiuXLlhic3S47cBdTb+jfODkTE5YtGCLt3Ay3+J97sA=="],
"@types/bun": ["@types/bun@1.2.19", "", { "dependencies": { "bun-types": "1.2.19" } }, "sha512-d9ZCmrH3CJ2uYKXQIUuZ/pUnTqIvLDS0SK7pFmbx8ma+ziH/FRMoAq5bYpRG7y+w1gl+HgyNZbtqgMq4W4e2Lg=="],
"@types/node": ["@types/node@24.1.0", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-ut5FthK5moxFKH2T1CUOC6ctR67rQRvvHdFLCD2Ql6KXmMuCrjsSsRI9UsLCm9M18BMwClv4pn327UvB7eeO1w=="],
"@types/react": ["@types/react@19.1.8", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g=="],
"before-after-hook": ["before-after-hook@2.2.3", "", {}, "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ=="],
"bun-types": ["bun-types@1.2.19", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-uAOTaZSPuYsWIXRpj7o56Let0g/wjihKCkeRqUBhlLVM/Bt+Fj9xTo+LhC1OV1XDaGkz4hNC80et5xgy+9KTHQ=="],
"csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="],
"deprecation": ["deprecation@2.3.1", "", {}, "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ=="],
"once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
"prettier": ["prettier@3.6.2", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ=="],
"tunnel": ["tunnel@0.0.6", "", {}, "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg=="],
"typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="],
"undici": ["undici@5.29.0", "", { "dependencies": { "@fastify/busboy": "^2.0.0" } }, "sha512-raqeBD6NQK4SkWhQzeYKd1KmIG6dllBOTt55Rmkt4HtI9mwdWtJljnrXjAFUBLTSN67HWrOIZ3EPF4kjUw80Bg=="],
"undici-types": ["undici-types@7.8.0", "", {}, "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw=="],
"universal-user-agent": ["universal-user-agent@6.0.1", "", {}, "sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ=="],
"wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="],
"@octokit/plugin-paginate-rest/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],
"@octokit/plugin-rest-endpoint-methods/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],
"bun-types/@types/node": ["@types/node@24.0.13", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-Qm9OYVOFHFYg3wJoTSrz80hoec5Lia/dPp84do3X7dZvLikQvM1YpmvTBEdIr/e+U8HTkFjLHLnl78K/qjf+jQ=="],
"@octokit/plugin-paginate-rest/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],
"@octokit/plugin-rest-endpoint-methods/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],
}
}

View File

@@ -1,21 +0,0 @@
{
"name": "codex-action",
"version": "0.0.0",
"private": true,
"scripts": {
"format": "prettier --check src",
"format:fix": "prettier --write src",
"test": "bun test",
"typecheck": "tsc"
},
"dependencies": {
"@actions/core": "^1.11.1",
"@actions/github": "^6.0.1"
},
"devDependencies": {
"@types/bun": "^1.2.19",
"@types/node": "^24.1.0",
"prettier": "^3.6.2",
"typescript": "^5.8.3"
}
}

View File

@@ -1,85 +0,0 @@
import * as github from "@actions/github";
import type { EnvContext } from "./env-context";
/**
* Add an "eyes" reaction to the entity (issue, issue comment, or pull request
* review comment) that triggered the current Codex invocation.
*
* The purpose is to provide immediate feedback to the user similar to the
* *-in-progress label flow indicating that the bot has acknowledged the
* request and is working on it.
*
* We attempt to add the reaction best suited for the current GitHub event:
*
* • issues → POST /repos/{owner}/{repo}/issues/{issue_number}/reactions
* • issue_comment → POST /repos/{owner}/{repo}/issues/comments/{comment_id}/reactions
* • pull_request_review_comment → POST /repos/{owner}/{repo}/pulls/comments/{comment_id}/reactions
*
* If the specific target is unavailable (e.g. unexpected payload shape) we
* silently skip instead of failing the whole action because the reaction is
* merely cosmetic.
*/
export async function addEyesReaction(ctx: EnvContext): Promise<void> {
const octokit = ctx.getOctokit();
const { owner, repo } = github.context.repo;
const eventName = github.context.eventName;
try {
switch (eventName) {
case "issue_comment": {
const commentId = (github.context.payload as any)?.comment?.id;
if (commentId) {
await octokit.rest.reactions.createForIssueComment({
owner,
repo,
comment_id: commentId,
content: "eyes",
});
return;
}
break;
}
case "pull_request_review_comment": {
const commentId = (github.context.payload as any)?.comment?.id;
if (commentId) {
await octokit.rest.reactions.createForPullRequestReviewComment({
owner,
repo,
comment_id: commentId,
content: "eyes",
});
return;
}
break;
}
case "issues": {
const issueNumber = github.context.issue.number;
if (issueNumber) {
await octokit.rest.reactions.createForIssue({
owner,
repo,
issue_number: issueNumber,
content: "eyes",
});
return;
}
break;
}
default: {
// Fallback: try to react to the issue/PR if we have a number.
const issueNumber = github.context.issue.number;
if (issueNumber) {
await octokit.rest.reactions.createForIssue({
owner,
repo,
issue_number: issueNumber,
content: "eyes",
});
}
}
}
} catch (error) {
// Do not fail the action if reaction creation fails; log and continue.
console.warn(`Failed to add \"eyes\" reaction: ${error}`);
}
}

View File

@@ -1,53 +0,0 @@
import type { EnvContext } from "./env-context";
import { runCodex } from "./run-codex";
import { postComment } from "./post-comment";
import { addEyesReaction } from "./add-reaction";
/**
* Handle `issue_comment` and `pull_request_review_comment` events once we know
* the action is supported.
*/
export async function onComment(ctx: EnvContext): Promise<void> {
const triggerPhrase = ctx.tryGet("INPUT_TRIGGER_PHRASE");
if (!triggerPhrase) {
console.warn("Empty trigger phrase: skipping.");
return;
}
// Attempt to get the body of the comment from the environment. Depending on
// the event type, `GITHUB_EVENT_COMMENT_BODY` (issue & PR comments),
// `GITHUB_EVENT_REVIEW_BODY` (PR reviews), or `GITHUB_EVENT_ISSUE_BODY`
// (newly opened issues) may be set.
const commentBody =
ctx.tryGetNonEmpty("GITHUB_EVENT_COMMENT_BODY") ??
ctx.tryGetNonEmpty("GITHUB_EVENT_REVIEW_BODY") ??
ctx.tryGetNonEmpty("GITHUB_EVENT_ISSUE_BODY");
if (!commentBody) {
console.warn("Comment body not found in environment: skipping.");
return;
}
// Check if the trigger phrase is present.
if (!commentBody.includes(triggerPhrase)) {
console.log(
`Trigger phrase '${triggerPhrase}' not found: nothing to do for this comment.`,
);
return;
}
// Derive the prompt by removing the trigger phrase. Remove only the first
// occurrence to keep any additional occurrences that might be meaningful.
const prompt = commentBody.replace(triggerPhrase, "").trim();
if (prompt.length === 0) {
console.warn("Prompt is empty after removing trigger phrase: skipping");
return;
}
// Provide immediate feedback that we are working on the request.
await addEyesReaction(ctx);
// Run Codex and post the response as a new comment.
const lastMessage = await runCodex(prompt, ctx);
await postComment(lastMessage, ctx);
}

View File

@@ -1,11 +0,0 @@
export interface Config {
labels: Record<string, LabelConfig>;
}
export interface LabelConfig {
/** Returns the prompt template. */
getPromptTemplate(): string;
}

View File

@@ -1,44 +0,0 @@
import type { Config } from "./config";
export function getDefaultConfig(): Config {
return {
labels: {
"codex-investigate-issue": {
getPromptTemplate: () =>
`
Troubleshoot whether the reported issue is valid.
Provide a concise and respectful comment summarizing the findings.
### {CODEX_ACTION_ISSUE_TITLE}
{CODEX_ACTION_ISSUE_BODY}
`.trim(),
},
"codex-code-review": {
getPromptTemplate: () =>
`
Review this PR and respond with a very concise final message, formatted in Markdown.
There should be a summary of the changes (1-2 sentences) and a few bullet points if necessary.
Then provide the **review** (1-2 sentences plus bullet points, friendly tone).
{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the \`base\` and \`head\` refs that define this PR. Both refs are available locally.
`.trim(),
},
"codex-attempt-fix": {
getPromptTemplate: () =>
`
Attempt to solve the reported issue.
If a code change is required, create a new branch, commit the fix, and open a pull-request that resolves the problem.
### {CODEX_ACTION_ISSUE_TITLE}
{CODEX_ACTION_ISSUE_BODY}
`.trim(),
},
},
};
}

View File

@@ -1,116 +0,0 @@
/*
* Centralised access to environment variables used by the Codex GitHub
* Action.
*
* To enable proper unit-testing we avoid reading from `process.env` at module
* initialisation time. Instead a `EnvContext` object is created (usually from
* the real `process.env`) and passed around explicitly or where that is not
* yet practical imported as the shared `defaultContext` singleton. Tests can
* create their own context backed by a stubbed map of variables without having
* to mutate global state.
*/
import { fail } from "./fail";
import * as github from "@actions/github";
export interface EnvContext {
/**
* Return the value for a given environment variable or terminate the action
* via `fail` if it is missing / empty.
*/
get(name: string): string;
/**
* Attempt to read an environment variable. Returns the value when present;
* otherwise returns undefined (does not call `fail`).
*/
tryGet(name: string): string | undefined;
/**
* Attempt to read an environment variable. Returns non-empty string value or
* null if unset or empty string.
*/
tryGetNonEmpty(name: string): string | null;
/**
* Return a memoised Octokit instance authenticated via the token resolved
* from the provided argument (when defined) or the environment variables
* `GITHUB_TOKEN`/`GH_TOKEN`.
*
* Subsequent calls return the same cached instance to avoid spawning
* multiple REST clients within a single action run.
*/
getOctokit(token?: string): ReturnType<typeof github.getOctokit>;
}
/** Internal helper *not* exported. */
function _getRequiredEnv(
name: string,
env: Record<string, string | undefined>,
): string | undefined {
const value = env[name];
// Avoid leaking secrets into logs while still logging non-secret variables.
if (name.endsWith("KEY") || name.endsWith("TOKEN")) {
if (value) {
console.log(`value for ${name} was found`);
}
} else {
console.log(`${name}=${value}`);
}
return value;
}
/** Create a context backed by the supplied environment map (defaults to `process.env`). */
export function createEnvContext(
env: Record<string, string | undefined> = process.env,
): EnvContext {
// Lazily instantiated Octokit client shared across this context.
let cachedOctokit: ReturnType<typeof github.getOctokit> | null = null;
return {
get(name: string): string {
const value = _getRequiredEnv(name, env);
if (value == null) {
fail(`Missing required environment variable: ${name}`);
}
return value;
},
tryGet(name: string): string | undefined {
return _getRequiredEnv(name, env);
},
tryGetNonEmpty(name: string): string | null {
const value = _getRequiredEnv(name, env);
return value == null || value === "" ? null : value;
},
getOctokit(token?: string) {
if (cachedOctokit) {
return cachedOctokit;
}
// Determine the token to authenticate with.
const githubToken = token ?? env["GITHUB_TOKEN"] ?? env["GH_TOKEN"];
if (!githubToken) {
fail(
"Unable to locate a GitHub token. `github_token` should have been set on the action.",
);
}
cachedOctokit = github.getOctokit(githubToken!);
return cachedOctokit;
},
};
}
/**
* Shared context built from the actual `process.env`. Production code that is
* not yet refactored to receive a context explicitly may import and use this
* singleton. Tests should avoid the singleton and instead pass their own
* context to the functions they exercise.
*/
export const defaultContext: EnvContext = createEnvContext();
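// Example (illustrative sketch, not part of this module's API surface): a test
// can build its own context from a stubbed map instead of mutating
// `process.env`:
//
//   import { createEnvContext } from "./env-context";
//
//   const ctx = createEnvContext({ GITHUB_WORKSPACE: "/tmp/repo" });
//   ctx.get("GITHUB_WORKSPACE");         // "/tmp/repo"
//   ctx.tryGetNonEmpty("GITHUB_RUN_ID"); // null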

View File

@@ -1,4 +0,0 @@
export function fail(message: string): never {
console.error(message);
process.exit(1);
}

View File

@@ -1,149 +0,0 @@
import { spawnSync } from "child_process";
import * as github from "@actions/github";
import { EnvContext } from "./env-context";
function runGit(args: string[], silent = true): string {
console.info(`Running git ${args.join(" ")}`);
const res = spawnSync("git", args, {
encoding: "utf8",
stdio: silent ? ["ignore", "pipe", "pipe"] : "inherit",
});
if (res.error) {
throw res.error;
}
if (res.status !== 0) {
// Include stderr in the error message so the caller can diagnose the failure.
throw new Error(
`git ${args.join(" ")} failed with code ${res.status}: ${res.stderr}`,
);
}
return res.stdout.trim();
}
function stageAllChanges() {
runGit(["add", "-A"]);
}
function hasStagedChanges(): boolean {
const res = spawnSync("git", ["diff", "--cached", "--quiet", "--exit-code"]);
return res.status !== 0;
}
function ensureOnBranch(
issueNumber: number,
protectedBranches: string[],
suggestedSlug?: string,
): string {
let branch = "";
try {
branch = runGit(["symbolic-ref", "--short", "-q", "HEAD"]);
} catch {
branch = "";
}
// If detached HEAD or on a protected branch, create a new branch.
if (!branch || protectedBranches.includes(branch)) {
if (suggestedSlug) {
const safeSlug = suggestedSlug
.toLowerCase()
.replace(/[^\w\s-]/g, "")
.trim()
.replace(/\s+/g, "-");
branch = `codex-fix-${issueNumber}-${safeSlug}`;
} else {
branch = `codex-fix-${issueNumber}-${Date.now()}`;
}
runGit(["switch", "-c", branch]);
}
return branch;
}
function commitIfNeeded(issueNumber: number) {
if (hasStagedChanges()) {
runGit([
"commit",
"-m",
`fix: automated fix for #${issueNumber} via Codex`,
]);
}
}
function pushBranch(branch: string, githubToken: string, ctx: EnvContext) {
const repoSlug = ctx.get("GITHUB_REPOSITORY"); // owner/repo
const remoteUrl = `https://x-access-token:${githubToken}@github.com/${repoSlug}.git`;
runGit(["push", "--force-with-lease", "-u", remoteUrl, `HEAD:${branch}`]);
}
/**
* If this returns a string, it is the URL of the created PR.
*/
export async function maybePublishPRForIssue(
issueNumber: number,
lastMessage: string,
ctx: EnvContext,
): Promise<string | undefined> {
// Only proceed if GITHUB_TOKEN available.
const githubToken =
ctx.tryGetNonEmpty("GITHUB_TOKEN") ?? ctx.tryGetNonEmpty("GH_TOKEN");
if (!githubToken) {
console.warn("No GitHub token - skipping PR creation.");
return undefined;
}
// Print `git status` for debugging.
runGit(["status"]);
// Stage any remaining changes so they can be committed and pushed.
stageAllChanges();
const octokit = ctx.getOctokit(githubToken);
const { owner, repo } = github.context.repo;
// Determine default branch to treat as protected.
let defaultBranch = "main";
try {
const repoInfo = await octokit.rest.repos.get({ owner, repo });
defaultBranch = repoInfo.data.default_branch ?? "main";
} catch (e) {
console.warn(`Failed to get default branch, assuming 'main': ${e}`);
}
const sanitizedMessage = lastMessage.replace(/\u2022/g, "-");
const [summaryLine] = sanitizedMessage.split(/\r?\n/);
const branch = ensureOnBranch(issueNumber, [defaultBranch, "master"], summaryLine);
commitIfNeeded(issueNumber);
pushBranch(branch, githubToken, ctx);
// Try to find existing PR for this branch
const headParam = `${owner}:${branch}`;
const existing = await octokit.rest.pulls.list({
owner,
repo,
head: headParam,
state: "open",
});
if (existing.data.length > 0) {
return existing.data[0].html_url;
}
// The base branch is the repository default branch determined above.
const baseBranch = defaultBranch;
const pr = await octokit.rest.pulls.create({
owner,
repo,
title: summaryLine,
head: branch,
base: baseBranch,
body: sanitizedMessage,
});
return pr.data.html_url;
}

View File

@@ -1,16 +0,0 @@
export function setGitHubActionsUser(): void {
const commands = [
["git", "config", "--global", "user.name", "github-actions[bot]"],
[
"git",
"config",
"--global",
"user.email",
"41898282+github-actions[bot]@users.noreply.github.com",
],
];
for (const command of commands) {
Bun.spawnSync(command);
}
}

View File

@@ -1,11 +0,0 @@
import * as pathMod from "path";
import { EnvContext } from "./env-context";
export function resolveWorkspacePath(path: string, ctx: EnvContext): string {
if (pathMod.isAbsolute(path)) {
return path;
} else {
const workspace = ctx.get("GITHUB_WORKSPACE");
return pathMod.join(workspace, path);
}
}

View File

@@ -1,56 +0,0 @@
import type { Config, LabelConfig } from "./config";
import { getDefaultConfig } from "./default-label-config";
import { readFileSync, readdirSync, statSync } from "fs";
import * as path from "path";
/**
* Build an in-memory configuration object by scanning the repository for
* Markdown templates located in `.github/codex/labels`.
*
* Each `*.md` file in that directory represents a label that can trigger the
* Codex GitHub Action. The filename **without** the extension is interpreted
* as the label name, e.g. `codex-review.md` ➜ `codex-review`.
*
* For every such label we derive the corresponding `doneLabel` by appending
* the suffix `-completed`.
*/
export function loadConfig(workspace: string): Config {
const labelsDir = path.join(workspace, ".github", "codex", "labels");
let entries: string[];
try {
entries = readdirSync(labelsDir);
} catch {
// If the directory is missing, return the default configuration.
return getDefaultConfig();
}
const labels: Record<string, LabelConfig> = {};
for (const entry of entries) {
if (!entry.endsWith(".md")) {
continue;
}
const fullPath = path.join(labelsDir, entry);
if (!statSync(fullPath).isFile()) {
continue;
}
const labelName = entry.slice(0, -3); // trim ".md"
labels[labelName] = new FileLabelConfig(fullPath);
}
return { labels };
}
class FileLabelConfig implements LabelConfig {
constructor(private readonly promptPath: string) {}
getPromptTemplate(): string {
return readFileSync(this.promptPath, "utf8");
}
}

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env bun
import type { Config } from "./config";
import { defaultContext, EnvContext } from "./env-context";
import { loadConfig } from "./load-config";
import { setGitHubActionsUser } from "./git-user";
import { onLabeled } from "./process-label";
import { ensureBaseAndHeadCommitsForPRAreAvailable } from "./prompt-template";
import { performAdditionalValidation } from "./verify-inputs";
import { onComment } from "./comment";
import { onReview } from "./review";
async function main(): Promise<void> {
const ctx: EnvContext = defaultContext;
// Build the configuration dynamically by scanning `.github/codex/labels`.
const GITHUB_WORKSPACE = ctx.get("GITHUB_WORKSPACE");
const config: Config = loadConfig(GITHUB_WORKSPACE);
// Optionally perform additional validation of prompt template files.
performAdditionalValidation(config, GITHUB_WORKSPACE);
const GITHUB_EVENT_NAME = ctx.get("GITHUB_EVENT_NAME");
const GITHUB_EVENT_ACTION = ctx.get("GITHUB_EVENT_ACTION");
// Set user.name and user.email to a bot before Codex runs, just in case it
// creates a commit.
setGitHubActionsUser();
switch (GITHUB_EVENT_NAME) {
case "issues": {
if (GITHUB_EVENT_ACTION === "labeled") {
await onLabeled(config, ctx);
return;
} else if (GITHUB_EVENT_ACTION === "opened") {
await onComment(ctx);
return;
}
break;
}
case "issue_comment": {
if (GITHUB_EVENT_ACTION === "created") {
await onComment(ctx);
return;
}
break;
}
case "pull_request": {
if (GITHUB_EVENT_ACTION === "labeled") {
await ensureBaseAndHeadCommitsForPRAreAvailable(ctx);
await onLabeled(config, ctx);
return;
}
break;
}
case "pull_request_review": {
await ensureBaseAndHeadCommitsForPRAreAvailable(ctx);
if (GITHUB_EVENT_ACTION === "submitted") {
await onReview(ctx);
return;
}
break;
}
case "pull_request_review_comment": {
await ensureBaseAndHeadCommitsForPRAreAvailable(ctx);
if (GITHUB_EVENT_ACTION === "created") {
await onComment(ctx);
return;
}
break;
}
}
console.warn(
`Unsupported action '${GITHUB_EVENT_ACTION}' for event '${GITHUB_EVENT_NAME}'.`,
);
}
main();

View File

@@ -1,62 +0,0 @@
import { fail } from "./fail";
import * as github from "@actions/github";
import { EnvContext } from "./env-context";
/**
* Post a comment to the issue / pull request currently in scope.
*
* Provide the environment context so that token lookup (inside getOctokit) does
* not rely on global state.
*/
export async function postComment(
commentBody: string,
ctx: EnvContext,
): Promise<void> {
// Append a footer with a link back to the workflow run, if available.
const footer = buildWorkflowRunFooter(ctx);
const bodyWithFooter = footer ? `${commentBody}${footer}` : commentBody;
const octokit = ctx.getOctokit();
console.info("Got Octokit instance for posting comment");
const { owner, repo } = github.context.repo;
const issueNumber = github.context.issue.number;
if (!issueNumber) {
console.warn(
"No issue or pull_request number found in GitHub context; skipping comment creation.",
);
return;
}
try {
console.info("Calling octokit.rest.issues.createComment()");
await octokit.rest.issues.createComment({
owner,
repo,
issue_number: issueNumber,
body: bodyWithFooter,
});
} catch (error) {
fail(`Failed to create comment via GitHub API: ${error}`);
}
}
/**
* Helper to build a Markdown fragment linking back to the workflow run that
* generated the current comment. Returns `undefined` if required environment
* variables are missing, e.g. when running outside of GitHub Actions, so we
* can gracefully skip the footer in those cases.
*/
function buildWorkflowRunFooter(ctx: EnvContext): string | undefined {
const serverUrl =
ctx.tryGetNonEmpty("GITHUB_SERVER_URL") ?? "https://github.com";
const repository = ctx.tryGetNonEmpty("GITHUB_REPOSITORY");
const runId = ctx.tryGetNonEmpty("GITHUB_RUN_ID");
if (!repository || !runId) {
return undefined;
}
const url = `${serverUrl}/${repository}/actions/runs/${runId}`;
return `\n\n---\n*[_View workflow run_](${url})*`;
}

View File

@@ -1,195 +0,0 @@
import { fail } from "./fail";
import { EnvContext } from "./env-context";
import { renderPromptTemplate } from "./prompt-template";
import { postComment } from "./post-comment";
import { runCodex } from "./run-codex";
import * as github from "@actions/github";
import { Config, LabelConfig } from "./config";
import { maybePublishPRForIssue } from "./git-helpers";
export async function onLabeled(
config: Config,
ctx: EnvContext,
): Promise<void> {
const GITHUB_EVENT_LABEL_NAME = ctx.get("GITHUB_EVENT_LABEL_NAME");
const labelConfig = config.labels[GITHUB_EVENT_LABEL_NAME] as
| LabelConfig
| undefined;
if (!labelConfig) {
fail(
`Label \`${GITHUB_EVENT_LABEL_NAME}\` not found in config: ${JSON.stringify(config)}`,
);
}
await processLabelConfig(ctx, GITHUB_EVENT_LABEL_NAME, labelConfig);
}
/**
* Wrapper that handles `-in-progress` and `-completed` semantics around the core lint/fix/review
* processing. It will:
*
* - Skip execution if the `-in-progress` or `-completed` label is already present.
* - Mark the PR/issue as `-in-progress`.
* - After successful execution, mark the PR/issue as `-completed`.
*/
async function processLabelConfig(
ctx: EnvContext,
label: string,
labelConfig: LabelConfig,
): Promise<void> {
const octokit = ctx.getOctokit();
const { owner, repo, issueNumber, labelNames } =
await getCurrentLabels(octokit);
const inProgressLabel = `${label}-in-progress`;
const completedLabel = `${label}-completed`;
for (const markerLabel of [inProgressLabel, completedLabel]) {
if (labelNames.includes(markerLabel)) {
console.log(
`Label '${markerLabel}' already present on issue/PR #${issueNumber}. Skipping Codex action.`,
);
// Clean up: remove the triggering label to avoid confusion and re-runs.
await addAndRemoveLabels(octokit, {
owner,
repo,
issueNumber,
remove: label,
});
return;
}
}
// Mark the PR/issue as in progress.
await addAndRemoveLabels(octokit, {
owner,
repo,
issueNumber,
add: inProgressLabel,
remove: label,
});
// Run the core Codex processing.
await processLabel(ctx, label, labelConfig);
// Mark the PR/issue as completed.
await addAndRemoveLabels(octokit, {
owner,
repo,
issueNumber,
add: completedLabel,
remove: inProgressLabel,
});
}
async function processLabel(
ctx: EnvContext,
label: string,
labelConfig: LabelConfig,
): Promise<void> {
const template = labelConfig.getPromptTemplate();
const populatedTemplate = await renderPromptTemplate(template, ctx);
// Always run Codex and post the resulting message as a comment.
let commentBody = await runCodex(populatedTemplate, ctx);
// Current heuristic: only try to create a PR if "attempt" or "fix" is in the
// label name. (Yes, we plan to evolve this.)
if (label.indexOf("fix") !== -1 || label.indexOf("attempt") !== -1) {
console.info(`label ${label} indicates we should attempt to create a PR`);
const prUrl = await maybeFixIssue(ctx, commentBody);
if (prUrl) {
commentBody += `\n\n---\nOpened pull request: ${prUrl}`;
}
} else {
console.info(
`label ${label} does not indicate we should attempt to create a PR`,
);
}
await postComment(commentBody, ctx);
}
async function maybeFixIssue(
ctx: EnvContext,
lastMessage: string,
): Promise<string | undefined> {
// Attempt to create a PR out of any changes Codex produced.
const issueNumber = github.context.issue.number!; // exists for issues triggering this path
try {
return await maybePublishPRForIssue(issueNumber, lastMessage, ctx);
} catch (e) {
console.warn(`Failed to publish PR: ${e}`);
}
}
async function getCurrentLabels(
octokit: ReturnType<typeof github.getOctokit>,
): Promise<{
owner: string;
repo: string;
issueNumber: number;
labelNames: Array<string>;
}> {
const { owner, repo } = github.context.repo;
const issueNumber = github.context.issue.number;
if (!issueNumber) {
fail("No issue or pull_request number found in GitHub context.");
}
const { data: issueData } = await octokit.rest.issues.get({
owner,
repo,
issue_number: issueNumber,
});
const labelNames =
issueData.labels?.map((label: any) =>
typeof label === "string" ? label : label.name,
) ?? [];
return { owner, repo, issueNumber, labelNames };
}
async function addAndRemoveLabels(
octokit: ReturnType<typeof github.getOctokit>,
opts: {
owner: string;
repo: string;
issueNumber: number;
add?: string;
remove?: string;
},
): Promise<void> {
const { owner, repo, issueNumber, add, remove } = opts;
if (add) {
try {
await octokit.rest.issues.addLabels({
owner,
repo,
issue_number: issueNumber,
labels: [add],
});
} catch (error) {
console.warn(`Failed to add label '${add}': ${error}`);
}
}
if (remove) {
try {
await octokit.rest.issues.removeLabel({
owner,
repo,
issue_number: issueNumber,
name: remove,
});
} catch (error) {
console.warn(`Failed to remove label '${remove}': ${error}`);
}
}
}

View File

@@ -1,284 +0,0 @@
/*
* Utilities to render Codex prompt templates.
*
* A template is a Markdown (or plain-text) file that may contain one or more
* placeholders of the form `{CODEX_ACTION_<NAME>}`. At runtime these
* placeholders are substituted with dynamically generated content. Each
* placeholder is resolved **exactly once** even if it appears multiple times
* in the same template.
*/
import { readFile } from "fs/promises";
import { EnvContext } from "./env-context";
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
* Lazily caches parsed `$GITHUB_EVENT_PATH` contents keyed by the file path so
* we only hit the filesystem once per unique event payload.
*/
const githubEventDataCache: Map<string, Promise<any>> = new Map();
function getGitHubEventData(ctx: EnvContext): Promise<any> {
const eventPath = ctx.get("GITHUB_EVENT_PATH");
let cached = githubEventDataCache.get(eventPath);
if (!cached) {
cached = readFile(eventPath, "utf8").then((raw) => JSON.parse(raw));
githubEventDataCache.set(eventPath, cached);
}
return cached;
}
async function runCommand(args: Array<string>): Promise<string> {
const result = Bun.spawnSync(args, {
stdout: "pipe",
stderr: "pipe",
});
if (result.success) {
return result.stdout.toString();
}
console.error(`Error running ${JSON.stringify(args)}: ${result.stderr}`);
return "";
}
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
// Regex that captures the variable name without the surrounding { } braces.
const VAR_REGEX = /\{(CODEX_ACTION_[A-Z0-9_]+)\}/g;
// Cache individual placeholder values so each one is resolved at most once per
// process even if many templates reference it.
const placeholderCache: Map<string, Promise<string>> = new Map();
/**
* Parse a template string, resolve all placeholders and return the rendered
* result.
*/
export async function renderPromptTemplate(
template: string,
ctx: EnvContext,
): Promise<string> {
// ---------------------------------------------------------------------
// 1) Gather all *unique* placeholders present in the template.
// ---------------------------------------------------------------------
const variables = new Set<string>();
for (const match of template.matchAll(VAR_REGEX)) {
variables.add(match[1]);
}
// ---------------------------------------------------------------------
// 2) Kick off (or reuse) async resolution for each variable.
// ---------------------------------------------------------------------
for (const variable of variables) {
if (!placeholderCache.has(variable)) {
placeholderCache.set(variable, resolveVariable(variable, ctx));
}
}
// ---------------------------------------------------------------------
// 3) Await completion so we can perform a simple synchronous replace below.
// ---------------------------------------------------------------------
const resolvedEntries: [string, string][] = [];
for (const [key, promise] of placeholderCache.entries()) {
resolvedEntries.push([key, await promise]);
}
const resolvedMap = new Map<string, string>(resolvedEntries);
// ---------------------------------------------------------------------
// 4) Replace each occurrence. We use replace with a callback to ensure
// correct substitution even if variable names overlap (they shouldn't,
// but better safe than sorry).
// ---------------------------------------------------------------------
return template.replace(VAR_REGEX, (_, varName: string) => {
return resolvedMap.get(varName) ?? "";
});
}
export async function ensureBaseAndHeadCommitsForPRAreAvailable(
ctx: EnvContext,
): Promise<{ baseSha: string; headSha: string } | null> {
const prShas = await getPrShas(ctx);
if (prShas == null) {
console.warn("Unable to resolve PR branches");
return null;
}
const event = await getGitHubEventData(ctx);
const pr = event.pull_request;
if (!pr) {
console.warn("event.pull_request is not defined - unexpected");
return null;
}
const workspace = ctx.get("GITHUB_WORKSPACE");
// Refs (branch names)
const baseRef: string | undefined = pr.base?.ref;
const headRef: string | undefined = pr.head?.ref;
// Clone URLs
const baseRemoteUrl: string | undefined = pr.base?.repo?.clone_url;
const headRemoteUrl: string | undefined = pr.head?.repo?.clone_url;
if (!baseRef || !headRef || !baseRemoteUrl || !headRemoteUrl) {
console.warn(
"Missing PR ref or remote URL information - cannot fetch commits",
);
return null;
}
// Ensure we have the base branch.
await runCommand([
"git",
"-C",
workspace,
"fetch",
"--no-tags",
"origin",
baseRef,
]);
// Ensure we have the head branch.
if (headRemoteUrl === baseRemoteUrl) {
// Same repository: the head commit is available from `origin`.
await runCommand([
"git",
"-C",
workspace,
"fetch",
"--no-tags",
"origin",
headRef,
]);
} else {
// Fork: make sure a `pr` remote exists that points at the fork. Attempting
// to add a remote that already exists causes git to error, so we swallow
// any non-zero exit codes from that specific command.
await runCommand([
"git",
"-C",
workspace,
"remote",
"add",
"pr",
headRemoteUrl,
]);
// Whether adding succeeded or the remote already existed, attempt to fetch
// the head ref from the `pr` remote.
await runCommand([
"git",
"-C",
workspace,
"fetch",
"--no-tags",
"pr",
headRef,
]);
}
return prShas;
}
// ---------------------------------------------------------------------------
// Internal helpers still exported for use by other modules.
// ---------------------------------------------------------------------------
export async function resolvePrDiff(ctx: EnvContext): Promise<string> {
const prShas = await ensureBaseAndHeadCommitsForPRAreAvailable(ctx);
if (prShas == null) {
console.warn("Unable to resolve PR branches");
return "";
}
const workspace = ctx.get("GITHUB_WORKSPACE");
const { baseSha, headSha } = prShas;
return runCommand([
"git",
"-C",
workspace,
"diff",
"--color=never",
`${baseSha}..${headSha}`,
]);
}
// ---------------------------------------------------------------------------
// Placeholder resolution
// ---------------------------------------------------------------------------
async function resolveVariable(name: string, ctx: EnvContext): Promise<string> {
switch (name) {
case "CODEX_ACTION_ISSUE_TITLE": {
const event = await getGitHubEventData(ctx);
const issue = event.issue ?? event.pull_request;
return issue?.title ?? "";
}
case "CODEX_ACTION_ISSUE_BODY": {
const event = await getGitHubEventData(ctx);
const issue = event.issue ?? event.pull_request;
return issue?.body ?? "";
}
case "CODEX_ACTION_GITHUB_EVENT_PATH": {
return ctx.get("GITHUB_EVENT_PATH");
}
case "CODEX_ACTION_BASE_REF": {
const event = await getGitHubEventData(ctx);
return event?.pull_request?.base?.ref ?? "";
}
case "CODEX_ACTION_HEAD_REF": {
const event = await getGitHubEventData(ctx);
return event?.pull_request?.head?.ref ?? "";
}
case "CODEX_ACTION_PR_DIFF": {
return resolvePrDiff(ctx);
}
// -------------------------------------------------------------------
// Add new template variables here.
// -------------------------------------------------------------------
default: {
// Unknown variable: leave it blank to avoid leaking placeholders into the
// final prompt. The alternative would be to `fail()` here, but silently
// ignoring unknown placeholders is more forgiving and better matches the
// behaviour of typical template engines.
console.warn(`Unknown template variable: ${name}`);
return "";
}
}
}
async function getPrShas(
ctx: EnvContext,
): Promise<{ baseSha: string; headSha: string } | null> {
const event = await getGitHubEventData(ctx);
const pr = event.pull_request;
if (!pr) {
console.warn("event.pull_request is not defined");
return null;
}
// Prefer explicit SHAs if available to avoid relying on local branch names.
const baseSha: string | undefined = pr.base?.sha;
const headSha: string | undefined = pr.head?.sha;
if (!baseSha || !headSha) {
console.warn("one of base or head is not defined on event.pull_request");
return null;
}
return { baseSha, headSha };
}
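// Example (illustrative sketch): rendering a template that references the
// issue title and body via the shared context:
//
//   import { renderPromptTemplate } from "./prompt-template";
//   import { defaultContext } from "./env-context";
//
//   const rendered = await renderPromptTemplate(
//     "### {CODEX_ACTION_ISSUE_TITLE}\n\n{CODEX_ACTION_ISSUE_BODY}",
//     defaultContext,
//   );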

View File

@@ -1,42 +0,0 @@
import type { EnvContext } from "./env-context";
import { runCodex } from "./run-codex";
import { postComment } from "./post-comment";
import { addEyesReaction } from "./add-reaction";
/**
* Handle `pull_request_review` events. We treat the review body the same way
* as a normal comment.
*/
export async function onReview(ctx: EnvContext): Promise<void> {
const triggerPhrase = ctx.tryGet("INPUT_TRIGGER_PHRASE");
if (!triggerPhrase) {
console.warn("Empty trigger phrase: skipping.");
return;
}
const reviewBody = ctx.tryGet("GITHUB_EVENT_REVIEW_BODY");
if (!reviewBody) {
console.warn("Review body not found in environment: skipping.");
return;
}
if (!reviewBody.includes(triggerPhrase)) {
console.log(
`Trigger phrase '${triggerPhrase}' not found: nothing to do for this review.`,
);
return;
}
const prompt = reviewBody.replace(triggerPhrase, "").trim();
if (prompt.length === 0) {
console.warn("Prompt is empty after removing trigger phrase: skipping.");
return;
}
await addEyesReaction(ctx);
const lastMessage = await runCodex(prompt, ctx);
await postComment(lastMessage, ctx);
}

View File

@@ -1,56 +0,0 @@
import { fail } from "./fail";
import { EnvContext } from "./env-context";
import { tmpdir } from "os";
import { join } from "node:path";
import { readFile, mkdtemp } from "fs/promises";
import { resolveWorkspacePath } from "./github-workspace";
/**
* Runs the Codex CLI with the provided prompt and returns the output written
* to the "last message" file.
*/
export async function runCodex(
prompt: string,
ctx: EnvContext,
): Promise<string> {
const OPENAI_API_KEY = ctx.get("OPENAI_API_KEY");
const tempDirPath = await mkdtemp(join(tmpdir(), "codex-"));
const lastMessageOutput = join(tempDirPath, "codex-prompt.md");
const args = ["/usr/local/bin/codex-exec"];
const inputCodexArgs = ctx.tryGet("INPUT_CODEX_ARGS")?.trim();
if (inputCodexArgs) {
args.push(...inputCodexArgs.split(/\s+/));
}
args.push("--output-last-message", lastMessageOutput, prompt);
const env: Record<string, string> = { ...process.env, OPENAI_API_KEY };
const INPUT_CODEX_HOME = ctx.tryGet("INPUT_CODEX_HOME");
if (INPUT_CODEX_HOME) {
env.CODEX_HOME = resolveWorkspacePath(INPUT_CODEX_HOME, ctx);
}
console.log(`Running Codex: ${JSON.stringify(args)}`);
const result = Bun.spawnSync(args, {
stdout: "inherit",
stderr: "inherit",
env,
});
if (!result.success) {
fail(`Codex failed: see above for details.`);
}
// Read the output generated by Codex.
let lastMessage: string;
try {
lastMessage = await readFile(lastMessageOutput, "utf8");
} catch (err) {
fail(`Failed to read Codex output at '${lastMessageOutput}': ${err}`);
}
return lastMessage;
}
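As an illustration of how the final argv is assembled (all values below are hypothetical):

```typescript
// With INPUT_CODEX_ARGS = "--model o3" and the prompt "fix the bug",
// the spawned command would be:
// [
//   "/usr/local/bin/codex-exec",
//   "--model", "o3",
//   "--output-last-message", "/tmp/codex-XXXXXX/codex-prompt.md",
//   "fix the bug",
// ]
```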

View File

@@ -1,33 +0,0 @@
// Validate the inputs passed to the composite action.
// The script currently ensures that the provided configuration file exists and
// matches the expected schema.
import type { Config } from "./config";
import { existsSync } from "fs";
import * as path from "path";
import { fail } from "./fail";
export function performAdditionalValidation(config: Config, workspace: string) {
// Additional validation: ensure referenced prompt files exist and are Markdown.
for (const [label, details] of Object.entries(config.labels)) {
// Determine which prompt key is present (the schema guarantees exactly one).
const promptPathStr =
(details as any).prompt ?? (details as any).promptPath;
if (promptPathStr) {
const promptPath = path.isAbsolute(promptPathStr)
? promptPathStr
: path.join(workspace, promptPathStr);
if (!existsSync(promptPath)) {
fail(`Prompt file for label '${label}' not found: ${promptPath}`);
}
if (!promptPath.endsWith(".md")) {
fail(
`Prompt file for label '${label}' must be a .md file (got ${promptPathStr}).`,
);
}
}
}
}
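For reference, a config object that would pass this validation might look like the following sketch; the label names and file paths are hypothetical:

```typescript
// Hypothetical Config value: each label carries exactly one prompt key,
// and every referenced file exists in the workspace and ends in ".md".
const exampleConfig = {
  labels: {
    "codex-attempt": { promptPath: ".github/codex/labels/codex-attempt.md" },
    "codex-review": { promptPath: ".github/codex/labels/codex-review.md" },
  },
};
```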

View File

@@ -1,15 +0,0 @@
{
"compilerOptions": {
"lib": ["ESNext"],
"target": "ESNext",
"module": "ESNext",
"moduleDetection": "force",
"moduleResolution": "bundler",
"noEmit": true,
"strict": true,
"skipLibCheck": true
},
"include": ["src"]
}

View File

@@ -1,3 +0,0 @@
model = "o3"
# Consider setting [mcp_servers] here!

View File

@@ -1,9 +0,0 @@
Attempt to solve the reported issue.
If a code change is required, create a new branch, commit the fix, and open a pull request that resolves the problem.
Here is the original GitHub issue that triggered this run:
### {CODEX_ACTION_ISSUE_TITLE}
{CODEX_ACTION_ISSUE_BODY}

View File

@@ -1,7 +0,0 @@
Review this PR and respond with a very concise final message, formatted in Markdown.
There should be a summary of the changes (1-2 sentences) and a few bullet points if necessary.
Then provide the **review** (1-2 sentences plus bullet points, friendly tone).
{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally.

View File

@@ -1,23 +0,0 @@
Review this PR and respond with a very concise final message, formatted in Markdown.
There should be a summary of the changes (1-2 sentences) and a few bullet points if necessary.
Then provide the **review** (1-2 sentences plus bullet points, friendly tone).
Things to look out for when doing the review:
- **Make sure the pull request body explains the motivation behind the change.** If the author has failed to do this, call it out, and if you think you can deduce the motivation behind the change, propose copy.
- Ideally, the PR body also contains a small summary of the change. For small changes, the PR title may be sufficient.
- Each PR should ideally do one conceptual thing. For example, if a PR does a refactoring as well as introducing a new feature, push back and suggest the refactoring be done in a separate PR. This makes things easier for the reviewer, as refactoring changes can often be far-reaching, yet quick to review.
- If the nature of the change seems to have a visual component (which is often the case for changes to `codex-rs/tui`), recommend including a screenshot or video to demonstrate the change, if appropriate.
- Rust files should generally be organized such that the public parts of the API appear near the top of the file and helper functions go below. This is analogous to the "inverted pyramid" structure that is favored in journalism.
- Encourage the use of small enums or the newtype pattern in Rust if it helps readability without adding significant cognitive load or lines of code.
- Be wary of large files and offer suggestions for how to break things into more reasonably-sized files.
- When modifying a `Cargo.toml` file, make sure that dependency lists stay alphabetically sorted. Also consider whether a new dependency is added to the appropriate place (e.g., `[dependencies]` versus `[dev-dependencies]`).
- If you see opportunities for the changes in a diff to use more idiomatic Rust, please make specific recommendations. For example, favor the use of expressions over `return`.
- When introducing new code, be on the lookout for code that duplicates existing code. When found, propose a way to refactor the existing code such that it should be reused.
- Each crate in the Cargo workspace in `codex-rs` has a specific purpose: make a note if you believe new code is not introduced in the correct crate.
- When possible, try to keep the `core` crate as small as possible. Non-core but shared logic is often a good candidate for `codex-rs/common`.
- References to existing GitHub issues and PRs are encouraged, where appropriate, though you likely do not have network access, so may not be able to help here.
{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally.

View File

@@ -1,7 +0,0 @@
Troubleshoot whether the reported issue is valid.
Provide a concise and respectful comment summarizing the findings.
### {CODEX_ACTION_ISSUE_TITLE}
{CODEX_ACTION_ISSUE_BODY}

View File

@@ -1,26 +0,0 @@
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference#package-ecosystem-
version: 2
updates:
- package-ecosystem: bun
directory: .github/actions/codex
schedule:
interval: weekly
- package-ecosystem: cargo
directories:
- codex-rs
- codex-rs/*
schedule:
interval: weekly
- package-ecosystem: devcontainers
directory: /
schedule:
interval: weekly
- package-ecosystem: docker
directory: codex-cli
schedule:
interval: weekly
- package-ecosystem: github-actions
directory: /
schedule:
interval: weekly

View File

@@ -1,28 +0,0 @@
{
"outputs": {
"codex-exec": {
"platforms": {
"macos-aarch64": { "regex": "^codex-exec-aarch64-apple-darwin\\.zst$", "path": "codex-exec" },
"macos-x86_64": { "regex": "^codex-exec-x86_64-apple-darwin\\.zst$", "path": "codex-exec" },
"linux-x86_64": { "regex": "^codex-exec-x86_64-unknown-linux-musl\\.zst$", "path": "codex-exec" },
"linux-aarch64": { "regex": "^codex-exec-aarch64-unknown-linux-musl\\.zst$", "path": "codex-exec" }
}
},
"codex": {
"platforms": {
"macos-aarch64": { "regex": "^codex-aarch64-apple-darwin\\.zst$", "path": "codex" },
"macos-x86_64": { "regex": "^codex-x86_64-apple-darwin\\.zst$", "path": "codex" },
"linux-x86_64": { "regex": "^codex-x86_64-unknown-linux-musl\\.zst$", "path": "codex" },
"linux-aarch64": { "regex": "^codex-aarch64-unknown-linux-musl\\.zst$", "path": "codex" }
}
},
"codex-linux-sandbox": {
"platforms": {
"linux-x86_64": { "regex": "^codex-linux-sandbox-x86_64-unknown-linux-musl\\.zst$", "path": "codex-linux-sandbox" },
"linux-aarch64": { "regex": "^codex-linux-sandbox-aarch64-unknown-linux-musl\\.zst$", "path": "codex-linux-sandbox" }
}
}
}
}

View File

@@ -68,18 +68,7 @@ jobs:
- name: Build
run: pnpm run build
- name: Ensure staging a release works.
working-directory: codex-cli
env:
GH_TOKEN: ${{ github.token }}
run: pnpm stage-release
- name: Ensure root README.md contains only ASCII and certain Unicode code points
- name: Ensure README.md contains only ASCII and certain Unicode code points
run: ./scripts/asciicheck.py README.md
- name: Check root README ToC
- name: Check README ToC
run: python3 scripts/readme_toc.py README.md
- name: Ensure codex-cli/README.md contains only ASCII and certain Unicode code points
run: ./scripts/asciicheck.py codex-cli/README.md
- name: Check codex-cli/README ToC
run: python3 scripts/readme_toc.py codex-cli/README.md

View File

@@ -23,7 +23,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
path-to-document: https://github.com/openai/codex/blob/main/docs/CLA.md
path-to-document: docs/CLA.md
path-to-signatures: signatures/cla.json
branch: cla-signatures
allowlist: dependabot[bot]

View File

@@ -1,27 +0,0 @@
# Codespell configuration is within .codespellrc
---
name: Codespell
on:
push:
branches: [main]
pull_request:
branches: [main]
permissions:
contents: read
jobs:
codespell:
name: Check for spelling errors
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Annotate locations with typos
uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
- name: Codespell
uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2
with:
ignore_words_file: .codespellignore

View File

@@ -1,95 +0,0 @@
name: Codex
on:
issues:
types: [opened, labeled]
pull_request:
branches: [main]
types: [labeled]
jobs:
codex:
# This `if` check provides complex filtering logic to avoid running Codex
# on every PR. Admittedly, one thing this does not verify is whether the
# sender has write access to the repo: that must be done as part of a
# runtime step.
#
# Note the label values should match the ones in the .github/codex/labels
# folder.
if: |
(github.event_name == 'issues' && (
(github.event.action == 'labeled' && (github.event.label.name == 'codex-attempt' || github.event.label.name == 'codex-triage'))
)) ||
(github.event_name == 'pull_request' && github.event.action == 'labeled' && (github.event.label.name == 'codex-review' || github.event.label.name == 'codex-rust-review'))
runs-on: ubuntu-latest
permissions:
contents: write # can push or create branches
issues: write # for comments + labels on issues/PRs
pull-requests: write # for PR comments/labels
steps:
# TODO: Consider adding an optional mode (--dry-run?) to actions/codex
# that verifies whether Codex should actually be run for this event.
# (For example, it may be rejected because the sender does not have
# write access to the repo.) The benefit would be two-fold:
# 1. As the first step of this job, it gives us a chance to add a reaction
# or comment to the PR/issue ASAP to "ack" the request.
# 2. It saves resources by skipping the clone and setup steps below if
# Codex is not going to run.
- name: Checkout repository
uses: actions/checkout@v4
# We install the dependencies like we would for an ordinary CI job,
# particularly because Codex will not have network access to install
# these dependencies.
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 22
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.8.1
run_install: false
- name: Get pnpm store directory
id: pnpm-cache
shell: bash
run: |
echo "store_path=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ steps.pnpm-cache.outputs.store_path }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install dependencies
run: pnpm install
- uses: dtolnay/rust-toolchain@1.88
with:
targets: x86_64-unknown-linux-gnu
components: clippy
- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-ubuntu-24.04-x86_64-unknown-linux-gnu-${{ hashFiles('**/Cargo.lock') }}
# Note it is possible that the `verify` step internal to Run Codex will
# fail, in which case the work to setup the repo was worthless :(
- name: Run Codex
uses: ./.github/actions/codex
with:
openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
github_token: ${{ secrets.GITHUB_TOKEN }}
codex_home: ./.github/codex/home

View File

@@ -26,9 +26,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
with:
components: rustfmt
- uses: dtolnay/rust-toolchain@stable
- name: cargo fmt
run: cargo fmt -- --config imports_granularity=Item --check
@@ -55,19 +53,14 @@ jobs:
target: x86_64-unknown-linux-musl
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
- runner: windows-latest
target: x86_64-pc-windows-msvc
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
- uses: dtolnay/rust-toolchain@stable
with:
targets: ${{ matrix.target }}
components: clippy
- uses: actions/cache@v4
with:
@@ -79,40 +72,23 @@ jobs:
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' }}
name: Install musl build tools
run: |
sudo apt install -y musl-tools pkg-config
- name: cargo clippy
id: clippy
continue-on-error: true
run: cargo clippy --target ${{ matrix.target }} --all-features --tests -- -D warnings
- name: Initialize failure flag
run: echo "FAILED=" >> $GITHUB_ENV
# Running `cargo build` from the workspace root builds the workspace using
# the union of all features from third-party crates. This can mask errors
# where individual crates have underspecified features. To avoid this, we
# run `cargo build` for each crate individually, though because this is
# slower, we only do this for the x86_64-unknown-linux-gnu target.
- name: cargo build individual crates
id: build
if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' }}
continue-on-error: true
run: find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 | xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo build'
- name: cargo clippy
run: cargo clippy --target ${{ matrix.target }} --all-features -- -D warnings || echo "FAILED=${FAILED:+$FAILED, }cargo clippy" >> $GITHUB_ENV
- name: cargo test
id: test
continue-on-error: true
run: cargo test --all-features --target ${{ matrix.target }}
env:
RUST_BACKTRACE: 1
run: cargo test --target ${{ matrix.target }} || echo "FAILED=${FAILED:+$FAILED, }cargo test" >> $GITHUB_ENV
# Fail the job if any of the previous steps failed.
- name: verify all steps passed
if: |
steps.clippy.outcome == 'failure' ||
steps.build.outcome == 'failure' ||
steps.test.outcome == 'failure'
- name: Fail if any step failed
if: env.FAILED != ''
run: |
echo "One or more checks failed (clippy, build, or test). See logs for details."
echo "See logs above, as the following steps failed:"
echo "$FAILED"
exit 1

View File

@@ -1,193 +0,0 @@
# Release workflow for codex-rs.
# To release, follow a workflow like:
# ```
# git tag -a rust-v0.1.0 -m "Release 0.1.0"
# git push origin rust-v0.1.0
# ```
name: rust-release
on:
push:
tags:
- "rust-v*.*.*"
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: true
jobs:
tag-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
set -euo pipefail
echo "::group::Tag validation"
# 1. Must be a tag and match the regex
[[ "${GITHUB_REF_TYPE}" == "tag" ]] \
|| { echo "❌ Not a tag push"; exit 1; }
[[ "${GITHUB_REF_NAME}" =~ ^rust-v[0-9]+\.[0-9]+\.[0-9]+(-(alpha|beta)(\.[0-9]+)?)?$ ]] \
|| { echo "❌ Tag '${GITHUB_REF_NAME}' doesn't match expected format"; exit 1; }
# 2. Extract versions
tag_ver="${GITHUB_REF_NAME#rust-v}"
cargo_ver="$(grep -m1 '^version' codex-rs/Cargo.toml \
| sed -E 's/version *= *"([^"]+)".*/\1/')"
# 3. Compare
[[ "${tag_ver}" == "${cargo_ver}" ]] \
|| { echo "❌ Tag ${tag_ver} ≠ Cargo.toml ${cargo_ver}"; exit 1; }
echo "✅ Tag and Cargo.toml agree (${tag_ver})"
echo "::endgroup::"
build:
needs: tag-check
name: ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
defaults:
run:
working-directory: codex-rs
strategy:
fail-fast: false
matrix:
include:
- runner: macos-14
target: aarch64-apple-darwin
- runner: macos-14
target: x86_64-apple-darwin
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
with:
targets: ${{ matrix.target }}
- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-release-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
run: |
sudo apt install -y musl-tools pkg-config
- name: Cargo build
run: cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-exec --bin codex-linux-sandbox
- name: Stage artifacts
shell: bash
run: |
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
cp target/${{ matrix.target }}/release/codex-exec "$dest/codex-exec-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
# After https://github.com/openai/codex/pull/1228 is merged and a new
# release is cut with an artifacts built after that PR, the `-gnu`
# variants can go away as we will only use the `-musl` variants.
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'x86_64-unknown-linux-gnu' || matrix.target == 'aarch64-unknown-linux-gnu' || matrix.target == 'aarch64-unknown-linux-musl' }}
name: Stage Linux-only artifacts
shell: bash
run: |
dest="dist/${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-linux-sandbox "$dest/codex-linux-sandbox-${{ matrix.target }}"
- name: Compress artifacts
shell: bash
run: |
# Path that contains the uncompressed binaries for the current
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
# For compatibility with environments that lack the `zstd` tool we
# additionally create a `.tar.gz` alongside every single binary that
# we publish. The end result is:
# codex-<target>.zst (existing)
# codex-<target>.tar.gz (new)
# ...same naming for codex-exec-* and codex-linux-sandbox-*
# 1. Produce a .tar.gz for every file in the directory *before* we
# run `zstd --rm`, because that flag deletes the original files.
for f in "$dest"/*; do
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz ]]; then
continue
fi
# Create per-binary tar.gz
tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
# Also create .zst (existing behaviour) *and* remove the original
# uncompressed binary to keep the directory small.
zstd -T0 -19 --rm "$dest/$base"
done
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target }}
# Upload the per-binary .zst files as well as the new .tar.gz
# equivalents we generated in the previous step.
path: |
codex-rs/dist/${{ matrix.target }}/*
release:
needs: build
name: release
runs-on: ubuntu-latest
steps:
- uses: actions/download-artifact@v4
with:
path: dist
- name: List
run: ls -R dist/
- name: Define release name
id: release_name
run: |
# Extract the version from the tag name, which is in the format
# "rust-v0.1.0".
version="${GITHUB_REF_NAME#rust-v}"
echo "name=${version}" >> $GITHUB_OUTPUT
- name: Create GitHub Release
uses: softprops/action-gh-release@v2
with:
name: ${{ steps.release_name.outputs.name }}
tag_name: ${{ github.ref_name }}
files: dist/**
# For now, tag releases as "prerelease" because we are not claiming
# the Rust CLI is stable yet.
prerelease: true
- uses: facebook/dotslash-publish-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json

4
.gitignore vendored
View File

@@ -77,7 +77,3 @@ yarn.lock
package.json-e
session.ts-e
CHANGELOG.ignore.md
# nix related
.direnv
.envrc

View File

@@ -1,5 +0,0 @@
{
"recommendations": [
"tamasfe.even-better-toml",
]
}

18
.vscode/launch.json vendored
View File

@@ -1,18 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Cargo launch",
"cargo": {
"cwd": "${workspaceFolder}/codex-rs",
"args": [
"build",
"--bin=codex-tui"
]
},
"args": []
}
]
}

16
.vscode/settings.json vendored
View File

@@ -1,16 +0,0 @@
{
"rust-analyzer.checkOnSave": true,
"rust-analyzer.check.command": "clippy",
"rust-analyzer.check.extraArgs": ["--all-features", "--tests"],
"rust-analyzer.rustfmt.extraArgs": ["--config", "imports_granularity=Item"],
"[rust]": {
"editor.defaultFormatter": "rust-lang.rust-analyzer",
"editor.formatOnSave": true,
},
"[toml]": {
"editor.defaultFormatter": "tamasfe.even-better-toml",
"editor.formatOnSave": true,
},
"evenBetterToml.formatter.reorderArrays": true,
"evenBetterToml.formatter.reorderKeys": true,
}

View File

@@ -1,9 +0,0 @@
# Rust/codex-rs
In the codex-rs folder where the rust code lives:
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR`. You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
Before creating a pull request with changes to `codex-rs`, run `just fmt` (in the `codex-rs` directory) to format the code and `just fix` (in the `codex-rs` directory) to fix any linter issues, and ensure the test suite passes by running `cargo test --all-features` in the `codex-rs` directory.
When making individual changes, prefer running tests on individual files or projects first.

View File

@@ -2,68 +2,6 @@
You can install any of these versions: `npm install -g codex@version`
## `0.1.2505172129`
### 🪲 Bug Fixes
- Add node version check (#1007)
- Persist token after refresh (#1006)
## `0.1.2505171619`
- `codex --login` + `codex --free` (#998)
## `0.1.2505161800`
- Sign in with chatgpt credits (#974)
- Add support for OpenAI tool type, local_shell (#961)
## `0.1.2505161243`
- Sign in with chatgpt (#963)
- Session history viewer (#912)
- Apply patch issue when using different cwd (#942)
- Diff command for filenames with special characters (#954)
## `0.1.2505160811`
- `codex-mini-latest` (#951)
## `0.1.2505140839`
### 🪲 Bug Fixes
- Gpt-4.1 apply_patch handling (#930)
- Add support for fileOpener in config.json (#911)
- Patch in #366 and #367 for marked-terminal (#916)
- Remember to set lastIndex = 0 on shared RegExp (#918)
- Always load version from package.json at runtime (#909)
- Tweak the label for citations for better rendering (#919)
- Tighten up some logic around session timestamps and ids (#922)
- Change EventMsg enum so every variant takes a single struct (#925)
- Reasoning default to medium, show workdir when supplied (#931)
- Test_dev_null_write() was not using echo as intended (#923)
## `0.1.2504301751`
### 🚀 Features
- User config api key (#569)
- `@mention` files in codex (#701)
- Add `--reasoning` CLI flag (#314)
- Lower default retry wait time and increase number of tries (#720)
- Add common package registries domains to allowed-domains list (#414)
### 🪲 Bug Fixes
- Insufficient quota message (#758)
- Input keyboard shortcut opt+delete (#685)
- `/diff` should include untracked files (#686)
- Only allow running without sandbox if explicitly marked in safe container (#699)
- Tighten up check for /usr/bin/sandbox-exec (#710)
- Check if sandbox-exec is available (#696)
- Duplicate messages in quiet mode (#680)
## `0.1.2504251709`
### 🚀 Features

4
NOTICE
View File

@@ -1,6 +1,2 @@
OpenAI Codex
Copyright 2025 OpenAI
This project includes code derived from [Ratatui](https://github.com/ratatui/ratatui), licensed under the MIT license.
Copyright (c) 2016-2022 Florian Dehau
Copyright (c) 2023-2025 The Ratatui Developers

591
README.md
View File

@@ -1,50 +1,55 @@
<h1 align="center">OpenAI Codex CLI</h1>
<p align="center">Lightweight coding agent that runs in your terminal</p>
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p>
<p align="center"><code>npm i -g @openai/codex</code></p>
This is the home of the **Codex CLI**, which is a coding agent from OpenAI that runs locally on your computer. If you are looking for the _cloud-based agent_ from OpenAI, **Codex [Web]**, see <https://chatgpt.com/codex>.
<!-- ![Codex demo GIF using: codex "explain this codebase to me"](./.github/demo.gif) -->
![Codex demo GIF using: codex "explain this codebase to me"](./.github/demo.gif)
---
<details>
<summary><strong>Table of contents</strong></summary>
<summary><strong>Table&nbsp;of&nbsp;Contents</strong></summary>
<!-- Begin ToC -->
- [Experimental technology disclaimer](#experimental-technology-disclaimer)
- [Experimental Technology Disclaimer](#experimental-technology-disclaimer)
- [Quickstart](#quickstart)
- [OpenAI API Users](#openai-api-users)
- [OpenAI Plus/Pro Users](#openai-pluspro-users)
- [Why Codex?](#why-codex)
- [Security model & permissions](#security-model--permissions)
- [Security Model & Permissions](#security-model--permissions)
- [Platform sandboxing details](#platform-sandboxing-details)
- [System requirements](#system-requirements)
- [CLI reference](#cli-reference)
- [Memory & project docs](#memory--project-docs)
- [System Requirements](#system-requirements)
- [CLI Reference](#cli-reference)
- [Memory & Project Docs](#memory--project-docs)
- [Non-interactive / CI mode](#non-interactive--ci-mode)
- [Model Context Protocol (MCP)](#model-context-protocol-mcp)
- [Tracing / verbose logging](#tracing--verbose-logging)
- [Tracing / Verbose Logging](#tracing--verbose-logging)
- [Recipes](#recipes)
- [Installation](#installation)
- [DotSlash](#dotslash)
- [Configuration](#configuration)
- [Configuration Guide](#configuration-guide)
- [Basic Configuration Parameters](#basic-configuration-parameters)
- [Custom AI Provider Configuration](#custom-ai-provider-configuration)
- [History Configuration](#history-configuration)
- [Configuration Examples](#configuration-examples)
- [Full Configuration Example](#full-configuration-example)
- [Custom Instructions](#custom-instructions)
- [Environment Variables Setup](#environment-variables-setup)
- [FAQ](#faq)
- [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage)
- [Codex open source fund](#codex-open-source-fund)
- [Zero Data Retention (ZDR) Usage](#zero-data-retention-zdr-usage)
- [Codex Open Source Fund](#codex-open-source-fund)
- [Contributing](#contributing)
- [Development workflow](#development-workflow)
- [Git Hooks with Husky](#git-hooks-with-husky)
- [Debugging](#debugging)
- [Writing high-impact code changes](#writing-high-impact-code-changes)
- [Opening a pull request](#opening-a-pull-request)
- [Review process](#review-process)
- [Community values](#community-values)
- [Getting help](#getting-help)
- [Contributor license agreement (CLA)](#contributor-license-agreement-cla)
- [Contributor License Agreement (CLA)](#contributor-license-agreement-cla)
- [Quick fixes](#quick-fixes)
- [Releasing `codex`](#releasing-codex)
- [Security & responsible AI](#security--responsible-ai)
- [Alternative Build Options](#alternative-build-options)
- [Nix Flake Development](#nix-flake-development)
- [Security & Responsible AI](#security--responsible-ai)
- [License](#license)
<!-- End ToC -->
@@ -53,7 +58,7 @@ This is the home of the **Codex CLI**, which is a coding agent from OpenAI that
---
## Experimental technology disclaimer
## Experimental Technology Disclaimer
Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome:
@@ -66,100 +71,52 @@ Help us improve by filing issues or submitting PRs (see the section below for ho
## Quickstart
Install globally with your preferred package manager:
Install globally:
```shell
npm install -g @openai/codex # Alternatively: `brew install codex`
npm install -g @openai/codex
```
Or go to the [latest GitHub Release](https://github.com/openai/codex/releases/latest) and download the appropriate binary for your platform.
### OpenAI API Users
Next, set your OpenAI API key as an environment variable:
```shell
export OPENAI_API_KEY="your-api-key-here"
```
> [!NOTE]
> This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`), but we recommend setting it for the session.
### OpenAI Plus/Pro Users
If you have a paid OpenAI account, run the following to start the login process:
```
codex login
```
If you complete the process successfully, you should have a `~/.codex/auth.json` file that contains the credentials that Codex will use.
To verify whether you are currently logged in, run:
```
codex login status
```
If you encounter problems with the login flow, please comment on <https://github.com/openai/codex/issues/1243>.
> **Note:** This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`) but we recommend setting it for the session. **Tip:** You can also place your API key into a `.env` file at the root of your project:
>
> ```env
> OPENAI_API_KEY=your-api-key-here
> ```
>
> The CLI will automatically load variables from `.env` (via `dotenv/config`).
<details>
<summary><strong>Use <code>--profile</code> to use other models</strong></summary>
<summary><strong>Use <code>--provider</code> to use other models</strong></summary>
Codex also allows you to use other providers that support the OpenAI Chat Completions (or Responses) API.
To do so, you must first define custom [providers](./config.md#model_providers) in `~/.codex/config.toml`. For example, the provider for a standard Ollama setup would be defined as follows:
```toml
[model_providers.ollama]
name = "Ollama"
base_url = "http://localhost:11434/v1"
```
The `base_url` will have `/chat/completions` appended to it to build the full URL for the request. For example, with the Ollama definition above, chat requests are sent to `http://localhost:11434/v1/chat/completions`.
For providers that also require an `Authorization` header of the form `Bearer: SECRET`, an `env_key` can be specified, which indicates the environment variable to read to use as the value of `SECRET` when making a request:
```toml
[model_providers.openrouter]
name = "OpenRouter"
base_url = "https://openrouter.ai/api/v1"
env_key = "OPENROUTER_API_KEY"
```
Providers that speak the Responses API are also supported by adding `wire_api = "responses"` as part of the definition. Accessing OpenAI models via Azure is an example of such a provider, though it also requires specifying additional `query_params` that need to be appended to the request URL:
```toml
[model_providers.azure]
name = "Azure"
# Make sure you set the appropriate subdomain for this URL.
base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
env_key = "AZURE_OPENAI_API_KEY" # Or "OPENAI_API_KEY", whichever you use.
# Newer versions appear to support the responses API, see https://github.com/openai/codex/pull/1321
query_params = { api-version = "2025-04-01-preview" }
wire_api = "responses"
```
Once you have defined a provider you wish to use, you can configure it as your default provider as follows:
```toml
model_provider = "azure"
```
> [!TIP]
> If you find yourself experimenting with a variety of models and providers, then you likely want to invest in defining a _profile_ for each configuration like so:
```toml
[profiles.o3]
model_provider = "azure"
model = "o3"
[profiles.mistral]
model_provider = "ollama"
model = "mistral"
```
This way, you can specify one command-line argument (e.g., `--profile o3`, `--profile mistral`) to override multiple settings together.
> Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are:
>
> - openai (default)
> - openrouter
> - gemini
> - ollama
> - mistral
> - deepseek
> - xai
> - groq
> - any other provider that is compatible with the OpenAI API
>
> If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as:
>
> ```shell
> export <provider>_API_KEY="your-api-key-here"
> ```
>
> If you use a provider not listed above, you must also set the base URL for the provider:
>
> ```shell
> export <provider>_BASE_URL="https://your-provider-api-base-url"
> ```
</details>
<br />
@@ -177,7 +134,7 @@ codex "explain this codebase to me"
```
```shell
codex --full-auto "create the fanciest todo-list app"
codex --approval-mode full-auto "create the fanciest todo-list app"
```
That's it - Codex will scaffold a file, run it inside a sandbox, install any
@@ -201,69 +158,81 @@ And it's **fully open-source** so you can see and contribute to how it develops!
---
## Security model & permissions
## Security Model & Permissions
Codex lets you decide _how much autonomy_ you want to grant the agent. The following options can be configured independently:
Codex lets you decide _how much autonomy_ the agent receives and the auto-approval policy via the
`--approval-mode` flag (or the interactive onboarding prompt):
- [`approval_policy`](./codex-rs/config.md#approval_policy) determines when you should be prompted to approve whether Codex can execute a command
- [`sandbox`](./codex-rs/config.md#sandbox) determines the _sandbox policy_ that Codex uses to execute untrusted commands
| Mode | What the agent may do without asking | Still requires approval |
| ------------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
| **Suggest** <br>(default) | <li>Read any file in the repo | <li>**All** file writes/patches<li> **Any** arbitrary shell commands (aside from reading files) |
| **Auto Edit** | <li>Read **and** apply-patch writes to files | <li>**All** shell commands |
| **Full Auto** | <li>Read/write files <li> Execute shell commands (network disabled, writes limited to your workdir) | - |
By default, Codex runs with `--ask-for-approval untrusted` and `--sandbox read-only`, which means that:
In **Full Auto** every command is run **network-disabled** and confined to the
current working directory (plus temporary files) for defense-in-depth. Codex
will also show a warning/confirmation if you start in **auto-edit** or
**full-auto** while the directory is _not_ tracked by Git, so you always have a
safety net.
- The user is prompted to approve every command not in the set of "trusted" commands built into Codex (`cat`, `ls`, etc.)
- Approved commands are run outside of a sandbox because user approval implies "trust" in this case.
Running Codex with the `--full-auto` convenience flag changes the configuration to `--ask-for-approval on-failure` and `--sandbox workspace-write`, which means that:
- Codex does not initially ask for user approval before running an individual command.
- When it does run a command, however, the command executes under a sandbox in which:
- It can read any file on the system.
- It can only write files under the current directory (or the directory specified via `--cd`).
- Network requests are completely disabled.
- Only if the command exits with a non-zero exit code will it ask the user for approval. If granted, it will re-attempt the command outside of the sandbox. (A common case is when Codex cannot `npm install` a dependency because that requires network access.)
Again, these two options can be configured independently. For example, if you want Codex to perform an "exploration" where you are happy for it to read anything it wants but you never want to be prompted, you could run Codex with `--ask-for-approval never` and `--sandbox read-only`.
Coming soon: you'll be able to whitelist specific commands to auto-execute with
the network enabled, once we're confident in additional safeguards.
### Platform sandboxing details
The mechanism Codex uses to implement the sandbox policy depends on your OS:
The hardening mechanism Codex uses depends on your OS:
- **macOS 12+** uses **Apple Seatbelt** and runs commands using `sandbox-exec` with a profile (`-p`) that corresponds to the `--sandbox` that was specified.
- **Linux** uses a combination of Landlock/seccomp APIs to enforce the `sandbox` configuration.
- **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).
Note that when running Linux in a containerized environment such as Docker, sandboxing may not work if the host/container configuration does not support the necessary Landlock/seccomp APIs. In such cases, we recommend configuring your Docker container so that it provides the sandbox guarantees you are looking for and then running `codex` with `--sandbox danger-full-access` (or, more simply, the `--dangerously-bypass-approvals-and-sandbox` flag) within your container.
- Everything is placed in a read-only jail except for a small set of
writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.).
- Outbound network is _fully blocked_ by default - even if a child process
tries to `curl` somewhere it will fail.
- **Linux** - there is no sandboxing by default.
We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal
container image** and mounts your repo _read/write_ at the same path. A
custom `iptables`/`ipset` firewall script denies all egress except the
OpenAI API. This gives you deterministic, reproducible runs without needing
root on the host. You can use the [`run_in_container.sh`](./codex-cli/scripts/run_in_container.sh) script to set up the sandbox.
---
## System requirements
## System Requirements
| Requirement | Details |
| --------------------------- | --------------------------------------------------------------- |
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
| Node.js | **22 or newer** (LTS recommended) |
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
| RAM | 4-GB minimum (8-GB recommended) |
---
## CLI reference
| Command | Purpose | Example |
| ------------------ | ---------------------------------- | ------------------------------- |
| `codex` | Interactive TUI | `codex` |
| `codex "..."` | Initial prompt for interactive TUI | `codex "fix lint errors"` |
| `codex exec "..."` | Non-interactive "automation mode" | `codex exec "explain utils.ts"` |
Key flags: `--model/-m`, `--ask-for-approval/-a`.
> Never run `sudo npm install -g`; fix npm permissions instead.
---
## Memory & project docs
## CLI Reference
You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for `AGENTS.md` files in the following places, and merges them top-down:
| Command | Purpose | Example |
| ------------------------------------ | ----------------------------------- | ------------------------------------ |
| `codex` | Interactive REPL | `codex` |
| `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` |
| `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` |
| `codex completion <bash\|zsh\|fish>` | Print shell completion script | `codex completion bash` |
1. `~/.codex/AGENTS.md` - personal global guidance
2. `AGENTS.md` at repo root - shared project notes
3. `AGENTS.md` in the current working directory - sub-folder/feature specifics
Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.
---
## Memory & Project Docs
Codex merges Markdown instructions in this order:
1. `~/.codex/instructions.md` - personal global guidance
2. `codex.md` at repo root - shared project notes
3. `codex.md` in cwd - sub-package specifics
Disable with `--no-project-doc` or `CODEX_DISABLE_PROJECT_DOC=1`.
---
@@ -276,38 +245,19 @@ Run Codex head-less in pipelines. Example GitHub Action step:
run: |
npm install -g @openai/codex
export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
codex exec --full-auto "update CHANGELOG for next release"
codex -a auto-edit --quiet "update CHANGELOG for next release"
```
## Model Context Protocol (MCP)
Set `CODEX_QUIET_MODE=1` to silence interactive UI noise.
The Codex CLI can be configured to leverage MCP servers by defining an [`mcp_servers`](./codex-rs/config.md#mcp_servers) section in `~/.codex/config.toml`. It is intended to mirror how tools such as Claude and Cursor define `mcpServers` in their respective JSON config files, though the Codex format is slightly different since it uses TOML rather than JSON, e.g.:
## Tracing / Verbose Logging
```toml
# IMPORTANT: the top-level key is `mcp_servers` rather than `mcpServers`.
[mcp_servers.server-name]
command = "npx"
args = ["-y", "mcp-server"]
env = { "API_KEY" = "value" }
Setting the environment variable `DEBUG=true` prints full API request and response details:
```shell
DEBUG=true codex
```
> [!TIP]
> It is somewhat experimental, but the Codex CLI can also be run as an MCP _server_ via `codex mcp`. If you launch it with an MCP client such as `npx @modelcontextprotocol/inspector codex mcp` and send it a `tools/list` request, you will see that there is only one tool, `codex`, that accepts a grab-bag of inputs, including a catch-all `config` map for anything you might want to override. Feel free to play around with it and provide feedback via GitHub issues.
## Tracing / verbose logging
Because Codex is written in Rust, it honors the `RUST_LOG` environment variable to configure its logging behavior.
The TUI defaults to `RUST_LOG=codex_core=info,codex_tui=info` and log messages are written to `~/.codex/log/codex-tui.log`, so you can leave the following running in a separate terminal to monitor log messages as they are written:
```
tail -F ~/.codex/log/codex-tui.log
```
By comparison, the non-interactive mode (`codex exec`) defaults to `RUST_LOG=error`, but messages are printed inline, so there is no need to monitor a separate file.
See the Rust documentation on [`RUST_LOG`](https://docs.rs/env_logger/latest/env_logger/#enabling-logging) for more information on the configuration options.
---
## Recipes
@@ -329,78 +279,184 @@ Below are a few bite-size examples you can copy-paste. Replace the text in quote
## Installation
<details open>
<summary><strong>Install Codex CLI using your preferred package manager.</strong></summary>
From `brew` (recommended, downloads only the binary for your platform):
<summary><strong>From npm (Recommended)</strong></summary>
```bash
brew install codex
npm install -g @openai/codex
# or
yarn global add @openai/codex
# or
bun install -g @openai/codex
# or
pnpm add -g @openai/codex
```
From `npm` (generally more readily available, but downloads binaries for all supported platforms):
```bash
npm i -g @openai/codex
```
Or go to the [latest GitHub Release](https://github.com/openai/codex/releases/latest) and download the appropriate binary for your platform.
Admittedly, each GitHub Release contains many executables, but in practice, you likely want one of these:
- macOS
- Apple Silicon/arm64: `codex-aarch64-apple-darwin.tar.gz`
- x86_64 (older Mac hardware): `codex-x86_64-apple-darwin.tar.gz`
- Linux
- x86_64: `codex-x86_64-unknown-linux-musl.tar.gz`
- arm64: `codex-aarch64-unknown-linux-musl.tar.gz`
Each archive contains a single entry with the platform baked into the name (e.g., `codex-x86_64-unknown-linux-musl`), so you likely want to rename it to `codex` after extracting it.
### DotSlash
The GitHub Release also contains a [DotSlash](https://dotslash-cli.com/) file for the Codex CLI named `codex`. Using a DotSlash file makes it possible to make a lightweight commit to source control to ensure all contributors use the same version of an executable, regardless of what platform they use for development.
</details>
<details>
<summary><strong>Build from source</strong></summary>
```bash
# Clone the repository and navigate to the root of the Cargo workspace.
# Clone the repository and navigate to the CLI package
git clone https://github.com/openai/codex.git
cd codex/codex-rs
cd codex/codex-cli
# Install the Rust toolchain, if necessary.
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
source "$HOME/.cargo/env"
rustup component add rustfmt
rustup component add clippy
# Enable corepack
corepack enable
# Build Codex.
cargo build
# Install dependencies and build
pnpm install
pnpm build
# Launch the TUI with a sample prompt.
cargo run --bin codex -- "explain this codebase to me"
# Get the usage and the options
node ./dist/cli.js --help
# After making changes, ensure the code is clean.
cargo fmt -- --config imports_granularity=Item
cargo clippy --tests
# Run the locally-built CLI directly
node ./dist/cli.js
# Run the tests.
cargo test
# Or link the command globally for convenience
pnpm link
```
</details>
---
## Configuration
## Configuration Guide
Codex supports a rich set of configuration options documented in [`codex-rs/config.md`](./codex-rs/config.md).
Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats.
By default, Codex loads its configuration from `~/.codex/config.toml`.
### Basic Configuration Parameters
Though `--config` can be used to set/override ad-hoc config values for individual invocations of `codex`.
| Parameter | Type | Default | Description | Available Options |
| ------------------- | ------- | ---------- | -------------------------------- | ---------------------------------------------------------------------------------------------- |
| `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API |
| `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only)<br>`auto-edit` (automatic edits)<br>`full-auto` (fully automatic) |
| `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input)<br>`ignore-and-continue` (ignore and proceed) |
| `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` |
### Custom AI Provider Configuration
In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters:
| Parameter | Type | Description | Example |
| --------- | ------ | --------------------------------------- | ----------------------------- |
| `name` | string | Display name of the provider | `"OpenAI"` |
| `baseURL` | string | API service URL | `"https://api.openai.com/v1"` |
| `envKey` | string | Environment variable name (for API key) | `"OPENAI_API_KEY"` |
### History Configuration
In the `history` object, you can configure conversation history settings:
| Parameter | Type | Description | Example Value |
| ------------------- | ------- | ------------------------------------------------------ | ------------- |
| `maxSize` | number | Maximum number of history entries to save | `1000` |
| `saveHistory` | boolean | Whether to save history | `true` |
| `sensitivePatterns` | array | Patterns of sensitive information to filter in history | `[]` |
### Configuration Examples
1. YAML format (save as `~/.codex/config.yaml`):
```yaml
model: o4-mini
approvalMode: suggest
fullAutoErrorMode: ask-user
notify: true
```
2. JSON format (save as `~/.codex/config.json`):
```json
{
"model": "o4-mini",
"approvalMode": "suggest",
"fullAutoErrorMode": "ask-user",
"notify": true
}
```
### Full Configuration Example
Below is a comprehensive example of `config.json` with multiple custom providers:
```json
{
"model": "o4-mini",
"provider": "openai",
"providers": {
"openai": {
"name": "OpenAI",
"baseURL": "https://api.openai.com/v1",
"envKey": "OPENAI_API_KEY"
},
"openrouter": {
"name": "OpenRouter",
"baseURL": "https://openrouter.ai/api/v1",
"envKey": "OPENROUTER_API_KEY"
},
"gemini": {
"name": "Gemini",
"baseURL": "https://generativelanguage.googleapis.com/v1beta/openai",
"envKey": "GEMINI_API_KEY"
},
"ollama": {
"name": "Ollama",
"baseURL": "http://localhost:11434/v1",
"envKey": "OLLAMA_API_KEY"
},
"mistral": {
"name": "Mistral",
"baseURL": "https://api.mistral.ai/v1",
"envKey": "MISTRAL_API_KEY"
},
"deepseek": {
"name": "DeepSeek",
"baseURL": "https://api.deepseek.com",
"envKey": "DEEPSEEK_API_KEY"
},
"xai": {
"name": "xAI",
"baseURL": "https://api.x.ai/v1",
"envKey": "XAI_API_KEY"
},
"groq": {
"name": "Groq",
"baseURL": "https://api.groq.com/openai/v1",
"envKey": "GROQ_API_KEY"
}
},
"history": {
"maxSize": 1000,
"saveHistory": true,
"sensitivePatterns": []
}
}
```
### Custom Instructions
You can create a `~/.codex/instructions.md` file to define custom instructions:
```markdown
- Always respond with emojis
- Only use git commands when explicitly requested
```
### Environment Variables Setup
For each AI provider, you need to set the corresponding API key in your environment variables. For example:
```bash
# OpenAI
export OPENAI_API_KEY="your-api-key-here"
# OpenRouter
export OPENROUTER_API_KEY="your-openrouter-key-here"
# Similarly for other providers
```
---
@@ -441,7 +497,7 @@ Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.mic
---
## Zero data retention (ZDR) usage
## Zero Data Retention (ZDR) Usage
Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as:
@@ -449,17 +505,11 @@ Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)]
OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention.
```
Ensure you are running `codex` with `--config disable_response_storage=true` or add this line to `~/.codex/config.toml` to avoid specifying the command line option each time:
```toml
disable_response_storage = true
```
See [the configuration documentation on `disable_response_storage`](./codex-rs/config.md#disable_response_storage) for details.
You may need to upgrade to a more recent version with: `npm i -g @openai/codex@latest`
---
## Codex open source fund
## Codex Open Source Fund
We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models.
@@ -480,7 +530,51 @@ More broadly we welcome contributions - whether you are opening your very first
- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
- Following the [development setup](#development-workflow) instructions above, ensure your change is free of lint warnings and test failures.
- Use `pnpm test:watch` during development for super-fast feedback.
- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking.
- Before pushing, run the full test/type/lint suite:
### Git Hooks with Husky
This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:
- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
- **Pre-push hook**: Runs tests and type checking before pushing to the remote
These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./codex-cli/HUSKY.md).
```bash
pnpm test && pnpm run lint && pnpm run typecheck
```
- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text
```text
I have read the CLA Document and I hereby sign the CLA
```
The CLA-Assistant bot will turn the PR status green once all authors have signed.
```bash
# Watch mode (tests rerun on change)
pnpm test:watch
# Type-check without emitting files
pnpm typecheck
# Automatically fix lint + prettier issues
pnpm lint:fix
pnpm format:fix
```
### Debugging
To debug the CLI with a visual debugger, do the following in the `codex-cli` folder:
- Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder.
- Run the CLI with `node --inspect-brk ./dist/cli.js` The program then waits until a debugger is attached before proceeding. Options:
- In VS Code, choose **Debug: Attach to Node Process** from the command palette and choose the option in the dropdown with debug port `9229` (likely the first option)
- Go to <chrome://inspect> in Chrome and find **localhost:9229** and click **trace**
### Writing high-impact code changes
@@ -492,7 +586,7 @@ More broadly we welcome contributions - whether you are opening your very first
### Opening a pull request
- Fill in the PR template (or include similar information) - **What? Why? How?**
- Run **all** checks locally (`cargo test && cargo clippy --tests && cargo fmt -- --config imports_granularity=Item`). CI failures that could have been caught locally slow down the process.
- Run **all** checks locally (`npm test && npm run lint && npm run typecheck`). CI failures that could have been caught locally slow down the process.
- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.
@@ -514,7 +608,7 @@ If you run into problems setting up the project, would like feedback on an idea,
Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket:
### Contributor license agreement (CLA)
### Contributor License Agreement (CLA)
All contributors **must** accept the CLA. The process is lightweight:
@@ -539,26 +633,49 @@ The **DCO check** blocks merges until every commit in the PR carries the footer
### Releasing `codex`
_For admins only._
To publish a new version of the CLI, run the release scripts defined in `codex-cli/package.json`:
Make sure you are on `main` and have no local changes. Then run:
1. Open the `codex-cli` directory
2. Make sure you're on a branch like `git checkout -b bump-version`
3. Bump the version and `CLI_VERSION` to current datetime: `pnpm release:version`
4. Commit the version bump (with DCO sign-off):
```bash
git add codex-cli/src/utils/session.ts codex-cli/package.json
git commit -s -m "chore(release): codex-cli v$(node -p \"require('./codex-cli/package.json').version\")"
```
5. Copy README, build, and publish to npm: `pnpm release`
6. Push to branch: `git push origin HEAD`
```shell
VERSION=0.2.0 # Can also be 0.2.0-alpha.1 or any valid Rust version.
./codex-rs/scripts/create_github_release.sh "$VERSION"
### Alternative Build Options
#### Nix Flake Development
Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`).
Enter a Nix development shell:
```bash
nix develop
```
This will make a local commit on top of `main` with `version` set to `$VERSION` in `codex-rs/Cargo.toml` (note that on `main`, we leave the version as `version = "0.0.0"`).
This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias.
This will push the commit using the tag `rust-v${VERSION}`, which in turn kicks off [the release workflow](.github/workflows/rust-release.yml). This will create a new GitHub Release named `$VERSION`.
Build and run the CLI directly:
If everything looks good in the generated GitHub Release, uncheck the **pre-release** box so it is the latest release.
```bash
nix build
./result/bin/codex --help
```
Create a PR to update [`Formula/c/codex.rb`](https://github.com/Homebrew/homebrew-core/blob/main/Formula/c/codex.rb) on Homebrew.
Run the CLI via the flake app:
```bash
nix run .#codex
```
---
## Security & responsible AI
## Security & Responsible AI
Have you discovered a vulnerability or have concerns about model output? Please e-mail **security@openai.com** and we will respond promptly.

View File

@@ -1,6 +1,6 @@
module.exports = {
root: true,
env: { browser: true, node: true, es2020: true },
env: { browser: true, es2020: true },
extends: [
"eslint:recommended",
"plugin:@typescript-eslint/recommended",

View File

@@ -1,7 +0,0 @@
# Added by ./scripts/install_native_deps.sh
/bin/codex-aarch64-apple-darwin
/bin/codex-aarch64-unknown-linux-musl
/bin/codex-linux-sandbox-arm64
/bin/codex-linux-sandbox-x64
/bin/codex-x86_64-apple-darwin
/bin/codex-x86_64-unknown-linux-musl

View File

@@ -1,4 +1,4 @@
FROM node:24-slim
FROM node:20-slim
ARG TZ
ENV TZ="$TZ"

View File

@@ -1,736 +0,0 @@
<h1 align="center">OpenAI Codex CLI</h1>
<p align="center">Lightweight coding agent that runs in your terminal</p>
<p align="center"><code>npm i -g @openai/codex</code></p>
> [!IMPORTANT]
> This is the documentation for the _legacy_ TypeScript implementation of the Codex CLI. It has been superseded by the _Rust_ implementation. See the [README in the root of the Codex repository](https://github.com/openai/codex/blob/main/README.md) for details.
![Codex demo GIF using: codex "explain this codebase to me"](../.github/demo.gif)
---
<details>
<summary><strong>Table of contents</strong></summary>
<!-- Begin ToC -->
- [Experimental technology disclaimer](#experimental-technology-disclaimer)
- [Quickstart](#quickstart)
- [Why Codex?](#why-codex)
- [Security model & permissions](#security-model--permissions)
- [Platform sandboxing details](#platform-sandboxing-details)
- [System requirements](#system-requirements)
- [CLI reference](#cli-reference)
- [Memory & project docs](#memory--project-docs)
- [Non-interactive / CI mode](#non-interactive--ci-mode)
- [Tracing / verbose logging](#tracing--verbose-logging)
- [Recipes](#recipes)
- [Installation](#installation)
- [Configuration guide](#configuration-guide)
- [Basic configuration parameters](#basic-configuration-parameters)
- [Custom AI provider configuration](#custom-ai-provider-configuration)
- [History configuration](#history-configuration)
- [Configuration examples](#configuration-examples)
- [Full configuration example](#full-configuration-example)
- [Custom instructions](#custom-instructions)
- [Environment variables setup](#environment-variables-setup)
- [FAQ](#faq)
- [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage)
- [Codex open source fund](#codex-open-source-fund)
- [Contributing](#contributing)
- [Development workflow](#development-workflow)
- [Git hooks with Husky](#git-hooks-with-husky)
- [Debugging](#debugging)
- [Writing high-impact code changes](#writing-high-impact-code-changes)
- [Opening a pull request](#opening-a-pull-request)
- [Review process](#review-process)
- [Community values](#community-values)
- [Getting help](#getting-help)
- [Contributor license agreement (CLA)](#contributor-license-agreement-cla)
- [Quick fixes](#quick-fixes)
- [Releasing `codex`](#releasing-codex)
- [Alternative build options](#alternative-build-options)
- [Nix flake development](#nix-flake-development)
- [Security & responsible AI](#security--responsible-ai)
- [License](#license)
<!-- End ToC -->
</details>
---
## Experimental technology disclaimer
Codex CLI is an experimental project under active development. It is not yet stable: it may contain bugs or incomplete features, and it may undergo breaking changes. We're building it in the open with the community and welcome:
- Bug reports
- Feature requests
- Pull requests
- Good vibes
Help us improve by filing issues or submitting PRs (see the section below for how to contribute)!
## Quickstart
Install globally:
```shell
npm install -g @openai/codex
```
Next, set your OpenAI API key as an environment variable:
```shell
export OPENAI_API_KEY="your-api-key-here"
```
> **Note:** This command sets the key only for your current terminal session. To make it permanent, add the `export` line to your shell's configuration file (e.g., `~/.zshrc`), though we recommend setting it per session. **Tip:** You can also place your API key into a `.env` file at the root of your project:
>
> ```env
> OPENAI_API_KEY=your-api-key-here
> ```
>
> The CLI will automatically load variables from `.env` (via `dotenv/config`).
<details>
<summary><strong>Use <code>--provider</code> to use other models</strong></summary>
> Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are:
>
> - openai (default)
> - openrouter
> - azure
> - gemini
> - ollama
> - mistral
> - deepseek
> - xai
> - groq
> - arceeai
> - any other provider that is compatible with the OpenAI API
>
> If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as:
>
> ```shell
> export <provider>_API_KEY="your-api-key-here"
> ```
>
> If you use a provider not listed above, you must also set the base URL for the provider:
>
> ```shell
> export <provider>_BASE_URL="https://your-provider-api-base-url"
> ```
</details>
<br />
Run interactively:
```shell
codex
```
Or, run with a prompt as input (and optionally in `Full Auto` mode):
```shell
codex "explain this codebase to me"
```
```shell
codex --approval-mode full-auto "create the fanciest todo-list app"
```
That's it - Codex will scaffold a file, run it inside a sandbox, install any
missing dependencies, and show you the live result. Approve the changes and
they'll be committed to your working directory.
---
## Why Codex?
Codex CLI is built for developers who already **live in the terminal** and want
ChatGPT-level reasoning **plus** the power to actually run code, manipulate
files, and iterate - all under version control. In short, it's _chat-driven
development_ that understands and executes your repo.
- **Zero setup** - bring your OpenAI API key and it just works!
- **Full auto-approval, while safe + secure** by running network-disabled and directory-sandboxed
- **Multimodal** - pass in screenshots or diagrams to implement features ✨
And it's **fully open-source** so you can see and contribute to how it develops!
---
## Security model & permissions
Codex lets you decide _how much autonomy_ the agent receives via the `--approval-mode` flag (or the interactive onboarding prompt), which sets the auto-approval policy:
| Mode | What the agent may do without asking | Still requires approval |
| ------------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
| **Suggest** <br>(default) | <li>Read any file in the repo | <li>**All** file writes/patches<li> **Any** arbitrary shell commands (aside from reading files) |
| **Auto Edit** | <li>Read **and** apply-patch writes to files | <li>**All** shell commands |
| **Full Auto** | <li>Read/write files <li> Execute shell commands (network disabled, writes limited to your workdir) | - |
In **Full Auto** every command is run **network-disabled** and confined to the
current working directory (plus temporary files) for defense-in-depth. Codex
will also show a warning/confirmation if you start in **auto-edit** or
**full-auto** while the directory is _not_ tracked by Git, so you always have a
safety net.
Coming soon: you'll be able to whitelist specific commands to auto-execute with
the network enabled, once we're confident in additional safeguards.
### Platform sandboxing details
The hardening mechanism Codex uses depends on your OS:
- **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).
- Everything is placed in a read-only jail except for a small set of
writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.).
- Outbound network is _fully blocked_ by default - even if a child process
tries to `curl` somewhere it will fail.
- **Linux** - there is no sandboxing by default.
We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal
container image** and mounts your repo _read/write_ at the same path. A
custom `iptables`/`ipset` firewall script denies all egress except the
OpenAI API. This gives you deterministic, reproducible runs without needing
root on the host. You can use the [`run_in_container.sh`](../codex-cli/scripts/run_in_container.sh) script to set up the sandbox.
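
For example, a sketch of launching a sandboxed run (illustrative; check the script's argument parsing for the exact invocation on your checkout):

```bash
./codex-cli/scripts/run_in_container.sh --work_dir "$PWD" "explain this codebase to me"
```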
---
## System requirements
| Requirement | Details |
| --------------------------- | --------------------------------------------------------------- |
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
| Node.js | **22 or newer** (LTS recommended) |
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
| RAM | 4-GB minimum (8-GB recommended) |
> Never run `sudo npm install -g`; fix npm permissions instead.
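
One common way to fix npm permissions (a sketch, not the only option) is to give npm a user-writable global prefix:

```shell
npm config set prefix ~/.npm-global
export PATH="$HOME/.npm-global/bin:$PATH" # add this line to your shell profile
npm install -g @openai/codex
```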
---
## CLI reference
| Command | Purpose | Example |
| ------------------------------------ | ----------------------------------- | ------------------------------------ |
| `codex` | Interactive REPL | `codex` |
| `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` |
| `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` |
| `codex completion <bash\|zsh\|fish>` | Print shell completion script | `codex completion bash` |
Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.
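
For instance, several of these flags can be combined in one invocation (illustrative):

```shell
codex --model o4-mini --approval-mode auto-edit --quiet "fix lint errors"
```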
---
## Memory & project docs
You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for `AGENTS.md` files in the following places, and merges them top-down:
1. `~/.codex/AGENTS.md` - personal global guidance
2. `AGENTS.md` at repo root - shared project notes
3. `AGENTS.md` in the current working directory - sub-folder/feature specifics
Disable loading of these files with `--no-project-doc` or the environment variable `CODEX_DISABLE_PROJECT_DOC=1`.
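
For example, either of these runs Codex without loading any `AGENTS.md` files:

```shell
codex --no-project-doc "explain this codebase to me"
# or, via the environment variable
CODEX_DISABLE_PROJECT_DOC=1 codex "explain this codebase to me"
```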
---
## Non-interactive / CI mode
Run Codex head-less in pipelines. Example GitHub Action step:
```yaml
- name: Update changelog via Codex
run: |
npm install -g @openai/codex
export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
codex -a auto-edit --quiet "update CHANGELOG for next release"
```
Set `CODEX_QUIET_MODE=1` to silence interactive UI noise.
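
For example (reusing the quiet-mode flags from the CLI reference above):

```shell
export CODEX_QUIET_MODE=1
codex -q --json "explain utils.ts"
```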
## Tracing / verbose logging
Setting the environment variable `DEBUG=true` prints full API request and response details:
```shell
DEBUG=true codex
```
---
## Recipes
Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/openai/codex/blob/main/codex-cli/examples/prompting_guide.md) for more tips and usage patterns.
| ✨ | What you type | What happens |
| --- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
| 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. |
| 2 | `codex "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. |
| 3 | `codex "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. |
| 4 | `codex "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. |
| 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. |
| 6 | `codex "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. |
| 7 | `codex "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. |
---
## Installation
<details open>
<summary><strong>From npm (Recommended)</strong></summary>
```bash
npm install -g @openai/codex
# or
yarn global add @openai/codex
# or
bun install -g @openai/codex
# or
pnpm add -g @openai/codex
```
</details>
<details>
<summary><strong>Build from source</strong></summary>
```bash
# Clone the repository and navigate to the CLI package
git clone https://github.com/openai/codex.git
cd codex/codex-cli
# Enable corepack
corepack enable
# Install dependencies and build
pnpm install
pnpm build
# Linux-only: download prebuilt sandboxing binaries (requires gh and zstd).
./scripts/install_native_deps.sh
# Get the usage and the options
node ./dist/cli.js --help
# Run the locally-built CLI directly
node ./dist/cli.js
# Or link the command globally for convenience
pnpm link
```
</details>
---
## Configuration guide
Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats.
### Basic configuration parameters
| Parameter | Type | Default | Description | Available Options |
| ------------------- | ------- | ---------- | -------------------------------- | ---------------------------------------------------------------------------------------------- |
| `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API |
| `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only)<br>`auto-edit` (automatic edits)<br>`full-auto` (fully automatic) |
| `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input)<br>`ignore-and-continue` (ignore and proceed) |
| `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` |
### Custom AI provider configuration
In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters:
| Parameter | Type | Description | Example |
| --------- | ------ | --------------------------------------- | ----------------------------- |
| `name` | string | Display name of the provider | `"OpenAI"` |
| `baseURL` | string | API service URL | `"https://api.openai.com/v1"` |
| `envKey` | string | Environment variable name (for API key) | `"OPENAI_API_KEY"` |
### History configuration
In the `history` object, you can configure conversation history settings:
| Parameter | Type | Description | Example Value |
| ------------------- | ------- | ------------------------------------------------------ | ------------- |
| `maxSize` | number | Maximum number of history entries to save | `1000` |
| `saveHistory` | boolean | Whether to save history | `true` |
| `sensitivePatterns` | array | Patterns of sensitive information to filter in history | `[]` |
### Configuration examples
1. YAML format (save as `~/.codex/config.yaml`):
```yaml
model: o4-mini
approvalMode: suggest
fullAutoErrorMode: ask-user
notify: true
```
2. JSON format (save as `~/.codex/config.json`):
```json
{
"model": "o4-mini",
"approvalMode": "suggest",
"fullAutoErrorMode": "ask-user",
"notify": true
}
```
### Full configuration example
Below is a comprehensive example of `config.json` with multiple custom providers:
```json
{
"model": "o4-mini",
"provider": "openai",
"providers": {
"openai": {
"name": "OpenAI",
"baseURL": "https://api.openai.com/v1",
"envKey": "OPENAI_API_KEY"
},
"azure": {
"name": "AzureOpenAI",
"baseURL": "https://YOUR_PROJECT_NAME.openai.azure.com/openai",
"envKey": "AZURE_OPENAI_API_KEY"
},
"openrouter": {
"name": "OpenRouter",
"baseURL": "https://openrouter.ai/api/v1",
"envKey": "OPENROUTER_API_KEY"
},
"gemini": {
"name": "Gemini",
"baseURL": "https://generativelanguage.googleapis.com/v1beta/openai",
"envKey": "GEMINI_API_KEY"
},
"ollama": {
"name": "Ollama",
"baseURL": "http://localhost:11434/v1",
"envKey": "OLLAMA_API_KEY"
},
"mistral": {
"name": "Mistral",
"baseURL": "https://api.mistral.ai/v1",
"envKey": "MISTRAL_API_KEY"
},
"deepseek": {
"name": "DeepSeek",
"baseURL": "https://api.deepseek.com",
"envKey": "DEEPSEEK_API_KEY"
},
"xai": {
"name": "xAI",
"baseURL": "https://api.x.ai/v1",
"envKey": "XAI_API_KEY"
},
"groq": {
"name": "Groq",
"baseURL": "https://api.groq.com/openai/v1",
"envKey": "GROQ_API_KEY"
},
"arceeai": {
"name": "ArceeAI",
"baseURL": "https://conductor.arcee.ai/v1",
"envKey": "ARCEEAI_API_KEY"
}
},
"history": {
"maxSize": 1000,
"saveHistory": true,
"sensitivePatterns": []
}
}
```
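
With multiple providers configured, you can select one per run using the `--provider` flag described earlier (a sketch; assumes the matching API key is exported):

```shell
export OPENROUTER_API_KEY="your-openrouter-key-here"
codex --provider openrouter "explain this codebase to me"
```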
### Custom instructions
You can create a `~/.codex/AGENTS.md` file to define custom guidance for the agent:
```markdown
- Always respond with emojis
- Only use git commands when explicitly requested
```
### Environment variables setup
For each AI provider, you need to set the corresponding API key in your environment variables. For example:
```bash
# OpenAI
export OPENAI_API_KEY="your-api-key-here"
# Azure OpenAI
export AZURE_OPENAI_API_KEY="your-azure-api-key-here"
export AZURE_OPENAI_API_VERSION="2025-04-01-preview" # optional
# OpenRouter
export OPENROUTER_API_KEY="your-openrouter-key-here"
# Similarly for other providers
```
---
## FAQ
<details>
<summary>OpenAI released a model called Codex in 2021 - is this related?</summary>
In 2021, OpenAI released Codex, an AI system designed to generate code from natural language prompts. That original Codex model was deprecated as of March 2023 and is separate from the CLI tool.
</details>
<details>
<summary>Which models are supported?</summary>
Any model available with [Responses API](https://platform.openai.com/docs/api-reference/responses). The default is `o4-mini`, but pass `--model gpt-4.1` or set `model: gpt-4.1` in your config file to override.
</details>
<details>
<summary>Why does <code>o3</code> or <code>o4-mini</code> not work for me?</summary>
It's possible that your [API account needs to be verified](https://help.openai.com/en/articles/10910291-api-organization-verification) in order to start streaming responses and seeing chain of thought summaries from the API. If you're still running into issues, please let us know!
</details>
<details>
<summary>How do I stop Codex from editing my files?</summary>
Codex runs model-generated commands in a sandbox. If a proposed command or file change doesn't look right, you can simply type **n** to deny the command or give the model feedback.
</details>
<details>
<summary>Does it work on Windows?</summary>
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex has been tested on macOS and Linux with Node 22.
</details>
---
## Zero data retention (ZDR) usage
Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as:
```
OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention.
```
You may need to upgrade to a more recent version with: `npm i -g @openai/codex@latest`
---
## Codex open source fund
We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models.
- Grants are awarded up to **$25,000** in API credits.
- Applications are reviewed **on a rolling basis**.
**Interested? [Apply here](https://openai.com/form/codex-open-source-fund/).**
---
## Contributing
This project is under active development and the code will likely change pretty significantly. We'll update this message once development has stabilized!
More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.
### Development workflow
- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
- Use `pnpm test:watch` during development for super-fast feedback.
- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking.
- Before pushing, run the full test/type/lint suite:
```bash
pnpm test && pnpm run lint && pnpm run typecheck
```

- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text

```text
I have read the CLA Document and I hereby sign the CLA
```

The CLA-Assistant bot will turn the PR status green once all authors have signed.

Other handy commands while developing:

```bash
# Watch mode (tests rerun on change)
pnpm test:watch

# Type-check without emitting files
pnpm typecheck

# Automatically fix lint + prettier issues
pnpm lint:fix
pnpm format:fix
```

### Git hooks with Husky

This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:

- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
- **Pre-push hook**: Runs tests and type checking before pushing to the remote

These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./HUSKY.md).
### Debugging
To debug the CLI with a visual debugger, do the following in the `codex-cli` folder:
- Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder.
- Run the CLI with `node --inspect-brk ./dist/cli.js`. The program then waits until a debugger is attached before proceeding. Options:
- In VS Code, choose **Debug: Attach to Node Process** from the command palette and choose the option in the dropdown with debug port `9229` (likely the first option)
- Go to <chrome://inspect> in Chrome and find **localhost:9229** and click **trace**
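
Putting the steps together, a typical debug session from the `codex-cli` folder looks like this (illustrative):

```bash
pnpm run build
node --inspect-brk ./dist/cli.js "explain this codebase to me"
# then attach VS Code or Chrome DevTools on port 9229
```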
### Writing high-impact code changes
1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written.
2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions.
3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codex --help`), or relevant example projects.
4. **Keep commits atomic.** Each commit should compile and the tests should pass. This makes reviews and potential rollbacks easier.
### Opening a pull request
- Fill in the PR template (or include similar information) - **What? Why? How?**
- Run **all** checks locally (`pnpm test && pnpm run lint && pnpm run typecheck`). CI failures that could have been caught locally slow down the process.
- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.
### Review process
1. One maintainer will be assigned as a primary reviewer.
2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability.
3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.
### Community values
- **Be kind and inclusive.** Treat others with respect; we follow the [Contributor Covenant](https://www.contributor-covenant.org/).
- **Assume good intent.** Written communication is hard - err on the side of generosity.
- **Teach & learn.** If you spot something confusing, open an issue or PR with improvements.
### Getting help
If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help.
Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket:
### Contributor license agreement (CLA)
All contributors **must** accept the CLA. The process is lightweight:
1. Open your pull request.
2. Paste the following comment (or reply `recheck` if you've signed before):
```text
I have read the CLA Document and I hereby sign the CLA
```
3. The CLA-Assistant bot records your signature in the repo and marks the status check as passed.
No special Git commands, email attachments, or commit footers required.
#### Quick fixes
| Scenario | Command |
| ----------------- | ------------------------------------------------ |
| Amend last commit | `git commit --amend -s --no-edit && git push -f` |
The **DCO check** blocks merges until every commit in the PR carries the footer (with squash this is just the one).
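
If several commits are missing the footer, one option (assuming a clean working tree on your topic branch) is to sign them all off in a single rebase:

```bash
git rebase --signoff main && git push -f
```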
### Releasing `codex`
To publish a new version of the CLI you first need to stage the npm package. A
helper script in `codex-cli/scripts/` does all the heavy lifting. Inside the
`codex-cli` folder run:
```bash
# Classic, JS implementation that includes small, native binaries for Linux sandboxing.
pnpm stage-release
# Optionally specify the temp directory to reuse between runs.
RELEASE_DIR=$(mktemp -d)
pnpm stage-release --tmp "$RELEASE_DIR"
# "Fat" package that additionally bundles the native Rust CLI binaries for
# Linux. End-users can then opt-in at runtime by setting CODEX_RUST=1.
pnpm stage-release --native
```
Go to the folder where the release is staged and verify that it works as intended. If so, run the following from the temp folder:
```bash
cd "$RELEASE_DIR"
npm publish
```
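
Before running `npm publish`, one way to smoke-test the staged package (a sketch; run from the staged directory, assuming Node is on your PATH):

```bash
node ./bin/codex.js --help
```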
### Alternative build options
#### Nix flake development
Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`).
Enter a Nix development shell:
```bash
# Use either one of the commands according to which implementation you want to work with
nix develop .#codex-cli # For entering codex-cli specific shell
nix develop .#codex-rs # For entering codex-rs specific shell
```
This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias.
Build and run the CLI directly:
```bash
# Use either one of the commands according to which implementation you want to work with
nix build .#codex-cli # For building codex-cli
nix build .#codex-rs # For building codex-rs
./result/bin/codex --help
```
Run the CLI via the flake app:
```bash
# Use either one of the commands according to which implementation you want to work with
nix run .#codex-cli # For running codex-cli
nix run .#codex-rs # For running codex-rs
```
**Use direnv with flakes**
If you have direnv installed, you can use the following `.envrc` to automatically enter the Nix shell when you `cd` into the project directory:
```bash
cd codex-rs
echo "use flake ../flake.nix#codex-rs" >> .envrc && direnv allow
cd ../codex-cli
echo "use flake ../flake.nix#codex-cli" >> .envrc && direnv allow
```
---
## Security & responsible AI
Have you discovered a vulnerability or have concerns about model output? Please e-mail **security@openai.com** and we will respond promptly.
---
## License
This repository is licensed under the [Apache-2.0 License](LICENSE).
@@ -1,153 +1,27 @@
#!/usr/bin/env node
// Unified entry point for the Codex CLI.
/*
* Behavior
* =========
* 1. By default we import the JavaScript implementation located in
* dist/cli.js.
*
* 2. Developers can opt-in to a pre-compiled Rust binary by setting the
* environment variable CODEX_RUST to a truthy value (`1`, `true`, etc.).
* When that variable is present we resolve the correct binary for the
* current platform / architecture and execute it via child_process.
*
* If CODEX_RUST=1 is specified and there is no native binary for the
* current platform / architecture, an error is thrown.
*/
import fs from "fs";
import path from "path";
import { fileURLToPath, pathToFileURL } from "url";
// Unified entry point for Codex CLI on all platforms
// Dynamically loads the compiled ESM bundle in dist/cli.js
// Determine whether the user explicitly wants the Rust CLI.
import path from 'path';
import { fileURLToPath, pathToFileURL } from 'url';
// __dirname equivalent in ESM
// Determine this script's directory
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// For the @native release of the Node module, the `use-native` file is added,
// indicating we should default to the native binary. For other releases,
// setting CODEX_RUST=1 will opt-in to the native binary, if included.
const wantsNative = fs.existsSync(path.join(__dirname, "use-native")) ||
(process.env.CODEX_RUST != null
? ["1", "true", "yes"].includes(process.env.CODEX_RUST.toLowerCase())
: false);
// Resolve the path to the compiled CLI bundle
const cliPath = path.resolve(__dirname, '../dist/cli.js');
const cliUrl = pathToFileURL(cliPath).href;
// Try native binary if requested.
if (wantsNative && process.platform !== 'win32') {
const { platform, arch } = process;
let targetTriple = null;
switch (platform) {
case "linux":
case "android":
switch (arch) {
case "x64":
targetTriple = "x86_64-unknown-linux-musl";
break;
case "arm64":
targetTriple = "aarch64-unknown-linux-musl";
break;
default:
break;
}
break;
case "darwin":
switch (arch) {
case "x64":
targetTriple = "x86_64-apple-darwin";
break;
case "arm64":
targetTriple = "aarch64-apple-darwin";
break;
default:
break;
}
break;
default:
break;
}
if (!targetTriple) {
throw new Error(`Unsupported platform: ${platform} (${arch})`);
}
const binaryPath = path.join(__dirname, "..", "bin", `codex-${targetTriple}`);
// Use an asynchronous spawn instead of spawnSync so that Node is able to
// respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is
// executing. This allows us to forward those signals to the child process
// and guarantees that when either the child terminates or the parent
// receives a fatal signal, both processes exit in a predictable manner.
const { spawn } = await import("child_process");
const child = spawn(binaryPath, process.argv.slice(2), {
stdio: "inherit",
});
child.on("error", (err) => {
// Typically triggered when the binary is missing or not executable.
// Re-throwing here will terminate the parent with a non-zero exit code
// while still printing a helpful stack trace.
// eslint-disable-next-line no-console
console.error(err);
process.exit(1);
});
// Forward common termination signals to the child so that it shuts down
// gracefully. In the handler we temporarily disable the default behavior of
// exiting immediately; once the child has been signaled we simply wait for
// its exit event which will in turn terminate the parent (see below).
const forwardSignal = (signal) => {
if (child.killed) {
return;
}
try {
child.kill(signal);
} catch {
/* ignore */
}
};
["SIGINT", "SIGTERM", "SIGHUP"].forEach((sig) => {
process.on(sig, () => forwardSignal(sig));
});
// When the child exits, mirror its termination reason in the parent so that
// shell scripts and other tooling observe the correct exit status.
// Wrap the lifetime of the child process in a Promise so that we can await
// its termination in a structured way. The Promise resolves with an object
// describing how the child exited: either via exit code or due to a signal.
const childResult = await new Promise((resolve) => {
child.on("exit", (code, signal) => {
if (signal) {
resolve({ type: "signal", signal });
} else {
resolve({ type: "code", exitCode: code ?? 1 });
}
});
});
if (childResult.type === "signal") {
// Re-emit the same signal so that the parent terminates with the expected
// semantics (this also sets the correct exit code of 128 + n).
process.kill(process.pid, childResult.signal);
} else {
process.exit(childResult.exitCode);
}
} else {
// Fallback: execute the original JavaScript CLI.
// Resolve the path to the compiled CLI bundle
const cliPath = path.resolve(__dirname, "../dist/cli.js");
const cliUrl = pathToFileURL(cliPath).href;
// Load and execute the CLI
// Load and execute the CLI
try {
await import(cliUrl);
} catch (err) {
// eslint-disable-next-line no-console
console.error(err);
// eslint-disable-next-line no-undef
process.exit(1);
}
}
})();
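
As the header comment explains, the native binary is strictly opt-in; for example (assuming a platform with a bundled binary):

```bash
CODEX_RUST=1 codex --help
```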
@@ -72,9 +72,6 @@ if (isDevBuild) {
esbuild
.build({
entryPoints: ["src/cli.tsx"],
// Do not bundle the contents of package.json at build time: always read it
// at runtime.
external: ["../package.json"],
bundle: true,
format: "esm",
platform: "node",
@@ -1,43 +0,0 @@
{ pkgs, monorep-deps ? [], ... }:
let
node = pkgs.nodejs_22;
in
rec {
package = pkgs.buildNpmPackage {
pname = "codex-cli";
version = "0.1.0";
src = ./.;
npmDepsHash = "sha256-3tAalmh50I0fhhd7XreM+jvl0n4zcRhqygFNB1Olst8";
nodejs = node;
npmInstallFlags = [ "--frozen-lockfile" ];
meta = with pkgs.lib; {
description = "OpenAI Codex commandline interface";
license = licenses.asl20;
homepage = "https://github.com/openai/codex";
};
};
devShell = pkgs.mkShell {
name = "codex-cli-dev";
buildInputs = monorep-deps ++ [
node
pkgs.pnpm
];
shellHook = ''
echo "Entering development shell for codex-cli"
# cd codex-cli
if [ -f package-lock.json ]; then
pnpm ci || echo "npm ci failed"
else
pnpm install || echo "npm install failed"
fi
npm run build || echo "npm build failed"
export PATH=$PWD/node_modules/.bin:$PATH
alias codex="node $PWD/dist/cli.js"
'';
};
app = {
type = "app";
program = "${package}/bin/codex";
};
}
@@ -1,7 +1,7 @@
name: "impossible-pong"
description: |
Update index.html with the following features:
- Add an overlaid styled popup to start the game on first load
- Add an overlayed styled popup to start the game on first load
- Between each point, show a 3 second countdown (this should be skipped if a player wins)
- After each game the AI wins, display text at the bottom of the screen with lighthearted insults for the player
- Add a leaderboard to the right of the court that shows how many games each player has won.
@@ -13,7 +13,7 @@ act,prompt,for_devs
"Advertiser","I want you to act as an advertiser. You will create a campaign to promote a product or service of your choice. You will choose a target audience, develop key messages and slogans, select the media channels for promotion, and decide on any additional activities needed to reach your goals. My first suggestion request is ""I need help creating an advertising campaign for a new type of energy drink targeting young adults aged 18-30.""",FALSE
"Storyteller","I want you to act as a storyteller. You will come up with entertaining stories that are engaging, imaginative and captivating for the audience. It can be fairy tales, educational stories or any other type of stories which has the potential to capture people's attention and imagination. Depending on the target audience, you may choose specific themes or topics for your storytelling session e.g., if it's children then you can talk about animals; If it's adults then history-based tales might engage them better etc. My first request is ""I need an interesting story on perseverance.""",FALSE
"Football Commentator","I want you to act as a football commentator. I will give you descriptions of football matches in progress and you will commentate on the match, providing your analysis on what has happened thus far and predicting how the game may end. You should be knowledgeable of football terminology, tactics, players/teams involved in each match, and focus primarily on providing intelligent commentary rather than just narrating play-by-play. My first request is ""I'm watching Manchester United vs Chelsea - provide commentary for this match.""",FALSE
"Stand-up Comedian","I want you to act as a stand-up comedian. I will provide you with some topics related to current events and you will use your with, creativity, and observational skills to create a routine based on those topics. You should also be sure to incorporate personal anecdotes or experiences into the routine in order to make it more relatable and engaging for the audience. My first request is ""I want an humorous take on politics.""",FALSE
"Stand-up Comedian","I want you to act as a stand-up comedian. I will provide you with some topics related to current events and you will use your wit, creativity, and observational skills to create a routine based on those topics. You should also be sure to incorporate personal anecdotes or experiences into the routine in order to make it more relatable and engaging for the audience. My first request is ""I want an humorous take on politics.""",FALSE
"Motivational Coach","I want you to act as a motivational coach. I will provide you with some information about someone's goals and challenges, and it will be your job to come up with strategies that can help this person achieve their goals. This could involve providing positive affirmations, giving helpful advice or suggesting activities they can do to reach their end goal. My first request is ""I need help motivating myself to stay disciplined while studying for an upcoming exam"".",FALSE
"Composer","I want you to act as a composer. I will provide the lyrics to a song and you will create music for it. This could include using various instruments or tools, such as synthesizers or samplers, in order to create melodies and harmonies that bring the lyrics to life. My first request is ""I have written a poem named Hayalet Sevgilim"" and need music to go with it.""""""",FALSE
"Debater","I want you to act as a debater. I will provide you with some topics related to current events and your task is to research both sides of the debates, present valid arguments for each side, refute opposing points of view, and draw persuasive conclusions based on evidence. Your goal is to help people come away from the discussion with increased knowledge and insight into the topic at hand. My first request is ""I want an opinion piece about Deno.""",FALSE
@@ -23,7 +23,7 @@ act,prompt,for_devs
"Movie Critic","I want you to act as a movie critic. You will develop an engaging and creative movie review. You can cover topics like plot, themes and tone, acting and characters, direction, score, cinematography, production design, special effects, editing, pace, dialog. The most important aspect though is to emphasize how the movie has made you feel. What has really resonated with you. You can also be critical about the movie. Please avoid spoilers. My first request is ""I need to write a movie review for the movie Interstellar""",FALSE
"Relationship Coach","I want you to act as a relationship coach. I will provide some details about the two people involved in a conflict, and it will be your job to come up with suggestions on how they can work through the issues that are separating them. This could include advice on communication techniques or different strategies for improving their understanding of one another's perspectives. My first request is ""I need help solving conflicts between my spouse and myself.""",FALSE
"Poet","I want you to act as a poet. You will create poems that evoke emotions and have the power to stir people's soul. Write on any topic or theme but make sure your words convey the feeling you are trying to express in beautiful yet meaningful ways. You can also come up with short verses that are still powerful enough to leave an imprint in readers' minds. My first request is ""I need a poem about love.""",FALSE
"Rapper","I want you to act as a rapper. You will come up with powerful and meaningful lyrics, beats and rhythm that can 'wow' the audience. Your lyrics should have an intriguing meaning and message which people can relate too. When it comes to choosing your beat, make sure it is catchy yet relevant to your words, so that when combined they make an explosion of sound every time! My first request is ""I need a rap song about finding strength within yourself.""",FALSE
"Rapper","I want you to act as a rapper. You will come up with powerful and meaningful lyrics, beats and rhythm that can 'wow' the audience. Your lyrics should have an intriguing meaning and message which people can relate too. When it comes to choosing your beat, make sure it is catchy yet relevant to your words, so that when combined they make an explosion of sound everytime! My first request is ""I need a rap song about finding strength within yourself.""",FALSE
"Motivational Speaker","I want you to act as a motivational speaker. Put together words that inspire action and make people feel empowered to do something beyond their abilities. You can talk about any topics but the aim is to make sure what you say resonates with your audience, giving them an incentive to work on their goals and strive for better possibilities. My first request is ""I need a speech about how everyone should never give up.""",FALSE
"Philosophy Teacher","I want you to act as a philosophy teacher. I will provide some topics related to the study of philosophy, and it will be your job to explain these concepts in an easy-to-understand manner. This could include providing examples, posing questions or breaking down complex ideas into smaller pieces that are easier to comprehend. My first request is ""I need help understanding how different philosophical theories can be applied in everyday life.""",FALSE
"Philosopher","I want you to act as a philosopher. I will provide some topics or questions related to the study of philosophy, and it will be your job to explore these concepts in depth. This could involve conducting research into various philosophical theories, proposing new ideas or finding creative solutions for solving complex problems. My first request is ""I need help developing an ethical framework for decision making.""",FALSE
@@ -1,6 +1,6 @@
{
"name": "@openai/codex",
"version": "0.0.0-dev",
"version": "0.1.2504251709",
"license": "Apache-2.0",
"bin": {
"codex": "bin/codex.js"
@@ -20,10 +20,12 @@
"typecheck": "tsc --noEmit",
"build": "node build.mjs",
"build:dev": "NODE_ENV=development node build.mjs --dev && NODE_OPTIONS=--enable-source-maps node dist/cli-dev.js",
"stage-release": "./scripts/stage_release.sh"
"release:readme": "cp ../README.md ./README.md",
"release:version": "TS=$(date +%y%m%d%H%M) && sed -E -i'' -e \"s/\\\"0\\.1\\.[0-9]{10}\\\"/\\\"0.1.${TS}\\\"/g\" package.json src/utils/session.ts",
"release:build-and-publish": "pnpm run build && npm publish",
"release": "pnpm run release:readme && pnpm run release:version && pnpm install && pnpm run release:build-and-publish"
},
"files": [
"bin",
"dist"
],
"dependencies": {
@@ -31,12 +33,10 @@
"chalk": "^5.2.0",
"diff": "^7.0.0",
"dotenv": "^16.1.4",
"express": "^5.1.0",
"fast-deep-equal": "^3.1.3",
"fast-npm-meta": "^0.4.2",
"figures": "^6.1.0",
"file-type": "^20.1.0",
"https-proxy-agent": "^7.0.6",
"ink": "^5.2.0",
"js-yaml": "^4.1.0",
"marked": "^15.0.7",
@@ -55,7 +55,6 @@
"devDependencies": {
"@eslint/js": "^9.22.0",
"@types/diff": "^7.0.2",
"@types/express": "^5.0.1",
"@types/js-yaml": "^4.0.9",
"@types/marked-terminal": "^6.1.1",
"@types/react": "^18.0.32",
@@ -77,13 +76,12 @@
"semver": "^7.7.1",
"ts-node": "^10.9.1",
"typescript": "^5.0.3",
"vite": "^6.3.4",
"vitest": "^3.1.2",
"vitest": "^3.0.9",
"whatwg-url": "^14.2.0",
"which": "^5.0.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/openai/codex.git"
"url": "https://github.com/openai/codex"
}
}
@@ -1,9 +0,0 @@
# npm releases
To build the 0.2.x or later version of the npm module, which runs the Rust version of the CLI, run the following:
```bash
./codex-cli/scripts/stage_rust_release.py --release-version 0.6.0
```
@@ -2,26 +2,6 @@
set -euo pipefail # Exit on error, undefined vars, and pipeline failures
IFS=$'\n\t' # Stricter word splitting
# Read allowed domains from file
ALLOWED_DOMAINS_FILE="/etc/codex/allowed_domains.txt"
if [ -f "$ALLOWED_DOMAINS_FILE" ]; then
ALLOWED_DOMAINS=()
while IFS= read -r domain; do
ALLOWED_DOMAINS+=("$domain")
done < "$ALLOWED_DOMAINS_FILE"
echo "Using domains from file: ${ALLOWED_DOMAINS[*]}"
else
# Fallback to default domains
ALLOWED_DOMAINS=("api.openai.com")
echo "Domains file not found, using default: ${ALLOWED_DOMAINS[*]}"
fi
# Ensure we have at least one domain
if [ ${#ALLOWED_DOMAINS[@]} -eq 0 ]; then
echo "ERROR: No allowed domains specified"
exit 1
fi
# Flush existing rules and delete existing ipsets
iptables -F
iptables -X
@@ -44,7 +24,8 @@ iptables -A OUTPUT -o lo -j ACCEPT
ipset create allowed-domains hash:net
# Resolve and add other allowed domains
for domain in "${ALLOWED_DOMAINS[@]}"; do
for domain in \
"api.openai.com"; do
echo "Resolving $domain..."
ips=$(dig +short A "$domain")
if [ -z "$ips" ]; then
@@ -106,7 +87,7 @@ else
echo "Firewall verification passed - unable to reach https://example.com as expected"
fi
# Always verify OpenAI API access is working
# Verify OpenAI API access
if ! curl --connect-timeout 5 https://api.openai.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - unable to reach https://api.openai.com"
exit 1
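
To spot-check the firewall by hand from inside the container, the same probes the script runs can be issued manually (illustrative):

```bash
curl --connect-timeout 5 https://example.com >/dev/null 2>&1 && echo "unexpectedly reachable"
curl --connect-timeout 5 https://api.openai.com >/dev/null 2>&1 && echo "OpenAI API reachable"
```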
@@ -1,106 +0,0 @@
#!/usr/bin/env bash
# Install native runtime dependencies for codex-cli.
#
# By default the script copies the sandbox binaries that are required at
# runtime. When called with the --full-native flag, it additionally
# bundles pre-built Rust CLI binaries so that the resulting npm package can run
# the native implementation when users set CODEX_RUST=1.
#
# Usage
# install_native_deps.sh [--full-native] [--workflow-url URL] [CODEX_CLI_ROOT]
#
# The optional CODEX_CLI_ROOT is the path that contains package.json. Omitting
# it installs the binaries into the repository's own bin/ folder to support
# local development.
set -euo pipefail
# ------------------
# Parse arguments
# ------------------
CODEX_CLI_ROOT=""
INCLUDE_RUST=0
# Until we start publishing stable GitHub releases, we have to grab the binaries
# from the GitHub Action that created them. Update the URL below to point to the
# appropriate workflow run:
WORKFLOW_URL="https://github.com/openai/codex/actions/runs/15981617627"
while [[ $# -gt 0 ]]; do
case "$1" in
--full-native)
INCLUDE_RUST=1
;;
--workflow-url)
shift || { echo "--workflow-url requires an argument"; exit 1; }
if [ -n "$1" ]; then
WORKFLOW_URL="$1"
fi
;;
*)
if [[ -z "$CODEX_CLI_ROOT" ]]; then
CODEX_CLI_ROOT="$1"
else
echo "Unexpected argument: $1" >&2
exit 1
fi
;;
esac
shift
done
# ----------------------------------------------------------------------------
# Determine where the binaries should be installed.
# ----------------------------------------------------------------------------
if [ -n "$CODEX_CLI_ROOT" ]; then
# The caller supplied a release root directory.
BIN_DIR="$CODEX_CLI_ROOT/bin"
else
# No argument; fall back to the repo's own bin directory.
# Resolve the path of this script, then walk up to the repo root.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CODEX_CLI_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
BIN_DIR="$CODEX_CLI_ROOT/bin"
fi
# Make sure the destination directory exists.
mkdir -p "$BIN_DIR"
# ----------------------------------------------------------------------------
# Download and decompress the artifacts from the GitHub Actions workflow.
# ----------------------------------------------------------------------------
WORKFLOW_ID="${WORKFLOW_URL##*/}"
ARTIFACTS_DIR="$(mktemp -d)"
trap 'rm -rf "$ARTIFACTS_DIR"' EXIT
# NB: The GitHub CLI `gh` must be installed and authenticated.
gh run download --dir "$ARTIFACTS_DIR" --repo openai/codex "$WORKFLOW_ID"
# Decompress the artifacts for Linux sandboxing.
zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/codex-linux-sandbox-x86_64-unknown-linux-musl.zst" \
-o "$BIN_DIR/codex-linux-sandbox-x64"
zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/codex-linux-sandbox-aarch64-unknown-linux-musl.zst" \
-o "$BIN_DIR/codex-linux-sandbox-arm64"
if [[ "$INCLUDE_RUST" -eq 1 ]]; then
# x64 Linux
zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/codex-x86_64-unknown-linux-musl.zst" \
-o "$BIN_DIR/codex-x86_64-unknown-linux-musl"
# ARM64 Linux
zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/codex-aarch64-unknown-linux-musl.zst" \
-o "$BIN_DIR/codex-aarch64-unknown-linux-musl"
# x64 macOS
zstd -d "$ARTIFACTS_DIR/x86_64-apple-darwin/codex-x86_64-apple-darwin.zst" \
-o "$BIN_DIR/codex-x86_64-apple-darwin"
# ARM64 macOS
zstd -d "$ARTIFACTS_DIR/aarch64-apple-darwin/codex-aarch64-apple-darwin.zst" \
-o "$BIN_DIR/codex-aarch64-apple-darwin"
fi
echo "Installed native dependencies into $BIN_DIR"
@@ -10,8 +10,6 @@ set -e
# Default the work directory to WORKSPACE_ROOT_DIR if not provided.
WORK_DIR="${WORKSPACE_ROOT_DIR:-$(pwd)}"
# Default allowed domains - can be overridden with OPENAI_ALLOWED_DOMAINS env var
OPENAI_ALLOWED_DOMAINS="${OPENAI_ALLOWED_DOMAINS:-api.openai.com}"
# Parse optional flag.
if [ "$1" = "--work_dir" ]; then
@@ -47,12 +45,6 @@ if [ -z "$WORK_DIR" ]; then
exit 1
fi
# Verify that OPENAI_ALLOWED_DOMAINS is not empty
if [ -z "$OPENAI_ALLOWED_DOMAINS" ]; then
echo "Error: OPENAI_ALLOWED_DOMAINS is empty."
exit 1
fi
# Kill any existing container for the working directory using cleanup(), centralizing removal logic.
cleanup
@@ -65,25 +57,8 @@ docker run --name "$CONTAINER_NAME" -d \
codex \
sleep infinity
# Write the allowed domains to a file in the container
docker exec --user root "$CONTAINER_NAME" bash -c "mkdir -p /etc/codex"
for domain in $OPENAI_ALLOWED_DOMAINS; do
# Validate domain format to prevent injection
if [[ ! "$domain" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then
echo "Error: Invalid domain format: $domain"
exit 1
fi
echo "$domain" | docker exec --user root -i "$CONTAINER_NAME" bash -c "cat >> /etc/codex/allowed_domains.txt"
done
# Set proper permissions on the domains file
docker exec --user root "$CONTAINER_NAME" bash -c "chmod 444 /etc/codex/allowed_domains.txt && chown root:root /etc/codex/allowed_domains.txt"
# Initialize the firewall inside the container as root user
docker exec --user root "$CONTAINER_NAME" bash -c "/usr/local/bin/init_firewall.sh"
# Remove the firewall script after running it
docker exec --user root "$CONTAINER_NAME" bash -c "rm -f /usr/local/bin/init_firewall.sh"
# Initialize the firewall inside the container with root privileges.
docker exec --user root "$CONTAINER_NAME" /usr/local/bin/init_firewall.sh
# Execute the provided command in the container, ensuring it runs in the work directory.
# We use a parameterized bash command to safely handle the command and directory.

View File

@@ -1,154 +0,0 @@
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# stage_release.sh
# -----------------------------------------------------------------------------
# Stages an npm release for @openai/codex.
#
# Usage:
#
# --tmp <dir> : Use <dir> instead of a freshly created temp directory.
# --native : Bundle the pre-built Rust CLI binaries for Linux alongside
# the JavaScript implementation (a so-called "fat" package).
# -h|--help : Print usage.
#
# When --native is supplied we copy the linux-sandbox binaries (as before) and
# additionally fetch / unpack the two Rust targets that we currently support:
# - x86_64-unknown-linux-musl
# - aarch64-unknown-linux-musl
#
# NOTE: This script is intended to be run from the repository root via
# `pnpm --filter codex-cli stage-release ...` or inside codex-cli with the
# helper script entry in package.json (`pnpm stage-release ...`).
# -----------------------------------------------------------------------------
set -euo pipefail
# Helper - usage / flag parsing
usage() {
cat <<EOF
Usage: $(basename "$0") [--tmp DIR] [--native] [--version VERSION]
Options
--tmp DIR Use DIR to stage the release (defaults to a fresh mktemp dir)
--native Bundle Rust binaries for Linux (fat package)
--version Specify the version to release (defaults to a timestamp-based version)
-h, --help Show this help
Legacy positional argument: the first non-flag argument is still interpreted
as the temporary directory (for backwards compatibility) but is deprecated.
EOF
exit "${1:-0}"
}
TMPDIR=""
INCLUDE_NATIVE=0
# Default to a timestamp-based version (keep same scheme as before)
VERSION="$(printf '0.1.%d' "$(date +%y%m%d%H%M)")"
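# For example, staging at 10:12 on 2025-04-29 would yield 0.1.2504291012.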
WORKFLOW_URL=""
# Manual flag parser - Bash getopts does not handle GNU long options well.
while [[ $# -gt 0 ]]; do
case "$1" in
--tmp)
shift || { echo "--tmp requires an argument"; usage 1; }
TMPDIR="$1"
;;
--tmp=*)
TMPDIR="${1#*=}"
;;
--native)
INCLUDE_NATIVE=1
;;
--version)
shift || { echo "--version requires an argument"; usage 1; }
VERSION="$1"
;;
--workflow-url)
shift || { echo "--workflow-url requires an argument"; exit 1; }
WORKFLOW_URL="$1"
;;
-h|--help)
usage 0
;;
--*)
echo "Unknown option: $1" >&2
usage 1
;;
*)
echo "Unexpected extra argument: $1" >&2
usage 1
;;
esac
shift
done
# If no directory was specified, create a fresh temporary one.
if [[ -z "$TMPDIR" ]]; then
TMPDIR="$(mktemp -d)"
fi
# Ensure the directory exists, then resolve to an absolute path.
mkdir -p "$TMPDIR"
TMPDIR="$(cd "$TMPDIR" && pwd)"
# Main build logic
echo "Staging release in $TMPDIR"
# The script lives in codex-cli/scripts/ - change into codex-cli root so that
# relative paths keep working.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CODEX_CLI_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
pushd "$CODEX_CLI_ROOT" >/dev/null
# 1. Build the JS artifacts ---------------------------------------------------
pnpm install
pnpm build
# Paths inside the staged package
mkdir -p "$TMPDIR/bin"
cp -r bin/codex.js "$TMPDIR/bin/codex.js"
cp -r dist "$TMPDIR/dist"
cp -r src "$TMPDIR/src" # keep source for TS sourcemaps
cp ../README.md "$TMPDIR" || true # README is one level up - ignore if missing
# Modify package.json - bump version and optionally add the native directory to
# the files array so that the binaries are published to npm.
jq --arg version "$VERSION" \
'.version = $version' \
package.json > "$TMPDIR/package.json"
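# jq rewrites only the "version" field; the rest of package.json passes
# through unchanged into the staged copy.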
# 2. Native runtime deps (sandbox plus optional Rust binaries)
if [[ "$INCLUDE_NATIVE" -eq 1 ]]; then
./scripts/install_native_deps.sh --full-native --workflow-url "$WORKFLOW_URL" "$TMPDIR"
touch "${TMPDIR}/bin/use-native"
else
./scripts/install_native_deps.sh "$TMPDIR"
fi
popd >/dev/null
echo "Staged version $VERSION for release in $TMPDIR"
if [[ "$INCLUDE_NATIVE" -eq 1 ]]; then
echo "Verify the CLI:"
echo " node ${TMPDIR}/bin/codex.js --version"
echo " node ${TMPDIR}/bin/codex.js --help"
else
echo "Test Node:"
echo " node ${TMPDIR}/bin/codex.js --help"
fi
# Print final hint for convenience
if [[ "$INCLUDE_NATIVE" -eq 1 ]]; then
echo "Next: cd \"$TMPDIR\" && npm publish --tag native"
else
echo "Next: cd \"$TMPDIR\" && npm publish"
fi

View File

@@ -1,62 +0,0 @@
#!/usr/bin/env python3
import json
import subprocess
import sys
import argparse
from pathlib import Path
def main() -> int:
parser = argparse.ArgumentParser(
description="""Stage a release for the npm module.
Run this after the GitHub Release has been created and use
`--release-version` to specify the version to release.
"""
)
parser.add_argument(
"--release-version", required=True, help="Version to release, e.g., 0.3.0"
)
args = parser.parse_args()
version = args.release_version
gh_run = subprocess.run(
[
"gh",
"run",
"list",
"--branch",
f"rust-v{version}",
"--json",
"workflowName,url,headSha",
"--jq",
'first(.[] | select(.workflowName == "rust-release"))',
],
stdout=subprocess.PIPE,
check=True,
)
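# The --jq filter selects the first run on branch rust-v{version} whose
# workflowName is "rust-release"; its headSha and url drive the steps below.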
gh_run.check_returncode()
workflow = json.loads(gh_run.stdout)
sha = workflow["headSha"]
print(f"should `git checkout {sha}`")
current_dir = Path(__file__).parent.resolve()
stage_release = subprocess.run(
[
current_dir / "stage_release.sh",
"--version",
version,
"--workflow-url",
workflow["url"],
"--native",
]
)
stage_release.check_returncode()
return 0
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,13 +1,12 @@
import type { ApprovalPolicy } from "./approvals";
import type { AppConfig } from "./utils/config";
import type { TerminalChatSession } from "./utils/session.js";
import type { ResponseItem } from "openai/resources/responses/responses";
import TerminalChat from "./components/chat/terminal-chat";
import TerminalChatPastRollout from "./components/chat/terminal-chat-past-rollout";
import { checkInGit } from "./utils/check-in-git";
import { CLI_VERSION, type TerminalChatSession } from "./utils/session.js";
import { onExit } from "./utils/terminal";
import { CLI_VERSION } from "./version";
import { ConfirmInput } from "@inkjs/ui";
import { Box, Text, useApp, useStdin } from "ink";
import React, { useMemo, useState } from "react";
@@ -50,7 +49,6 @@ export default function App({
<TerminalChatPastRollout
session={rollout.session}
items={rollout.items}
fileOpener={config.fileOpener}
/>
);
}

View File

@@ -281,14 +281,12 @@ export function resolvePathAgainstWorkdir(
candidatePath: string,
workdir: string | undefined,
): string {
// Normalize candidatePath to prevent path traversal attacks
const normalizedCandidatePath = path.normalize(candidatePath);
if (path.isAbsolute(normalizedCandidatePath)) {
return normalizedCandidatePath;
if (path.isAbsolute(candidatePath)) {
return candidatePath;
} else if (workdir != null) {
return path.resolve(workdir, normalizedCandidatePath);
return path.resolve(workdir, candidatePath);
} else {
return path.resolve(normalizedCandidatePath);
return path.resolve(candidatePath);
}
}
@@ -365,31 +363,11 @@ export function isSafeCommand(
reason: "View file contents",
group: "Reading files",
};
case "nl":
return {
reason: "View file with line numbers",
group: "Reading files",
};
case "rg": {
// Certain ripgrep options execute external commands or invoke other
// processes, so we must reject them.
const isUnsafe = command.some(
(arg: string) =>
UNSAFE_OPTIONS_FOR_RIPGREP_WITHOUT_ARGS.has(arg) ||
[...UNSAFE_OPTIONS_FOR_RIPGREP_WITH_ARGS].some(
(opt) => arg === opt || arg.startsWith(`${opt}=`),
),
);
if (isUnsafe) {
break;
}
case "rg":
return {
reason: "Ripgrep search",
group: "Searching",
};
}
case "find": {
// Certain options to `find` allow executing arbitrary processes, so we
// cannot auto-approve them.
@@ -468,15 +446,11 @@ export function isSafeCommand(
}
break;
case "sed":
// We allow two types of sed invocations:
// 1. `sed -n 1,200p FILE`
// 2. `sed -n 1,200p` because the file is passed via stdin, e.g.,
// `nl -ba README.md | sed -n '1,200p'`
if (
cmd1 === "-n" &&
isValidSedNArg(cmd2) &&
(command.length === 3 ||
(typeof cmd3 === "string" && command.length === 4))
typeof cmd3 === "string" &&
command.length === 4
) {
return {
reason: "Sed print subset",
@@ -510,22 +484,6 @@ const UNSAFE_OPTIONS_FOR_FIND_COMMAND: ReadonlySet<string> = new Set([
"-fprintf",
]);
// Ripgrep options that are considered unsafe because they may execute
// arbitrary commands or spawn auxiliary processes.
const UNSAFE_OPTIONS_FOR_RIPGREP_WITH_ARGS: ReadonlySet<string> = new Set([
// Executes an arbitrary command for each matching file.
"--pre",
// Allows custom hostname command which could leak environment details.
"--hostname-bin",
]);
const UNSAFE_OPTIONS_FOR_RIPGREP_WITHOUT_ARGS: ReadonlySet<string> = new Set([
// Enables searching inside archives, which triggers external decompression
// utilities; reject out of an abundance of caution.
"--search-zip",
"-z",
]);
// ---------------- Helper utilities for complex shell expressions -----------------
// A conservative allow-list of bash operators that do not, on their own, cause

View File

@@ -1,19 +1,6 @@
#!/usr/bin/env node
import "dotenv/config";
// Exit early if on an older version of Node.js (< 22)
const major = process.versions.node.split(".").map(Number)[0]!;
if (major < 22) {
// eslint-disable-next-line no-console
console.error(
"\n" +
"Codex CLI requires Node.js version 22 or newer.\n" +
`You are running Node.js v${process.versions.node}.\n` +
"Please upgrade Node.js: https://nodejs.org/en/download/\n",
);
process.exit(1);
}
// Hack to suppress deprecation warnings (punycode)
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(process as any).noDeprecation = true;
@@ -27,32 +14,26 @@ import type { ReasoningEffort } from "openai/resources.mjs";
import App from "./app";
import { runSinglePass } from "./cli-singlepass";
import SessionsOverlay from "./components/sessions-overlay.js";
import { AgentLoop } from "./utils/agent/agent-loop";
import { ReviewDecision } from "./utils/agent/review";
import { AutoApprovalMode } from "./utils/auto-approval-mode";
import { checkForUpdates } from "./utils/check-updates";
import {
getApiKey,
loadConfig,
PRETTY_PRINT,
INSTRUCTIONS_FILEPATH,
} from "./utils/config";
import {
getApiKey as fetchApiKey,
maybeRedeemCredits,
} from "./utils/get-api-key";
import { createInputItem } from "./utils/input-utils";
import { initLogger } from "./utils/logger/log";
import { isModelSupportedForResponses } from "./utils/model-utils.js";
import { parseToolCall } from "./utils/parsers";
import { providers } from "./utils/providers";
import { onExit, setInkRenderer } from "./utils/terminal";
import chalk from "chalk";
import { spawnSync } from "child_process";
import fs from "fs";
import { render } from "ink";
import meow from "meow";
import os from "os";
import path from "path";
import React from "react";
@@ -75,13 +56,10 @@ const cli = meow(
--version Print version and exit
-h, --help Show usage and exit
-m, --model <model> Model to use for completions (default: codex-mini-latest)
-m, --model <model> Model to use for completions (default: o4-mini)
-p, --provider <provider> Provider to use for completions (default: openai)
-i, --image <path> Path(s) to image files to include as input
-v, --view <rollout> Inspect a previously saved rollout instead of starting a session
--history Browse previous sessions
--login Start a new sign in flow
--free Retry redeeming free credits
-q, --quiet Non-interactive mode that only prints the assistant's final output
-c, --config Open the instructions file in your editor
-w, --writable-root <path> Writable folder for sandbox in full-auto mode (can be specified multiple times)
@@ -90,7 +68,7 @@ const cli = meow(
--auto-edit Automatically approve file edits; still prompt for commands
--full-auto Automatically approve edits and commands when executed in the sandbox
--no-project-doc Do not automatically include the repository's 'AGENTS.md'
--no-project-doc Do not automatically include the repository's 'codex.md'
--project-doc <file> Include an additional markdown file at <file> as context
--full-stdout Do not truncate stdout/stderr from command outputs
--notify Enable desktop notifications for responses
@@ -101,8 +79,6 @@ const cli = meow(
--flex-mode Use "flex-mode" processing mode for the request (only supported
with models o3 and o4-mini)
--reasoning <effort> Set the reasoning effort level (low, medium, high) (default: high)
Dangerous options
--dangerously-auto-approve-everything
Skip all confirmation prompts and execute commands without
@@ -126,9 +102,6 @@ const cli = meow(
help: { type: "boolean", aliases: ["h"] },
version: { type: "boolean", description: "Print version and exit" },
view: { type: "string" },
history: { type: "boolean", description: "Browse previous sessions" },
login: { type: "boolean", description: "Force a new sign in flow" },
free: { type: "boolean", description: "Retry redeeming free credits" },
model: { type: "string", aliases: ["m"] },
provider: { type: "string", aliases: ["p"] },
image: { type: "string", isMultiple: true, aliases: ["i"] },
@@ -171,7 +144,7 @@ const cli = meow(
},
noProjectDoc: {
type: "boolean",
description: "Disable automatic inclusion of project-level AGENTS.md",
description: "Disable automatic inclusion of project-level codex.md",
},
projectDoc: {
type: "string",
@@ -286,100 +259,11 @@ let config = loadConfig(undefined, undefined, {
isFullContext: fullContextMode,
});
// `prompt` can be updated later when the user resumes a previous session
// via the `--history` flag. Therefore it must be declared with `let` rather
// than `const`.
let prompt = cli.input[0];
const prompt = cli.input[0];
const model = cli.flags.model ?? config.model;
const imagePaths = cli.flags.image;
const provider = cli.flags.provider ?? config.provider ?? "openai";
const client = {
issuer: "https://auth.openai.com",
client_id: "app_EMoamEEZ73f0CkXaXp7hrann",
};
let apiKey = "";
let savedTokens:
| {
id_token?: string;
access_token?: string;
refresh_token: string;
}
| undefined;
// Try to load existing auth file if present
try {
const home = os.homedir();
const authDir = path.join(home, ".codex");
const authFile = path.join(authDir, "auth.json");
if (fs.existsSync(authFile)) {
const data = JSON.parse(fs.readFileSync(authFile, "utf-8"));
savedTokens = data.tokens;
const lastRefreshTime = data.last_refresh
? new Date(data.last_refresh).getTime()
: 0;
const expired = Date.now() - lastRefreshTime > 28 * 24 * 60 * 60 * 1000;
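// 28 days in milliseconds - cached keys older than this are treated as stale.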
if (data.OPENAI_API_KEY && !expired) {
apiKey = data.OPENAI_API_KEY;
}
}
} catch {
// ignore errors
}
// Get provider-specific API key if not OpenAI
if (provider.toLowerCase() !== "openai") {
const providerInfo = providers[provider.toLowerCase()];
if (providerInfo) {
const providerApiKey = process.env[providerInfo.envKey];
if (providerApiKey) {
apiKey = providerApiKey;
}
}
}
// Only proceed with OpenAI auth flow if:
// 1. Provider is OpenAI and no API key is set, or
// 2. Login flag is explicitly set
if (provider.toLowerCase() === "openai" && !apiKey) {
if (cli.flags.login) {
apiKey = await fetchApiKey(client.issuer, client.client_id);
try {
const home = os.homedir();
const authDir = path.join(home, ".codex");
const authFile = path.join(authDir, "auth.json");
if (fs.existsSync(authFile)) {
const data = JSON.parse(fs.readFileSync(authFile, "utf-8"));
savedTokens = data.tokens;
}
} catch {
/* ignore */
}
} else {
apiKey = await fetchApiKey(client.issuer, client.client_id);
}
}
// Ensure the API key is available as an environment variable for legacy code
process.env["OPENAI_API_KEY"] = apiKey;
// Only attempt credit redemption for OpenAI provider
if (cli.flags.free && provider.toLowerCase() === "openai") {
// eslint-disable-next-line no-console
console.log(`${chalk.bold("codex --free")} attempting to redeem credits...`);
if (!savedTokens?.refresh_token) {
apiKey = await fetchApiKey(client.issuer, client.client_id, true);
// fetchApiKey performs credit redemption at the end of the flow
} else {
await maybeRedeemCredits(
client.issuer,
client.client_id,
savedTokens.refresh_token,
savedTokens.id_token,
);
}
}
const apiKey = getApiKey(provider);
// Set of providers that don't require API keys
const NO_API_KEY_REQUIRED = new Set(["ollama"]);
@@ -398,18 +282,13 @@ if (!apiKey && !NO_API_KEY_REQUIRED.has(provider.toLowerCase())) {
? `You can create a key here: ${chalk.bold(
chalk.underline("https://platform.openai.com/account/api-keys"),
)}\n`
: provider.toLowerCase() === "azure"
: provider.toLowerCase() === "gemini"
? `You can create a ${chalk.bold(
`${provider.toUpperCase()}_OPENAI_API_KEY`,
)} ` +
`in Azure AI Foundry portal at ${chalk.bold(chalk.underline("https://ai.azure.com"))}.\n`
: provider.toLowerCase() === "gemini"
? `You can create a ${chalk.bold(
`${provider.toUpperCase()}_API_KEY`,
)} ` + `in the ${chalk.bold(`Google AI Studio`)}.\n`
: `You can create a ${chalk.bold(
`${provider.toUpperCase()}_API_KEY`,
)} ` + `in the ${chalk.bold(`${provider}`)} dashboard.\n`
`${provider.toUpperCase()}_API_KEY`,
)} ` + `in the ${chalk.bold(`Google AI Studio`)}.\n`
: `You can create a ${chalk.bold(
`${provider.toUpperCase()}_API_KEY`,
)} ` + `in the ${chalk.bold(`${provider}`)} dashboard.\n`
}`,
);
process.exit(1);
@@ -427,8 +306,8 @@ config = {
model: model ?? config.model,
notify: Boolean(cli.flags.notify),
reasoningEffort:
(cli.flags.reasoning as ReasoningEffort | undefined) ?? "medium",
flexMode: cli.flags.flexMode || (config.flexMode ?? false),
(cli.flags.reasoning as ReasoningEffort | undefined) ?? "high",
flexMode: Boolean(cli.flags.flexMode),
provider,
disableResponseStorage,
};
@@ -442,19 +321,15 @@ try {
}
// For --flex-mode, validate and exit if incorrect.
if (config.flexMode) {
if (cli.flags.flexMode) {
const allowedFlexModels = new Set(["o3", "o4-mini"]);
if (!allowedFlexModels.has(config.model)) {
if (cli.flags.flexMode) {
// eslint-disable-next-line no-console
console.error(
`The --flex-mode option is only supported when using the 'o3' or 'o4-mini' models. ` +
`Current model: '${config.model}'.`,
);
process.exit(1);
} else {
config.flexMode = false;
}
// eslint-disable-next-line no-console
console.error(
`The --flex-mode option is only supported when using the 'o3' or 'o4-mini' models. ` +
`Current model: '${config.model}'.`,
);
process.exit(1);
}
}
@@ -474,46 +349,6 @@ if (
let rollout: AppRollout | undefined;
// For --history, show session selector and optionally update prompt or rollout.
if (cli.flags.history) {
const result: { path: string; mode: "view" | "resume" } | null =
await new Promise((resolve) => {
const instance = render(
React.createElement(SessionsOverlay, {
onView: (p: string) => {
instance.unmount();
resolve({ path: p, mode: "view" });
},
onResume: (p: string) => {
instance.unmount();
resolve({ path: p, mode: "resume" });
},
onExit: () => {
instance.unmount();
resolve(null);
},
}),
);
});
if (!result) {
process.exit(0);
}
if (result.mode === "view") {
try {
const content = fs.readFileSync(result.path, "utf-8");
rollout = JSON.parse(content) as AppRollout;
} catch (error) {
// eslint-disable-next-line no-console
console.error("Error reading session file:", error);
process.exit(1);
}
} else {
prompt = `Resume this session: ${result.path}`;
}
}
// For --view, optionally load an existing rollout from disk, display it and exit.
if (cli.flags.view) {
const viewPath = cli.flags.view;

View File

@@ -1,7 +1,6 @@
import type { TerminalHeaderProps } from "./terminal-header.js";
import type { GroupedResponseItem } from "./use-message-grouping.js";
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
import type { FileOpenerScheme } from "src/utils/config.js";
import TerminalChatResponseItem from "./terminal-chat-response-item.js";
import TerminalHeader from "./terminal-header.js";
@@ -20,13 +19,11 @@ type MessageHistoryProps = {
confirmationPrompt: React.ReactNode;
loading: boolean;
headerProps: TerminalHeaderProps;
fileOpener: FileOpenerScheme | undefined;
};
const MessageHistory: React.FC<MessageHistoryProps> = ({
batch,
headerProps,
fileOpener,
}) => {
const messages = batch.map(({ item }) => item!);
@@ -71,10 +68,7 @@ const MessageHistory: React.FC<MessageHistoryProps> = ({
message.type === "message" && message.role === "user" ? 0 : 1
}
>
<TerminalChatResponseItem
item={message}
fileOpener={fileOpener}
/>
<TerminalChatResponseItem item={message} />
</Box>
);
}}

View File

@@ -137,9 +137,6 @@ export interface MultilineTextEditorProps {
// Called when the internal text buffer updates.
readonly onChange?: (text: string) => void;
// Optional initial cursor position (character offset)
readonly initialCursorOffset?: number;
}
// Expose a minimal imperative API so parent components (e.g. TerminalChatInput)
@@ -172,7 +169,6 @@ const MultilineTextEditorInner = (
onSubmit,
focus = true,
onChange,
initialCursorOffset,
}: MultilineTextEditorProps,
ref: React.Ref<MultilineTextEditorHandle | null>,
): React.ReactElement => {
@@ -180,7 +176,7 @@ const MultilineTextEditorInner = (
// Editor State
// ---------------------------------------------------------------------------
const buffer = useRef(new TextBuffer(initialText, initialCursorOffset));
const buffer = useRef(new TextBuffer(initialText));
const [version, setVersion] = useState(0);
// Keep track of the current terminal size so that the editor grows/shrinks

View File

@@ -1,6 +1,5 @@
import type { MultilineTextEditorHandle } from "./multiline-editor";
import type { ReviewDecision } from "../../utils/agent/review.js";
import type { FileSystemSuggestion } from "../../utils/file-system-suggestions.js";
import type { HistoryEntry } from "../../utils/storage/command-history.js";
import type {
ResponseInputItem,
@@ -12,7 +11,6 @@ import { TerminalChatCommandReview } from "./terminal-chat-command-review.js";
import TextCompletions from "./terminal-chat-completions.js";
import { loadConfig } from "../../utils/config.js";
import { getFileSystemSuggestions } from "../../utils/file-system-suggestions.js";
import { expandFileTags } from "../../utils/file-tag-utils";
import { createInputItem } from "../../utils/input-utils.js";
import { log } from "../../utils/logger/log.js";
import { setSessionId } from "../../utils/session.js";
@@ -54,7 +52,6 @@ export default function TerminalChatInput({
openApprovalOverlay,
openHelpOverlay,
openDiffOverlay,
openSessionsOverlay,
onCompact,
interruptAgent,
active,
@@ -78,7 +75,6 @@ export default function TerminalChatInput({
openApprovalOverlay: () => void;
openHelpOverlay: () => void;
openDiffOverlay: () => void;
openSessionsOverlay: () => void;
onCompact: () => void;
interruptAgent: () => void;
active: boolean;
@@ -96,120 +92,16 @@ export default function TerminalChatInput({
const [historyIndex, setHistoryIndex] = useState<number | null>(null);
const [draftInput, setDraftInput] = useState<string>("");
const [skipNextSubmit, setSkipNextSubmit] = useState<boolean>(false);
const [fsSuggestions, setFsSuggestions] = useState<
Array<FileSystemSuggestion>
>([]);
const [fsSuggestions, setFsSuggestions] = useState<Array<string>>([]);
const [selectedCompletion, setSelectedCompletion] = useState<number>(-1);
// Multiline text editor key to force remount after submission
const [editorState, setEditorState] = useState<{
key: number;
initialCursorOffset?: number;
}>({ key: 0 });
const [editorKey, setEditorKey] = useState(0);
// Imperative handle from the multiline editor so we can query caret position
const editorRef = useRef<MultilineTextEditorHandle | null>(null);
// Track the caret row across keystrokes
const prevCursorRow = useRef<number | null>(null);
const prevCursorWasAtLastRow = useRef<boolean>(false);
// --- Helper for updating input, remounting editor, and moving cursor to end ---
const applyFsSuggestion = useCallback((newInputText: string) => {
setInput(newInputText);
setEditorState((s) => ({
key: s.key + 1,
initialCursorOffset: newInputText.length,
}));
}, []);
// --- Helper for updating file system suggestions ---
function updateFsSuggestions(
txt: string,
alwaysUpdateSelection: boolean = false,
) {
// Clear file system completions if a space is typed
if (txt.endsWith(" ")) {
setFsSuggestions([]);
setSelectedCompletion(-1);
} else {
// Determine the current token (last whitespace-separated word)
const words = txt.trim().split(/\s+/);
const lastWord = words[words.length - 1] ?? "";
const shouldUpdateSelection =
lastWord.startsWith("@") || alwaysUpdateSelection;
// Strip optional leading '@' for the path prefix
let pathPrefix: string;
if (lastWord.startsWith("@")) {
pathPrefix = lastWord.slice(1);
// If only '@' is typed, list everything in the current directory
pathPrefix = pathPrefix.length === 0 ? "./" : pathPrefix;
} else {
pathPrefix = lastWord;
}
if (shouldUpdateSelection) {
const completions = getFileSystemSuggestions(pathPrefix);
setFsSuggestions(completions);
if (completions.length > 0) {
setSelectedCompletion((prev) =>
prev < 0 || prev >= completions.length ? 0 : prev,
);
} else {
setSelectedCompletion(-1);
}
} else if (fsSuggestions.length > 0) {
// Token cleared → clear menu
setFsSuggestions([]);
setSelectedCompletion(-1);
}
}
}
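// Example (hypothetical input): typing "open @src/ut" keeps the menu keyed on
// the token "src/ut"; typing a trailing space clears the suggestions again.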
/**
* Result of replacing text with a file system suggestion
*/
interface ReplacementResult {
/** The new text with the suggestion applied */
text: string;
/** The selected suggestion if a replacement was made */
suggestion: FileSystemSuggestion | null;
/** Whether a replacement was actually made */
wasReplaced: boolean;
}
// --- Helper for replacing input with file system suggestion ---
function getFileSystemSuggestion(
txt: string,
requireAtPrefix: boolean = false,
): ReplacementResult {
if (fsSuggestions.length === 0 || selectedCompletion < 0) {
return { text: txt, suggestion: null, wasReplaced: false };
}
const words = txt.trim().split(/\s+/);
const lastWord = words[words.length - 1] ?? "";
// Check if @ prefix is required and the last word doesn't have it
if (requireAtPrefix && !lastWord.startsWith("@")) {
return { text: txt, suggestion: null, wasReplaced: false };
}
const selected = fsSuggestions[selectedCompletion];
if (!selected) {
return { text: txt, suggestion: null, wasReplaced: false };
}
const replacement = lastWord.startsWith("@")
? `@${selected.path}`
: selected.path;
words[words.length - 1] = replacement;
return {
text: words.join(" "),
suggestion: selected,
wasReplaced: true,
};
}
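// Example (hypothetical suggestion): with input "cat @READ" and
// { path: "README.md", isDirectory: false } selected, this returns
// { text: "cat @README.md", suggestion, wasReplaced: true }.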
// Load command history on component mount
useEffect(() => {
async function loadHistory() {
@@ -282,9 +174,6 @@ export default function TerminalChatInput({
case "/history":
openOverlay();
break;
case "/sessions":
openSessionsOverlay();
break;
case "/help":
openHelpOverlay();
break;
@@ -334,12 +223,21 @@ export default function TerminalChatInput({
}
if (_key.tab && selectedCompletion >= 0) {
const { text: newText, wasReplaced } =
getFileSystemSuggestion(input);
const words = input.trim().split(/\s+/);
const selected = fsSuggestions[selectedCompletion];
if (words.length > 0 && selected) {
words[words.length - 1] = selected;
const newText = words.join(" ");
setInput(newText);
// Force remount of the editor with the new text
setEditorKey((k) => k + 1);
// We need to move the cursor to the end after editor remounts
setTimeout(() => {
editorRef.current?.moveCursorToEnd?.();
}, 0);
// Only proceed if the text was actually changed
if (wasReplaced) {
applyFsSuggestion(newText);
setFsSuggestions([]);
setSelectedCompletion(-1);
}
@@ -379,7 +277,7 @@ export default function TerminalChatInput({
setInput(history[newIndex]?.command ?? "");
// Re-mount the editor so it picks up the new initialText
setEditorState((s) => ({ key: s.key + 1 }));
setEditorKey((k) => k + 1);
return; // handled
}
@@ -398,23 +296,28 @@ export default function TerminalChatInput({
if (newIndex >= history.length) {
setHistoryIndex(null);
setInput(draftInput);
setEditorState((s) => ({ key: s.key + 1 }));
setEditorKey((k) => k + 1);
} else {
setHistoryIndex(newIndex);
setInput(history[newIndex]?.command ?? "");
setEditorState((s) => ({ key: s.key + 1 }));
setEditorKey((k) => k + 1);
}
return; // handled
}
// Otherwise let it propagate
}
// Defer filesystem suggestion logic to onSubmit if enter key is pressed
if (!_key.return) {
// Pressing tab should trigger the file system suggestions
const shouldUpdateSelection = _key.tab;
const targetInput = _key.delete ? input.slice(0, -1) : input + _input;
updateFsSuggestions(targetInput, shouldUpdateSelection);
if (_key.tab) {
const words = input.split(/\s+/);
const mostRecentWord = words[words.length - 1];
if (mostRecentWord === undefined || mostRecentWord === "") {
return;
}
const completions = getFileSystemSuggestions(mostRecentWord);
setFsSuggestions(completions);
if (completions.length > 0) {
setSelectedCompletion(0);
}
}
}
@@ -489,10 +392,6 @@ export default function TerminalChatInput({
setInput("");
openOverlay();
return;
} else if (inputValue === "/sessions") {
setInput("");
openSessionsOverlay();
return;
} else if (inputValue === "/help") {
setInput("");
openHelpOverlay();
@@ -593,7 +492,7 @@ export default function TerminalChatInput({
try {
const os = await import("node:os");
const { CLI_VERSION } = await import("../../version.js");
const { CLI_VERSION } = await import("../../utils/session.js");
const { buildBugReportUrl } = await import(
"../../utils/bug-report.js"
);
@@ -700,10 +599,7 @@ export default function TerminalChatInput({
);
text = text.trim();
// Expand @file tokens into XML blocks for the model
const expandedText = await expandFileTags(text);
const inputItem = await createInputItem(expandedText, images);
const inputItem = await createInputItem(text, images);
submitInput([inputItem]);
// Get config for history persistence.
@@ -737,7 +633,6 @@ export default function TerminalChatInput({
openModelOverlay,
openHelpOverlay,
openDiffOverlay,
openSessionsOverlay,
history,
onCompact,
skipNextSubmit,
@@ -778,30 +673,28 @@ export default function TerminalChatInput({
setHistoryIndex(null);
}
setInput(txt);
// Clear tab completions if a space is typed
if (txt.endsWith(" ")) {
setFsSuggestions([]);
setSelectedCompletion(-1);
} else if (fsSuggestions.length > 0) {
// Update file suggestions as user types
const words = txt.trim().split(/\s+/);
const mostRecentWord =
words.length > 0 ? words[words.length - 1] : "";
if (mostRecentWord !== undefined) {
setFsSuggestions(getFileSystemSuggestions(mostRecentWord));
}
}
}}
key={editorState.key}
initialCursorOffset={editorState.initialCursorOffset}
key={editorKey}
initialText={input}
height={6}
focus={active}
onSubmit={(txt) => {
// If final token is an @path, replace with filesystem suggestion if available
const {
text: replacedText,
suggestion,
wasReplaced,
} = getFileSystemSuggestion(txt, true);
// If we replaced @path token with a directory, don't submit
if (wasReplaced && suggestion?.isDirectory) {
applyFsSuggestion(replacedText);
// Update suggestions for the new directory
updateFsSuggestions(replacedText, true);
return;
}
onSubmit(replacedText);
setEditorState((s) => ({ key: s.key + 1 }));
onSubmit(txt);
setEditorKey((k) => k + 1);
setInput("");
setHistoryIndex(null);
setDraftInput("");
@@ -848,7 +741,7 @@ export default function TerminalChatInput({
</Text>
) : fsSuggestions.length > 0 ? (
<TextCompletions
completions={fsSuggestions.map((suggestion) => suggestion.path)}
completions={fsSuggestions}
selectedCompletion={selectedCompletion}
displayLimit={5}
/>

View File

@@ -1,6 +1,5 @@
import type { TerminalChatSession } from "../../utils/session.js";
import type { ResponseItem } from "openai/resources/responses/responses";
import type { FileOpenerScheme } from "src/utils/config.js";
import TerminalChatResponseItem from "./terminal-chat-response-item";
import { Box, Text } from "ink";
@@ -9,11 +8,9 @@ import React from "react";
export default function TerminalChatPastRollout({
session,
items,
fileOpener,
}: {
session: TerminalChatSession;
items: Array<ResponseItem>;
fileOpener: FileOpenerScheme | undefined;
}): React.ReactElement {
const { version, id: sessionId, model } = session;
return (
@@ -54,13 +51,9 @@ export default function TerminalChatPastRollout({
{React.useMemo(
() =>
items.map((item, key) => (
<TerminalChatResponseItem
key={key}
item={item}
fileOpener={fileOpener}
/>
<TerminalChatResponseItem key={key} item={item} />
)),
[items, fileOpener],
[items],
)}
</Box>
</Box>

View File

@@ -8,30 +8,23 @@ import type {
ResponseOutputMessage,
ResponseReasoningItem,
} from "openai/resources/responses/responses";
import type { FileOpenerScheme } from "src/utils/config";
import { useTerminalSize } from "../../hooks/use-terminal-size";
import { collapseXmlBlocks } from "../../utils/file-tag-utils";
import { parseToolCall, parseToolCallOutput } from "../../utils/parsers";
import chalk, { type ForegroundColorName } from "chalk";
import { Box, Text } from "ink";
import { parse, setOptions } from "marked";
import TerminalRenderer from "marked-terminal";
import path from "path";
import React, { useEffect, useMemo } from "react";
import { formatCommandForDisplay } from "src/format-command.js";
import supportsHyperlinks from "supports-hyperlinks";
export default function TerminalChatResponseItem({
item,
fullStdout = false,
setOverlayMode,
fileOpener,
}: {
item: ResponseItem;
fullStdout?: boolean;
setOverlayMode?: React.Dispatch<React.SetStateAction<OverlayModeType>>;
fileOpener: FileOpenerScheme | undefined;
}): React.ReactElement {
switch (item.type) {
case "message":
@@ -39,15 +32,10 @@ export default function TerminalChatResponseItem({
<TerminalChatResponseMessage
setOverlayMode={setOverlayMode}
message={item}
fileOpener={fileOpener}
/>
);
// @ts-expect-error new item types aren't in SDK yet
case "local_shell_call":
case "function_call":
return <TerminalChatResponseToolCall message={item} />;
// @ts-expect-error new item types aren't in SDK yet
case "local_shell_call_output":
case "function_call_output":
return (
<TerminalChatResponseToolCallOutput
@@ -61,9 +49,7 @@ export default function TerminalChatResponseItem({
// @ts-expect-error `reasoning` is not in the responses API yet
if (item.type === "reasoning") {
return (
<TerminalChatResponseReasoning message={item} fileOpener={fileOpener} />
);
return <TerminalChatResponseReasoning message={item} />;
}
return <TerminalChatResponseGenericMessage message={item} />;
@@ -91,10 +77,8 @@ export default function TerminalChatResponseItem({
export function TerminalChatResponseReasoning({
message,
fileOpener,
}: {
message: ResponseReasoningItem & { duration_ms?: number };
fileOpener: FileOpenerScheme | undefined;
}): React.ReactElement | null {
// Only render when there is a reasoning summary
if (!message.summary || message.summary.length === 0) {
@@ -107,7 +91,7 @@ export function TerminalChatResponseReasoning({
return (
<Box key={key} flexDirection="column">
{s.headline && <Text bold>{s.headline}</Text>}
<Markdown fileOpener={fileOpener}>{s.text}</Markdown>
<Markdown>{s.text}</Markdown>
</Box>
);
})}
@@ -123,11 +107,9 @@ const colorsByRole: Record<string, ForegroundColorName> = {
function TerminalChatResponseMessage({
message,
setOverlayMode,
fileOpener,
}: {
message: ResponseInputMessageItem | ResponseOutputMessage;
setOverlayMode?: React.Dispatch<React.SetStateAction<OverlayModeType>>;
fileOpener: FileOpenerScheme | undefined;
}) {
// auto switch to model mode if the system message contains "has been deprecated"
useEffect(() => {
@@ -146,7 +128,7 @@ function TerminalChatResponseMessage({
<Text bold color={colorsByRole[message.role] || "gray"}>
{message.role === "assistant" ? "codex" : message.role}
</Text>
<Markdown fileOpener={fileOpener}>
<Markdown>
{message.content
.map(
(c) =>
@@ -155,7 +137,7 @@ function TerminalChatResponseMessage({
: c.type === "refusal"
? c.refusal
: c.type === "input_text"
? collapseXmlBlocks(c.text)
? c.text
: c.type === "input_image"
? "<Image>"
: c.type === "input_file"
@@ -171,28 +153,16 @@ function TerminalChatResponseMessage({
function TerminalChatResponseToolCall({
message,
}: {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
message: ResponseFunctionToolCallItem | any;
message: ResponseFunctionToolCallItem;
}) {
let workdir: string | undefined;
let cmdReadableText: string | undefined;
if (message.type === "function_call") {
const details = parseToolCall(message);
workdir = details?.workdir;
cmdReadableText = details?.cmdReadableText;
} else if (message.type === "local_shell_call") {
const action = message.action;
workdir = action.working_directory;
cmdReadableText = formatCommandForDisplay(action.command);
}
const details = parseToolCall(message);
return (
<Box flexDirection="column" gap={1}>
<Text color="magentaBright" bold>
command
{workdir ? <Text dimColor>{` (${workdir})`}</Text> : ""}
</Text>
<Text>
<Text dimColor>$</Text> {cmdReadableText}
<Text dimColor>$</Text> {details?.cmdReadableText}
</Text>
</Box>
);
@@ -202,8 +172,7 @@ function TerminalChatResponseToolCallOutput({
message,
fullStdout,
}: {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
message: ResponseFunctionToolCallOutputItem | any;
message: ResponseFunctionToolCallOutputItem;
fullStdout: boolean;
}) {
const { output, metadata } = parseToolCallOutput(message.output);
@@ -270,91 +239,26 @@ export function TerminalChatResponseGenericMessage({
export type MarkdownProps = TerminalRendererOptions & {
children: string;
fileOpener: FileOpenerScheme | undefined;
/** Base path for resolving relative file citation paths. */
cwd?: string;
};
export function Markdown({
children,
fileOpener,
cwd,
...options
}: MarkdownProps): React.ReactElement {
const size = useTerminalSize();
const rendered = React.useMemo(() => {
const linkifiedMarkdown = rewriteFileCitations(children, fileOpener, cwd);
// Configure marked for this specific render
setOptions({
// @ts-expect-error missing parser, space props
renderer: new TerminalRenderer({ ...options, width: size.columns }),
});
const parsed = parse(linkifiedMarkdown, { async: false }).trim();
const parsed = parse(children, { async: false }).trim();
// Remove the truncation logic
return parsed;
// eslint-disable-next-line react-hooks/exhaustive-deps -- options is an object of primitives
}, [
children,
size.columns,
size.rows,
fileOpener,
supportsHyperlinks.stdout,
chalk.level,
]);
}, [children, size.columns, size.rows]);
return <Text>{rendered}</Text>;
}
/** Regex to match citations for source files (hence the `F:` prefix). */
const citationRegex = new RegExp(
[
// Opening marker
"【",
// Capture group 1: file ID or name (anything except '†')
"F:([^†]+)",
// Field separator
"†",
// Capture group 2: start line (digits)
"L(\\d+)",
// Non-capturing group for optional end line
"(?:",
// Capture group 3: end line (digits or '?')
"-L(\\d+|\\?)",
// End of optional group (may not be present)
")?",
// Closing marker
"】",
].join(""),
"g", // Global flag
);
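// A citation such as 【F:src/app.ts†L12-L34】 (hypothetical path) captures
// ("src/app.ts", "12", "34"); the end line may also be "?" or absent.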
function rewriteFileCitations(
markdown: string,
fileOpener: FileOpenerScheme | undefined,
cwd: string = process.cwd(),
): string {
citationRegex.lastIndex = 0;
return markdown.replace(citationRegex, (_match, file, start, _end) => {
const absPath = path.resolve(cwd, file);
if (!fileOpener) {
return `[${file}](${absPath})`;
}
const uri = `${fileOpener}://file${absPath}:${start}`;
const label = `${file}:${start}`;
// In practice, multiple citations for the same file, each with a different
// line number, are sometimes shown sequentially, so we:
// - include the line number in the label to disambiguate them
// - add a space after the link to make it easier to read
return `[${label}](${uri}) `;
});
}
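// Example (hypothetical opener/cwd): with fileOpener "vscode" and cwd "/repo",
// 【F:src/app.ts†L12】 becomes "[src/app.ts:12](vscode://file/repo/src/app.ts:12) ".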

View File

@@ -1,4 +1,3 @@
import type { AppRollout } from "../../app.js";
import type { ApplyPatchCommand, ApprovalPolicy } from "../../approvals.js";
import type { CommandConfirmation } from "../../utils/agent/agent-loop.js";
import type { AppConfig } from "../../utils/config.js";
@@ -6,7 +5,6 @@ import type { ColorName } from "chalk";
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
import TerminalChatInput from "./terminal-chat-input.js";
import TerminalChatPastRollout from "./terminal-chat-past-rollout.js";
import { TerminalChatToolCallCommand } from "./terminal-chat-tool-call-command.js";
import TerminalMessageHistory from "./terminal-message-history.js";
import { formatCommandForDisplay } from "../../format-command.js";
@@ -15,7 +13,7 @@ import { useTerminalSize } from "../../hooks/use-terminal-size.js";
import { AgentLoop } from "../../utils/agent/agent-loop.js";
import { ReviewDecision } from "../../utils/agent/review.js";
import { generateCompactSummary } from "../../utils/compact-summary.js";
import { saveConfig } from "../../utils/config.js";
import { getBaseUrl, getApiKey, saveConfig } from "../../utils/config.js";
import { extractAppliedPatches as _extractAppliedPatches } from "../../utils/extract-applied-patches.js";
import { getGitDiff } from "../../utils/get-diff.js";
import { createInputItem } from "../../utils/input-utils.js";
@@ -25,27 +23,24 @@ import {
calculateContextPercentRemaining,
uniqueById,
} from "../../utils/model-utils.js";
import { createOpenAIClient } from "../../utils/openai-client.js";
import { CLI_VERSION } from "../../utils/session.js";
import { shortCwd } from "../../utils/short-path.js";
import { saveRollout } from "../../utils/storage/save-rollout.js";
import { CLI_VERSION } from "../../version.js";
import ApprovalModeOverlay from "../approval-mode-overlay.js";
import DiffOverlay from "../diff-overlay.js";
import HelpOverlay from "../help-overlay.js";
import HistoryOverlay from "../history-overlay.js";
import ModelOverlay from "../model-overlay.js";
import SessionsOverlay from "../sessions-overlay.js";
import chalk from "chalk";
import fs from "fs/promises";
import { Box, Text } from "ink";
import { spawn } from "node:child_process";
import OpenAI from "openai";
import React, { useEffect, useMemo, useRef, useState } from "react";
import { inspect } from "util";
export type OverlayModeType =
| "none"
| "history"
| "sessions"
| "model"
| "approval"
| "help"
@@ -83,7 +78,10 @@ async function generateCommandExplanation(
): Promise<string> {
try {
// Create a temporary OpenAI client
const oai = createOpenAIClient(config);
const oai = new OpenAI({
apiKey: getApiKey(config.provider),
baseURL: getBaseUrl(config.provider),
});
// Format the command for display
const commandForDisplay = formatCommandForDisplay(command);
@@ -196,7 +194,6 @@ export default function TerminalChat({
submitConfirmation,
} = useConfirmation();
const [overlayMode, setOverlayMode] = useState<OverlayModeType>("none");
const [viewRollout, setViewRollout] = useState<AppRollout | null>(null);
// Store the diff text when opening the diff overlay so the view isn't
// recomputed on every rerender while it is open.
@@ -460,16 +457,6 @@ export default function TerminalChat({
[items, model],
);
if (viewRollout) {
return (
<TerminalChatPastRollout
fileOpener={config.fileOpener}
session={viewRollout.session}
items={viewRollout.items}
/>
);
}
return (
<Box flexDirection="column">
<Box flexDirection="column">
@@ -496,7 +483,6 @@ export default function TerminalChat({
initialImagePaths,
flexModeEnabled: Boolean(config.flexMode),
}}
fileOpener={config.fileOpener}
/>
) : (
<Box>
@@ -525,7 +511,6 @@ export default function TerminalChat({
openModelOverlay={() => setOverlayMode("model")}
openApprovalOverlay={() => setOverlayMode("approval")}
openHelpOverlay={() => setOverlayMode("help")}
openSessionsOverlay={() => setOverlayMode("sessions")}
openDiffOverlay={() => {
const { isGitRepo, diff } = getGitDiff();
let text: string;
@@ -585,25 +570,6 @@ export default function TerminalChat({
{overlayMode === "history" && (
<HistoryOverlay items={items} onExit={() => setOverlayMode("none")} />
)}
{overlayMode === "sessions" && (
<SessionsOverlay
onView={async (p) => {
try {
const txt = await fs.readFile(p, "utf-8");
const data = JSON.parse(txt) as AppRollout;
setViewRollout(data);
setOverlayMode("none");
} catch {
setOverlayMode("none");
}
}}
onResume={(p) => {
setOverlayMode("none");
setInitialPrompt(`Resume this session: ${p}`);
}}
onExit={() => setOverlayMode("none")}
/>
)}
{overlayMode === "model" && (
<ModelOverlay
currentModel={model}

View File

@@ -2,7 +2,6 @@ import type { OverlayModeType } from "./terminal-chat.js";
import type { TerminalHeaderProps } from "./terminal-header.js";
import type { GroupedResponseItem } from "./use-message-grouping.js";
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
import type { FileOpenerScheme } from "src/utils/config.js";
import TerminalChatResponseItem from "./terminal-chat-response-item.js";
import TerminalHeader from "./terminal-header.js";
@@ -24,7 +23,6 @@ type TerminalMessageHistoryProps = {
headerProps: TerminalHeaderProps;
fullStdout: boolean;
setOverlayMode: React.Dispatch<React.SetStateAction<OverlayModeType>>;
fileOpener: FileOpenerScheme | undefined;
};
const TerminalMessageHistory: React.FC<TerminalMessageHistoryProps> = ({
@@ -35,7 +33,6 @@ const TerminalMessageHistory: React.FC<TerminalMessageHistoryProps> = ({
thinkingSeconds: _thinkingSeconds,
fullStdout,
setOverlayMode,
fileOpener,
}) => {
// Flatten batch entries to response items.
const messages = useMemo(() => batch.map(({ item }) => item!), [batch]);
@@ -62,25 +59,16 @@ const TerminalMessageHistory: React.FC<TerminalMessageHistoryProps> = ({
key={`${message.id}-${index}`}
flexDirection="column"
marginLeft={
message.type === "message" &&
(message.role === "user" || message.role === "assistant")
? 0
: 4
message.type === "message" && message.role === "user" ? 0 : 4
}
marginTop={
message.type === "message" && message.role === "user" ? 0 : 1
}
marginBottom={
message.type === "message" && message.role === "assistant"
? 1
: 0
}
>
<TerminalChatResponseItem
item={message}
fullStdout={fullStdout}
setOverlayMode={setOverlayMode}
fileOpener={fileOpener}
/>
</Box>
);

View File

@@ -1,130 +0,0 @@
import type { TypeaheadItem } from "./typeahead-overlay.js";
import TypeaheadOverlay from "./typeahead-overlay.js";
import fs from "fs/promises";
import { Box, Text, useInput } from "ink";
import os from "os";
import path from "path";
import React, { useEffect, useState } from "react";
const SESSIONS_ROOT = path.join(os.homedir(), ".codex", "sessions");
export type SessionMeta = {
path: string;
timestamp: string;
userMessages: number;
toolCalls: number;
firstMessage: string;
};
async function loadSessions(): Promise<Array<SessionMeta>> {
try {
const entries = await fs.readdir(SESSIONS_ROOT);
const sessions: Array<SessionMeta> = [];
for (const entry of entries) {
if (!entry.endsWith(".json")) {
continue;
}
const filePath = path.join(SESSIONS_ROOT, entry);
try {
// eslint-disable-next-line no-await-in-loop
const content = await fs.readFile(filePath, "utf-8");
const data = JSON.parse(content) as {
session?: { timestamp?: string };
items?: Array<{
type: string;
role: string;
content: Array<{ text: string }>;
}>;
};
const items = Array.isArray(data.items) ? data.items : [];
const firstUser = items.find(
(i) => i?.type === "message" && i.role === "user",
);
const firstText =
firstUser?.content?.[0]?.text?.replace(/\n/g, " ").slice(0, 16) ?? "";
const userMessages = items.filter(
(i) => i?.type === "message" && i.role === "user",
).length;
const toolCalls = items.filter(
(i) => i?.type === "function_call",
).length;
sessions.push({
path: filePath,
timestamp: data.session?.timestamp || "",
userMessages,
toolCalls,
firstMessage: firstText,
});
} catch {
/* ignore invalid session */
}
}
sessions.sort((a, b) => b.timestamp.localeCompare(a.timestamp));
return sessions;
} catch {
return [];
}
}
type Props = {
onView: (sessionPath: string) => void;
onResume: (sessionPath: string) => void;
onExit: () => void;
};
export default function SessionsOverlay({
onView,
onResume,
onExit,
}: Props): JSX.Element {
const [items, setItems] = useState<Array<TypeaheadItem>>([]);
const [mode, setMode] = useState<"view" | "resume">("view");
useEffect(() => {
(async () => {
const sessions = await loadSessions();
const formatted = sessions.map((s) => {
const ts = s.timestamp
? new Date(s.timestamp).toLocaleString(undefined, {
dateStyle: "short",
timeStyle: "short",
})
: "";
const first = s.firstMessage?.slice(0, 50);
const label = `${ts} · ${s.userMessages} msgs/${s.toolCalls} tools · ${first}`;
return { label, value: s.path } as TypeaheadItem;
});
setItems(formatted);
})();
}, []);
useInput((_input, key) => {
if (key.tab) {
setMode((m) => (m === "view" ? "resume" : "view"));
}
});
return (
<TypeaheadOverlay
title={mode === "view" ? "View session" : "Resume session"}
description={
<Box flexDirection="column">
<Text>
{mode === "view" ? "press enter to view" : "press enter to resume"}
</Text>
<Text dimColor>tab to toggle mode · esc to cancel</Text>
</Box>
}
initialItems={items}
onSelect={(value) => {
if (mode === "view") {
onView(value);
} else {
onResume(value);
}
}}
onExit={onExit}
/>
);
}

View File

@@ -5,7 +5,13 @@ import type { FileOperation } from "../utils/singlepass/file_ops";
import Spinner from "./vendor/ink-spinner"; // Thirdparty / vendor components
import TextInput from "./vendor/ink-text-input";
import { createOpenAIClient } from "../utils/openai-client";
import {
OPENAI_TIMEOUT_MS,
OPENAI_ORGANIZATION,
OPENAI_PROJECT,
getBaseUrl,
getApiKey,
} from "../utils/config";
import {
generateDiffSummary,
generateEditSummary,
@@ -20,6 +26,7 @@ import { EditedFilesSchema } from "../utils/singlepass/file_ops";
import * as fsSync from "fs";
import * as fsPromises from "fs/promises";
import { Box, Text, useApp, useInput } from "ink";
import OpenAI from "openai";
import { zodResponseFormat } from "openai/helpers/zod";
import path from "path";
import React, { useEffect, useState, useRef } from "react";
@@ -392,7 +399,20 @@ export function SinglePassApp({
files,
});
const openai = createOpenAIClient(config);
const headers: Record<string, string> = {};
if (OPENAI_ORGANIZATION) {
headers["OpenAI-Organization"] = OPENAI_ORGANIZATION;
}
if (OPENAI_PROJECT) {
headers["OpenAI-Project"] = OPENAI_PROJECT;
}
const openai = new OpenAI({
apiKey: getApiKey(config.provider),
baseURL: getBaseUrl(config.provider),
timeout: OPENAI_TIMEOUT_MS,
defaultHeaders: headers,
});
const chatResp = await openai.beta.chat.completions.parse({
model: config.model,
...(config.flexMode ? { service_tier: "flex" } : {}),

View File

@@ -100,14 +100,11 @@ export default class TextBuffer {
private clipboard: string | null = null;
constructor(text = "", initialCursorIdx = 0) {
constructor(text = "") {
this.lines = text.split("\n");
if (this.lines.length === 0) {
this.lines = [""];
}
// No need to reset the cursor on failure - the class already defaults the cursor position to (0,0).
this.setCursorIdx(initialCursorIdx);
}
/* =======================================================================
@@ -125,39 +122,6 @@ export default class TextBuffer {
this.cursorCol = clamp(this.cursorCol, 0, this.lineLen(this.cursorRow));
}
/**
* Sets the cursor position based on a character offset from the start of the document.
* @param idx The character offset to move to (0-based)
* @returns true if successful, false if the index was invalid
*/
private setCursorIdx(idx: number): boolean {
// Reset preferred column since this is an explicit horizontal movement
this.preferredCol = null;
let remainingChars = idx;
let row = 0;
// Count characters line by line until we find the right position
while (row < this.lines.length) {
const lineLength = this.lineLen(row);
// Add 1 for the newline character (except for the last line)
const totalChars = lineLength + (row < this.lines.length - 1 ? 1 : 0);
if (remainingChars <= lineLength) {
this.cursorRow = row;
this.cursorCol = remainingChars;
return true;
}
// Move to next line, subtract this line's characters plus newline
remainingChars -= totalChars;
row++;
}
// If we get here, the index was too large
return false;
}
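// Example (hypothetical buffer): for the text "foo\nbar", offset 4 resolves to
// row 1, column 0 - row 0 contributes its three characters plus the newline.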
/* =====================================================================
* History helpers
* =================================================================== */
@@ -525,22 +489,6 @@ export default class TextBuffer {
end++;
}
/*
* After consuming the actual word we also want to swallow any immediate
* separator run that *follows* it so that a forward word-delete mirrors
* the behaviour of common shells/editors (and matches the expectations
* encoded in our test-suite).
*
* Example: given the text "foo bar baz" and the caret placed at the
* beginning of "bar" (index 4), we want Alt+Delete to turn the string
* into "foo␠baz" (single space). Without this extra loop we would stop
* right before the separating space, producing "foo␠␠baz".
*/
while (end < arr.length && !isWordChar(arr[end])) {
end++;
}
this.lines[this.cursorRow] =
cpSlice(line, 0, this.cursorCol) + cpSlice(line, end);
// caret stays in place
@@ -875,42 +823,12 @@ export default class TextBuffer {
// no `key.backspace` flag set. Treat that byte exactly like an ordinary
// Backspace for parity with textarea.rs and to make interactive tests
// feedable through the simpler `(ch, {}, vp)` path.
// ------------------------------------------------------------------
// Word-wise deletions
//
// macOS (like many terminals on Linux/BSD) maps the physical “Delete” key
// to a *backspace* operation emitting either the raw DEL (0x7f) byte
// or setting `key.backspace = true` in Ink's parsed event. Holding the
// Option/Alt modifier therefore *also* sends backspace semantics even
// though users colloquially refer to the shortcut as “⌥+Delete”.
//
// Historically we treated **modifier + Delete** as a *forward* word
// deletion. This behaviour, however, diverges from the default found
// in shells (zsh, bash, fish, etc.) and native macOS text fields where
// ⌥+Delete removes the word *to the left* of the caret. Update the
// mapping so that both
//
// • ⌥/Alt/Meta + Backspace and
// • ⌥/Alt/Meta + Delete
//
// perform a **backward** word deletion. We keep the ability to delete
// the *next* word by requiring an additional Shift modifier a common
// binding on full-size keyboards that expose a dedicated Forward Delete
// key.
// ------------------------------------------------------------------
else if (
// ⌥/Alt/Meta + (Backspace|Delete|DEL byte) → backward word delete
(key["meta"] || key["ctrl"] || key["alt"]) &&
!key["shift"] &&
(key["backspace"] || input === "\x7f" || key["delete"])
(key["backspace"] || input === "\x7f")
) {
this.deleteWordLeft();
} else if (
// ⇧+⌥/Alt/Meta + (Backspace|Delete|DEL byte) → forward word delete
(key["meta"] || key["ctrl"] || key["alt"]) &&
key["shift"] &&
(key["backspace"] || input === "\x7f" || key["delete"])
) {
} else if ((key["meta"] || key["ctrl"] || key["alt"]) && key["delete"]) {
this.deleteWordRight();
} else if (
key["backspace"] ||

View File

@@ -8,34 +8,29 @@ import type {
ResponseItem,
ResponseCreateParams,
FunctionTool,
Tool,
} from "openai/resources/responses/responses.mjs";
import type { Reasoning } from "openai/resources.mjs";
import { CLI_VERSION } from "../../version.js";
import {
OPENAI_TIMEOUT_MS,
OPENAI_ORGANIZATION,
OPENAI_PROJECT,
getApiKey,
getBaseUrl,
AZURE_OPENAI_API_VERSION,
} from "../config.js";
import { log } from "../logger/log.js";
import { parseToolCallArguments } from "../parsers.js";
import { responsesCreateViaChatCompletions } from "../responses.js";
import {
ORIGIN,
CLI_VERSION,
getSessionId,
setCurrentModel,
setSessionId,
} from "../session.js";
import { applyPatchToolInstructions } from "./apply-patch.js";
import { handleExecCommand } from "./handle-exec-command.js";
import { HttpsProxyAgent } from "https-proxy-agent";
import { spawnSync } from "node:child_process";
import { randomUUID } from "node:crypto";
import OpenAI, { APIConnectionTimeoutError, AzureOpenAI } from "openai";
import os from "os";
import OpenAI, { APIConnectionTimeoutError } from "openai";
// Wait time before retrying after rate limit errors (ms).
const RATE_LIMIT_RETRY_WAIT_MS = parseInt(
@@ -43,9 +38,6 @@ const RATE_LIMIT_RETRY_WAIT_MS = parseInt(
10,
);
// See https://github.com/openai/openai-node/tree/v4?tab=readme-ov-file#configuring-an-https-agent-eg-for-proxies
const PROXY_URL = process.env["HTTPS_PROXY"];
export type CommandConfirmation = {
review: ReviewDecision;
applyPatch?: ApplyPatchCommand | undefined;
@@ -84,7 +76,7 @@ type AgentLoopParams = {
onLastResponseId: (lastResponseId: string) => void;
};
const shellFunctionTool: FunctionTool = {
const shellTool: FunctionTool = {
type: "function",
name: "shell",
description: "Runs a shell command, and returns its output.",
@@ -108,11 +100,6 @@ const shellFunctionTool: FunctionTool = {
},
};
const localShellTool: Tool = {
//@ts-expect-error - waiting on sdk
type: "local_shell",
};
export class AgentLoop {
private model: string;
private provider: string;
@@ -306,7 +293,7 @@ export class AgentLoop {
this.sessionId = getSessionId() || randomUUID().replaceAll("-", "");
// Configure OpenAI client with optional timeout (ms) from environment
const timeoutMs = OPENAI_TIMEOUT_MS;
const apiKey = this.config.apiKey ?? process.env["OPENAI_API_KEY"] ?? "";
const apiKey = getApiKey(this.provider);
const baseURL = getBaseUrl(this.provider);
this.oai = new OpenAI({
@@ -327,29 +314,9 @@ export class AgentLoop {
: {}),
...(OPENAI_PROJECT ? { "OpenAI-Project": OPENAI_PROJECT } : {}),
},
httpAgent: PROXY_URL ? new HttpsProxyAgent(PROXY_URL) : undefined,
...(timeoutMs !== undefined ? { timeout: timeoutMs } : {}),
});
if (this.provider.toLowerCase() === "azure") {
this.oai = new AzureOpenAI({
apiKey,
baseURL,
apiVersion: AZURE_OPENAI_API_VERSION,
defaultHeaders: {
originator: ORIGIN,
version: CLI_VERSION,
session_id: this.sessionId,
...(OPENAI_ORGANIZATION
? { "OpenAI-Organization": OPENAI_ORGANIZATION }
: {}),
...(OPENAI_PROJECT ? { "OpenAI-Project": OPENAI_PROJECT } : {}),
},
httpAgent: PROXY_URL ? new HttpsProxyAgent(PROXY_URL) : undefined,
...(timeoutMs !== undefined ? { timeout: timeoutMs } : {}),
});
}
setSessionId(this.sessionId);
setCurrentModel(this.model);
@@ -466,73 +433,6 @@ export class AgentLoop {
return [outputItem, ...additionalItems];
}
private async handleLocalShellCall(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
item: any,
): Promise<Array<ResponseInputItem>> {
// If the agent has been canceled in the meantime we should not perform any
// additional work. Returning an empty array ensures that we neither execute
// the requested tool call nor enqueue any follow-up input items. This keeps
// the cancellation semantics intuitive for users: once they interrupt a
// task, no further actions related to that task should be taken.
if (this.canceled) {
return [];
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const outputItem: any = {
type: "local_shell_call_output",
// `call_id` is mandatory; ensure we never send `undefined`, which would
// trigger the "No tool output found…" 400 from the API.
call_id: item.call_id,
output: "no function found",
};
// We intentionally *do not* remove this `callId` from the `pendingAborts`
// set right away. The output produced below is only queued up for the
// *next* request to the OpenAI API; it has not been delivered yet. If
// the user presses ESC-ESC (i.e. invokes `cancel()`) in the small window
// between queuing the result and the actual network call, we need to be
// able to surface a synthetic `function_call_output` marked as
// "aborted". Keeping the ID in the set until the run concludes
// successfully lets the next `run()` differentiate between an aborted
// tool call (needs the synthetic output) and a completed one (cleared
// below in the `flush()` helper).
// Used to tell the model to stop if needed.
const additionalItems: Array<ResponseInputItem> = [];
if (item.action.type !== "exec") {
throw new Error("Invalid action type");
}
const args = {
cmd: item.action.command,
workdir: item.action.working_directory,
timeoutInMillis: item.action.timeout_ms,
};
const {
outputText,
metadata,
additionalItems: additionalItemsFromExec,
} = await handleExecCommand(
args,
this.config,
this.approvalPolicy,
this.additionalWritableRoots,
this.getCommandConfirmation,
this.execAbortController?.signal,
);
outputItem.output = JSON.stringify({ output: outputText, metadata });
if (additionalItemsFromExec) {
additionalItems.push(...additionalItemsFromExec);
}
return [outputItem, ...additionalItems];
}
public async run(
input: Array<ResponseInputItem>,
previousResponseId: string = "",
@@ -617,11 +517,6 @@ export class AgentLoop {
// `disableResponseStorage === true`.
let transcriptPrefixLen = 0;
let tools: Array<Tool> = [shellFunctionTool];
if (this.model.startsWith("codex")) {
tools = [localShellTool];
}
const stripInternalFields = (
item: ResponseInputItem,
): ResponseInputItem => {
@@ -725,8 +620,6 @@ export class AgentLoop {
if (
(item as ResponseInputItem).type === "function_call" ||
(item as ResponseInputItem).type === "reasoning" ||
//@ts-expect-error - waiting on sdk
(item as ResponseInputItem).type === "local_shell_call" ||
((item as ResponseInputItem).type === "message" &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(item as any).role === "user")
@@ -765,7 +658,7 @@ export class AgentLoop {
// prompts) and so that freshly generated `function_call_output`s are
// shown immediately.
// Figure out what subset of `turnInput` constitutes *new* information
// for the UI so that we don't spam the interface with repeats of the
// for the UI so that we dont spam the interface with repeats of the
// entire transcript on every iteration when response storage is
// disabled.
const deltaInput = this.disableResponseStorage
@@ -782,26 +675,19 @@ export class AgentLoop {
for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
try {
let reasoning: Reasoning | undefined;
let modelSpecificInstructions: string | undefined;
if (this.model.startsWith("o") || this.model.startsWith("codex")) {
reasoning = { effort: this.config.reasoningEffort ?? "medium" };
reasoning.summary = "auto";
if (this.model.startsWith("o")) {
reasoning = { effort: this.config.reasoningEffort ?? "high" };
if (this.model === "o3" || this.model === "o4-mini") {
reasoning.summary = "auto";
}
}
if (this.model.startsWith("gpt-4.1")) {
modelSpecificInstructions = applyPatchToolInstructions;
}
const mergedInstructions = [
prefix,
modelSpecificInstructions,
this.instructions,
]
const mergedInstructions = [prefix, this.instructions]
.filter(Boolean)
.join("\n");
const responseCall =
!this.config.provider ||
this.config.provider?.toLowerCase() === "openai" ||
this.config.provider?.toLowerCase() === "azure"
this.config.provider?.toLowerCase() === "openai"
? (params: ResponseCreateParams) =>
this.oai.responses.create(params)
: (params: ResponseCreateParams) =>
@@ -828,7 +714,7 @@ export class AgentLoop {
store: true,
previous_response_id: lastResponseId || undefined,
}),
tools: tools,
tools: [shellTool],
// Explicitly tell the model it is allowed to pick whatever
// tool it deems appropriate. Omitting this sometimes leads to
// the model ignoring the available tools and responding with
@@ -853,13 +739,7 @@ export class AgentLoop {
const errCtx = error as any;
const status =
errCtx?.status ?? errCtx?.httpStatus ?? errCtx?.statusCode;
// Treat classical 5xx *and* explicit OpenAI `server_error` types
// as transient server-side failures that qualify for a retry. The
// SDK often omits the numeric status for these, reporting only
// the `type` field.
const isServerError =
(typeof status === "number" && status >= 500) ||
errCtx?.type === "server_error";
const isServerError = typeof status === "number" && status >= 500;
if (
(isTimeout || isServerError || isConnectionError) &&
attempt < MAX_RETRIES
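One side of this hunk classifies explicit OpenAI `server_error` types as retryable even when the SDK omits the numeric status; the other keeps only the 5xx check. A standalone sketch of the broader predicate, assuming the loose error shape the surrounding code reads:

// Sketch only: decide whether a caught SDK error is a transient server-side
// failure worth retrying. The `status`/`type` fields mirror what the diff
// above reads off the error object.
function isTransientServerError(error: unknown): boolean {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const errCtx = error as any;
  const status = errCtx?.status ?? errCtx?.httpStatus ?? errCtx?.statusCode;
  return (
    (typeof status === "number" && status >= 500) ||
    errCtx?.type === "server_error"
  );
}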
@@ -1048,10 +928,7 @@ export class AgentLoop {
if (maybeReasoning.type === "reasoning") {
maybeReasoning.duration_ms = Date.now() - thinkingStart;
}
if (
item.type === "function_call" ||
item.type === "local_shell_call"
) {
if (item.type === "function_call") {
// Track outstanding tool call so we can abort later if needed.
// The item comes from the streaming response, therefore it has
// either `id` (chat) or `call_id` (responses); we normalise
@@ -1174,11 +1051,7 @@ export class AgentLoop {
let reasoning: Reasoning | undefined;
if (this.model.startsWith("o")) {
reasoning = { effort: "high" };
if (
this.model === "o3" ||
this.model === "o4-mini" ||
this.model === "codex-mini-latest"
) {
if (this.model === "o3" || this.model === "o4-mini") {
reasoning.summary = "auto";
}
}
@@ -1189,8 +1062,7 @@ export class AgentLoop {
const responseCall =
!this.config.provider ||
this.config.provider?.toLowerCase() === "openai" ||
this.config.provider?.toLowerCase() === "azure"
this.config.provider?.toLowerCase() === "openai"
? (params: ResponseCreateParams) =>
this.oai.responses.create(params)
: (params: ResponseCreateParams) =>
@@ -1218,7 +1090,7 @@ export class AgentLoop {
store: true,
previous_response_id: lastResponseId || undefined,
}),
tools: tools,
tools: [shellTool],
tool_choice: "auto",
});
@@ -1580,17 +1452,6 @@ export class AgentLoop {
// eslint-disable-next-line no-await-in-loop
const result = await this.handleFunctionCall(item);
turnInput.push(...result);
//@ts-expect-error - waiting on sdk
} else if (item.type === "local_shell_call") {
//@ts-expect-error - waiting on sdk
if (alreadyProcessedResponses.has(item.id)) {
continue;
}
//@ts-expect-error - waiting on sdk
alreadyProcessedResponses.add(item.id);
// eslint-disable-next-line no-await-in-loop
const result = await this.handleLocalShellCall(item);
turnInput.push(...result);
}
emitItem(item as ResponseItem);
}
@@ -1598,19 +1459,6 @@ export class AgentLoop {
}
}
// Dynamic developer message prefix: includes user, workdir, and rg suggestion.
const userName = os.userInfo().username;
const workdir = process.cwd();
const dynamicLines: Array<string> = [
`User: ${userName}`,
`Workdir: ${workdir}`,
];
if (spawnSync("rg", ["--version"], { stdio: "ignore" }).status === 0) {
dynamicLines.push(
"- Always use rg instead of grep/ls -R because it is much faster and respects gitignore",
);
}
const dynamicPrefix = dynamicLines.join("\n");
const prefix = `You are operating as and within the Codex CLI, a terminal-based agentic coding assistant built by OpenAI. It wraps OpenAI models to enable natural language interaction with a local codebase. You are expected to be precise, safe, and helpful.
You can:
@@ -1646,6 +1494,7 @@ You MUST adhere to the following criteria when executing the task:
- If there is a .pre-commit-config.yaml, use \`pre-commit run --files ...\` to check that your changes pass the pre-commit checks. However, do not fix pre-existing errors on lines you didn't touch.
- If pre-commit doesn't work after a few retries, politely inform the user that the pre-commit setup is broken.
- Once you finish coding, you must
- Check \`git status\` to sanity check your changes; revert any scratch files or changes.
- Remove all inline comments you added as much as possible, even if they look normal. Check using \`git diff\`. Inline comments must be generally avoided, unless active maintainers of the repo, after long careful study of the code and the issue, will still misinterpret the code without the comments.
- Check if you accidentally add copyright or license headers. If so, remove them.
- Try to run pre-commit if it is available.
@@ -1655,9 +1504,7 @@ You MUST adhere to the following criteria when executing the task:
- Respond in a friendly tone as a remote teammate, who is knowledgeable, capable and eager to help with coding.
- When your task involves writing or modifying files:
- Do NOT tell the user to "save the file" or "copy the code into a file" if you already created or modified the file using \`apply_patch\`. Instead, reference the file as already saved.
- Do NOT show the full contents of large files you have already written, unless the user explicitly asks for them.
${dynamicPrefix}`;
- Do NOT show the full contents of large files you have already written, unless the user explicitly asks for them.`;
function filterToApiMessages(
items: Array<ResponseInputItem>,

View File

@@ -550,15 +550,7 @@ export function text_to_patch(
!(lines[0] ?? "").startsWith(PATCH_PREFIX.trim()) ||
lines[lines.length - 1] !== PATCH_SUFFIX.trim()
) {
let reason = "Invalid patch text: ";
if (lines.length < 2) {
reason += "Patch text must have at least two lines.";
} else if (!(lines[0] ?? "").startsWith(PATCH_PREFIX.trim())) {
reason += "Patch text must start with the correct patch prefix.";
} else if (lines[lines.length - 1] !== PATCH_SUFFIX.trim()) {
reason += "Patch text must end with the correct patch suffix.";
}
throw new DiffError(reason);
throw new DiffError("Invalid patch text");
}
const parser = new Parser(orig, lines);
parser.index = 1;
@@ -770,46 +762,3 @@ if (import.meta.url === `file://${process.argv[1]}`) {
}
});
}
export const applyPatchToolInstructions = `
To edit files, ALWAYS use the \`shell\` tool with \`apply_patch\` CLI. \`apply_patch\` effectively allows you to execute a diff/patch against a file, but the format of the diff specification is unique to this task, so pay careful attention to these instructions. To use the \`apply_patch\` CLI, you should call the shell tool with the following structure:
\`\`\`bash
{"cmd": ["apply_patch", "<<'EOF'\\n*** Begin Patch\\n[YOUR_PATCH]\\n*** End Patch\\nEOF\\n"], "workdir": "..."}
\`\`\`
Where [YOUR_PATCH] is the actual content of your patch, specified in the following V4A diff format.
*** [ACTION] File: [path/to/file] -> ACTION can be one of Add, Update, or Delete.
For each snippet of code that needs to be changed, repeat the following:
[context_before] -> See below for further instructions on context.
- [old_code] -> Precede the old code with a minus sign.
+ [new_code] -> Precede the new, replacement code with a plus sign.
[context_after] -> See below for further instructions on context.
For instructions on [context_before] and [context_after]:
- By default, show 3 lines of code immediately above and 3 lines immediately below each change. If a change is within 3 lines of a previous change, do NOT duplicate the first change's [context_after] lines in the second change's [context_before] lines.
- If 3 lines of context is insufficient to uniquely identify the snippet of code within the file, use the @@ operator to indicate the class or function to which the snippet belongs. For instance, we might have:
@@ class BaseClass
[3 lines of pre-context]
- [old_code]
+ [new_code]
[3 lines of post-context]
- If a code block is repeated so many times in a class or function that even a single \`@@\` statement and 3 lines of context cannot uniquely identify the snippet of code, you can use multiple \`@@\` statements to jump to the right context. For instance:
@@ class BaseClass
@@ def method():
[3 lines of pre-context]
- [old_code]
+ [new_code]
[3 lines of post-context]
Note, then, that we do not use line numbers in this diff format, as the context is enough to uniquely identify code. An example of a message that you might pass as "input" to this function, in order to apply a patch, is shown below.
\`\`\`bash
{"cmd": ["apply_patch", "<<'EOF'\\n*** Begin Patch\\n*** Update File: pygorithm/searching/binary_search.py\\n@@ class BaseClass\\n@@ def search():\\n- pass\\n+ raise NotImplementedError()\\n@@ class Subclass\\n@@ def search():\\n- pass\\n+ raise NotImplementedError()\\n*** End Patch\\nEOF\\n"], "workdir": "..."}
\`\`\`
File references can only be relative, NEVER ABSOLUTE. After the apply_patch command is run, it will always say "Done!", regardless of whether the patch was successfully applied or not. However, you can determine whether there are issues or errors by looking at any warnings or logging lines printed BEFORE the "Done!" is output.
`;
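For the validation change earlier in this file: the verbose branch distinguishes which precondition failed rather than throwing a bare "Invalid patch text". A hedged usage sketch (the `text_to_patch` signature is abridged from the diff above and assumed here):

// Illustrative only: how the verbose DiffError reads to a caller.
try {
  text_to_patch("not a patch", {});
} catch (err) {
  if (err instanceof DiffError) {
    // e.g. "Invalid patch text: Patch text must have at least two lines."
    console.error(err.message);
  }
}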

View File

@@ -1,21 +1,17 @@
import type { AppConfig } from "../config.js";
import type { ExecInput, ExecResult } from "./sandbox/interface.js";
import type { SpawnOptions } from "child_process";
import type { ParseEntry } from "shell-quote";
import { process_patch } from "./apply-patch.js";
import { SandboxType } from "./sandbox/interface.js";
import { execWithLandlock } from "./sandbox/landlock.js";
import { execWithSeatbelt } from "./sandbox/macos-seatbelt.js";
import { exec as rawExec } from "./sandbox/raw-exec.js";
import { formatCommandForDisplay } from "../../format-command.js";
import { log } from "../logger/log.js";
import fs from "fs";
import os from "os";
import path from "path";
import { parse } from "shell-quote";
import { resolvePathAgainstWorkdir } from "src/approvals.js";
import { PATCH_SUFFIX } from "src/parse-apply-patch.js";
const DEFAULT_TIMEOUT_MS = 10_000; // 10 seconds
@@ -44,61 +40,38 @@ export function exec(
additionalWritableRoots,
}: ExecInput & { additionalWritableRoots: ReadonlyArray<string> },
sandbox: SandboxType,
config: AppConfig,
abortSignal?: AbortSignal,
): Promise<ExecResult> {
// This is a temporary measure to understand what the common base commands
// are until we start persisting and uploading rollouts.
const execForSandbox =
sandbox === SandboxType.MACOS_SEATBELT ? execWithSeatbelt : rawExec;
const opts: SpawnOptions = {
timeout: timeoutInMillis || DEFAULT_TIMEOUT_MS,
...(requiresShell(cmd) ? { shell: true } : {}),
...(workdir ? { cwd: workdir } : {}),
};
switch (sandbox) {
case SandboxType.NONE: {
// SandboxType.NONE uses the raw exec implementation.
return rawExec(cmd, opts, config, abortSignal);
}
case SandboxType.MACOS_SEATBELT: {
// Merge default writable roots with any user-specified ones.
const writableRoots = [
process.cwd(),
os.tmpdir(),
...additionalWritableRoots,
];
return execWithSeatbelt(cmd, opts, writableRoots, config, abortSignal);
}
case SandboxType.LINUX_LANDLOCK: {
return execWithLandlock(
cmd,
opts,
additionalWritableRoots,
config,
abortSignal,
);
}
}
// Merge default writable roots with any user-specified ones.
const writableRoots = [
process.cwd(),
os.tmpdir(),
...additionalWritableRoots,
];
return execForSandbox(cmd, opts, writableRoots, abortSignal);
}
export function execApplyPatch(
patchText: string,
workdir: string | undefined = undefined,
): ExecResult {
// This find/replace is required for some models, like 4.1, where the patch
// text is wrapped in quotes that break the apply_patch command.
let applyPatchInput = patchText
.replace(/('|")?<<('|")EOF('|")/, "")
.replace(/\*\*\* End Patch\nEOF('|")?/, "*** End Patch")
.trim();
if (!applyPatchInput.endsWith(PATCH_SUFFIX)) {
applyPatchInput += "\n" + PATCH_SUFFIX;
}
log(`Applying patch: \`\`\`${applyPatchInput}\`\`\`\n\n`);
// This is a temporary measure to understand what the common base commands
// are until we start persisting and uploading rollouts.
try {
const result = process_patch(
applyPatchInput,
patchText,
(p) => fs.readFileSync(resolvePathAgainstWorkdir(p, workdir), "utf8"),
(p, c) => {
const resolvedPath = resolvePathAgainstWorkdir(p, workdir);
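The quote-stripping at the top of `execApplyPatch` deserves a closer look. Isolated from the rest of the function, the normalization step is roughly the following sketch (`PATCH_SUFFIX` is imported from parse-apply-patch in the real module; its value is assumed here):

// Sketch of the normalization only: strip the heredoc quoting some models
// emit and make sure the patch ends with the expected suffix.
const PATCH_SUFFIX = "*** End Patch"; // assumed value for this sketch

function normalizePatchText(patchText: string): string {
  let applyPatchInput = patchText
    .replace(/('|")?<<('|")EOF('|")/, "")
    .replace(/\*\*\* End Patch\nEOF('|")?/, "*** End Patch")
    .trim();
  if (!applyPatchInput.endsWith(PATCH_SUFFIX)) {
    applyPatchInput += "\n" + PATCH_SUFFIX;
  }
  return applyPatchInput;
}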

View File

@@ -94,7 +94,6 @@ export async function handleExecCommand(
/* applyPatch */ undefined,
/* runInSandbox */ false,
additionalWritableRoots,
config,
abortSignal,
).then(convertSummaryToResult);
}
@@ -143,7 +142,6 @@ export async function handleExecCommand(
applyPatch,
runInSandbox,
additionalWritableRoots,
config,
abortSignal,
);
// If the operation was aborted in the meantime, propagate the cancellation
@@ -181,7 +179,6 @@ export async function handleExecCommand(
applyPatch,
false,
additionalWritableRoots,
config,
abortSignal,
);
return convertSummaryToResult(summary);
@@ -216,7 +213,6 @@ async function execCommand(
applyPatchCommand: ApplyPatchCommand | undefined,
runInSandbox: boolean,
additionalWritableRoots: ReadonlyArray<string>,
config: AppConfig,
abortSignal?: AbortSignal,
): Promise<ExecCommandSummary> {
let { workdir } = execInput;
@@ -256,7 +252,6 @@ async function execCommand(
: await exec(
{ ...execInput, additionalWritableRoots },
await getSandbox(runInSandbox),
config,
abortSignal,
);
const duration = Date.now() - start;
@@ -308,11 +303,6 @@ async function getSandbox(runInSandbox: boolean): Promise<SandboxType> {
"Sandbox was mandated, but 'sandbox-exec' was not found in PATH!",
);
}
} else if (process.platform === "linux") {
// TODO: Need to verify that the Landlock sandbox is working. For example,
// using Landlock in a Linux Docker container from a macOS host may not
// work.
return SandboxType.LINUX_LANDLOCK;
} else if (CODEX_UNSAFE_ALLOW_NO_SANDBOX) {
// Allow running without a sandbox if the user has explicitly marked the
// environment as already being sufficiently locked-down.
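For orientation, the platform dispatch that the removed Linux branch belonged to can be sketched as follows (simplified, not the repo's exact getSandbox implementation; the enum values and env flag mirror names shown elsewhere in this diff):

// Simplified sketch of per-platform sandbox selection.
enum SandboxType {
  NONE,
  MACOS_SEATBELT,
  LINUX_LANDLOCK,
}

function pickSandbox(
  runInSandbox: boolean,
  allowNoSandbox: boolean, // stands in for CODEX_UNSAFE_ALLOW_NO_SANDBOX
): SandboxType {
  if (!runInSandbox) {
    return SandboxType.NONE;
  }
  if (process.platform === "darwin") {
    return SandboxType.MACOS_SEATBELT;
  }
  if (process.platform === "linux") {
    return SandboxType.LINUX_LANDLOCK; // the branch removed in this diff
  }
  if (allowNoSandbox) {
    return SandboxType.NONE;
  }
  throw new Error("Sandboxing is not supported on this platform.");
}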

View File

@@ -1,6 +1,7 @@
// Maximum output cap: either MAX_OUTPUT_LINES lines or MAX_OUTPUT_BYTES bytes,
// whichever limit is reached first.
import { DEFAULT_SHELL_MAX_BYTES, DEFAULT_SHELL_MAX_LINES } from "../../config";
const MAX_OUTPUT_BYTES = 1024 * 10; // 10 KB
const MAX_OUTPUT_LINES = 256;
/**
* Creates a collector that accumulates data Buffers from a stream up to
@@ -9,8 +10,8 @@ import { DEFAULT_SHELL_MAX_BYTES, DEFAULT_SHELL_MAX_LINES } from "../../config";
*/
export function createTruncatingCollector(
stream: NodeJS.ReadableStream,
byteLimit: number = DEFAULT_SHELL_MAX_BYTES,
lineLimit: number = DEFAULT_SHELL_MAX_LINES,
byteLimit: number = MAX_OUTPUT_BYTES,
lineLimit: number = MAX_OUTPUT_LINES,
): {
getString: () => string;
hit: boolean;
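In practice the collector caps a stream at whichever limit trips first. A usage sketch against the export above, passing the 10 KB / 256-line defaults explicitly:

// Sketch: collect a child process's stdout under explicit caps.
import { spawn } from "node:child_process";

const child = spawn("ls", ["-la"]);
const stdoutCollector = createTruncatingCollector(child.stdout!, 1024 * 10, 256);
child.on("exit", () => {
  console.log(stdoutCollector.getString());
  if (stdoutCollector.hit) {
    console.log("[output truncated]");
  }
});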

View File

@@ -1,175 +0,0 @@
import type { ExecResult } from "./interface.js";
import type { AppConfig } from "../../config.js";
import type { SpawnOptions } from "child_process";
import { exec } from "./raw-exec.js";
import { execFile } from "child_process";
import fs from "fs";
import path from "path";
import { log } from "src/utils/logger/log.js";
import { fileURLToPath } from "url";
/**
* Runs Landlock with the following permissions:
* - can read any file on disk
* - can write to process.cwd()
* - can write to the platform user temp folder
* - can write to any user-provided writable root
*/
export async function execWithLandlock(
cmd: Array<string>,
opts: SpawnOptions,
userProvidedWritableRoots: ReadonlyArray<string>,
config: AppConfig,
abortSignal?: AbortSignal,
): Promise<ExecResult> {
const sandboxExecutable = await getSandboxExecutable();
const extraSandboxPermissions = userProvidedWritableRoots.flatMap(
(root: string) => ["--sandbox-permission", `disk-write-folder=${root}`],
);
const fullCommand = [
sandboxExecutable,
"--sandbox-permission",
"disk-full-read-access",
"--sandbox-permission",
"disk-write-cwd",
"--sandbox-permission",
"disk-write-platform-user-temp-folder",
...extraSandboxPermissions,
"--",
...cmd,
];
return exec(fullCommand, opts, config, abortSignal);
}
/**
* Lazily initialized promise that resolves to the absolute path of the
* architecture-specific Landlock helper binary.
*/
let sandboxExecutablePromise: Promise<string> | null = null;
async function detectSandboxExecutable(): Promise<string> {
// Find the executable relative to the package.json file.
const __filename = fileURLToPath(import.meta.url);
let dir: string = path.dirname(__filename);
// Ascend until package.json is found or we reach the filesystem root.
// eslint-disable-next-line no-constant-condition
while (true) {
try {
// eslint-disable-next-line no-await-in-loop
await fs.promises.access(
path.join(dir, "package.json"),
fs.constants.F_OK,
);
break; // Found the package.json ⇒ dir is our project root.
} catch {
// keep searching
}
const parent = path.dirname(dir);
if (parent === dir) {
throw new Error("Unable to locate package.json");
}
dir = parent;
}
const sandboxExecutable = getLinuxSandboxExecutableForCurrentArchitecture();
const candidate = path.join(dir, "bin", sandboxExecutable);
try {
await fs.promises.access(candidate, fs.constants.X_OK);
} catch {
throw new Error(`${candidate} not found or not executable`);
}
// Will throw if the executable is not working in this environment.
await verifySandboxExecutable(candidate);
return candidate;
}
const ERROR_WHEN_LANDLOCK_NOT_SUPPORTED = `\
The combination of seccomp/landlock that Codex uses for sandboxing is not
supported in this environment.
If you are running in a Docker container, you may want to try adding
restrictions to your Docker container such that it provides your desired
sandboxing guarantees and then run Codex with the
--dangerously-auto-approve-everything option inside the container.
If you are running on an older Linux kernel that does not support newer
features of seccomp/landlock, you will have to update your kernel to a newer
version.
`;
/**
* Now that we have the path to the executable, make sure that it works in
* this environment. For example, when running a Linux Docker container from
* macOS like so:
*
* docker run -it alpine:latest /bin/sh
*
* Running `codex-linux-sandbox-x64 -- true` in the container fails with:
*
* ```
* Error: sandbox error: seccomp setup error
*
* Caused by:
* 0: seccomp setup error
* 1: Error calling `seccomp`: Invalid argument (os error 22)
* 2: Invalid argument (os error 22)
* ```
*/
function verifySandboxExecutable(sandboxExecutable: string): Promise<void> {
// Note we are running `true` rather than `bash -lc true` because we want to
// ensure we run an executable, not a shell built-in. Note that `true` should
// always be available in a POSIX environment.
return new Promise((resolve, reject) => {
const args = ["--", "true"];
execFile(sandboxExecutable, args, (error, stdout, stderr) => {
if (error) {
log(
`Sandbox check failed for ${sandboxExecutable} ${args.join(" ")}: ${error}`,
);
log(`stdout: ${stdout}`);
log(`stderr: ${stderr}`);
reject(new Error(ERROR_WHEN_LANDLOCK_NOT_SUPPORTED));
} else {
resolve();
}
});
});
}
/**
* Returns the absolute path to the architecture-specific Landlock helper
* binary. (Could be a rejected promise if not found.)
*/
function getSandboxExecutable(): Promise<string> {
if (!sandboxExecutablePromise) {
sandboxExecutablePromise = detectSandboxExecutable();
}
return sandboxExecutablePromise;
}
/** @return name of the native executable to use for Linux sandboxing. */
function getLinuxSandboxExecutableForCurrentArchitecture(): string {
switch (process.arch) {
case "arm64":
return "codex-linux-sandbox-arm64";
case "x64":
return "codex-linux-sandbox-x64";
// Fall back to the x86_64 build for anything else; it will obviously
// fail on incompatible systems, but this gives a sane error message rather
// than crashing earlier.
default:
return "codex-linux-sandbox-x64";
}
}
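Putting the pieces of this (now deleted) module together, a call site looked roughly like the sketch below; the command and extra writable root are made up for illustration:

// Hedged sketch of invoking the removed Landlock wrapper.
async function runInLandlock(config: AppConfig): Promise<void> {
  const result = await execWithLandlock(
    ["bash", "-lc", "make test"], // hypothetical command to sandbox
    { timeout: 10_000 }, // SpawnOptions
    ["/tmp/build-cache"], // becomes --sandbox-permission disk-write-folder=...
    config,
  );
  console.log(result);
}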

View File

@@ -1,5 +1,4 @@
import type { ExecResult } from "./interface.js";
import type { AppConfig } from "../../config.js";
import type { SpawnOptions } from "child_process";
import { exec } from "./raw-exec.js";
@@ -25,7 +24,6 @@ export function execWithSeatbelt(
cmd: Array<string>,
opts: SpawnOptions,
writableRoots: ReadonlyArray<string>,
config: AppConfig,
abortSignal?: AbortSignal,
): Promise<ExecResult> {
let scopedWritePolicy: string;
@@ -74,7 +72,7 @@ export function execWithSeatbelt(
"--",
...cmd,
];
return exec(fullCommand, opts, config, abortSignal);
return exec(fullCommand, opts, writableRoots, abortSignal);
}
const READ_ONLY_SEATBELT_POLICY = `

View File

@@ -1,5 +1,4 @@
import type { ExecResult } from "./interface";
import type { AppConfig } from "../../config";
import type {
ChildProcess,
SpawnOptions,
@@ -21,7 +20,7 @@ import * as os from "os";
export function exec(
command: Array<string>,
options: SpawnOptions,
config: AppConfig,
_writableRoots: ReadonlyArray<string>,
abortSignal?: AbortSignal,
): Promise<ExecResult> {
// Adapt command for the current platform (e.g., convert 'ls' to 'dir' on Windows)
@@ -144,21 +143,9 @@ export function exec(
// ExecResult object so the rest of the agent loop can carry on gracefully.
return new Promise<ExecResult>((resolve) => {
// Get shell output limits from config if available
const maxBytes = config?.tools?.shell?.maxBytes;
const maxLines = config?.tools?.shell?.maxLines;
// Collect stdout and stderr up to configured limits.
const stdoutCollector = createTruncatingCollector(
child.stdout!,
maxBytes,
maxLines,
);
const stderrCollector = createTruncatingCollector(
child.stderr!,
maxBytes,
maxLines,
);
const stdoutCollector = createTruncatingCollector(child.stdout!);
const stderrCollector = createTruncatingCollector(child.stderr!);
child.on("exit", (code, signal) => {
const stdout = stdoutCollector.getString();

View File

@@ -1,7 +1,7 @@
import type { AgentName } from "package-manager-detector";
import { detectInstallerByPath } from "./package-manager-detector";
import { CLI_VERSION } from "../version";
import { CLI_VERSION } from "./session";
import boxen from "boxen";
import chalk from "chalk";
import { getLatestVersion } from "fast-npm-meta";

View File

@@ -1,14 +1,12 @@
import type { AppConfig } from "./config.js";
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
import { createOpenAIClient } from "./openai-client.js";
import { getBaseUrl, getApiKey } from "./config.js";
import OpenAI from "openai";
/**
* Generate a condensed summary of the conversation items.
* @param items The list of conversation items to summarize
* @param model The model to use for generating the summary
* @param flexMode Whether to use the flex-mode service tier
* @param config The configuration object
* @returns A concise structured summary string
*/
/**
@@ -25,7 +23,10 @@ export async function generateCompactSummary(
flexMode = false,
config: AppConfig,
): Promise<string> {
const oai = createOpenAIClient(config);
const oai = new OpenAI({
apiKey: getApiKey(config.provider),
baseURL: getBaseUrl(config.provider),
});
const conversationText = items
.filter(
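This hunk trades the shared `createOpenAIClient` helper for a direct constructor call. A minimal sketch of what such a factory plausibly centralizes (hypothetical body, not the repo's actual helper; getApiKey/getBaseUrl are the config helpers shown in this diff):

// Hypothetical sketch of a client factory like the one referenced above.
import OpenAI from "openai";

function makeOpenAIClient(config: { provider?: string }): OpenAI {
  return new OpenAI({
    apiKey: getApiKey(config.provider),
    baseURL: getBaseUrl(config.provider),
  });
}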

View File

@@ -43,15 +43,11 @@ if (!isVitest) {
loadDotenv({ path: USER_WIDE_CONFIG_PATH });
}
export const DEFAULT_AGENTIC_MODEL = "codex-mini-latest";
export const DEFAULT_AGENTIC_MODEL = "o4-mini";
export const DEFAULT_FULL_CONTEXT_MODEL = "gpt-4.1";
export const DEFAULT_APPROVAL_MODE = AutoApprovalMode.SUGGEST;
export const DEFAULT_INSTRUCTIONS = "";
// Default shell output limits
export const DEFAULT_SHELL_MAX_BYTES = 1024 * 10; // 10 KB
export const DEFAULT_SHELL_MAX_LINES = 256;
export const CONFIG_DIR = join(homedir(), ".codex");
export const CONFIG_JSON_FILEPATH = join(CONFIG_DIR, "config.json");
export const CONFIG_YAML_FILEPATH = join(CONFIG_DIR, "config.yaml");
@@ -68,15 +64,12 @@ export const OPENAI_TIMEOUT_MS =
export const OPENAI_BASE_URL = process.env["OPENAI_BASE_URL"] || "";
export let OPENAI_API_KEY = process.env["OPENAI_API_KEY"] || "";
export const AZURE_OPENAI_API_VERSION =
process.env["AZURE_OPENAI_API_VERSION"] || "2025-04-01-preview";
export const DEFAULT_REASONING_EFFORT = "high";
export const OPENAI_ORGANIZATION = process.env["OPENAI_ORGANIZATION"] || "";
export const OPENAI_PROJECT = process.env["OPENAI_PROJECT"] || "";
// Can be set `true` when Codex is running in an environment that is marked as already
// considered sufficiently locked-down so that we allow running without an explicit sandbox.
// considered sufficiently locked-down so that we allow running wihtout an explicit sandbox.
export const CODEX_UNSAFE_ALLOW_NO_SANDBOX = Boolean(
process.env["CODEX_UNSAFE_ALLOW_NO_SANDBOX"] || "",
);
@@ -120,7 +113,7 @@ export function getApiKey(provider: string = "openai"): string | undefined {
return process.env[providerInfo.envKey];
}
// Checking `PROVIDER_API_KEY` feels more intuitive with a custom provider.
// Checking `PROVIDER_API_KEY feels more intuitive with a custom provider.
const customApiKey = process.env[`${provider.toUpperCase()}_API_KEY`];
if (customApiKey) {
return customApiKey;
@@ -135,8 +128,6 @@ export function getApiKey(provider: string = "openai"): string | undefined {
return undefined;
}
export type FileOpenerScheme = "vscode" | "cursor" | "windsurf";
// Represents config as persisted in config.json.
export type StoredConfig = {
model?: string;
@@ -148,28 +139,15 @@ export type StoredConfig = {
notify?: boolean;
/** Disable server-side response storage (send full transcript each request) */
disableResponseStorage?: boolean;
flexMode?: boolean;
providers?: Record<string, { name: string; baseURL: string; envKey: string }>;
history?: {
maxSize?: number;
saveHistory?: boolean;
sensitivePatterns?: Array<string>;
};
tools?: {
shell?: {
maxBytes?: number;
maxLines?: number;
};
};
/** User-defined safe commands */
safeCommands?: Array<string>;
reasoningEffort?: ReasoningEffort;
/**
* URI-based file opener. This is used when linking code references in
* terminal output.
*/
fileOpener?: FileOpenerScheme;
};
// Minimal config written on first run. An *empty* model string ensures that
@@ -208,35 +186,18 @@ export type AppConfig = {
saveHistory: boolean;
sensitivePatterns: Array<string>;
};
tools?: {
shell?: {
maxBytes: number;
maxLines: number;
};
};
fileOpener?: FileOpenerScheme;
};
// Formatting (quiet mode-only).
export const PRETTY_PRINT = Boolean(process.env["PRETTY_PRINT"] || "");
// ---------------------------------------------------------------------------
// Project doc support (AGENTS.md / codex.md)
// Project doc support (codex.md)
// ---------------------------------------------------------------------------
export const PROJECT_DOC_MAX_BYTES = 32 * 1024; // 32 kB
// We support multiple filenames for project-level agent instructions. As of
// 2025 the recommended convention is to use `AGENTS.md`, however we keep
// the legacy `codex.md` variants for backwards-compatibility so that existing
// repositories continue to work without changes. The list is ordered so that
// the first match wins: newer conventions first, older fallbacks later.
const PROJECT_DOC_FILENAMES = [
"AGENTS.md", // preferred
"codex.md", // legacy
".codex.md",
"CODEX.md",
];
const PROJECT_DOC_FILENAMES = ["codex.md", ".codex.md", "CODEX.md"];
const PROJECT_DOC_SEPARATOR = "\n\n--- project-doc ---\n\n";
export function discoverProjectDocPath(startDir: string): string | null {
@@ -277,8 +238,7 @@ export function discoverProjectDocPath(startDir: string): string | null {
}
/**
* Load the project documentation markdown (`AGENTS.md` or the legacy
* `codex.md`) if present. If the file
* Load the project documentation markdown (codex.md) if present. If the file
* exceeds {@link PROJECT_DOC_MAX_BYTES} it will be truncated and a warning is
* logged.
*
@@ -428,17 +388,8 @@ export const loadConfig = (
instructions: combinedInstructions,
notify: storedConfig.notify === true,
approvalMode: storedConfig.approvalMode,
tools: {
shell: {
maxBytes:
storedConfig.tools?.shell?.maxBytes ?? DEFAULT_SHELL_MAX_BYTES,
maxLines:
storedConfig.tools?.shell?.maxLines ?? DEFAULT_SHELL_MAX_LINES,
},
},
disableResponseStorage: storedConfig.disableResponseStorage === true,
reasoningEffort: storedConfig.reasoningEffort,
fileOpener: storedConfig.fileOpener,
};
// -----------------------------------------------------------------------
@@ -500,10 +451,6 @@ export const loadConfig = (
}
// Notification setting: enable desktop notifications when set in config
config.notify = storedConfig.notify === true;
// Flex-mode setting: enable the flex-mode service tier when set in config
if (storedConfig.flexMode !== undefined) {
config.flexMode = storedConfig.flexMode;
}
// Add default history config if not provided
if (storedConfig.history !== undefined) {
@@ -558,7 +505,6 @@ export const saveConfig = (
providers: config.providers,
approvalMode: config.approvalMode,
disableResponseStorage: config.disableResponseStorage,
flexMode: config.flexMode,
reasoningEffort: config.reasoningEffort,
};
@@ -571,18 +517,6 @@ export const saveConfig = (
};
}
// Add tools settings if they exist
if (config.tools) {
configToSave.tools = {
shell: config.tools.shell
? {
maxBytes: config.tools.shell.maxBytes,
maxLines: config.tools.shell.maxLines,
}
: undefined,
};
}
if (ext === ".yaml" || ext === ".yml") {
writeFileSync(targetPath, dumpYaml(configToSave), "utf-8");
} else {
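The `tools.shell` fields removed from StoredConfig above let users raise the shell output caps from their config file. Against the pre-change type, a stored config using them would have looked something like this (illustrative values only):

// Illustrative only: exercising the removed StoredConfig fields.
const storedConfig: StoredConfig = {
  model: "o4-mini",
  flexMode: false,
  tools: {
    shell: {
      maxBytes: 1024 * 64, // raise the 10 KB default
      maxLines: 1024, // raise the 256-line default
    },
  },
  fileOpener: "vscode",
};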

View File

@@ -2,24 +2,7 @@ import fs from "fs";
import os from "os";
import path from "path";
/**
* Represents a file system suggestion with path and directory information
*/
export interface FileSystemSuggestion {
/** The full path of the suggestion */
path: string;
/** Whether the suggestion is a directory */
isDirectory: boolean;
}
/**
* Gets file system suggestions based on a path prefix
* @param pathPrefix The path prefix to search for
* @returns Array of file system suggestions
*/
export function getFileSystemSuggestions(
pathPrefix: string,
): Array<FileSystemSuggestion> {
export function getFileSystemSuggestions(pathPrefix: string): Array<string> {
if (!pathPrefix) {
return [];
}
@@ -48,10 +31,10 @@ export function getFileSystemSuggestions(
.map((item) => {
const fullPath = path.join(readDir, item);
const isDirectory = fs.statSync(fullPath).isDirectory();
return {
path: isDirectory ? path.join(fullPath, sep) : fullPath,
isDirectory,
};
if (isDirectory) {
return path.join(fullPath, sep);
}
return fullPath;
});
} catch {
return [];
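The change above flattens `FileSystemSuggestion` objects back to plain strings, with a trailing separator marking directories. A small consumer sketch against the string form:

// Sketch: after this change, a trailing path.sep is the only directory marker.
import path from "path";

for (const suggestion of getFileSystemSuggestions("./sr")) {
  const isDirectory = suggestion.endsWith(path.sep);
  console.log(isDirectory ? `dir  ${suggestion}` : `file ${suggestion}`);
}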

View File

@@ -1,62 +0,0 @@
import fs from "fs";
import path from "path";
/**
* Replaces @path tokens in the input string with <path>file contents</path> XML blocks for LLM context.
* Only replaces if the path points to a file; directories are ignored.
*/
export async function expandFileTags(raw: string): Promise<string> {
const re = /@([\w./~-]+)/g;
let out = raw;
type MatchInfo = { index: number; length: number; path: string };
const matches: Array<MatchInfo> = [];
for (const m of raw.matchAll(re) as IterableIterator<RegExpMatchArray>) {
const idx = m.index;
const captured = m[1];
if (idx !== undefined && captured) {
matches.push({ index: idx, length: m[0].length, path: captured });
}
}
// Process in reverse to avoid index shifting.
for (let i = matches.length - 1; i >= 0; i--) {
const { index, length, path: p } = matches[i]!;
const resolved = path.resolve(process.cwd(), p);
try {
const st = fs.statSync(resolved);
if (st.isFile()) {
const content = fs.readFileSync(resolved, "utf-8");
const rel = path.relative(process.cwd(), resolved);
const xml = `<${rel}>\n${content}\n</${rel}>`;
out = out.slice(0, index) + xml + out.slice(index + length);
}
} catch {
// If the path is invalid, leave the token as-is
}
}
return out;
}
/**
* Collapses <path>content</path> XML blocks back to @path format.
* This is the reverse operation of expandFileTags.
* Only collapses blocks where the path points to a valid file; invalid paths remain unchanged.
*/
export function collapseXmlBlocks(text: string): string {
return text.replace(
/<([^\n>]+)>([\s\S]*?)<\/\1>/g,
(match, path1: string) => {
const filePath = path.normalize(path1.trim());
try {
// Only convert to @path format if it's a valid file
return fs.statSync(path.resolve(process.cwd(), filePath)).isFile()
? "@" + filePath
: match;
} catch {
return match; // Keep XML block if path is invalid
}
},
);
}
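The two deleted helpers were inverses of each other: `expandFileTags` inlines file contents for the model, and `collapseXmlBlocks` restores the compact form for display. A round-trip sketch (assuming a file named notes.md exists in the working directory):

// Sketch of the removed round trip.
async function demoFileTags(): Promise<void> {
  const expanded = await expandFileTags("please summarize @notes.md");
  // expanded now embeds <notes.md>...file contents...</notes.md>
  const collapsed = collapseXmlBlocks(expanded);
  // collapsed is back to "please summarize @notes.md"
  console.log(collapsed);
}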

View File

@@ -1,75 +0,0 @@
import SelectInput from "../components/select-input/select-input.js";
import Spinner from "../components/vendor/ink-spinner.js";
import TextInput from "../components/vendor/ink-text-input.js";
import { Box, Text } from "ink";
import React, { useState } from "react";
export type Choice = { type: "signin" } | { type: "apikey"; key: string };
export function ApiKeyPrompt({
onDone,
}: {
onDone: (choice: Choice) => void;
}): JSX.Element {
const [step, setStep] = useState<"select" | "paste">("select");
const [apiKey, setApiKey] = useState("");
if (step === "select") {
return (
<Box flexDirection="column" gap={1}>
<Box flexDirection="column">
<Text>
Sign in with ChatGPT to generate an API key or paste one you already
have.
</Text>
<Text dimColor>[use arrows to move, enter to select]</Text>
</Box>
<SelectInput
items={[
{ label: "Sign in with ChatGPT", value: "signin" },
{
label: "Paste an API key (or set as OPENAI_API_KEY)",
value: "paste",
},
]}
onSelect={(item: { value: string }) => {
if (item.value === "signin") {
onDone({ type: "signin" });
} else {
setStep("paste");
}
}}
/>
</Box>
);
}
return (
<Box flexDirection="column">
<Text>Paste your OpenAI API key and press &lt;Enter&gt;:</Text>
<TextInput
value={apiKey}
onChange={setApiKey}
onSubmit={(value: string) => {
if (value.trim() !== "") {
onDone({ type: "apikey", key: value.trim() });
}
}}
placeholder="sk-..."
mask="*"
/>
</Box>
);
}
export function WaitingForAuth(): JSX.Element {
return (
<Box flexDirection="row" marginTop={1}>
<Spinner type="ball" />
<Text>
{" "}
Waiting for authentication <Text dimColor>ctrl + c to quit</Text>
</Text>
</Box>
);
}

View File

@@ -1,766 +0,0 @@
import type { Choice } from "./get-api-key-components";
import type { Request, Response } from "express";
import { ApiKeyPrompt, WaitingForAuth } from "./get-api-key-components";
import chalk from "chalk";
import express from "express";
import fs from "fs/promises";
import { render } from "ink";
import crypto from "node:crypto";
import { URL } from "node:url";
import open from "open";
import os from "os";
import path from "path";
import React from "react";
function promptUserForChoice(): Promise<Choice> {
return new Promise<Choice>((resolve) => {
const instance = render(
<ApiKeyPrompt
onDone={(choice: Choice) => {
resolve(choice);
instance.unmount();
}}
/>,
);
});
}
interface OidcConfiguration {
issuer: string;
authorization_endpoint: string;
token_endpoint: string;
}
async function getOidcConfiguration(
issuer: string,
): Promise<OidcConfiguration> {
const discoveryUrl = new URL(issuer);
discoveryUrl.pathname = "/.well-known/openid-configuration";
if (issuer === "https://auth.openai.com") {
// Account for legacy quirk in production tenant
discoveryUrl.pathname = "/v2.0" + discoveryUrl.pathname;
}
const res = await fetch(discoveryUrl.toString());
if (!res.ok) {
throw new Error("Failed to fetch OIDC configuration");
}
return (await res.json()) as OidcConfiguration;
}
interface IDTokenClaims {
"exp": number;
"https://api.openai.com/auth": {
organization_id: string;
project_id: string;
completed_platform_onboarding: boolean;
is_org_owner: boolean;
chatgpt_subscription_active_start: string;
chatgpt_subscription_active_until: string;
chatgpt_plan_type: string;
};
}
interface AccessTokenClaims {
"https://api.openai.com/auth": {
chatgpt_plan_type: string;
};
}
function generatePKCECodes(): {
code_verifier: string;
code_challenge: string;
} {
const code_verifier = crypto.randomBytes(64).toString("hex");
const code_challenge = crypto
.createHash("sha256")
.update(code_verifier)
.digest("base64url");
return { code_verifier, code_challenge };
}
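`generatePKCECodes` follows RFC 7636: the challenge is the base64url-encoded SHA-256 digest of the verifier. A quick self-check sketch of that relationship:

// Sketch: verify a PKCE pair the way the authorization server will.
import crypto from "node:crypto";

function verifyPkcePair(codeVerifier: string, codeChallenge: string): boolean {
  const expected = crypto
    .createHash("sha256")
    .update(codeVerifier)
    .digest("base64url");
  return expected === codeChallenge;
}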
async function maybeRedeemCredits(
issuer: string,
clientId: string,
refreshToken: string,
idToken?: string,
): Promise<void> {
try {
let currentIdToken = idToken;
let idClaims: IDTokenClaims | undefined;
if (
currentIdToken &&
typeof currentIdToken === "string" &&
currentIdToken.split(".")[1]
) {
idClaims = JSON.parse(
Buffer.from(currentIdToken.split(".")[1]!, "base64url").toString(
"utf8",
),
) as IDTokenClaims;
} else {
currentIdToken = "";
}
// Validate idToken expiration
// if expired, attempt token-exchange for a fresh idToken
if (!idClaims || !idClaims.exp || Date.now() >= idClaims.exp * 1000) {
// eslint-disable-next-line no-console
console.log(chalk.dim("Refreshing credentials..."));
try {
const refreshRes = await fetch("https://auth.openai.com/oauth/token", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
client_id: clientId,
grant_type: "refresh_token",
refresh_token: refreshToken,
scope: "openid profile email",
}),
});
if (!refreshRes.ok) {
// eslint-disable-next-line no-console
console.warn(
`Failed to refresh credentials: ${refreshRes.status} ${refreshRes.statusText}\n${chalk.dim(await refreshRes.text())}`,
);
// eslint-disable-next-line no-console
console.warn(
`Please sign in again to redeem credits: ${chalk.bold("codex --login")}`,
);
return;
}
const refreshData = (await refreshRes.json()) as {
id_token: string;
refresh_token?: string;
};
currentIdToken = refreshData.id_token;
idClaims = JSON.parse(
Buffer.from(currentIdToken.split(".")[1]!, "base64url").toString(
"utf8",
),
) as IDTokenClaims;
if (refreshData.refresh_token) {
try {
const home = os.homedir();
const authDir = path.join(home, ".codex");
const authFile = path.join(authDir, "auth.json");
const existingJson = JSON.parse(
await fs.readFile(authFile, "utf-8"),
);
existingJson.tokens.id_token = currentIdToken;
existingJson.tokens.refresh_token = refreshData.refresh_token;
existingJson.last_refresh = new Date().toISOString();
await fs.writeFile(
authFile,
JSON.stringify(existingJson, null, 2),
{ mode: 0o600 },
);
} catch (err) {
// eslint-disable-next-line no-console
console.warn("Unable to update refresh token in auth file:", err);
}
}
} catch (err) {
// eslint-disable-next-line no-console
console.warn("Unable to refresh ID token via token-exchange:", err);
return;
}
}
// Confirm the subscription is active for more than 7 days
const subStart =
idClaims["https://api.openai.com/auth"]
?.chatgpt_subscription_active_start;
if (
typeof subStart === "string" &&
Date.now() - new Date(subStart).getTime() < 7 * 24 * 60 * 60 * 1000
) {
// eslint-disable-next-line no-console
console.warn(
"Sorry, your subscription must be active for more than 7 days to redeem credits.\nMore info: " +
chalk.dim("https://help.openai.com/en/articles/11381614") +
chalk.bold(
"\nPlease try again on " +
new Date(
new Date(subStart).getTime() + 7 * 24 * 60 * 60 * 1000,
).toLocaleDateString() +
" " +
new Date(
new Date(subStart).getTime() + 7 * 24 * 60 * 60 * 1000,
).toLocaleTimeString(),
),
);
return;
}
const completed = Boolean(
idClaims["https://api.openai.com/auth"]?.completed_platform_onboarding,
);
const isOwner = Boolean(
idClaims["https://api.openai.com/auth"]?.is_org_owner,
);
const needsSetup = !completed && isOwner;
const planType = idClaims["https://api.openai.com/auth"]
?.chatgpt_plan_type as string | undefined;
if (needsSetup || !(planType === "plus" || planType === "pro")) {
// eslint-disable-next-line no-console
console.warn(
"Users with Plus or Pro subscriptions can redeem free API credits.\nMore info: " +
chalk.dim("https://help.openai.com/en/articles/11381614"),
);
return;
}
const apiHost =
issuer === "https://auth.openai.com"
? "https://api.openai.com"
: "https://api.openai.org";
const redeemRes = await fetch(`${apiHost}/v1/billing/redeem_credits`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ id_token: currentIdToken }),
});
if (!redeemRes.ok) {
// eslint-disable-next-line no-console
console.warn(
`Credit redemption request failed: ${redeemRes.status} ${redeemRes.statusText}`,
);
return;
}
try {
const redeemData = (await redeemRes.json()) as {
granted_chatgpt_subscriber_api_credits?: number;
};
const granted = redeemData?.granted_chatgpt_subscriber_api_credits ?? 0;
if (granted > 0) {
// eslint-disable-next-line no-console
console.log(
chalk.green(
`${chalk.bold(
`Thanks for being a ChatGPT ${
planType === "plus" ? "Plus" : "Pro"
} subscriber!`,
)}\nIf you haven't already redeemed, you should receive ${
planType === "plus" ? "$5" : "$50"
} in API credits\nCredits: ${chalk.dim(chalk.underline("https://platform.openai.com/settings/organization/billing/credit-grants"))}\nMore info: ${chalk.dim(chalk.underline("https://help.openai.com/en/articles/11381614"))}`,
),
);
} else {
// eslint-disable-next-line no-console
console.log(
chalk.green(
`It looks like no credits were granted:\n${JSON.stringify(
redeemData,
null,
2,
)}\nCredits: ${chalk.dim(
chalk.underline(
"https://platform.openai.com/settings/organization/billing/credit-grants",
),
)}\nMore info: ${chalk.dim(
chalk.underline("https://help.openai.com/en/articles/11381614"),
)}`,
),
);
}
} catch (parseErr) {
// eslint-disable-next-line no-console
console.warn("Unable to parse credit redemption response:", parseErr);
}
} catch (err) {
// eslint-disable-next-line no-console
console.warn("Unable to redeem ChatGPT subscriber API credits:", err);
}
}
async function handleCallback(
req: Request,
issuer: string,
oidcConfig: OidcConfiguration,
codeVerifier: string,
clientId: string,
redirectUri: string,
expectedState: string,
): Promise<{ access_token: string; success_url: string }> {
const state = (req.query as Record<string, string>)["state"] as
| string
| undefined;
if (!state || state !== expectedState) {
throw new Error("Invalid state parameter");
}
const code = (req.query as Record<string, string>)["code"] as
| string
| undefined;
if (!code) {
throw new Error("Missing authorization code");
}
const params = new URLSearchParams();
params.append("grant_type", "authorization_code");
params.append("code", code);
params.append("redirect_uri", redirectUri);
params.append("client_id", clientId);
params.append("code_verifier", codeVerifier);
oidcConfig.token_endpoint = `${issuer}/oauth/token`;
const tokenRes = await fetch(oidcConfig.token_endpoint, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
},
body: params.toString(),
});
if (!tokenRes.ok) {
throw new Error("Failed to exchange authorization code for tokens");
}
const tokenData = (await tokenRes.json()) as {
id_token: string;
access_token: string;
refresh_token: string;
};
const idTokenParts = tokenData.id_token.split(".");
if (idTokenParts.length !== 3) {
throw new Error("Invalid ID token");
}
const accessTokenParts = tokenData.access_token.split(".");
if (accessTokenParts.length !== 3) {
throw new Error("Invalid access token");
}
const idTokenClaims = JSON.parse(
Buffer.from(idTokenParts[1]!, "base64url").toString("utf8"),
) as IDTokenClaims;
const accessTokenClaims = JSON.parse(
Buffer.from(accessTokenParts[1]!, "base64url").toString("utf8"),
) as AccessTokenClaims;
const org_id = idTokenClaims["https://api.openai.com/auth"]?.organization_id;
if (!org_id) {
throw new Error("Missing organization in id_token claims");
}
const project_id = idTokenClaims["https://api.openai.com/auth"]?.project_id;
if (!project_id) {
throw new Error("Missing project in id_token claims");
}
const randomId = crypto.randomBytes(6).toString("hex");
const exchangeParams = new URLSearchParams({
grant_type: "urn:ietf:params:oauth:grant-type:token-exchange",
client_id: clientId,
requested_token: "openai-api-key",
subject_token: tokenData.id_token,
subject_token_type: "urn:ietf:params:oauth:token-type:id_token",
name: `Codex CLI [auto-generated] (${new Date().toISOString().slice(0, 10)}) [${
randomId
}]`,
});
const exchangeRes = await fetch(oidcConfig.token_endpoint, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
},
body: exchangeParams.toString(),
});
if (!exchangeRes.ok) {
throw new Error(`Failed to create API key: ${await exchangeRes.text()}`);
}
const exchanged = (await exchangeRes.json()) as {
access_token: string;
// NOTE(mbolin): I did not see the "key" property set in practice. Note
// this property is not read by the code.
key: string;
};
// Determine whether the organization still requires additional
// setup (e.g., adding a payment method) based on the ID-token
// claim provided by the auth service.
const completedOnboarding = Boolean(
idTokenClaims["https://api.openai.com/auth"]?.completed_platform_onboarding,
);
const chatgptPlanType =
accessTokenClaims["https://api.openai.com/auth"]?.chatgpt_plan_type;
const isOrgOwner = Boolean(
idTokenClaims["https://api.openai.com/auth"]?.is_org_owner,
);
const needsSetup = !completedOnboarding && isOrgOwner;
// Build the success URL on the same host/port as the callback and
// include the required query parameters for the front-end page.
// console.log("Redirecting to success page");
const successUrl = new URL("/success", redirectUri);
if (issuer === "https://auth.openai.com") {
successUrl.searchParams.set("platform_url", "https://platform.openai.com");
} else {
successUrl.searchParams.set(
"platform_url",
"https://platform.api.openai.org",
);
}
successUrl.searchParams.set("id_token", tokenData.id_token);
successUrl.searchParams.set("needs_setup", needsSetup ? "true" : "false");
successUrl.searchParams.set("org_id", org_id);
successUrl.searchParams.set("project_id", project_id);
successUrl.searchParams.set("plan_type", chatgptPlanType);
try {
const home = os.homedir();
const authDir = path.join(home, ".codex");
await fs.mkdir(authDir, { recursive: true });
const authFile = path.join(authDir, "auth.json");
const authData = {
tokens: tokenData,
last_refresh: new Date().toISOString(),
OPENAI_API_KEY: exchanged.access_token,
};
await fs.writeFile(authFile, JSON.stringify(authData, null, 2), {
mode: 0o600,
});
} catch (err) {
// eslint-disable-next-line no-console
console.warn("Unable to save auth file:", err);
}
await maybeRedeemCredits(
issuer,
clientId,
tokenData.refresh_token,
tokenData.id_token,
);
return {
access_token: exchanged.access_token,
success_url: successUrl.toString(),
};
}
const LOGIN_SUCCESS_HTML = String.raw`
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Sign into Codex CLI</title>
<link rel="icon" href='data:image/svg+xml,%3Csvg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none" viewBox="0 0 32 32"%3E%3Cpath stroke="%23000" stroke-linecap="round" stroke-width="2.484" d="M22.356 19.797H17.17M9.662 12.29l1.979 3.576a.511.511 0 0 1-.005.504l-1.974 3.409M30.758 16c0 8.15-6.607 14.758-14.758 14.758-8.15 0-14.758-6.607-14.758-14.758C1.242 7.85 7.85 1.242 16 1.242c8.15 0 14.758 6.608 14.758 14.758Z"/%3E%3C/svg%3E' type="image/svg+xml">
<style>
.container {
margin: auto;
height: 100%;
display: flex;
align-items: center;
justify-content: center;
position: relative;
background: white;
font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
}
.inner-container {
width: 400px;
flex-direction: column;
justify-content: flex-start;
align-items: center;
gap: 20px;
display: inline-flex;
}
.content {
align-self: stretch;
flex-direction: column;
justify-content: flex-start;
align-items: center;
gap: 20px;
display: flex;
}
.svg-wrapper {
position: relative;
}
.title {
text-align: center;
color: var(--text-primary, #0D0D0D);
font-size: 28px;
font-weight: 400;
line-height: 36.40px;
word-wrap: break-word;
}
.setup-box {
width: 600px;
padding: 16px 20px;
background: var(--bg-primary, white);
box-shadow: 0px 4px 16px rgba(0, 0, 0, 0.05);
border-radius: 16px;
outline: 1px var(--border-default, rgba(13, 13, 13, 0.10)) solid;
outline-offset: -1px;
justify-content: flex-start;
align-items: center;
gap: 16px;
display: inline-flex;
}
.setup-content {
flex: 1 1 0;
justify-content: flex-start;
align-items: center;
gap: 24px;
display: flex;
}
.setup-text {
flex: 1 1 0;
flex-direction: column;
justify-content: flex-start;
align-items: flex-start;
gap: 4px;
display: inline-flex;
}
.setup-title {
align-self: stretch;
color: var(--text-primary, #0D0D0D);
font-size: 14px;
font-weight: 510;
line-height: 20px;
word-wrap: break-word;
}
.setup-description {
align-self: stretch;
color: var(--text-secondary, #5D5D5D);
font-size: 14px;
font-weight: 400;
line-height: 20px;
word-wrap: break-word;
}
.redirect-box {
justify-content: flex-start;
align-items: center;
gap: 8px;
display: flex;
}
.close-button,
.redirect-button {
height: 28px;
padding: 8px 16px;
background: var(--interactive-bg-primary-default, #0D0D0D);
border-radius: 999px;
justify-content: center;
align-items: center;
gap: 4px;
display: flex;
}
.close-button,
.redirect-text {
color: var(--interactive-label-primary-default, white);
font-size: 14px;
font-weight: 510;
line-height: 20px;
word-wrap: break-word;
text-decoration: none;
}
</style>
</head>
<body>
<div class="container">
<div class="inner-container">
<div class="content">
<div data-svg-wrapper class="svg-wrapper">
<svg width="56" height="56" viewBox="0 0 56 56" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M4.6665 28.0003C4.6665 15.1137 15.1132 4.66699 27.9998 4.66699C40.8865 4.66699 51.3332 15.1137 51.3332 28.0003C51.3332 40.887 40.8865 51.3337 27.9998 51.3337C15.1132 51.3337 4.6665 40.887 4.6665 28.0003ZM37.5093 18.5088C36.4554 17.7672 34.9999 18.0203 34.2583 19.0742L24.8508 32.4427L20.9764 28.1808C20.1095 27.2272 18.6338 27.1569 17.6803 28.0238C16.7267 28.8906 16.6565 30.3664 17.5233 31.3199L23.3566 37.7366C23.833 38.2606 24.5216 38.5399 25.2284 38.4958C25.9353 38.4517 26.5838 38.089 26.9914 37.5098L38.0747 21.7598C38.8163 20.7059 38.5632 19.2504 37.5093 18.5088Z" fill="var(--green-400, #04B84C)"/>
</svg>
</div>
<div class="title">Signed in to Codex CLI</div>
</div>
<div class="close-box" style="display: none;">
<div class="setup-description">You may now close this page</div>
</div>
<div class="setup-box" style="display: none;">
<div class="setup-content">
<div class="setup-text">
<div class="setup-title">Finish setting up your API organization</div>
<div class="setup-description">Add a payment method to use your organization.</div>
</div>
<div class="redirect-box">
<div data-hasendicon="false" data-hasstarticon="false" data-ishovered="false" data-isinactive="false" data-ispressed="false" data-size="large" data-type="primary" class="redirect-button">
<div class="redirect-text">Redirecting in 3s...</div>
</div>
</div>
</div>
</div>
</div>
</div>
  <script>
    (function () {
      const params = new URLSearchParams(window.location.search);
      const needsSetup = params.get('needs_setup') === 'true';
      const platformUrl = params.get('platform_url') || 'https://platform.openai.com';
      const orgId = params.get('org_id');
      const projectId = params.get('project_id');
      const planType = params.get('plan_type');
      const idToken = params.get('id_token');

      // Show different message and optional redirect when setup is required
      if (needsSetup) {
        const setupBox = document.querySelector('.setup-box');
        setupBox.style.display = 'flex';

        const redirectUrlObj = new URL('/org-setup', platformUrl);
        redirectUrlObj.searchParams.set('p', planType);
        redirectUrlObj.searchParams.set('t', idToken);
        redirectUrlObj.searchParams.set('with_org', orgId);
        redirectUrlObj.searchParams.set('project_id', projectId);
        const redirectUrl = redirectUrlObj.toString();

        const message = document.querySelector('.redirect-text');
        let countdown = 3;
        function tick() {
          message.textContent = 'Redirecting in ' + countdown + 's…';
          if (countdown === 0) {
            window.location.replace(redirectUrl);
          } else {
            countdown -= 1;
            setTimeout(tick, 1000);
          }
        }
        tick();
      } else {
        const closeBox = document.querySelector('.close-box');
        closeBox.style.display = 'flex';
      }
    })();
  </script>
</body>
</html>`;
async function signInFlow(issuer: string, clientId: string): Promise<string> {
  const app = express();
  let codeVerifier = "";
  let redirectUri = "";
  let server: ReturnType<typeof app.listen>;
  const state = crypto.randomBytes(32).toString("hex");

  // Resolves once the OAuth callback has exchanged the authorization code
  // for a token; rejects if any step of the flow fails.
  const apiKeyPromise = new Promise<string>((resolve, reject) => {
    let _apiKey: string | undefined;

    app.get("/success", (_req: Request, res: Response) => {
      res.type("text/html").send(LOGIN_SUCCESS_HTML);
      if (_apiKey) {
        resolve(_apiKey);
      } else {
        // eslint-disable-next-line no-console
        console.error(
          "Sorry, it seems like the authentication flow failed. Please try again, or submit an issue on our GitHub if it continues.",
        );
        process.exit(1);
      }
    });

    // Callback route -------------------------------------------------------
    app.get("/auth/callback", async (req: Request, res: Response) => {
      try {
        const oidcConfig = await getOidcConfiguration(issuer);
        // Pin the endpoints to the issuer's fixed OAuth paths rather than
        // relying on the discovered values.
        oidcConfig.token_endpoint = `${issuer}/oauth/token`;
        oidcConfig.authorization_endpoint = `${issuer}/oauth/authorize`;
        const { access_token, success_url } = await handleCallback(
          req,
          issuer,
          oidcConfig,
          codeVerifier,
          clientId,
          redirectUri,
          state,
        );
        _apiKey = access_token;
        res.redirect(success_url);
      } catch (err) {
        reject(err);
      }
    });

    // Listen on the fixed localhost port 1455; if it is already taken,
    // another sign-in is probably in progress.
    server = app.listen(1455, "127.0.0.1", async () => {
      const address = server.address();
      if (typeof address === "string" || !address) {
        // eslint-disable-next-line no-console
        console.log(
          "It seems like you might already be trying to sign in (port :1455 already in use)",
        );
        process.exit(1);
        return;
      }
      const port = address.port;
      redirectUri = `http://localhost:${port}/auth/callback`;
      try {
        const oidcConfig = await getOidcConfiguration(issuer);
        oidcConfig.token_endpoint = `${issuer}/oauth/token`;
        oidcConfig.authorization_endpoint = `${issuer}/oauth/authorize`;
        const pkce = generatePKCECodes();
        codeVerifier = pkce.code_verifier;
        const authUrl = new URL(oidcConfig.authorization_endpoint);
        authUrl.searchParams.append("response_type", "code");
        authUrl.searchParams.append("client_id", clientId);
        authUrl.searchParams.append("redirect_uri", redirectUri);
        authUrl.searchParams.append(
          "scope",
          "openid profile email offline_access",
        );
        authUrl.searchParams.append("code_challenge", pkce.code_challenge);
        authUrl.searchParams.append("code_challenge_method", "S256");
        authUrl.searchParams.append("id_token_add_organizations", "true");
        authUrl.searchParams.append("state", state);
        // Open the browser immediately.
        open(authUrl.toString());
        setTimeout(() => {
          // eslint-disable-next-line no-console
          console.log(
            `\nOpening login page in your browser: ${authUrl.toString()}\n`,
          );
        }, 500);
      } catch (err) {
        reject(err);
      }
    });
  });

  // Ensure the server is closed afterwards.
  return apiKeyPromise.finally(() => {
    if (server) {
      server.close();
    }
  });
}
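
// NOTE: `handleCallback` is defined elsewhere in this file. As a rough,
// hypothetical sketch of the exchange it performs — a standard OAuth 2.0
// PKCE token request using global fetch, not the actual implementation —
// the real handler also verifies that the `state` echoed back matches
// the one sent with the authorization request:
async function exchangeCodeSketch(
  tokenEndpoint: string,
  code: string,
  clientId: string,
  redirectUri: string,
  codeVerifier: string,
): Promise<string> {
  const resp = await fetch(tokenEndpoint, {
    method: "POST",
    headers: { "Content-Type": "application/x-www-form-urlencoded" },
    body: new URLSearchParams({
      grant_type: "authorization_code",
      code,
      client_id: clientId,
      redirect_uri: redirectUri,
      code_verifier: codeVerifier,
    }),
  });
  if (!resp.ok) {
    throw new Error(`Token exchange failed with status ${resp.status}`);
  }
  const json = (await resp.json()) as { access_token: string };
  return json.access_token;
}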
export async function getApiKey(
  issuer: string,
  clientId: string,
  forceLogin: boolean = false,
): Promise<string> {
  if (!forceLogin && process.env["OPENAI_API_KEY"]) {
    return process.env["OPENAI_API_KEY"]!;
  }

  const choice = await promptUserForChoice();
  if (choice.type === "apikey") {
    process.env["OPENAI_API_KEY"] = choice.key;
    return choice.key;
  }

  // Browser-based sign-in: show a spinner until the local server hands
  // back a key, then persist it to the environment for this process.
  const spinner = render(<WaitingForAuth />);
  try {
    const key = await signInFlow(issuer, clientId);
    spinner.clear();
    spinner.unmount();
    process.env["OPENAI_API_KEY"] = key;
    return key;
  } catch (err) {
    spinner.clear();
    spinner.unmount();
    throw err;
  }
}

export { maybeRedeemCredits };
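
`generatePKCECodes` is not included in this excerpt; a minimal sketch of what it could look like, assuming Node's `crypto` module and the standard S256 challenge method from RFC 7636 (the helper names here are illustrative, not the file's actual code):

import crypto from "node:crypto";

// Base64url encoding without padding, as PKCE requires.
function base64url(buf: Buffer): string {
  return buf
    .toString("base64")
    .replace(/\+/g, "-")
    .replace(/\//g, "_")
    .replace(/=+$/, "");
}

function generatePKCECodes(): { code_verifier: string; code_challenge: string } {
  // 64 random bytes encode to an 86-character verifier, inside the
  // 43-128 character range the spec allows.
  const code_verifier = base64url(crypto.randomBytes(64));
  const code_challenge = base64url(
    crypto.createHash("sha256").update(code_verifier).digest(),
  );
  return { code_verifier, code_challenge };
}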

@@ -1,4 +1,4 @@
-import { execSync, execFileSync } from "node:child_process";
+import { execSync } from "node:child_process";
 
 // The objects thrown by `child_process.execSync()` are `Error` instances that
 // include additional, undocumented properties such as `status` (exit code) and
@@ -89,18 +89,12 @@ export function getGitDiff(): {
       //
       // `git diff --color --no-index /dev/null <file>` exits with status 1
       // when differences are found, so we capture stdout from the thrown
-      // error object instead of letting it propagate. Using `execFileSync`
-      // avoids shell interpolation issues with special characters in the
-      // path.
-      execFileSync(
-        "git",
-        ["diff", "--color", "--no-index", "--", nullDevice, file],
-        {
-          encoding: "utf8",
-          stdio: ["ignore", "pipe", "ignore"],
-          maxBuffer: 10 * 1024 * 1024,
-        },
-      );
+      // error object instead of letting it propagate.
+      execSync(`git diff --color --no-index -- "${nullDevice}" "${file}"`, {
+        encoding: "utf8",
+        stdio: ["ignore", "pipe", "ignore"],
+        maxBuffer: 10 * 1024 * 1024,
+      });
     } catch (err) {
       if (
         isExecSyncError(err) &&
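
The pattern in this hunk — letting `git diff --no-index` throw and reading the diff off the error object — can be shown standalone. A sketch only: the helper name is hypothetical, and `status`/`stdout` are the undocumented `execSync` error properties described in the comment above.

import { execSync } from "node:child_process";

// `git diff --no-index` exits 1 when the files differ, which makes
// execSync throw even though the diff was produced successfully.
function diffAgainstEmpty(file: string): string {
  try {
    // Identical contents: git exits 0 and stdout comes back directly.
    return execSync(`git diff --color --no-index -- /dev/null "${file}"`, {
      encoding: "utf8",
      stdio: ["ignore", "pipe", "ignore"],
    });
  } catch (err) {
    const e = err as { status?: number; stdout?: string };
    if (e.status === 1 && typeof e.stdout === "string") {
      return e.stdout; // exit code 1 only means "differences found"
    }
    throw err; // anything else is a real failure
  }
}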

@@ -19,10 +19,6 @@ export const openAiModelInfo = {
     label: "o3 (2025-04-16)",
     maxContextLength: 200000,
   },
-  "codex-mini-latest": {
-    label: "codex-mini-latest",
-    maxContextLength: 200000,
-  },
   "o4-mini": {
     label: "o4 Mini",
     maxContextLength: 200000,
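
For context, consumers index this table by model id; a minimal lookup helper might look like this (hypothetical, assuming a `SupportedModelId` union over the table's keys):

// Look up the advertised context window for a supported model id.
function maxContextLengthFor(model: SupportedModelId): number {
  return openAiModelInfo[model].maxContextLength;
}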

@@ -1,9 +1,14 @@
 import type { ResponseItem } from "openai/resources/responses/responses.mjs";
 
 import { approximateTokensUsed } from "./approximate-tokens-used.js";
-import { getApiKey } from "./config.js";
+import {
+  OPENAI_ORGANIZATION,
+  OPENAI_PROJECT,
+  getBaseUrl,
+  getApiKey,
+} from "./config";
 import { type SupportedModelId, openAiModelInfo } from "./model-info.js";
-import { createOpenAIClient } from "./openai-client.js";
+import OpenAI from "openai";
 
 const MODEL_LIST_TIMEOUT_MS = 2_000; // 2 seconds
 export const RECOMMENDED_MODELS: Array<string> = ["o4-mini", "o3"];
@@ -22,7 +27,19 @@ async function fetchModels(provider: string): Promise<Array<string>> {
   }
 
   try {
-    const openai = createOpenAIClient({ provider });
+    const headers: Record<string, string> = {};
+    if (OPENAI_ORGANIZATION) {
+      headers["OpenAI-Organization"] = OPENAI_ORGANIZATION;
+    }
+    if (OPENAI_PROJECT) {
+      headers["OpenAI-Project"] = OPENAI_PROJECT;
+    }
+
+    const openai = new OpenAI({
+      apiKey: getApiKey(provider),
+      baseURL: getBaseUrl(provider),
+      defaultHeaders: headers,
+    });
     const list = await openai.models.list();
     const models: Array<string> = [];
     for await (const model of list as AsyncIterable<{ id?: string }>) {
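
`MODEL_LIST_TIMEOUT_MS` is declared above but its consumer falls outside this excerpt; one plausible shape — shown only as a sketch, not the file's actual code — is to race the listing against a timer and fall back to `RECOMMENDED_MODELS`:

// Sketch: bound the model listing at MODEL_LIST_TIMEOUT_MS, falling back
// to the recommended set if the API is slow to respond.
async function fetchModelsWithTimeout(openai: OpenAI): Promise<Array<string>> {
  const timer = new Promise<Array<string>>((resolve) =>
    setTimeout(() => resolve(RECOMMENDED_MODELS), MODEL_LIST_TIMEOUT_MS),
  );
  const fetchAll = (async () => {
    const models: Array<string> = [];
    const list = await openai.models.list();
    for await (const model of list as AsyncIterable<{ id?: string }>) {
      if (model.id) {
        models.push(model.id);
      }
    }
    return models.sort();
  })();
  return Promise.race([fetchAll, timer]);
}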

@@ -1,51 +0,0 @@
import type { AppConfig } from "./config.js";
import {
getBaseUrl,
getApiKey,
AZURE_OPENAI_API_VERSION,
OPENAI_TIMEOUT_MS,
OPENAI_ORGANIZATION,
OPENAI_PROJECT,
} from "./config.js";
import OpenAI, { AzureOpenAI } from "openai";
type OpenAIClientConfig = {
provider: string;
};
/**
* Creates an OpenAI client instance based on the provided configuration.
* Handles both standard OpenAI and Azure OpenAI configurations.
*
* @param config The configuration containing provider information
* @returns An instance of either OpenAI or AzureOpenAI client
*/
export function createOpenAIClient(
config: OpenAIClientConfig | AppConfig,
): OpenAI | AzureOpenAI {
const headers: Record<string, string> = {};
if (OPENAI_ORGANIZATION) {
headers["OpenAI-Organization"] = OPENAI_ORGANIZATION;
}
if (OPENAI_PROJECT) {
headers["OpenAI-Project"] = OPENAI_PROJECT;
}
if (config.provider?.toLowerCase() === "azure") {
return new AzureOpenAI({
apiKey: getApiKey(config.provider),
baseURL: getBaseUrl(config.provider),
apiVersion: AZURE_OPENAI_API_VERSION,
timeout: OPENAI_TIMEOUT_MS,
defaultHeaders: headers,
});
}
return new OpenAI({
apiKey: getApiKey(config.provider),
baseURL: getBaseUrl(config.provider),
timeout: OPENAI_TIMEOUT_MS,
defaultHeaders: headers,
});
}
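
As a usage note for the deleted helper (a sketch, assuming it is imported alongside the config module it references):

// "azure" routes through AzureOpenAI and applies AZURE_OPENAI_API_VERSION;
// any other provider gets a standard OpenAI client with the same
// org/project headers and timeout.
const azureClient = createOpenAIClient({ provider: "azure" });
const openaiClient = createOpenAIClient({ provider: "openai" });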

@@ -35,7 +35,6 @@ export function parseToolCallOutput(toolCallOutput: string): {
 export type CommandReviewDetails = {
   cmd: Array<string>;
   cmdReadableText: string;
-  workdir: string | undefined;
 };
 
 /**
@@ -52,13 +51,12 @@ export function parseToolCall(
     return undefined;
   }
 
-  const { cmd, workdir } = toolCallArgs;
+  const { cmd } = toolCallArgs;
   const cmdReadableText = formatCommandForDisplay(cmd);
 
   return {
     cmd,
     cmdReadableText,
-    workdir,
   };
 }
 
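
After this change, command review details no longer carry a `workdir`; the shape callers receive is just:

// Illustrative value only — cmd and cmdReadableText as produced by
// formatCommandForDisplay in the real code.
const details: CommandReviewDetails = {
  cmd: ["ls", "-la"],
  cmdReadableText: "ls -la",
};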

@@ -12,11 +12,6 @@ export const providers: Record<
     baseURL: "https://openrouter.ai/api/v1",
     envKey: "OPENROUTER_API_KEY",
   },
-  azure: {
-    name: "AzureOpenAI",
-    baseURL: "https://YOUR_PROJECT_NAME.openai.azure.com/openai",
-    envKey: "AZURE_OPENAI_API_KEY",
-  },
   gemini: {
     name: "Gemini",
     baseURL: "https://generativelanguage.googleapis.com/v1beta/openai",
@@ -47,9 +42,4 @@
     baseURL: "https://api.groq.com/openai/v1",
     envKey: "GROQ_API_KEY",
   },
-  arceeai: {
-    name: "ArceeAI",
-    baseURL: "https://conductor.arcee.ai/v1",
-    envKey: "ARCEEAI_API_KEY",
-  },
 };
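
Each provider entry pairs a base URL with the environment variable that holds its key; a minimal lookup over this record (hypothetical helper, not part of the diff) would be:

// Resolve a provider's API key from its registered environment variable.
function getProviderApiKey(provider: string): string | undefined {
  const info = providers[provider];
  return info ? process.env[info.envKey] : undefined;
}

// e.g. getProviderApiKey("groq") reads process.env["GROQ_API_KEY"].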
