Mirror of https://github.com/openai/codex.git (synced 2026-02-03 07:23:39 +00:00)

Compare commits: concurrent...cost-track (5 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | ba45d2f601 |  |
|  | b051fcb804 |  |
|  | ada5e2249a |  |
|  | 0613fd35e2 |  |
|  | cdc0897a25 |  |

@@ -1 +0,0 @@
iTerm

@@ -1,6 +0,0 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser
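
For reference, per the codespell docs linked above this configuration is picked up automatically, so a plain invocation from the repository root is enough (a minimal sketch):

```shell
# Run from the repo root; codespell reads .codespellrc on its own.
codespell
```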

@@ -1,27 +0,0 @@
FROM ubuntu:24.04

ARG DEBIAN_FRONTEND=noninteractive
# enable 'universe' because musl-tools & clang live there
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        software-properties-common && \
    add-apt-repository --yes universe

# now install build deps
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential curl git ca-certificates \
        pkg-config clang musl-tools libssl-dev just && \
    rm -rf /var/lib/apt/lists/*

# Ubuntu 24.04 ships with user 'ubuntu' already created with UID 1000.
USER ubuntu

# install Rust + musl target as dev user
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal && \
    ~/.cargo/bin/rustup target add aarch64-unknown-linux-musl && \
    ~/.cargo/bin/rustup component add clippy rustfmt

ENV PATH="/home/ubuntu/.cargo/bin:${PATH}"

WORKDIR /workspace

@@ -1,30 +0,0 @@
# Containerized Development

We provide the following options to facilitate Codex development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.

## Docker

To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:

```shell
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
docker build --platform=linux/amd64 -t "$CODEX_DOCKER_IMAGE_NAME" ./.devcontainer
docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64 -v "$PWD":/workspace -w /workspace/codex-rs "$CODEX_DOCKER_IMAGE_NAME"
```

Note that `/workspace/target` will contain the binaries built for your host platform, so we include `-e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64` in the `docker run` command so that the binaries built inside your container are written to a separate directory.

For arm64, specify `--platform=linux/arm64` instead for both `docker build` and `docker run`.
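
As a concrete sketch (derived from the x64 commands above and the `target-arm64` directory used by `devcontainer.json`, not copied from the original doc), the arm64 variant would look like:

```shell
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
docker build --platform=linux/arm64 -t "$CODEX_DOCKER_IMAGE_NAME" ./.devcontainer
docker run --platform=linux/arm64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-rs/target-arm64 -v "$PWD":/workspace -w /workspace/codex-rs "$CODEX_DOCKER_IMAGE_NAME"
```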

Currently, the `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.

## VS Code

VS Code recognizes the `devcontainer.json` file and gives you the option to develop Codex in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.

From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):

```shell
cargo build --target aarch64-unknown-linux-musl
cargo build --target aarch64-unknown-linux-gnu
```

@@ -1,27 +0,0 @@
{
  "name": "Codex",
  "build": {
    "dockerfile": "Dockerfile",
    "context": "..",
    "platform": "linux/arm64"
  },

  /* Force VS Code to run the container as arm64 in
     case your host is x86 (or vice-versa). */
  "runArgs": ["--platform=linux/arm64"],

  "containerEnv": {
    "RUST_BACKTRACE": "1",
    "CARGO_TARGET_DIR": "${containerWorkspaceFolder}/codex-rs/target-arm64"
  },

  "remoteUser": "ubuntu",
  "customizations": {
    "vscode": {
      "settings": {
        "terminal.integrated.defaultProfile.linux": "bash"
      },
      "extensions": ["rust-lang.rust-analyzer", "tamasfe.even-better-toml"]
    }
  }
}

3 .github/ISSUE_TEMPLATE/2-bug-report.yml vendored

@@ -19,14 +19,13 @@ body:
    id: version
    attributes:
      label: What version of Codex is running?
      description: Copy the output of `codex --version`
      description: Copy the output of `codex --revision`
  - type: input
    id: model
    attributes:
      label: Which model were you using?
      description: Like `gpt-4.1`, `o4-mini`, `o3`, etc.
  - type: input
    id: platform
    attributes:
      label: What platform is your computer?
      description: |

1 .github/actions/codex/.gitignore vendored

@@ -1 +0,0 @@
/node_modules/

8 .github/actions/codex/.prettierrc.toml vendored

@@ -1,8 +0,0 @@
printWidth = 80
quoteProps = "consistent"
semi = true
tabWidth = 2
trailingComma = "all"

# Preserve existing behavior for markdown/text wrapping.
proseWrap = "preserve"

140 .github/actions/codex/README.md vendored

@@ -1,140 +0,0 @@
# openai/codex-action

`openai/codex-action` is a GitHub Action that facilitates the use of [Codex](https://github.com/openai/codex) on GitHub issues and pull requests. Using the action, you associate **labels** with prompts so that Codex runs with the appropriate prompt for the given context. Codex will respond by posting comments or creating PRs, whichever you specify!

Here is a sample workflow that uses `openai/codex-action`:

```yaml
name: Codex

on:
  issues:
    types: [opened, labeled]
  pull_request:
    branches: [main]
    types: [labeled]

jobs:
  codex:
    if: ... # optional, but can be effective in conserving CI resources
    runs-on: ubuntu-latest
    # TODO(mbolin): Need to verify if/when `write` is necessary.
    permissions:
      contents: write
      issues: write
      pull-requests: write
    steps:
      # By default, Codex runs network disabled using --full-auto, so perform
      # any setup that requires network (such as installing dependencies)
      # before openai/codex-action.
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Run Codex
        uses: openai/codex-action@latest
        with:
          openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
          github_token: ${{ secrets.GITHUB_TOKEN }}
```

See sample usage in [`codex.yml`](../../workflows/codex.yml).

## Triggering the Action

Using the sample workflow above, we have:

```yaml
on:
  issues:
    types: [opened, labeled]
  pull_request:
    branches: [main]
    types: [labeled]
```

which means our workflow will be triggered when any of the following events occur:

- a label is added to an issue
- a label is added to a pull request against the `main` branch

### Label-Based Triggers

To define a GitHub label that should trigger Codex, create a file named `.github/codex/labels/LABEL-NAME.md` in your repository where `LABEL-NAME` is the name of the label. The content of the file is the prompt template to use when the label is added (see more on [Prompt Template Variables](#prompt-template-variables) below).

For example, if the file `.github/codex/labels/codex-review.md` exists, then:

- Adding the `codex-review` label will trigger the workflow containing the `openai/codex-action` GitHub Action.
- When `openai/codex-action` starts, it will replace the `codex-review` label with `codex-review-in-progress`.
- When `openai/codex-action` is finished, it will replace the `codex-review-in-progress` label with `codex-review-completed`.

If Codex sees that either `codex-review-in-progress` or `codex-review-completed` is already present, it will not perform the action.

As determined by the [default config](./src/default-label-config.ts), Codex will act on the following labels by default:

- Adding the `codex-review` label to a pull request will have Codex review the PR and post its review on the PR as a comment.
- Adding the `codex-triage` label to an issue will have Codex investigate the issue and report its findings as a comment.
- Adding the `codex-issue-fix` label to an issue will have Codex attempt to fix the issue and create a PR with the fix, if any.
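
For illustration only (this exact file does not ship with the action), a `.github/codex/labels/codex-review.md` prompt template could look something like the following, using the template variables documented under [Prompt Template Variables](#prompt-template-variables):

```markdown
Review this PR and respond with a very concise final message, formatted in Markdown.

{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON payload for the event that
triggered this workflow, including the `base` and `head` refs that define this PR.
```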

## Action Inputs

The `openai/codex-action` GitHub Action takes the following inputs:

### `openai_api_key` (required)

Set your `OPENAI_API_KEY` as a [repository secret](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions). See **Secrets and variables** then **Actions** in the settings for your GitHub repo.

Note that the secret name does not have to be `OPENAI_API_KEY`. For example, you might want to name it `CODEX_OPENAI_API_KEY` and then configure it on `openai/codex-action` as follows:

```yaml
openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
```

### `github_token` (required)

This is required so that Codex can post a comment or create a PR. Set this value on the action as follows:

```yaml
github_token: ${{ secrets.GITHUB_TOKEN }}
```

### `codex_args`

A whitespace-delimited list of arguments to pass to Codex. Defaults to `--full-auto`, but if you want to override the default model to use `o3`:

```yaml
codex_args: "--full-auto --model o3"
```

For more complex configurations, use the `codex_home` input.

### `codex_home`

If set, the value to use for the `$CODEX_HOME` environment variable when running Codex. As explained [in the docs](https://github.com/openai/codex/tree/main/codex-rs#readme), this folder can contain the `config.toml` to configure Codex, custom instructions, and log files.

This should be a relative path within your repo.
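
For example, a sketch of wiring this up, assuming a hypothetical `.github/codex/home/` directory in your repo that contains a `config.toml`:

```yaml
- name: Run Codex
  uses: openai/codex-action@latest
  with:
    openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
    github_token: ${{ secrets.GITHUB_TOKEN }}
    codex_home: .github/codex/home
```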

## Prompt Template Variables

Prompt templates (such as the label template files described above, or templates supplied via `"prompt"` and `"promptPath"`) are populated and passed to Codex in response to certain events. All template variables are of the form `{CODEX_ACTION_...}` and the supported values are defined below.

### `CODEX_ACTION_ISSUE_TITLE`

If the action was triggered on a GitHub issue, this is the issue title.

Specifically, it is read as `.issue.title` from the `$GITHUB_EVENT_PATH`.

### `CODEX_ACTION_ISSUE_BODY`

If the action was triggered on a GitHub issue, this is the issue body.

Specifically, it is read as `.issue.body` from the `$GITHUB_EVENT_PATH`.

### `CODEX_ACTION_GITHUB_EVENT_PATH`

The value of the `$GITHUB_EVENT_PATH` environment variable, which is the path to the file that contains the JSON payload for the event that triggered the workflow. Codex can use `jq` to read only the fields of interest from this file.
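
For instance, a prompt might have Codex extract just the fields it needs with a command along these lines (an illustrative sketch; the available fields depend on the triggering event):

```shell
# Pull only the PR's base and head refs out of the event payload.
jq -r '.pull_request.base.ref, .pull_request.head.ref' "$GITHUB_EVENT_PATH"
```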

### `CODEX_ACTION_PR_DIFF`

If the action was triggered on a pull request, this is the diff between the base and head commits of the PR. It is the output from `git diff`.

Note that the content of the diff could be quite large, so it is generally safer to point Codex at `CODEX_ACTION_GITHUB_EVENT_PATH` and let it decide how it wants to explore the change.

127 .github/actions/codex/action.yml vendored
@@ -1,127 +0,0 @@
|
||||
name: "Codex [reusable action]"
|
||||
description: "A reusable action that runs a Codex model."
|
||||
|
||||
inputs:
|
||||
openai_api_key:
|
||||
description: "The value to use as the OPENAI_API_KEY environment variable when running Codex."
|
||||
required: true
|
||||
trigger_phrase:
|
||||
description: "Text to trigger Codex from a PR/issue body or comment."
|
||||
required: false
|
||||
default: ""
|
||||
github_token:
|
||||
description: "Token so Codex can comment on the PR or issue."
|
||||
required: true
|
||||
codex_args:
|
||||
description: "A whitespace-delimited list of arguments to pass to Codex. Due to limitations in YAML, arguments with spaces are not supported. For more complex configurations, use the `codex_home` input."
|
||||
required: false
|
||||
default: "--config hide_agent_reasoning=true --full-auto"
|
||||
codex_home:
|
||||
description: "Value to use as the CODEX_HOME environment variable when running Codex."
|
||||
required: false
|
||||
codex_release_tag:
|
||||
description: "The release tag of the Codex model to run, e.g., 'rust-v0.3.0'. Defaults to the latest release."
|
||||
required: false
|
||||
default: ""
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
# Do this in Bash so we do not even bother to install Bun if the sender does
|
||||
# not have write access to the repo.
|
||||
- name: Verify user has write access to the repo.
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
PERMISSION=$(gh api \
|
||||
"/repos/${GITHUB_REPOSITORY}/collaborators/${{ github.event.sender.login }}/permission" \
|
||||
| jq -r '.permission')
|
||||
|
||||
if [[ "$PERMISSION" != "admin" && "$PERMISSION" != "write" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Download Codex
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Determine OS/arch and corresponding Codex artifact name.
|
||||
uname_s=$(uname -s)
|
||||
uname_m=$(uname -m)
|
||||
|
||||
case "$uname_s" in
|
||||
Linux*) os="linux" ;;
|
||||
Darwin*) os="apple-darwin" ;;
|
||||
*) echo "Unsupported operating system: $uname_s"; exit 1 ;;
|
||||
esac
|
||||
|
||||
case "$uname_m" in
|
||||
x86_64*) arch="x86_64" ;;
|
||||
arm64*|aarch64*) arch="aarch64" ;;
|
||||
*) echo "Unsupported architecture: $uname_m"; exit 1 ;;
|
||||
esac
|
||||
|
||||
# linux builds differentiate between musl and gnu.
|
||||
if [[ "$os" == "linux" ]]; then
|
||||
if [[ "$arch" == "x86_64" ]]; then
|
||||
triple="${arch}-unknown-linux-musl"
|
||||
else
|
||||
# Only other supported linux build is aarch64 gnu.
|
||||
triple="${arch}-unknown-linux-gnu"
|
||||
fi
|
||||
else
|
||||
# macOS
|
||||
triple="${arch}-apple-darwin"
|
||||
fi
|
||||
|
||||
# Note that if we start baking version numbers into the artifact name,
|
||||
# we will need to update this action.yml file to match.
|
||||
artifact="codex-exec-${triple}.tar.gz"
|
||||
|
||||
TAG_ARG="${{ inputs.codex_release_tag }}"
|
||||
# The usage is `gh release download [<tag>] [flags]`, so if TAG_ARG
|
||||
# is empty, we do not pass it so we can default to the latest release.
|
||||
gh release download ${TAG_ARG:+$TAG_ARG} --repo openai/codex \
|
||||
--pattern "$artifact" --output - \
|
||||
| tar xzO > /usr/local/bin/codex-exec
|
||||
chmod +x /usr/local/bin/codex-exec
|
||||
|
||||
# Display Codex version to confirm binary integrity; ensure we point it
|
||||
# at the checked-out repository via --cd so that any subsequent commands
|
||||
# use the correct working directory.
|
||||
codex-exec --cd "$GITHUB_WORKSPACE" --version
|
||||
|
||||
- name: Install Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: 1.2.11
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
cd ${{ github.action_path }}
|
||||
bun install --production
|
||||
|
||||
- name: Run Codex
|
||||
shell: bash
|
||||
run: bun run ${{ github.action_path }}/src/main.ts
|
||||
# Process args plus environment variables often have a max of 128 KiB,
|
||||
# so we should fit within that limit?
|
||||
env:
|
||||
INPUT_CODEX_ARGS: ${{ inputs.codex_args || '' }}
|
||||
INPUT_CODEX_HOME: ${{ inputs.codex_home || ''}}
|
||||
INPUT_TRIGGER_PHRASE: ${{ inputs.trigger_phrase || '' }}
|
||||
OPENAI_API_KEY: ${{ inputs.openai_api_key }}
|
||||
GITHUB_TOKEN: ${{ inputs.github_token }}
|
||||
GITHUB_EVENT_ACTION: ${{ github.event.action || '' }}
|
||||
GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name || '' }}
|
||||
GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number || '' }}
|
||||
GITHUB_EVENT_ISSUE_BODY: ${{ github.event.issue.body || '' }}
|
||||
GITHUB_EVENT_REVIEW_BODY: ${{ github.event.review.body || '' }}
|
||||
GITHUB_EVENT_COMMENT_BODY: ${{ github.event.comment.body || '' }}
|
||||
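
To tie these inputs together, a workflow step invoking this reusable action might look roughly like the following (an illustrative sketch assembled from the inputs and defaults documented above, not an excerpt from the repository):

```yaml
- name: Run Codex
  uses: openai/codex-action@latest
  with:
    openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
    github_token: ${{ secrets.GITHUB_TOKEN }}
    trigger_phrase: "@codex"       # illustrative value; the default is ""
    codex_release_tag: rust-v0.3.0 # optional; defaults to the latest release
```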

89 .github/actions/codex/bun.lock vendored
@@ -1,89 +0,0 @@
|
||||
{
|
||||
"lockfileVersion": 1,
|
||||
"workspaces": {
|
||||
"": {
|
||||
"name": "codex-action",
|
||||
"dependencies": {
|
||||
"@actions/core": "^1.11.1",
|
||||
"@actions/github": "^6.0.1",
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/bun": "^1.2.18",
|
||||
"@types/node": "^24.0.13",
|
||||
"prettier": "^3.6.2",
|
||||
"typescript": "^5.8.3",
|
||||
},
|
||||
},
|
||||
},
|
||||
"packages": {
|
||||
"@actions/core": ["@actions/core@1.11.1", "", { "dependencies": { "@actions/exec": "^1.1.1", "@actions/http-client": "^2.0.1" } }, "sha512-hXJCSrkwfA46Vd9Z3q4cpEpHB1rL5NG04+/rbqW9d3+CSvtB1tYe8UTpAlixa1vj0m/ULglfEK2UKxMGxCxv5A=="],
|
||||
|
||||
"@actions/exec": ["@actions/exec@1.1.1", "", { "dependencies": { "@actions/io": "^1.0.1" } }, "sha512-+sCcHHbVdk93a0XT19ECtO/gIXoxvdsgQLzb2fE2/5sIZmWQuluYyjPQtrtTHdU1YzTZ7bAPN4sITq2xi1679w=="],
|
||||
|
||||
"@actions/github": ["@actions/github@6.0.1", "", { "dependencies": { "@actions/http-client": "^2.2.0", "@octokit/core": "^5.0.1", "@octokit/plugin-paginate-rest": "^9.2.2", "@octokit/plugin-rest-endpoint-methods": "^10.4.0", "@octokit/request": "^8.4.1", "@octokit/request-error": "^5.1.1", "undici": "^5.28.5" } }, "sha512-xbZVcaqD4XnQAe35qSQqskb3SqIAfRyLBrHMd/8TuL7hJSz2QtbDwnNM8zWx4zO5l2fnGtseNE3MbEvD7BxVMw=="],
|
||||
|
||||
"@actions/http-client": ["@actions/http-client@2.2.3", "", { "dependencies": { "tunnel": "^0.0.6", "undici": "^5.25.4" } }, "sha512-mx8hyJi/hjFvbPokCg4uRd4ZX78t+YyRPtnKWwIl+RzNaVuFpQHfmlGVfsKEJN8LwTCvL+DfVgAM04XaHkm6bA=="],
|
||||
|
||||
"@actions/io": ["@actions/io@1.1.3", "", {}, "sha512-wi9JjgKLYS7U/z8PPbco+PvTb/nRWjeoFlJ1Qer83k/3C5PHQi28hiVdeE2kHXmIL99mQFawx8qt/JPjZilJ8Q=="],
|
||||
|
||||
"@fastify/busboy": ["@fastify/busboy@2.1.1", "", {}, "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA=="],
|
||||
|
||||
"@octokit/auth-token": ["@octokit/auth-token@4.0.0", "", {}, "sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA=="],
|
||||
|
||||
"@octokit/core": ["@octokit/core@5.2.1", "", { "dependencies": { "@octokit/auth-token": "^4.0.0", "@octokit/graphql": "^7.1.0", "@octokit/request": "^8.4.1", "@octokit/request-error": "^5.1.1", "@octokit/types": "^13.0.0", "before-after-hook": "^2.2.0", "universal-user-agent": "^6.0.0" } }, "sha512-dKYCMuPO1bmrpuogcjQ8z7ICCH3FP6WmxpwC03yjzGfZhj9fTJg6+bS1+UAplekbN2C+M61UNllGOOoAfGCrdQ=="],
|
||||
|
||||
"@octokit/endpoint": ["@octokit/endpoint@9.0.6", "", { "dependencies": { "@octokit/types": "^13.1.0", "universal-user-agent": "^6.0.0" } }, "sha512-H1fNTMA57HbkFESSt3Y9+FBICv+0jFceJFPWDePYlR/iMGrwM5ph+Dd4XRQs+8X+PUFURLQgX9ChPfhJ/1uNQw=="],
|
||||
|
||||
"@octokit/graphql": ["@octokit/graphql@7.1.1", "", { "dependencies": { "@octokit/request": "^8.4.1", "@octokit/types": "^13.0.0", "universal-user-agent": "^6.0.0" } }, "sha512-3mkDltSfcDUoa176nlGoA32RGjeWjl3K7F/BwHwRMJUW/IteSa4bnSV8p2ThNkcIcZU2umkZWxwETSSCJf2Q7g=="],
|
||||
|
||||
"@octokit/openapi-types": ["@octokit/openapi-types@24.2.0", "", {}, "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg=="],
|
||||
|
||||
"@octokit/plugin-paginate-rest": ["@octokit/plugin-paginate-rest@9.2.2", "", { "dependencies": { "@octokit/types": "^12.6.0" }, "peerDependencies": { "@octokit/core": "5" } }, "sha512-u3KYkGF7GcZnSD/3UP0S7K5XUFT2FkOQdcfXZGZQPGv3lm4F2Xbf71lvjldr8c1H3nNbF+33cLEkWYbokGWqiQ=="],
|
||||
|
||||
"@octokit/plugin-rest-endpoint-methods": ["@octokit/plugin-rest-endpoint-methods@10.4.1", "", { "dependencies": { "@octokit/types": "^12.6.0" }, "peerDependencies": { "@octokit/core": "5" } }, "sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg=="],
|
||||
|
||||
"@octokit/request": ["@octokit/request@8.4.1", "", { "dependencies": { "@octokit/endpoint": "^9.0.6", "@octokit/request-error": "^5.1.1", "@octokit/types": "^13.1.0", "universal-user-agent": "^6.0.0" } }, "sha512-qnB2+SY3hkCmBxZsR/MPCybNmbJe4KAlfWErXq+rBKkQJlbjdJeS85VI9r8UqeLYLvnAenU8Q1okM/0MBsAGXw=="],
|
||||
|
||||
"@octokit/request-error": ["@octokit/request-error@5.1.1", "", { "dependencies": { "@octokit/types": "^13.1.0", "deprecation": "^2.0.0", "once": "^1.4.0" } }, "sha512-v9iyEQJH6ZntoENr9/yXxjuezh4My67CBSu9r6Ve/05Iu5gNgnisNWOsoJHTP6k0Rr0+HQIpnH+kyammu90q/g=="],
|
||||
|
||||
"@octokit/types": ["@octokit/types@13.10.0", "", { "dependencies": { "@octokit/openapi-types": "^24.2.0" } }, "sha512-ifLaO34EbbPj0Xgro4G5lP5asESjwHracYJvVaPIyXMuiuXLlhic3S47cBdTb+jfODkTE5YtGCLt3Ay3+J97sA=="],
|
||||
|
||||
"@types/bun": ["@types/bun@1.2.18", "", { "dependencies": { "bun-types": "1.2.18" } }, "sha512-Xf6RaWVheyemaThV0kUfaAUvCNokFr+bH8Jxp+tTZfx7dAPA8z9ePnP9S9+Vspzuxxx9JRAXhnyccRj3GyCMdQ=="],
|
||||
|
||||
"@types/node": ["@types/node@24.0.13", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-Qm9OYVOFHFYg3wJoTSrz80hoec5Lia/dPp84do3X7dZvLikQvM1YpmvTBEdIr/e+U8HTkFjLHLnl78K/qjf+jQ=="],
|
||||
|
||||
"@types/react": ["@types/react@19.1.8", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g=="],
|
||||
|
||||
"before-after-hook": ["before-after-hook@2.2.3", "", {}, "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ=="],
|
||||
|
||||
"bun-types": ["bun-types@1.2.18", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-04+Eha5NP7Z0A9YgDAzMk5PHR16ZuLVa83b26kH5+cp1qZW4F6FmAURngE7INf4tKOvCE69vYvDEwoNl1tGiWw=="],
|
||||
|
||||
"csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="],
|
||||
|
||||
"deprecation": ["deprecation@2.3.1", "", {}, "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ=="],
|
||||
|
||||
"once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
|
||||
|
||||
"prettier": ["prettier@3.6.2", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ=="],
|
||||
|
||||
"tunnel": ["tunnel@0.0.6", "", {}, "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg=="],
|
||||
|
||||
"typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="],
|
||||
|
||||
"undici": ["undici@5.29.0", "", { "dependencies": { "@fastify/busboy": "^2.0.0" } }, "sha512-raqeBD6NQK4SkWhQzeYKd1KmIG6dllBOTt55Rmkt4HtI9mwdWtJljnrXjAFUBLTSN67HWrOIZ3EPF4kjUw80Bg=="],
|
||||
|
||||
"undici-types": ["undici-types@7.8.0", "", {}, "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw=="],
|
||||
|
||||
"universal-user-agent": ["universal-user-agent@6.0.1", "", {}, "sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ=="],
|
||||
|
||||
"wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="],
|
||||
|
||||
"@octokit/plugin-paginate-rest/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],
|
||||
|
||||
"@octokit/plugin-rest-endpoint-methods/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],
|
||||
|
||||
"@octokit/plugin-paginate-rest/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],
|
||||
|
||||
"@octokit/plugin-rest-endpoint-methods/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],
|
||||
}
|
||||
}
|
||||

21 .github/actions/codex/package.json vendored

@@ -1,21 +0,0 @@
{
  "name": "codex-action",
  "version": "0.0.0",
  "private": true,
  "scripts": {
    "format": "prettier --check src",
    "format:fix": "prettier --write src",
    "test": "bun test",
    "typecheck": "tsc"
  },
  "dependencies": {
    "@actions/core": "^1.11.1",
    "@actions/github": "^6.0.1"
  },
  "devDependencies": {
    "@types/bun": "^1.2.18",
    "@types/node": "^24.0.13",
    "prettier": "^3.6.2",
    "typescript": "^5.8.3"
  }
}
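
A quick sketch of exercising these scripts locally with Bun (the same runtime the action installs via `oven-sh/setup-bun`):

```shell
cd .github/actions/codex
bun install
bun run format      # prettier --check src
bun run typecheck   # tsc
bun test
```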

85 .github/actions/codex/src/add-reaction.ts vendored
@@ -1,85 +0,0 @@
|
||||
import * as github from "@actions/github";
|
||||
import type { EnvContext } from "./env-context";
|
||||
|
||||
/**
|
||||
* Add an "eyes" reaction to the entity (issue, issue comment, or pull request
|
||||
* review comment) that triggered the current Codex invocation.
|
||||
*
|
||||
* The purpose is to provide immediate feedback to the user – similar to the
|
||||
* *-in-progress label flow – indicating that the bot has acknowledged the
|
||||
* request and is working on it.
|
||||
*
|
||||
* We attempt to add the reaction best suited for the current GitHub event:
|
||||
*
|
||||
* • issues → POST /repos/{owner}/{repo}/issues/{issue_number}/reactions
|
||||
* • issue_comment → POST /repos/{owner}/{repo}/issues/comments/{comment_id}/reactions
|
||||
* • pull_request_review_comment → POST /repos/{owner}/{repo}/pulls/comments/{comment_id}/reactions
|
||||
*
|
||||
* If the specific target is unavailable (e.g. unexpected payload shape) we
|
||||
* silently skip instead of failing the whole action because the reaction is
|
||||
* merely cosmetic.
|
||||
*/
|
||||
export async function addEyesReaction(ctx: EnvContext): Promise<void> {
|
||||
const octokit = ctx.getOctokit();
|
||||
const { owner, repo } = github.context.repo;
|
||||
const eventName = github.context.eventName;
|
||||
|
||||
try {
|
||||
switch (eventName) {
|
||||
case "issue_comment": {
|
||||
const commentId = (github.context.payload as any)?.comment?.id;
|
||||
if (commentId) {
|
||||
await octokit.rest.reactions.createForIssueComment({
|
||||
owner,
|
||||
repo,
|
||||
comment_id: commentId,
|
||||
content: "eyes",
|
||||
});
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "pull_request_review_comment": {
|
||||
const commentId = (github.context.payload as any)?.comment?.id;
|
||||
if (commentId) {
|
||||
await octokit.rest.reactions.createForPullRequestReviewComment({
|
||||
owner,
|
||||
repo,
|
||||
comment_id: commentId,
|
||||
content: "eyes",
|
||||
});
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "issues": {
|
||||
const issueNumber = github.context.issue.number;
|
||||
if (issueNumber) {
|
||||
await octokit.rest.reactions.createForIssue({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issueNumber,
|
||||
content: "eyes",
|
||||
});
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
// Fallback: try to react to the issue/PR if we have a number.
|
||||
const issueNumber = github.context.issue.number;
|
||||
if (issueNumber) {
|
||||
await octokit.rest.reactions.createForIssue({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issueNumber,
|
||||
content: "eyes",
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Do not fail the action if reaction creation fails – log and continue.
|
||||
console.warn(`Failed to add \"eyes\" reaction: ${error}`);
|
||||
}
|
||||
}
|
||||

53 .github/actions/codex/src/comment.ts vendored
@@ -1,53 +0,0 @@
|
||||
import type { EnvContext } from "./env-context";
|
||||
import { runCodex } from "./run-codex";
|
||||
import { postComment } from "./post-comment";
|
||||
import { addEyesReaction } from "./add-reaction";
|
||||
|
||||
/**
|
||||
* Handle `issue_comment` and `pull_request_review_comment` events once we know
|
||||
* the action is supported.
|
||||
*/
|
||||
export async function onComment(ctx: EnvContext): Promise<void> {
|
||||
const triggerPhrase = ctx.tryGet("INPUT_TRIGGER_PHRASE");
|
||||
if (!triggerPhrase) {
|
||||
console.warn("Empty trigger phrase: skipping.");
|
||||
return;
|
||||
}
|
||||
|
||||
// Attempt to get the body of the comment from the environment. Depending on
|
||||
// the event type either `GITHUB_EVENT_COMMENT_BODY` (issue & PR comments) or
|
||||
// `GITHUB_EVENT_REVIEW_BODY` (PR reviews) is set.
|
||||
const commentBody =
|
||||
ctx.tryGetNonEmpty("GITHUB_EVENT_COMMENT_BODY") ??
|
||||
ctx.tryGetNonEmpty("GITHUB_EVENT_REVIEW_BODY") ??
|
||||
ctx.tryGetNonEmpty("GITHUB_EVENT_ISSUE_BODY");
|
||||
|
||||
if (!commentBody) {
|
||||
console.warn("Comment body not found in environment: skipping.");
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if the trigger phrase is present.
|
||||
if (!commentBody.includes(triggerPhrase)) {
|
||||
console.log(
|
||||
`Trigger phrase '${triggerPhrase}' not found: nothing to do for this comment.`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Derive the prompt by removing the trigger phrase. Remove only the first
|
||||
// occurrence to keep any additional occurrences that might be meaningful.
|
||||
const prompt = commentBody.replace(triggerPhrase, "").trim();
|
||||
|
||||
if (prompt.length === 0) {
|
||||
console.warn("Prompt is empty after removing trigger phrase: skipping");
|
||||
return;
|
||||
}
|
||||
|
||||
// Provide immediate feedback that we are working on the request.
|
||||
await addEyesReaction(ctx);
|
||||
|
||||
// Run Codex and post the response as a new comment.
|
||||
const lastMessage = await runCodex(prompt, ctx);
|
||||
await postComment(lastMessage, ctx);
|
||||
}
|
||||
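
To make the trigger-phrase handling above concrete, here is a small illustrative sketch with hypothetical values (the real code reads these from the `GITHUB_EVENT_*` environment variables):

```ts
// Mirrors onComment(): strip the first occurrence of the trigger phrase.
const triggerPhrase = "@codex";
const commentBody = "@codex please take a look at the flaky TUI test";

const prompt = commentBody.replace(triggerPhrase, "").trim();
// prompt === "please take a look at the flaky TUI test"
// This is the string that gets passed to runCodex(prompt, ctx).
```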

11 .github/actions/codex/src/config.ts vendored

@@ -1,11 +0,0 @@
import { readdirSync, statSync } from "fs";
import * as path from "path";

export interface Config {
  labels: Record<string, LabelConfig>;
}

export interface LabelConfig {
  /** Returns the prompt template. */
  getPromptTemplate(): string;
}

@@ -1,44 +0,0 @@
import type { Config } from "./config";

export function getDefaultConfig(): Config {
  return {
    labels: {
      "codex-investigate-issue": {
        getPromptTemplate: () =>
          `
Troubleshoot whether the reported issue is valid.

Provide a concise and respectful comment summarizing the findings.

### {CODEX_ACTION_ISSUE_TITLE}

{CODEX_ACTION_ISSUE_BODY}
`.trim(),
      },
      "codex-code-review": {
        getPromptTemplate: () =>
          `
Review this PR and respond with a very concise final message, formatted in Markdown.

There should be a summary of the changes (1-2 sentences) and a few bullet points if necessary.

Then provide the **review** (1-2 sentences plus bullet points, friendly tone).

{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the \`base\` and \`head\` refs that define this PR. Both refs are available locally.
`.trim(),
      },
      "codex-attempt-fix": {
        getPromptTemplate: () =>
          `
Attempt to solve the reported issue.

If a code change is required, create a new branch, commit the fix, and open a pull-request that resolves the problem.

### {CODEX_ACTION_ISSUE_TITLE}

{CODEX_ACTION_ISSUE_BODY}
`.trim(),
      },
    },
  };
}

116 .github/actions/codex/src/env-context.ts vendored
@@ -1,116 +0,0 @@
|
||||
/*
|
||||
* Centralised access to environment variables used by the Codex GitHub
|
||||
* Action.
|
||||
*
|
||||
* To enable proper unit-testing we avoid reading from `process.env` at module
|
||||
* initialisation time. Instead a `EnvContext` object is created (usually from
|
||||
* the real `process.env`) and passed around explicitly or – where that is not
|
||||
* yet practical – imported as the shared `defaultContext` singleton. Tests can
|
||||
* create their own context backed by a stubbed map of variables without having
|
||||
* to mutate global state.
|
||||
*/
|
||||
|
||||
import { fail } from "./fail";
|
||||
import * as github from "@actions/github";
|
||||
|
||||
export interface EnvContext {
|
||||
/**
|
||||
* Return the value for a given environment variable or terminate the action
|
||||
* via `fail` if it is missing / empty.
|
||||
*/
|
||||
get(name: string): string;
|
||||
|
||||
/**
|
||||
* Attempt to read an environment variable. Returns the value when present;
|
||||
* otherwise returns undefined (does not call `fail`).
|
||||
*/
|
||||
tryGet(name: string): string | undefined;
|
||||
|
||||
/**
|
||||
* Attempt to read an environment variable. Returns non-empty string value or
|
||||
* null if unset or empty string.
|
||||
*/
|
||||
tryGetNonEmpty(name: string): string | null;
|
||||
|
||||
/**
|
||||
* Return a memoised Octokit instance authenticated via the token resolved
|
||||
* from the provided argument (when defined) or the environment variables
|
||||
* `GITHUB_TOKEN`/`GH_TOKEN`.
|
||||
*
|
||||
* Subsequent calls return the same cached instance to avoid spawning
|
||||
* multiple REST clients within a single action run.
|
||||
*/
|
||||
getOctokit(token?: string): ReturnType<typeof github.getOctokit>;
|
||||
}
|
||||
|
||||
/** Internal helper – *not* exported. */
|
||||
function _getRequiredEnv(
|
||||
name: string,
|
||||
env: Record<string, string | undefined>,
|
||||
): string | undefined {
|
||||
const value = env[name];
|
||||
|
||||
// Avoid leaking secrets into logs while still logging non-secret variables.
|
||||
if (name.endsWith("KEY") || name.endsWith("TOKEN")) {
|
||||
if (value) {
|
||||
console.log(`value for ${name} was found`);
|
||||
}
|
||||
} else {
|
||||
console.log(`${name}=${value}`);
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
/** Create a context backed by the supplied environment map (defaults to `process.env`). */
|
||||
export function createEnvContext(
|
||||
env: Record<string, string | undefined> = process.env,
|
||||
): EnvContext {
|
||||
// Lazily instantiated Octokit client – shared across this context.
|
||||
let cachedOctokit: ReturnType<typeof github.getOctokit> | null = null;
|
||||
|
||||
return {
|
||||
get(name: string): string {
|
||||
const value = _getRequiredEnv(name, env);
|
||||
if (value == null) {
|
||||
fail(`Missing required environment variable: ${name}`);
|
||||
}
|
||||
return value;
|
||||
},
|
||||
|
||||
tryGet(name: string): string | undefined {
|
||||
return _getRequiredEnv(name, env);
|
||||
},
|
||||
|
||||
tryGetNonEmpty(name: string): string | null {
|
||||
const value = _getRequiredEnv(name, env);
|
||||
return value == null || value === "" ? null : value;
|
||||
},
|
||||
|
||||
getOctokit(token?: string) {
|
||||
if (cachedOctokit) {
|
||||
return cachedOctokit;
|
||||
}
|
||||
|
||||
// Determine the token to authenticate with.
|
||||
const githubToken = token ?? env["GITHUB_TOKEN"] ?? env["GH_TOKEN"];
|
||||
|
||||
if (!githubToken) {
|
||||
fail(
|
||||
"Unable to locate a GitHub token. `github_token` should have been set on the action.",
|
||||
);
|
||||
}
|
||||
|
||||
cachedOctokit = github.getOctokit(githubToken!);
|
||||
return cachedOctokit;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Shared context built from the actual `process.env`. Production code that is
|
||||
* not yet refactored to receive a context explicitly may import and use this
|
||||
* singleton. Tests should avoid the singleton and instead pass their own
|
||||
* context to the functions they exercise.
|
||||
*/
|
||||
export const defaultContext: EnvContext = createEnvContext();
|
||||
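
As the header comment suggests, tests can build their own context from a stubbed map instead of `process.env`. An illustrative sketch (not part of the repository's test suite):

```ts
import { createEnvContext } from "./env-context";

// Context backed by a stubbed environment map.
const ctx = createEnvContext({
  GITHUB_EVENT_NAME: "issues",
  GITHUB_EVENT_ACTION: "labeled",
  GITHUB_TOKEN: "",
});

ctx.get("GITHUB_EVENT_NAME");          // "issues"
ctx.tryGet("GITHUB_EVENT_LABEL_NAME"); // undefined (missing, does not fail())
ctx.tryGetNonEmpty("GITHUB_TOKEN");    // null (set, but empty)
```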

4 .github/actions/codex/src/fail.ts vendored

@@ -1,4 +0,0 @@
export function fail(message: string): never {
  console.error(message);
  process.exit(1);
}

149 .github/actions/codex/src/git-helpers.ts vendored
@@ -1,149 +0,0 @@
|
||||
import { spawnSync } from "child_process";
|
||||
import * as github from "@actions/github";
|
||||
import { EnvContext } from "./env-context";
|
||||
|
||||
function runGit(args: string[], silent = true): string {
|
||||
console.info(`Running git ${args.join(" ")}`);
|
||||
const res = spawnSync("git", args, {
|
||||
encoding: "utf8",
|
||||
stdio: silent ? ["ignore", "pipe", "pipe"] : "inherit",
|
||||
});
|
||||
if (res.error) {
|
||||
throw res.error;
|
||||
}
|
||||
if (res.status !== 0) {
|
||||
// Return stderr so caller may handle; else throw.
|
||||
throw new Error(
|
||||
`git ${args.join(" ")} failed with code ${res.status}: ${res.stderr}`,
|
||||
);
|
||||
}
|
||||
return res.stdout.trim();
|
||||
}
|
||||
|
||||
function stageAllChanges() {
|
||||
runGit(["add", "-A"]);
|
||||
}
|
||||
|
||||
function hasStagedChanges(): boolean {
|
||||
const res = spawnSync("git", ["diff", "--cached", "--quiet", "--exit-code"]);
|
||||
return res.status !== 0;
|
||||
}
|
||||
|
||||
function ensureOnBranch(
|
||||
issueNumber: number,
|
||||
protectedBranches: string[],
|
||||
suggestedSlug?: string,
|
||||
): string {
|
||||
let branch = "";
|
||||
try {
|
||||
branch = runGit(["symbolic-ref", "--short", "-q", "HEAD"]);
|
||||
} catch {
|
||||
branch = "";
|
||||
}
|
||||
|
||||
// If detached HEAD or on a protected branch, create a new branch.
|
||||
if (!branch || protectedBranches.includes(branch)) {
|
||||
if (suggestedSlug) {
|
||||
const safeSlug = suggestedSlug
|
||||
.toLowerCase()
|
||||
.replace(/[^\w\s-]/g, "")
|
||||
.trim()
|
||||
.replace(/\s+/g, "-");
|
||||
branch = `codex-fix-${issueNumber}-${safeSlug}`;
|
||||
} else {
|
||||
branch = `codex-fix-${issueNumber}-${Date.now()}`;
|
||||
}
|
||||
runGit(["switch", "-c", branch]);
|
||||
}
|
||||
return branch;
|
||||
}
|
||||
|
||||
function commitIfNeeded(issueNumber: number) {
|
||||
if (hasStagedChanges()) {
|
||||
runGit([
|
||||
"commit",
|
||||
"-m",
|
||||
`fix: automated fix for #${issueNumber} via Codex`,
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
function pushBranch(branch: string, githubToken: string, ctx: EnvContext) {
|
||||
const repoSlug = ctx.get("GITHUB_REPOSITORY"); // owner/repo
|
||||
const remoteUrl = `https://x-access-token:${githubToken}@github.com/${repoSlug}.git`;
|
||||
|
||||
runGit(["push", "--force-with-lease", "-u", remoteUrl, `HEAD:${branch}`]);
|
||||
}
|
||||
|
||||
/**
|
||||
* If this returns a string, it is the URL of the created PR.
|
||||
*/
|
||||
export async function maybePublishPRForIssue(
|
||||
issueNumber: number,
|
||||
lastMessage: string,
|
||||
ctx: EnvContext,
|
||||
): Promise<string | undefined> {
|
||||
// Only proceed if GITHUB_TOKEN available.
|
||||
const githubToken =
|
||||
ctx.tryGetNonEmpty("GITHUB_TOKEN") ?? ctx.tryGetNonEmpty("GH_TOKEN");
|
||||
if (!githubToken) {
|
||||
console.warn("No GitHub token - skipping PR creation.");
|
||||
return undefined;
|
||||
}
|
||||
|
||||
// Print `git status` for debugging.
|
||||
runGit(["status"]);
|
||||
|
||||
// Stage any remaining changes so they can be committed and pushed.
|
||||
stageAllChanges();
|
||||
|
||||
const octokit = ctx.getOctokit(githubToken);
|
||||
|
||||
const { owner, repo } = github.context.repo;
|
||||
|
||||
// Determine default branch to treat as protected.
|
||||
let defaultBranch = "main";
|
||||
try {
|
||||
const repoInfo = await octokit.rest.repos.get({ owner, repo });
|
||||
defaultBranch = repoInfo.data.default_branch ?? "main";
|
||||
} catch (e) {
|
||||
console.warn(`Failed to get default branch, assuming 'main': ${e}`);
|
||||
}
|
||||
|
||||
const sanitizedMessage = lastMessage.replace(/\u2022/g, "-");
|
||||
const [summaryLine] = sanitizedMessage.split(/\r?\n/);
|
||||
const branch = ensureOnBranch(issueNumber, [defaultBranch, "master"], summaryLine);
|
||||
commitIfNeeded(issueNumber);
|
||||
pushBranch(branch, githubToken, ctx);
|
||||
|
||||
// Try to find existing PR for this branch
|
||||
const headParam = `${owner}:${branch}`;
|
||||
const existing = await octokit.rest.pulls.list({
|
||||
owner,
|
||||
repo,
|
||||
head: headParam,
|
||||
state: "open",
|
||||
});
|
||||
if (existing.data.length > 0) {
|
||||
return existing.data[0].html_url;
|
||||
}
|
||||
|
||||
// Determine base branch (default to main)
|
||||
let baseBranch = "main";
|
||||
try {
|
||||
const repoInfo = await octokit.rest.repos.get({ owner, repo });
|
||||
baseBranch = repoInfo.data.default_branch ?? "main";
|
||||
} catch (e) {
|
||||
console.warn(`Failed to get default branch, assuming 'main': ${e}`);
|
||||
}
|
||||
|
||||
const pr = await octokit.rest.pulls.create({
|
||||
owner,
|
||||
repo,
|
||||
title: summaryLine,
|
||||
head: branch,
|
||||
base: baseBranch,
|
||||
body: sanitizedMessage,
|
||||
});
|
||||
return pr.data.html_url;
|
||||
}
|
||||

16 .github/actions/codex/src/git-user.ts vendored

@@ -1,16 +0,0 @@
export function setGitHubActionsUser(): void {
  const commands = [
    ["git", "config", "--global", "user.name", "github-actions[bot]"],
    [
      "git",
      "config",
      "--global",
      "user.email",
      "41898282+github-actions[bot]@users.noreply.github.com",
    ],
  ];

  for (const command of commands) {
    Bun.spawnSync(command);
  }
}

11 .github/actions/codex/src/github-workspace.ts vendored

@@ -1,11 +0,0 @@
import * as pathMod from "path";
import { EnvContext } from "./env-context";

export function resolveWorkspacePath(path: string, ctx: EnvContext): string {
  if (pathMod.isAbsolute(path)) {
    return path;
  } else {
    const workspace = ctx.get("GITHUB_WORKSPACE");
    return pathMod.join(workspace, path);
  }
}

56 .github/actions/codex/src/load-config.ts vendored
@@ -1,56 +0,0 @@
|
||||
import type { Config, LabelConfig } from "./config";
|
||||
|
||||
import { getDefaultConfig } from "./default-label-config";
|
||||
import { readFileSync, readdirSync, statSync } from "fs";
|
||||
import * as path from "path";
|
||||
|
||||
/**
|
||||
* Build an in-memory configuration object by scanning the repository for
|
||||
* Markdown templates located in `.github/codex/labels`.
|
||||
*
|
||||
* Each `*.md` file in that directory represents a label that can trigger the
|
||||
* Codex GitHub Action. The filename **without** the extension is interpreted
|
||||
* as the label name, e.g. `codex-review.md` ➜ `codex-review`.
|
||||
*
|
||||
* For every such label we derive the corresponding `doneLabel` by appending
|
||||
* the suffix `-completed`.
|
||||
*/
|
||||
export function loadConfig(workspace: string): Config {
|
||||
const labelsDir = path.join(workspace, ".github", "codex", "labels");
|
||||
|
||||
let entries: string[];
|
||||
try {
|
||||
entries = readdirSync(labelsDir);
|
||||
} catch {
|
||||
// If the directory is missing, return the default configuration.
|
||||
return getDefaultConfig();
|
||||
}
|
||||
|
||||
const labels: Record<string, LabelConfig> = {};
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.endsWith(".md")) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const fullPath = path.join(labelsDir, entry);
|
||||
|
||||
if (!statSync(fullPath).isFile()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const labelName = entry.slice(0, -3); // trim ".md"
|
||||
|
||||
labels[labelName] = new FileLabelConfig(fullPath);
|
||||
}
|
||||
|
||||
return { labels };
|
||||
}
|
||||
|
||||
class FileLabelConfig implements LabelConfig {
|
||||
constructor(private readonly promptPath: string) {}
|
||||
|
||||
getPromptTemplate(): string {
|
||||
return readFileSync(this.promptPath, "utf8");
|
||||
}
|
||||
}
|
||||
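
An illustrative sketch of how `loadConfig` is meant to be consumed (the workspace path is hypothetical): with `.github/codex/labels/codex-review.md` present in the checkout, the returned config exposes it under the `codex-review` key.

```ts
import { loadConfig } from "./load-config";

const config = loadConfig("/home/runner/work/repo/repo"); // hypothetical GITHUB_WORKSPACE
const labelConfig = config.labels["codex-review"];        // backed by codex-review.md, if present
const promptTemplate = labelConfig?.getPromptTemplate();  // raw Markdown template text
```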

80 .github/actions/codex/src/main.ts vendored
@@ -1,80 +0,0 @@
|
||||
#!/usr/bin/env bun
|
||||
|
||||
import type { Config } from "./config";
|
||||
|
||||
import { defaultContext, EnvContext } from "./env-context";
|
||||
import { loadConfig } from "./load-config";
|
||||
import { setGitHubActionsUser } from "./git-user";
|
||||
import { onLabeled } from "./process-label";
|
||||
import { ensureBaseAndHeadCommitsForPRAreAvailable } from "./prompt-template";
|
||||
import { performAdditionalValidation } from "./verify-inputs";
|
||||
import { onComment } from "./comment";
|
||||
import { onReview } from "./review";
|
||||
|
||||
async function main(): Promise<void> {
|
||||
const ctx: EnvContext = defaultContext;
|
||||
|
||||
// Build the configuration dynamically by scanning `.github/codex/labels`.
|
||||
const GITHUB_WORKSPACE = ctx.get("GITHUB_WORKSPACE");
|
||||
const config: Config = loadConfig(GITHUB_WORKSPACE);
|
||||
|
||||
// Optionally perform additional validation of prompt template files.
|
||||
performAdditionalValidation(config, GITHUB_WORKSPACE);
|
||||
|
||||
const GITHUB_EVENT_NAME = ctx.get("GITHUB_EVENT_NAME");
|
||||
const GITHUB_EVENT_ACTION = ctx.get("GITHUB_EVENT_ACTION");
|
||||
|
||||
// Set user.name and user.email to a bot before Codex runs, just in case it
|
||||
// creates a commit.
|
||||
setGitHubActionsUser();
|
||||
|
||||
switch (GITHUB_EVENT_NAME) {
|
||||
case "issues": {
|
||||
if (GITHUB_EVENT_ACTION === "labeled") {
|
||||
await onLabeled(config, ctx);
|
||||
return;
|
||||
} else if (GITHUB_EVENT_ACTION === "opened") {
|
||||
await onComment(ctx);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "issue_comment": {
|
||||
if (GITHUB_EVENT_ACTION === "created") {
|
||||
await onComment(ctx);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "pull_request": {
|
||||
if (GITHUB_EVENT_ACTION === "labeled") {
|
||||
await ensureBaseAndHeadCommitsForPRAreAvailable(ctx);
|
||||
await onLabeled(config, ctx);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "pull_request_review": {
|
||||
await ensureBaseAndHeadCommitsForPRAreAvailable(ctx);
|
||||
if (GITHUB_EVENT_ACTION === "submitted") {
|
||||
await onReview(ctx);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "pull_request_review_comment": {
|
||||
await ensureBaseAndHeadCommitsForPRAreAvailable(ctx);
|
||||
if (GITHUB_EVENT_ACTION === "created") {
|
||||
await onComment(ctx);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
console.warn(
|
||||
`Unsupported action '${GITHUB_EVENT_ACTION}' for event '${GITHUB_EVENT_NAME}'.`,
|
||||
);
|
||||
}
|
||||
|
||||
main();
|
||||

62 .github/actions/codex/src/post-comment.ts vendored
@@ -1,62 +0,0 @@
|
||||
import { fail } from "./fail";
|
||||
import * as github from "@actions/github";
|
||||
import { EnvContext } from "./env-context";
|
||||
|
||||
/**
|
||||
* Post a comment to the issue / pull request currently in scope.
|
||||
*
|
||||
* Provide the environment context so that token lookup (inside getOctokit) does
|
||||
* not rely on global state.
|
||||
*/
|
||||
export async function postComment(
|
||||
commentBody: string,
|
||||
ctx: EnvContext,
|
||||
): Promise<void> {
|
||||
// Append a footer with a link back to the workflow run, if available.
|
||||
const footer = buildWorkflowRunFooter(ctx);
|
||||
const bodyWithFooter = footer ? `${commentBody}${footer}` : commentBody;
|
||||
|
||||
const octokit = ctx.getOctokit();
|
||||
console.info("Got Octokit instance for posting comment");
|
||||
const { owner, repo } = github.context.repo;
|
||||
const issueNumber = github.context.issue.number;
|
||||
|
||||
if (!issueNumber) {
|
||||
console.warn(
|
||||
"No issue or pull_request number found in GitHub context; skipping comment creation.",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
console.info("Calling octokit.rest.issues.createComment()");
|
||||
await octokit.rest.issues.createComment({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issueNumber,
|
||||
body: bodyWithFooter,
|
||||
});
|
||||
} catch (error) {
|
||||
fail(`Failed to create comment via GitHub API: ${error}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper to build a Markdown fragment linking back to the workflow run that
|
||||
* generated the current comment. Returns `undefined` if required environment
|
||||
* variables are missing – e.g. when running outside of GitHub Actions – so we
|
||||
* can gracefully skip the footer in those cases.
|
||||
*/
|
||||
function buildWorkflowRunFooter(ctx: EnvContext): string | undefined {
|
||||
const serverUrl =
|
||||
ctx.tryGetNonEmpty("GITHUB_SERVER_URL") ?? "https://github.com";
|
||||
const repository = ctx.tryGetNonEmpty("GITHUB_REPOSITORY");
|
||||
const runId = ctx.tryGetNonEmpty("GITHUB_RUN_ID");
|
||||
|
||||
if (!repository || !runId) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const url = `${serverUrl}/${repository}/actions/runs/${runId}`;
|
||||
return `\n\n---\n*[_View workflow run_](${url})*`;
|
||||
}
|
||||

195 .github/actions/codex/src/process-label.ts vendored
@@ -1,195 +0,0 @@
|
||||
import { fail } from "./fail";
|
||||
import { EnvContext } from "./env-context";
|
||||
import { renderPromptTemplate } from "./prompt-template";
|
||||
|
||||
import { postComment } from "./post-comment";
|
||||
import { runCodex } from "./run-codex";
|
||||
|
||||
import * as github from "@actions/github";
|
||||
import { Config, LabelConfig } from "./config";
|
||||
import { maybePublishPRForIssue } from "./git-helpers";
|
||||
|
||||
export async function onLabeled(
|
||||
config: Config,
|
||||
ctx: EnvContext,
|
||||
): Promise<void> {
|
||||
const GITHUB_EVENT_LABEL_NAME = ctx.get("GITHUB_EVENT_LABEL_NAME");
|
||||
const labelConfig = config.labels[GITHUB_EVENT_LABEL_NAME] as
|
||||
| LabelConfig
|
||||
| undefined;
|
||||
if (!labelConfig) {
|
||||
fail(
|
||||
`Label \`${GITHUB_EVENT_LABEL_NAME}\` not found in config: ${JSON.stringify(config)}`,
|
||||
);
|
||||
}
|
||||
|
||||
await processLabelConfig(ctx, GITHUB_EVENT_LABEL_NAME, labelConfig);
|
||||
}
|
||||
|
||||
/**
|
||||
* Wrapper that handles `-in-progress` and `-completed` semantics around the core lint/fix/review
|
||||
* processing. It will:
|
||||
*
|
||||
* - Skip execution if the `-in-progress` or `-completed` label is already present.
|
||||
* - Mark the PR/issue as `-in-progress`.
|
||||
* - After successful execution, mark the PR/issue as `-completed`.
|
||||
*/
|
||||
async function processLabelConfig(
|
||||
ctx: EnvContext,
|
||||
label: string,
|
||||
labelConfig: LabelConfig,
|
||||
): Promise<void> {
|
||||
const octokit = ctx.getOctokit();
|
||||
const { owner, repo, issueNumber, labelNames } =
|
||||
await getCurrentLabels(octokit);
|
||||
|
||||
const inProgressLabel = `${label}-in-progress`;
|
||||
const completedLabel = `${label}-completed`;
|
||||
for (const markerLabel of [inProgressLabel, completedLabel]) {
|
||||
if (labelNames.includes(markerLabel)) {
|
||||
console.log(
|
||||
`Label '${markerLabel}' already present on issue/PR #${issueNumber}. Skipping Codex action.`,
|
||||
);
|
||||
|
||||
// Clean up: remove the triggering label to avoid confusion and re-runs.
|
||||
await addAndRemoveLabels(octokit, {
|
||||
owner,
|
||||
repo,
|
||||
issueNumber,
|
||||
remove: markerLabel,
|
||||
});
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
  // Mark the PR/issue as in progress.
  await addAndRemoveLabels(octokit, {
    owner,
    repo,
    issueNumber,
    add: inProgressLabel,
    remove: label,
  });

  // Run the core Codex processing.
  await processLabel(ctx, label, labelConfig);

  // Mark the PR/issue as completed.
  await addAndRemoveLabels(octokit, {
    owner,
    repo,
    issueNumber,
    add: completedLabel,
    remove: inProgressLabel,
  });
}

async function processLabel(
  ctx: EnvContext,
  label: string,
  labelConfig: LabelConfig,
): Promise<void> {
  const template = labelConfig.getPromptTemplate();
  const populatedTemplate = await renderPromptTemplate(template, ctx);

  // Always run Codex and post the resulting message as a comment.
  let commentBody = await runCodex(populatedTemplate, ctx);

  // Current heuristic: only try to create a PR if "attempt" or "fix" is in the
  // label name. (Yes, we plan to evolve this.)
  if (label.indexOf("fix") !== -1 || label.indexOf("attempt") !== -1) {
    console.info(`label ${label} indicates we should attempt to create a PR`);
    const prUrl = await maybeFixIssue(ctx, commentBody);
    if (prUrl) {
      commentBody += `\n\n---\nOpened pull request: ${prUrl}`;
    }
  } else {
    console.info(
      `label ${label} does not indicate we should attempt to create a PR`,
    );
  }

  await postComment(commentBody, ctx);
}

async function maybeFixIssue(
  ctx: EnvContext,
  lastMessage: string,
): Promise<string | undefined> {
  // Attempt to create a PR out of any changes Codex produced.
  const issueNumber = github.context.issue.number!; // exists for issues triggering this path
  try {
    return await maybePublishPRForIssue(issueNumber, lastMessage, ctx);
  } catch (e) {
    console.warn(`Failed to publish PR: ${e}`);
  }
}

async function getCurrentLabels(
  octokit: ReturnType<typeof github.getOctokit>,
): Promise<{
  owner: string;
  repo: string;
  issueNumber: number;
  labelNames: Array<string>;
}> {
  const { owner, repo } = github.context.repo;
  const issueNumber = github.context.issue.number;

  if (!issueNumber) {
    fail("No issue or pull_request number found in GitHub context.");
  }

  const { data: issueData } = await octokit.rest.issues.get({
    owner,
    repo,
    issue_number: issueNumber,
  });

  const labelNames =
    issueData.labels?.map((label: any) =>
      typeof label === "string" ? label : label.name,
    ) ?? [];

  return { owner, repo, issueNumber, labelNames };
}

async function addAndRemoveLabels(
  octokit: ReturnType<typeof github.getOctokit>,
  opts: {
    owner: string;
    repo: string;
    issueNumber: number;
    add?: string;
    remove?: string;
  },
): Promise<void> {
  const { owner, repo, issueNumber, add, remove } = opts;

  if (add) {
    try {
      await octokit.rest.issues.addLabels({
        owner,
        repo,
        issue_number: issueNumber,
        labels: [add],
      });
    } catch (error) {
      console.warn(`Failed to add label '${add}': ${error}`);
    }
  }

  if (remove) {
    try {
      await octokit.rest.issues.removeLabel({
        owner,
        repo,
        issue_number: issueNumber,
        name: remove,
      });
    } catch (error) {
      console.warn(`Failed to remove label '${remove}': ${error}`);
    }
  }
}
.github/actions/codex/src/prompt-template.ts (vendored, 284 lines changed)
@@ -1,284 +0,0 @@
/*
 * Utilities to render Codex prompt templates.
 *
 * A template is a Markdown (or plain-text) file that may contain one or more
 * placeholders of the form `{CODEX_ACTION_<NAME>}`. At runtime these
 * placeholders are substituted with dynamically generated content. Each
 * placeholder is resolved **exactly once** even if it appears multiple times
 * in the same template.
 */

import { readFile } from "fs/promises";

import { EnvContext } from "./env-context";

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

/**
 * Lazily caches parsed `$GITHUB_EVENT_PATH` contents keyed by the file path so
 * we only hit the filesystem once per unique event payload.
 */
const githubEventDataCache: Map<string, Promise<any>> = new Map();

function getGitHubEventData(ctx: EnvContext): Promise<any> {
  const eventPath = ctx.get("GITHUB_EVENT_PATH");
  let cached = githubEventDataCache.get(eventPath);
  if (!cached) {
    cached = readFile(eventPath, "utf8").then((raw) => JSON.parse(raw));
    githubEventDataCache.set(eventPath, cached);
  }
  return cached;
}

async function runCommand(args: Array<string>): Promise<string> {
  const result = Bun.spawnSync(args, {
    stdout: "pipe",
    stderr: "pipe",
  });

  if (result.success) {
    return result.stdout.toString();
  }

  console.error(`Error running ${JSON.stringify(args)}: ${result.stderr}`);
  return "";
}

// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------

// Regex that captures the variable name without the surrounding { } braces.
const VAR_REGEX = /\{(CODEX_ACTION_[A-Z0-9_]+)\}/g;

// Cache individual placeholder values so each one is resolved at most once per
// process even if many templates reference it.
const placeholderCache: Map<string, Promise<string>> = new Map();

/**
 * Parse a template string, resolve all placeholders and return the rendered
 * result.
 */
export async function renderPromptTemplate(
  template: string,
  ctx: EnvContext,
): Promise<string> {
  // ---------------------------------------------------------------------
  // 1) Gather all *unique* placeholders present in the template.
  // ---------------------------------------------------------------------
  const variables = new Set<string>();
  for (const match of template.matchAll(VAR_REGEX)) {
    variables.add(match[1]);
  }

  // ---------------------------------------------------------------------
  // 2) Kick off (or reuse) async resolution for each variable.
  // ---------------------------------------------------------------------
  for (const variable of variables) {
    if (!placeholderCache.has(variable)) {
      placeholderCache.set(variable, resolveVariable(variable, ctx));
    }
  }

  // ---------------------------------------------------------------------
  // 3) Await completion so we can perform a simple synchronous replace below.
  // ---------------------------------------------------------------------
  const resolvedEntries: [string, string][] = [];
  for (const [key, promise] of placeholderCache.entries()) {
    resolvedEntries.push([key, await promise]);
  }
  const resolvedMap = new Map<string, string>(resolvedEntries);

  // ---------------------------------------------------------------------
  // 4) Replace each occurrence. We use replace with a callback to ensure
  //    correct substitution even if variable names overlap (they shouldn't,
  //    but better safe than sorry).
  // ---------------------------------------------------------------------
  return template.replace(VAR_REGEX, (_, varName: string) => {
    return resolvedMap.get(varName) ?? "";
  });
}

export async function ensureBaseAndHeadCommitsForPRAreAvailable(
  ctx: EnvContext,
): Promise<{ baseSha: string; headSha: string } | null> {
  const prShas = await getPrShas(ctx);
  if (prShas == null) {
    console.warn("Unable to resolve PR branches");
    return null;
  }

  const event = await getGitHubEventData(ctx);
  const pr = event.pull_request;
  if (!pr) {
    console.warn("event.pull_request is not defined - unexpected");
    return null;
  }

  const workspace = ctx.get("GITHUB_WORKSPACE");

  // Refs (branch names)
  const baseRef: string | undefined = pr.base?.ref;
  const headRef: string | undefined = pr.head?.ref;

  // Clone URLs
  const baseRemoteUrl: string | undefined = pr.base?.repo?.clone_url;
  const headRemoteUrl: string | undefined = pr.head?.repo?.clone_url;

  if (!baseRef || !headRef || !baseRemoteUrl || !headRemoteUrl) {
    console.warn(
      "Missing PR ref or remote URL information - cannot fetch commits",
    );
    return null;
  }

  // Ensure we have the base branch.
  await runCommand([
    "git",
    "-C",
    workspace,
    "fetch",
    "--no-tags",
    "origin",
    baseRef,
  ]);

  // Ensure we have the head branch.
  if (headRemoteUrl === baseRemoteUrl) {
    // Same repository – the commit is available from `origin`.
    await runCommand([
      "git",
      "-C",
      workspace,
      "fetch",
      "--no-tags",
      "origin",
      headRef,
    ]);
  } else {
    // Fork – make sure a `pr` remote exists that points at the fork. Attempting
    // to add a remote that already exists causes git to error, so we swallow
    // any non-zero exit codes from that specific command.
    await runCommand([
      "git",
      "-C",
      workspace,
      "remote",
      "add",
      "pr",
      headRemoteUrl,
    ]);

    // Whether adding succeeded or the remote already existed, attempt to fetch
    // the head ref from the `pr` remote.
    await runCommand([
      "git",
      "-C",
      workspace,
      "fetch",
      "--no-tags",
      "pr",
      headRef,
    ]);
  }

  return prShas;
}

// ---------------------------------------------------------------------------
// Internal helpers – still exported for use by other modules.
// ---------------------------------------------------------------------------

export async function resolvePrDiff(ctx: EnvContext): Promise<string> {
  const prShas = await ensureBaseAndHeadCommitsForPRAreAvailable(ctx);
  if (prShas == null) {
    console.warn("Unable to resolve PR branches");
    return "";
  }

  const workspace = ctx.get("GITHUB_WORKSPACE");
  const { baseSha, headSha } = prShas;
  return runCommand([
    "git",
    "-C",
    workspace,
    "diff",
    "--color=never",
    `${baseSha}..${headSha}`,
  ]);
}

// ---------------------------------------------------------------------------
// Placeholder resolution
// ---------------------------------------------------------------------------

async function resolveVariable(name: string, ctx: EnvContext): Promise<string> {
  switch (name) {
    case "CODEX_ACTION_ISSUE_TITLE": {
      const event = await getGitHubEventData(ctx);
      const issue = event.issue ?? event.pull_request;
      return issue?.title ?? "";
    }

    case "CODEX_ACTION_ISSUE_BODY": {
      const event = await getGitHubEventData(ctx);
      const issue = event.issue ?? event.pull_request;
      return issue?.body ?? "";
    }

    case "CODEX_ACTION_GITHUB_EVENT_PATH": {
      return ctx.get("GITHUB_EVENT_PATH");
    }

    case "CODEX_ACTION_BASE_REF": {
      const event = await getGitHubEventData(ctx);
      return event?.pull_request?.base?.ref ?? "";
    }

    case "CODEX_ACTION_HEAD_REF": {
      const event = await getGitHubEventData(ctx);
      return event?.pull_request?.head?.ref ?? "";
    }

    case "CODEX_ACTION_PR_DIFF": {
      return resolvePrDiff(ctx);
    }

    // -------------------------------------------------------------------
    // Add new template variables here.
    // -------------------------------------------------------------------

    default: {
      // Unknown variable – leave it blank to avoid leaking placeholders to the
      // final prompt. The alternative would be to `fail()` here, but silently
      // ignoring unknown placeholders is more forgiving and better matches the
      // behaviour of typical template engines.
      console.warn(`Unknown template variable: ${name}`);
      return "";
    }
  }
}

async function getPrShas(
  ctx: EnvContext,
): Promise<{ baseSha: string; headSha: string } | null> {
  const event = await getGitHubEventData(ctx);
  const pr = event.pull_request;
  if (!pr) {
    console.warn("event.pull_request is not defined");
    return null;
  }

  // Prefer explicit SHAs if available to avoid relying on local branch names.
  const baseSha: string | undefined = pr.base?.sha;
  const headSha: string | undefined = pr.head?.sha;

  if (!baseSha || !headSha) {
    console.warn("one of base or head is not defined on event.pull_request");
    return null;
  }

  return { baseSha, headSha };
}
.github/actions/codex/src/review.ts (vendored, 42 lines changed)
@@ -1,42 +0,0 @@
import type { EnvContext } from "./env-context";
import { runCodex } from "./run-codex";
import { postComment } from "./post-comment";
import { addEyesReaction } from "./add-reaction";

/**
 * Handle `pull_request_review` events. We treat the review body the same way
 * as a normal comment.
 */
export async function onReview(ctx: EnvContext): Promise<void> {
  const triggerPhrase = ctx.tryGet("INPUT_TRIGGER_PHRASE");
  if (!triggerPhrase) {
    console.warn("Empty trigger phrase: skipping.");
    return;
  }

  const reviewBody = ctx.tryGet("GITHUB_EVENT_REVIEW_BODY");

  if (!reviewBody) {
    console.warn("Review body not found in environment: skipping.");
    return;
  }

  if (!reviewBody.includes(triggerPhrase)) {
    console.log(
      `Trigger phrase '${triggerPhrase}' not found: nothing to do for this review.`,
    );
    return;
  }

  const prompt = reviewBody.replace(triggerPhrase, "").trim();

  if (prompt.length === 0) {
    console.warn("Prompt is empty after removing trigger phrase: skipping.");
    return;
  }

  await addEyesReaction(ctx);

  const lastMessage = await runCodex(prompt, ctx);
  await postComment(lastMessage, ctx);
}
.github/actions/codex/src/run-codex.ts (vendored, 56 lines changed)
@@ -1,56 +0,0 @@
import { fail } from "./fail";
import { EnvContext } from "./env-context";
import { tmpdir } from "os";
import { join } from "node:path";
import { readFile, mkdtemp } from "fs/promises";
import { resolveWorkspacePath } from "./github-workspace";

/**
 * Runs the Codex CLI with the provided prompt and returns the output written
 * to the "last message" file.
 */
export async function runCodex(
  prompt: string,
  ctx: EnvContext,
): Promise<string> {
  const OPENAI_API_KEY = ctx.get("OPENAI_API_KEY");

  const tempDirPath = await mkdtemp(join(tmpdir(), "codex-"));
  const lastMessageOutput = join(tempDirPath, "codex-prompt.md");

  const args = ["/usr/local/bin/codex-exec"];

  const inputCodexArgs = ctx.tryGet("INPUT_CODEX_ARGS")?.trim();
  if (inputCodexArgs) {
    args.push(...inputCodexArgs.split(/\s+/));
  }

  args.push("--output-last-message", lastMessageOutput, prompt);

  const env: Record<string, string> = { ...process.env, OPENAI_API_KEY };
  const INPUT_CODEX_HOME = ctx.tryGet("INPUT_CODEX_HOME");
  if (INPUT_CODEX_HOME) {
    env.CODEX_HOME = resolveWorkspacePath(INPUT_CODEX_HOME, ctx);
  }

  console.log(`Running Codex: ${JSON.stringify(args)}`);
  const result = Bun.spawnSync(args, {
    stdout: "inherit",
    stderr: "inherit",
    env,
  });

  if (!result.success) {
    fail(`Codex failed: see above for details.`);
  }

  // Read the output generated by Codex.
  let lastMessage: string;
  try {
    lastMessage = await readFile(lastMessageOutput, "utf8");
  } catch (err) {
    fail(`Failed to read Codex output at '${lastMessageOutput}': ${err}`);
  }

  return lastMessage;
}
.github/actions/codex/src/verify-inputs.ts (vendored, 33 lines changed)
@@ -1,33 +0,0 @@
// Validate the inputs passed to the composite action.
// The script currently ensures that the provided configuration file exists and
// matches the expected schema.

import type { Config } from "./config";

import { existsSync } from "fs";
import * as path from "path";
import { fail } from "./fail";

export function performAdditionalValidation(config: Config, workspace: string) {
  // Additional validation: ensure referenced prompt files exist and are Markdown.
  for (const [label, details] of Object.entries(config.labels)) {
    // Determine which prompt key is present (the schema guarantees exactly one).
    const promptPathStr =
      (details as any).prompt ?? (details as any).promptPath;

    if (promptPathStr) {
      const promptPath = path.isAbsolute(promptPathStr)
        ? promptPathStr
        : path.join(workspace, promptPathStr);

      if (!existsSync(promptPath)) {
        fail(`Prompt file for label '${label}' not found: ${promptPath}`);
      }
      if (!promptPath.endsWith(".md")) {
        fail(
          `Prompt file for label '${label}' must be a .md file (got ${promptPathStr}).`,
        );
      }
    }
  }
}
.github/actions/codex/tsconfig.json (vendored, 15 lines changed)
@@ -1,15 +0,0 @@
{
  "compilerOptions": {
    "lib": ["ESNext"],
    "target": "ESNext",
    "module": "ESNext",
    "moduleDetection": "force",
    "moduleResolution": "bundler",

    "noEmit": true,
    "strict": true,
    "skipLibCheck": true
  },

  "include": ["src"]
}
.github/codex/home/config.toml (vendored, 3 lines changed)
@@ -1,3 +0,0 @@
model = "o3"

# Consider setting [mcp_servers] here!
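For context on that last comment: an `[mcp_servers]` table in `config.toml` generally declares a command to launch each MCP server. A minimal sketch is shown below; the `docs` server name, the `npx` command, its arguments, and the environment variable are hypothetical placeholders, not part of this diff.

```toml
# Hypothetical example only - the server name, command, and args are placeholders.
[mcp_servers.docs]
command = "npx"
args = ["-y", "some-mcp-server"]
# Optional environment variables passed to the launched MCP server process.
env = { "API_KEY" = "your-key-here" }
```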
.github/codex/labels/codex-attempt.md (vendored, 9 lines changed)
@@ -1,9 +0,0 @@
Attempt to solve the reported issue.

If a code change is required, create a new branch, commit the fix, and open a pull request that resolves the problem.

Here is the original GitHub issue that triggered this run:

### {CODEX_ACTION_ISSUE_TITLE}

{CODEX_ACTION_ISSUE_BODY}
.github/codex/labels/codex-review.md (vendored, 7 lines changed)
@@ -1,7 +0,0 @@
Review this PR and respond with a very concise final message, formatted in Markdown.

There should be a summary of the changes (1-2 sentences) and a few bullet points if necessary.

Then provide the **review** (1-2 sentences plus bullet points, friendly tone).

{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally.
.github/codex/labels/codex-triage.md (vendored, 7 lines changed)
@@ -1,7 +0,0 @@
Troubleshoot whether the reported issue is valid.

Provide a concise and respectful comment summarizing the findings.

### {CODEX_ACTION_ISSUE_TITLE}

{CODEX_ACTION_ISSUE_BODY}
.github/dependabot.yaml (vendored, 26 lines changed)
@@ -1,26 +0,0 @@
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference#package-ecosystem-

version: 2
updates:
  - package-ecosystem: bun
    directory: .github/actions/codex
    schedule:
      interval: weekly
  - package-ecosystem: cargo
    directories:
      - codex-rs
      - codex-rs/*
    schedule:
      interval: weekly
  - package-ecosystem: devcontainers
    directory: /
    schedule:
      interval: weekly
  - package-ecosystem: docker
    directory: codex-cli
    schedule:
      interval: weekly
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: weekly
.github/dotslash-config.json (vendored, 28 lines changed)
@@ -1,28 +0,0 @@
{
  "outputs": {
    "codex-exec": {
      "platforms": {
        "macos-aarch64": { "regex": "^codex-exec-aarch64-apple-darwin\\.zst$", "path": "codex-exec" },
        "macos-x86_64": { "regex": "^codex-exec-x86_64-apple-darwin\\.zst$", "path": "codex-exec" },
        "linux-x86_64": { "regex": "^codex-exec-x86_64-unknown-linux-musl\\.zst$", "path": "codex-exec" },
        "linux-aarch64": { "regex": "^codex-exec-aarch64-unknown-linux-musl\\.zst$", "path": "codex-exec" }
      }
    },

    "codex": {
      "platforms": {
        "macos-aarch64": { "regex": "^codex-aarch64-apple-darwin\\.zst$", "path": "codex" },
        "macos-x86_64": { "regex": "^codex-x86_64-apple-darwin\\.zst$", "path": "codex" },
        "linux-x86_64": { "regex": "^codex-x86_64-unknown-linux-musl\\.zst$", "path": "codex" },
        "linux-aarch64": { "regex": "^codex-aarch64-unknown-linux-musl\\.zst$", "path": "codex" }
      }
    },

    "codex-linux-sandbox": {
      "platforms": {
        "linux-x86_64": { "regex": "^codex-linux-sandbox-x86_64-unknown-linux-musl\\.zst$", "path": "codex-linux-sandbox" },
        "linux-aarch64": { "regex": "^codex-linux-sandbox-aarch64-unknown-linux-musl\\.zst$", "path": "codex-linux-sandbox" }
      }
    }
  }
}
.github/workflows/ci.yml (vendored, 75 lines changed)
@@ -19,67 +19,40 @@ jobs:
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 10.8.1
|
||||
run_install: false
|
||||
# Run codex-cli/ tasks first because they are higher signal.
|
||||
|
||||
- name: Get pnpm store directory
|
||||
id: pnpm-cache
|
||||
shell: bash
|
||||
run: |
|
||||
echo "store_path=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Setup pnpm cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ steps.pnpm-cache.outputs.store_path }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install
|
||||
|
||||
# Run all tasks using workspace filters
|
||||
|
||||
- name: Check TypeScript code formatting
|
||||
- name: Install dependencies (codex-cli)
|
||||
working-directory: codex-cli
|
||||
run: pnpm run format
|
||||
run: npm ci
|
||||
|
||||
- name: Check Markdown and config file formatting
|
||||
run: pnpm run format
|
||||
- name: Check formatting (codex-cli)
|
||||
working-directory: codex-cli
|
||||
run: npm run format
|
||||
|
||||
- name: Run tests
|
||||
run: pnpm run test
|
||||
- name: Run tests (codex-cli)
|
||||
working-directory: codex-cli
|
||||
run: npm run test
|
||||
|
||||
- name: Lint
|
||||
- name: Lint (codex-cli)
|
||||
working-directory: codex-cli
|
||||
run: |
|
||||
pnpm --filter @openai/codex exec -- eslint src tests --ext ts --ext tsx \
|
||||
--report-unused-disable-directives \
|
||||
npm run lint -- \
|
||||
--rule "no-console:error" \
|
||||
--rule "no-debugger:error" \
|
||||
--max-warnings=-1
|
||||
|
||||
- name: Type-check
|
||||
run: pnpm run typecheck
|
||||
|
||||
- name: Build
|
||||
run: pnpm run build
|
||||
|
||||
- name: Ensure staging a release works.
|
||||
- name: Type‑check (codex-cli)
|
||||
working-directory: codex-cli
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: pnpm stage-release
|
||||
run: npm run typecheck
|
||||
|
||||
- name: Ensure root README.md contains only ASCII and certain Unicode code points
|
||||
run: ./scripts/asciicheck.py README.md
|
||||
- name: Check root README ToC
|
||||
run: python3 scripts/readme_toc.py README.md
|
||||
- name: Build (codex-cli)
|
||||
working-directory: codex-cli
|
||||
run: npm run build
|
||||
|
||||
- name: Ensure codex-cli/README.md contains only ASCII and certain Unicode code points
|
||||
run: ./scripts/asciicheck.py codex-cli/README.md
|
||||
- name: Check codex-cli/README ToC
|
||||
run: python3 scripts/readme_toc.py codex-cli/README.md
|
||||
# Run formatting checks in the root directory last.
|
||||
|
||||
- name: Install dependencies (root)
|
||||
run: npm ci
|
||||
|
||||
- name: Check formatting (root)
|
||||
run: npm run format
|
||||
|
||||
.github/workflows/cla.yml (vendored, 2 lines changed)
@@ -23,7 +23,7 @@ jobs:
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          path-to-document: https://github.com/openai/codex/blob/main/docs/CLA.md
          path-to-document: docs/CLA.md
          path-to-signatures: signatures/cla.json
          branch: cla-signatures
          allowlist: dependabot[bot]
.github/workflows/codespell.yml (vendored, 27 lines changed)
@@ -1,27 +0,0 @@
# Codespell configuration is within .codespellrc
---
name: Codespell

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

permissions:
  contents: read

jobs:
  codespell:
    name: Check for spelling errors
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Annotate locations with typos
        uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
      - name: Codespell
        uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2
        with:
          ignore_words_file: .codespellignore
.github/workflows/codex.yml (vendored, 95 lines changed)
@@ -1,95 +0,0 @@
|
||||
name: Codex
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, labeled]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
types: [labeled]
|
||||
|
||||
jobs:
|
||||
codex:
|
||||
# This `if` check provides complex filtering logic to avoid running Codex
|
||||
# on every PR. Admittedly, one thing this does not verify is whether the
|
||||
# sender has write access to the repo: that must be done as part of a
|
||||
# runtime step.
|
||||
#
|
||||
# Note the label values should match the ones in the .github/codex/labels
|
||||
# folder.
|
||||
if: |
|
||||
(github.event_name == 'issues' && (
|
||||
(github.event.action == 'labeled' && (github.event.label.name == 'codex-attempt' || github.event.label.name == 'codex-triage'))
|
||||
)) ||
|
||||
(github.event_name == 'pull_request' && github.event.action == 'labeled' && github.event.label.name == 'codex-review')
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write # can push or create branches
|
||||
issues: write # for comments + labels on issues/PRs
|
||||
pull-requests: write # for PR comments/labels
|
||||
steps:
|
||||
# TODO: Consider adding an optional mode (--dry-run?) to actions/codex
|
||||
# that verifies whether Codex should actually be run for this event.
|
||||
# (For example, it may be rejected because the sender does not have
|
||||
# write access to the repo.) The benefit would be two-fold:
|
||||
# 1. As the first step of this job, it gives us a chance to add a reaction
|
||||
# or comment to the PR/issue ASAP to "ack" the request.
|
||||
# 2. It saves resources by skipping the clone and setup steps below if
|
||||
# Codex is not going to run.
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# We install the dependencies like we would for an ordinary CI job,
|
||||
# particularly because Codex will not have network access to install
|
||||
# these dependencies.
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 10.8.1
|
||||
run_install: false
|
||||
|
||||
- name: Get pnpm store directory
|
||||
id: pnpm-cache
|
||||
shell: bash
|
||||
run: |
|
||||
echo "store_path=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Setup pnpm cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ steps.pnpm-cache.outputs.store_path }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install
|
||||
|
||||
- uses: dtolnay/rust-toolchain@1.88
|
||||
with:
|
||||
targets: x86_64-unknown-linux-gnu
|
||||
components: clippy
|
||||
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
${{ github.workspace }}/codex-rs/target/
|
||||
key: cargo-ubuntu-24.04-x86_64-unknown-linux-gnu-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
# Note it is possible that the `verify` step internal to Run Codex will
|
||||
# fail, in which case the work to setup the repo was worthless :(
|
||||
- name: Run Codex
|
||||
uses: ./.github/actions/codex
|
||||
with:
|
||||
openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
codex_home: ./.github/codex/home
|
||||
.github/workflows/rust-ci.yml (vendored, 118 lines changed)
@@ -1,118 +0,0 @@
|
||||
name: rust-ci
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- "codex-rs/**"
|
||||
- ".github/**"
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
# For CI, we build in debug (`--profile dev`) rather than release mode so we
|
||||
# get signal faster.
|
||||
|
||||
jobs:
|
||||
# CI that don't need specific targets
|
||||
general:
|
||||
name: Format / etc
|
||||
runs-on: ubuntu-24.04
|
||||
defaults:
|
||||
run:
|
||||
working-directory: codex-rs
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@1.88
|
||||
with:
|
||||
components: rustfmt
|
||||
- name: cargo fmt
|
||||
run: cargo fmt -- --config imports_granularity=Item --check
|
||||
|
||||
# CI to validate on different os/targets
|
||||
lint_build_test:
|
||||
name: ${{ matrix.runner }} - ${{ matrix.target }}
|
||||
runs-on: ${{ matrix.runner }}
|
||||
timeout-minutes: 30
|
||||
defaults:
|
||||
run:
|
||||
working-directory: codex-rs
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# Note: While Codex CLI does not support Windows today, we include
|
||||
# Windows in CI to ensure the code at least builds there.
|
||||
include:
|
||||
- runner: macos-14
|
||||
target: aarch64-apple-darwin
|
||||
- runner: macos-14
|
||||
target: x86_64-apple-darwin
|
||||
- runner: ubuntu-24.04
|
||||
target: x86_64-unknown-linux-musl
|
||||
- runner: ubuntu-24.04
|
||||
target: x86_64-unknown-linux-gnu
|
||||
- runner: ubuntu-24.04-arm
|
||||
target: aarch64-unknown-linux-musl
|
||||
- runner: ubuntu-24.04-arm
|
||||
target: aarch64-unknown-linux-gnu
|
||||
- runner: windows-latest
|
||||
target: x86_64-pc-windows-msvc
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@1.88
|
||||
with:
|
||||
targets: ${{ matrix.target }}
|
||||
components: clippy
|
||||
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
${{ github.workspace }}/codex-rs/target/
|
||||
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
|
||||
name: Install musl build tools
|
||||
run: |
|
||||
sudo apt install -y musl-tools pkg-config
|
||||
|
||||
- name: cargo clippy
|
||||
id: clippy
|
||||
continue-on-error: true
|
||||
run: cargo clippy --target ${{ matrix.target }} --all-features --tests -- -D warnings
|
||||
|
||||
# Running `cargo build` from the workspace root builds the workspace using
|
||||
# the union of all features from third-party crates. This can mask errors
|
||||
# where individual crates have underspecified features. To avoid this, we
|
||||
# run `cargo build` for each crate individually, though because this is
|
||||
# slower, we only do this for the x86_64-unknown-linux-gnu target.
|
||||
- name: cargo build individual crates
|
||||
id: build
|
||||
if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' }}
|
||||
continue-on-error: true
|
||||
run: find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 | xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo build'
|
||||
|
||||
- name: cargo test
|
||||
id: test
|
||||
continue-on-error: true
|
||||
run: cargo test --all-features --target ${{ matrix.target }}
|
||||
env:
|
||||
RUST_BACKTRACE: 1
|
||||
|
||||
# Fail the job if any of the previous steps failed.
|
||||
- name: verify all steps passed
|
||||
if: |
|
||||
steps.clippy.outcome == 'failure' ||
|
||||
steps.build.outcome == 'failure' ||
|
||||
steps.test.outcome == 'failure'
|
||||
run: |
|
||||
echo "One or more checks failed (clippy, build, or test). See logs for details."
|
||||
exit 1
|
||||
.github/workflows/rust-release.yml (vendored, 193 lines changed)
@@ -1,193 +0,0 @@
|
||||
# Release workflow for codex-rs.
|
||||
# To release, follow a workflow like:
|
||||
# ```
|
||||
# git tag -a rust-v0.1.0 -m "Release 0.1.0"
|
||||
# git push origin rust-v0.1.0
|
||||
# ```
|
||||
|
||||
name: rust-release
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "rust-v*.*.*"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
tag-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Validate tag matches Cargo.toml version
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "::group::Tag validation"
|
||||
|
||||
# 1. Must be a tag and match the regex
|
||||
[[ "${GITHUB_REF_TYPE}" == "tag" ]] \
|
||||
|| { echo "❌ Not a tag push"; exit 1; }
|
||||
[[ "${GITHUB_REF_NAME}" =~ ^rust-v[0-9]+\.[0-9]+\.[0-9]+(-(alpha|beta)(\.[0-9]+)?)?$ ]] \
|
||||
|| { echo "❌ Tag '${GITHUB_REF_NAME}' doesn't match expected format"; exit 1; }
|
||||
|
||||
# 2. Extract versions
|
||||
tag_ver="${GITHUB_REF_NAME#rust-v}"
|
||||
cargo_ver="$(grep -m1 '^version' codex-rs/Cargo.toml \
|
||||
| sed -E 's/version *= *"([^"]+)".*/\1/')"
|
||||
|
||||
# 3. Compare
|
||||
[[ "${tag_ver}" == "${cargo_ver}" ]] \
|
||||
|| { echo "❌ Tag ${tag_ver} ≠ Cargo.toml ${cargo_ver}"; exit 1; }
|
||||
|
||||
echo "✅ Tag and Cargo.toml agree (${tag_ver})"
|
||||
echo "::endgroup::"
|
||||
|
||||
build:
|
||||
needs: tag-check
|
||||
name: ${{ matrix.runner }} - ${{ matrix.target }}
|
||||
runs-on: ${{ matrix.runner }}
|
||||
timeout-minutes: 30
|
||||
defaults:
|
||||
run:
|
||||
working-directory: codex-rs
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- runner: macos-14
|
||||
target: aarch64-apple-darwin
|
||||
- runner: macos-14
|
||||
target: x86_64-apple-darwin
|
||||
- runner: ubuntu-24.04
|
||||
target: x86_64-unknown-linux-musl
|
||||
- runner: ubuntu-24.04
|
||||
target: x86_64-unknown-linux-gnu
|
||||
- runner: ubuntu-24.04-arm
|
||||
target: aarch64-unknown-linux-musl
|
||||
- runner: ubuntu-24.04-arm
|
||||
target: aarch64-unknown-linux-gnu
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@1.88
|
||||
with:
|
||||
targets: ${{ matrix.target }}
|
||||
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
${{ github.workspace }}/codex-rs/target/
|
||||
key: cargo-release-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
|
||||
name: Install musl build tools
|
||||
run: |
|
||||
sudo apt install -y musl-tools pkg-config
|
||||
|
||||
- name: Cargo build
|
||||
run: cargo build --target ${{ matrix.target }} --release --all-targets --all-features
|
||||
|
||||
- name: Stage artifacts
|
||||
shell: bash
|
||||
run: |
|
||||
dest="dist/${{ matrix.target }}"
|
||||
mkdir -p "$dest"
|
||||
|
||||
cp target/${{ matrix.target }}/release/codex-exec "$dest/codex-exec-${{ matrix.target }}"
|
||||
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
|
||||
|
||||
# After https://github.com/openai/codex/pull/1228 is merged and a new
|
||||
# release is cut with an artifacts built after that PR, the `-gnu`
|
||||
# variants can go away as we will only use the `-musl` variants.
|
||||
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'x86_64-unknown-linux-gnu' || matrix.target == 'aarch64-unknown-linux-gnu' || matrix.target == 'aarch64-unknown-linux-musl' }}
|
||||
name: Stage Linux-only artifacts
|
||||
shell: bash
|
||||
run: |
|
||||
dest="dist/${{ matrix.target }}"
|
||||
cp target/${{ matrix.target }}/release/codex-linux-sandbox "$dest/codex-linux-sandbox-${{ matrix.target }}"
|
||||
|
||||
- name: Compress artifacts
|
||||
shell: bash
|
||||
run: |
|
||||
# Path that contains the uncompressed binaries for the current
|
||||
# ${{ matrix.target }}
|
||||
dest="dist/${{ matrix.target }}"
|
||||
|
||||
# For compatibility with environments that lack the `zstd` tool we
|
||||
# additionally create a `.tar.gz` alongside every single binary that
|
||||
# we publish. The end result is:
|
||||
# codex-<target>.zst (existing)
|
||||
# codex-<target>.tar.gz (new)
|
||||
# ...same naming for codex-exec-* and codex-linux-sandbox-*
|
||||
|
||||
# 1. Produce a .tar.gz for every file in the directory *before* we
|
||||
# run `zstd --rm`, because that flag deletes the original files.
|
||||
for f in "$dest"/*; do
|
||||
base="$(basename "$f")"
|
||||
# Skip files that are already archives (shouldn't happen, but be
|
||||
# safe).
|
||||
if [[ "$base" == *.tar.gz ]]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Create per-binary tar.gz
|
||||
tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
|
||||
|
||||
# Also create .zst (existing behaviour) *and* remove the original
|
||||
# uncompressed binary to keep the directory small.
|
||||
zstd -T0 -19 --rm "$dest/$base"
|
||||
done
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.target }}
|
||||
# Upload the per-binary .zst files as well as the new .tar.gz
|
||||
# equivalents we generated in the previous step.
|
||||
path: |
|
||||
codex-rs/dist/${{ matrix.target }}/*
|
||||
|
||||
release:
|
||||
needs: build
|
||||
name: release
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: dist
|
||||
|
||||
- name: List
|
||||
run: ls -R dist/
|
||||
|
||||
- name: Define release name
|
||||
id: release_name
|
||||
run: |
|
||||
# Extract the version from the tag name, which is in the format
|
||||
# "rust-v0.1.0".
|
||||
version="${GITHUB_REF_NAME#rust-v}"
|
||||
echo "name=${version}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
name: ${{ steps.release_name.outputs.name }}
|
||||
tag_name: ${{ github.ref_name }}
|
||||
files: dist/**
|
||||
# For now, tag releases as "prerelease" because we are not claiming
|
||||
# the Rust CLI is stable yet.
|
||||
prerelease: true
|
||||
|
||||
- uses: facebook/dotslash-publish-release@v2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
tag: ${{ github.ref_name }}
|
||||
config: .github/dotslash-config.json
|
||||
.gitignore (vendored, 18 lines changed)
@@ -1,11 +1,5 @@
|
||||
# deps
|
||||
# Node.js dependencies
|
||||
node_modules
|
||||
.pnpm-store
|
||||
.pnpm-debug.log
|
||||
|
||||
# Keep pnpm-lock.yaml
|
||||
!pnpm-lock.yaml
|
||||
|
||||
# build
|
||||
dist/
|
||||
@@ -23,14 +17,9 @@ result
|
||||
.vscode/
|
||||
.idea/
|
||||
.history/
|
||||
.zed/
|
||||
*.swp
|
||||
*~
|
||||
|
||||
# cli tools
|
||||
CLAUDE.md
|
||||
.claude/
|
||||
|
||||
# caches
|
||||
.cache/
|
||||
.turbo/
|
||||
@@ -72,12 +61,9 @@ Icon?
|
||||
# Unwanted package managers
|
||||
.yarn/
|
||||
yarn.lock
|
||||
pnpm-lock.yaml
|
||||
|
||||
# release
|
||||
package.json-e
|
||||
session.ts-e
|
||||
CHANGELOG.ignore.md
|
||||
|
||||
# nix related
|
||||
.direnv
|
||||
.envrc
|
||||
CHANGELOG.ignore.md
|
||||
@@ -1 +0,0 @@
pnpm lint-staged
.npmrc (4 lines changed)
@@ -1,4 +0,0 @@
shamefully-hoist=true
strict-peer-dependencies=false
node-linker=hoisted
prefer-workspace-packages=true
@@ -1,3 +1,2 @@
/codex-cli/dist
/codex-cli/node_modules
pnpm-lock.yaml
.vscode/launch.json (vendored, 18 lines changed)
@@ -1,18 +0,0 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "type": "lldb",
      "request": "launch",
      "name": "Cargo launch",
      "cargo": {
        "cwd": "${workspaceFolder}/codex-rs",
        "args": [
          "build",
          "--bin=codex-tui"
        ]
      },
      "args": []
    }
  ]
}
.vscode/settings.json (vendored, 10 lines changed)
@@ -1,10 +0,0 @@
{
  "rust-analyzer.checkOnSave": true,
  "rust-analyzer.check.command": "clippy",
  "rust-analyzer.check.extraArgs": ["--all-features", "--tests"],
  "rust-analyzer.rustfmt.extraArgs": ["--config", "imports_granularity=Item"],
  "[rust]": {
    "editor.defaultFormatter": "rust-lang.rust-analyzer",
    "editor.formatOnSave": true,
  }
}
@@ -1,9 +0,0 @@
# Rust/codex-rs

In the codex-rs folder where the rust code lives:

- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR`. You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.

Before creating a pull request with changes to `codex-rs`, run `just fmt` (in `codex-rs` directory) to format the code and `just fix` (in `codex-rs` directory) to fix any linter issues in the code, ensure the test suite passes by running `cargo test --all-features` in the `codex-rs` directory.

When making individual changes prefer running tests on individual files or projects first.
CHANGELOG.md (174 lines changed)
@@ -2,185 +2,19 @@
|
||||
|
||||
You can install any of these versions: `npm install -g codex@version`
|
||||
|
||||
## `0.1.2505172129`
|
||||
|
||||
### 🪲 Bug Fixes
|
||||
|
||||
- Add node version check (#1007)
|
||||
- Persist token after refresh (#1006)
|
||||
|
||||
## `0.1.2505171619`
|
||||
|
||||
- `codex --login` + `codex --free` (#998)
|
||||
|
||||
## `0.1.2505161800`
|
||||
|
||||
- Sign in with chatgpt credits (#974)
|
||||
- Add support for OpenAI tool type, local_shell (#961)
|
||||
|
||||
## `0.1.2505161243`
|
||||
|
||||
- Sign in with chatgpt (#963)
|
||||
- Session history viewer (#912)
|
||||
- Apply patch issue when using different cwd (#942)
|
||||
- Diff command for filenames with special characters (#954)
|
||||
|
||||
## `0.1.2505160811`
|
||||
|
||||
- `codex-mini-latest` (#951)
|
||||
|
||||
## `0.1.2505140839`
|
||||
|
||||
### 🪲 Bug Fixes
|
||||
|
||||
- Gpt-4.1 apply_patch handling (#930)
|
||||
- Add support for fileOpener in config.json (#911)
|
||||
- Patch in #366 and #367 for marked-terminal (#916)
|
||||
- Remember to set lastIndex = 0 on shared RegExp (#918)
|
||||
- Always load version from package.json at runtime (#909)
|
||||
- Tweak the label for citations for better rendering (#919)
|
||||
- Tighten up some logic around session timestamps and ids (#922)
|
||||
- Change EventMsg enum so every variant takes a single struct (#925)
|
||||
- Reasoning default to medium, show workdir when supplied (#931)
|
||||
- Test_dev_null_write() was not using echo as intended (#923)
|
||||
|
||||
## `0.1.2504301751`
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- User config api key (#569)
|
||||
- `@mention` files in codex (#701)
|
||||
- Add `--reasoning` CLI flag (#314)
|
||||
- Lower default retry wait time and increase number of tries (#720)
|
||||
- Add common package registries domains to allowed-domains list (#414)
|
||||
|
||||
### 🪲 Bug Fixes
|
||||
|
||||
- Insufficient quota message (#758)
|
||||
- Input keyboard shortcut opt+delete (#685)
|
||||
- `/diff` should include untracked files (#686)
|
||||
- Only allow running without sandbox if explicitly marked in safe container (#699)
|
||||
- Tighten up check for /usr/bin/sandbox-exec (#710)
|
||||
- Check if sandbox-exec is available (#696)
|
||||
- Duplicate messages in quiet mode (#680)
|
||||
|
||||
## `0.1.2504251709`
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Add openai model info configuration (#551)
|
||||
- Added provider to run quiet mode function (#571)
|
||||
- Create parent directories when creating new files (#552)
|
||||
- Print bug report URL in terminal instead of opening browser (#510) (#528)
|
||||
- Add support for custom provider configuration in the user config (#537)
|
||||
- Add support for OpenAI-Organization and OpenAI-Project headers (#626)
|
||||
- Add specific instructions for creating API keys in error msg (#581)
|
||||
- Enhance toCodePoints to prevent potential unicode 14 errors (#615)
|
||||
- More native keyboard navigation in multiline editor (#655)
|
||||
- Display error on selection of invalid model (#594)
|
||||
|
||||
### 🪲 Bug Fixes
|
||||
|
||||
- Model selection (#643)
|
||||
- Nits in apply patch (#640)
|
||||
- Input keyboard shortcuts (#676)
|
||||
- `apply_patch` unicode characters (#625)
|
||||
- Don't clear turn input before retries (#611)
|
||||
- More loosely match context for apply_patch (#610)
|
||||
- Update bug report template - there is no --revision flag (#614)
|
||||
- Remove outdated copy of text input and external editor feature (#670)
|
||||
- Remove unreachable "disableResponseStorage" logic flow introduced in #543 (#573)
|
||||
- Non-openai mode - fix for gemini content: null, fix 429 to throw before stream (#563)
|
||||
- Only allow going up in history when not already in history if input is empty (#654)
|
||||
- Do not grant "node" user sudo access when using run_in_container.sh (#627)
|
||||
- Update scripts/build_container.sh to use pnpm instead of npm (#631)
|
||||
- Update lint-staged config to use pnpm --filter (#582)
|
||||
- Non-openai mode - don't default temp and top_p (#572)
|
||||
- Fix error catching when checking for updates (#597)
|
||||
- Close stdin when running an exec tool call (#636)
|
||||
|
||||
## `0.1.2504221401`
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Show actionable errors when api keys are missing (#523)
|
||||
- Add CLI `--version` flag (#492)
|
||||
|
||||
### 🪲 Bug Fixes
|
||||
|
||||
- Agent loop for ZDR (`disableResponseStorage`) (#543)
|
||||
- Fix relative `workdir` check for `apply_patch` (#556)
|
||||
- Minimal mid-stream #429 retry loop using existing back-off (#506)
|
||||
- Inconsistent usage of base URL and API key (#507)
|
||||
- Remove requirement for api key for ollama (#546)
|
||||
- Support `[provider]_BASE_URL` (#542)
|
||||
|
||||
## `0.1.2504220136`
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Add support for ZDR orgs (#481)
|
||||
- Include fractional portion of chunk that exceeds stdout/stderr limit (#497)
|
||||
|
||||
## `0.1.2504211509`
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Support multiple providers via Responses-Completion transformation (#247)
|
||||
- Add user-defined safe commands configuration and approval logic #380 (#386)
|
||||
- Allow switching approval modes when prompted to approve an edit/command (#400)
|
||||
- Add support for `/diff` command autocomplete in TerminalChatInput (#431)
|
||||
- Auto-open model selector if user selects deprecated model (#427)
|
||||
- Read approvalMode from config file (#298)
|
||||
- `/diff` command to view git diff (#426)
|
||||
- Tab completions for file paths (#279)
|
||||
- Add /command autocomplete (#317)
|
||||
- Allow multi-line input (#438)
|
||||
|
||||
### 🪲 Bug Fixes
|
||||
|
||||
- `full-auto` support in quiet mode (#374)
|
||||
- Enable shell option for child process execution (#391)
|
||||
- Configure husky and lint-staged for pnpm monorepo (#384)
|
||||
- Command pipe execution by improving shell detection (#437)
|
||||
- Name of the file not matching the name of the component (#354)
|
||||
- Allow proper exit from new Switch approval mode dialog (#453)
|
||||
- Ensure /clear resets context and exclude system messages from approximateTokenUsed count (#443)
|
||||
- `/clear` now clears terminal screen and resets context left indicator (#425)
|
||||
- Correct fish completion function name in CLI script (#485)
|
||||
- Auto-open model-selector when model is not found (#448)
|
||||
- Remove unnecessary isLoggingEnabled() checks (#420)
|
||||
- Improve test reliability for `raw-exec` (#434)
|
||||
- Unintended tear down of agent loop (#483)
|
||||
- Remove extraneous type casts (#462)
|
||||
|
||||
## `0.1.2504181820`
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Add `/bug` report command (#312)
|
||||
- Notify when a newer version is available (#333)
|
||||
|
||||
### 🪲 Bug Fixes
|
||||
|
||||
- Update context left display logic in TerminalChatInput component (#307)
|
||||
- Improper spawn of sh on Windows Powershell (#318)
|
||||
- `/bug` report command, thinking indicator (#381)
|
||||
- Include pnpm lock file (#377)
|
||||
|
||||
## `0.1.2504172351`
|
||||
## 0.1.2504172351
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Add Nix flake for reproducible development environments (#225)
|
||||
|
||||
### 🪲 Bug Fixes
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- Handle invalid commands (#304)
|
||||
- Raw-exec-process-group.test improve reliability and error handling (#280)
|
||||
- Canonicalize the writeable paths used in seatbelt policy (#275)
|
||||
|
||||
## `0.1.2504172304`
|
||||
## 0.1.2504172304
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
@@ -193,7 +27,7 @@ You can install any of these versions: `npm install -g codex@version`
|
||||
- `--config`/`-c` flag to open global instructions in nvim (#158)
|
||||
- Update position of cursor when navigating input history with arrow keys to the end of the text (#255)
|
||||
|
||||
### 🪲 Bug Fixes
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- Correct word deletion logic for trailing spaces (Ctrl+Backspace) (#131)
|
||||
- Improve Windows compatibility for CLI commands and sandbox (#261)
|
||||
|
||||
PNPM.md (70 lines changed)
@@ -1,70 +0,0 @@
|
||||
# Migration to pnpm
|
||||
|
||||
This project has been migrated from npm to pnpm to improve dependency management and developer experience.
|
||||
|
||||
## Why pnpm?
|
||||
|
||||
- **Faster installation**: pnpm is significantly faster than npm and yarn
|
||||
- **Disk space savings**: pnpm uses a content-addressable store to avoid duplication
|
||||
- **Phantom dependency prevention**: pnpm creates a strict node_modules structure
|
||||
- **Native workspaces support**: simplified monorepo management
|
||||
|
||||
## How to use pnpm
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
# Global installation of pnpm
|
||||
npm install -g pnpm@10.8.1
|
||||
|
||||
# Or with corepack (available with Node.js 22+)
|
||||
corepack enable
|
||||
corepack prepare pnpm@10.8.1 --activate
|
||||
```
|
||||
|
||||
### Common commands
|
||||
|
||||
| npm command | pnpm equivalent |
|
||||
| --------------- | ---------------- |
|
||||
| `npm install` | `pnpm install` |
|
||||
| `npm run build` | `pnpm run build` |
|
||||
| `npm test` | `pnpm test` |
|
||||
| `npm run lint` | `pnpm run lint` |
|
||||
|
||||
### Workspace-specific commands
|
||||
|
||||
| Action | Command |
|
||||
| ------------------------------------------ | ---------------------------------------- |
|
||||
| Run a command in a specific package | `pnpm --filter @openai/codex run build` |
|
||||
| Install a dependency in a specific package | `pnpm --filter @openai/codex add lodash` |
|
||||
| Run a command in all packages | `pnpm -r run test` |
|
||||
|
||||
## Monorepo structure
|
||||
|
||||
```
|
||||
codex/
|
||||
├── pnpm-workspace.yaml # Workspace configuration
|
||||
├── .npmrc # pnpm configuration
|
||||
├── package.json # Root dependencies and scripts
|
||||
├── codex-cli/ # Main package
|
||||
│ └── package.json # codex-cli specific dependencies
|
||||
└── docs/ # Documentation (future package)
|
||||
```
|
||||
|
||||
## Configuration files
|
||||
|
||||
- **pnpm-workspace.yaml**: Defines the packages included in the monorepo
|
||||
- **.npmrc**: Configures pnpm behavior
|
||||
- **Root package.json**: Contains shared scripts and dependencies
|
||||
|
||||
## CI/CD
|
||||
|
||||
CI/CD workflows have been updated to use pnpm instead of npm. Make sure your CI environments use pnpm 10.8.1 or higher.
|
||||
|
||||
## Known issues
|
||||
|
||||
If you encounter issues with pnpm, try the following solutions:
|
||||
|
||||
1. Remove the `node_modules` folder and `pnpm-lock.yaml` file, then run `pnpm install`
|
||||
2. Make sure you're using pnpm 10.8.1 or higher
|
||||
3. Verify that Node.js 22 or higher is installed
|
||||
README.md (529 lines changed)
@@ -1,61 +1,51 @@
|
||||
<h1 align="center">OpenAI Codex CLI</h1>
|
||||
<p align="center">Lightweight coding agent that runs in your terminal</p>
|
||||
|
||||
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p>
|
||||
<p align="center"><code>npm i -g @openai/codex</code></p>
|
||||
|
||||
This is the home of the **Codex CLI**, which is a coding agent from OpenAI that runs locally on your computer. If you are looking for the _cloud-based agent_ from OpenAI, **Codex [Web]**, see <https://chatgpt.com/codex>.
|
||||
|
||||
<!--  -->
|
||||

|
||||
|
||||
---
|
||||
|
||||
<details>
|
||||
<summary><strong>Table of contents</strong></summary>
|
||||
<summary><strong>Table of Contents</strong></summary>
|
||||
|
||||
<!-- Begin ToC -->
|
||||
|
||||
- [Experimental technology disclaimer](#experimental-technology-disclaimer)
|
||||
- [Experimental Technology Disclaimer](#experimental-technology-disclaimer)
|
||||
- [Quickstart](#quickstart)
|
||||
- [OpenAI API Users](#openai-api-users)
|
||||
- [OpenAI Plus/Pro Users](#openai-pluspro-users)
|
||||
- [Why Codex?](#why-codex)
|
||||
- [Security model & permissions](#security-model--permissions)
|
||||
- [Why Codex?](#whycodex)
|
||||
- [Security Model \& Permissions](#securitymodelpermissions)
|
||||
- [Platform sandboxing details](#platform-sandboxing-details)
|
||||
- [System requirements](#system-requirements)
|
||||
- [CLI reference](#cli-reference)
|
||||
- [Memory & project docs](#memory--project-docs)
|
||||
- [Non-interactive / CI mode](#non-interactive--ci-mode)
|
||||
- [Model Context Protocol (MCP)](#model-context-protocol-mcp)
|
||||
- [Tracing / verbose logging](#tracing--verbose-logging)
|
||||
- [System Requirements](#systemrequirements)
|
||||
- [CLI Reference](#clireference)
|
||||
- [Memory \& Project Docs](#memoryprojectdocs)
|
||||
- [Non‑interactive / CI mode](#noninteractivecimode)
|
||||
- [Recipes](#recipes)
|
||||
- [Installation](#installation)
|
||||
- [DotSlash](#dotslash)
|
||||
- [Configuration](#configuration)
|
||||
- [FAQ](#faq)
|
||||
- [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage)
|
||||
- [Codex open source fund](#codex-open-source-fund)
|
||||
- [Funding Opportunity](#funding-opportunity)
|
||||
- [Contributing](#contributing)
|
||||
- [Development workflow](#development-workflow)
|
||||
- [Writing high-impact code changes](#writing-high-impact-code-changes)
|
||||
- [Nix Flake Development](#nix-flake-development)
|
||||
- [Writing high‑impact code changes](#writing-highimpact-code-changes)
|
||||
- [Opening a pull request](#opening-a-pull-request)
|
||||
- [Review process](#review-process)
|
||||
- [Community values](#community-values)
|
||||
- [Getting help](#getting-help)
|
||||
- [Contributor license agreement (CLA)](#contributor-license-agreement-cla)
|
||||
- [Contributor License Agreement (CLA)](#contributor-license-agreement-cla)
|
||||
- [Quick fixes](#quick-fixes)
|
||||
- [Releasing `codex`](#releasing-codex)
|
||||
- [Security & responsible AI](#security--responsible-ai)
|
||||
- [Security \& Responsible AI](#securityresponsibleai)
|
||||
- [License](#license)
|
||||
|
||||
<!-- End ToC -->
|
||||
- [Zero Data Retention (ZDR) Organization Limitation](#zero-data-retention-zdr-organization-limitation)
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
## Experimental technology disclaimer
|
||||
## Experimental Technology Disclaimer
|
||||
|
||||
Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome:
|
||||
Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We’re building it in the open with the community and welcome:
|
||||
|
||||
- Bug reports
|
||||
- Feature requests
|
||||
@@ -66,97 +56,27 @@ Help us improve by filing issues or submitting PRs (see the section below for ho
|
||||
|
||||
## Quickstart
|
||||
|
||||
Install globally with your preferred package manager:
|
||||
Install globally:
|
||||
|
||||
```shell
|
||||
npm install -g @openai/codex # Alternatively: `brew install codex`
|
||||
npm install -g @openai/codex
|
||||
```
|
||||
|
||||
Or go to the [latest GitHub Release](https://github.com/openai/codex/releases/latest) and download the appropriate binary for your platform.
|
||||
|
||||
### OpenAI API Users
|
||||
|
||||
Next, set your OpenAI API key as an environment variable:
|
||||
|
||||
```shell
|
||||
export OPENAI_API_KEY="your-api-key-here"
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`), but we recommend setting it for the session.
|
||||
|
||||
### OpenAI Plus/Pro Users
|
||||
|
||||
If you have a paid OpenAI account, run the following to start the login process:
|
||||
|
||||
```
|
||||
codex login
|
||||
```
|
||||
|
||||
If you complete the process successfully, you should have a `~/.codex/auth.json` file that contains the credentials that Codex will use.
|
||||
|
||||
If you encounter problems with the login flow, please comment on <https://github.com/openai/codex/issues/1243>.
|
||||
|
||||
<details>
|
||||
<summary><strong>Use <code>--profile</code> to use other models</strong></summary>
|
||||
|
||||
Codex also allows you to use other providers that support the OpenAI Chat Completions (or Responses) API.
|
||||
|
||||
To do so, you must first define custom [providers](./config.md#model_providers) in `~/.codex/config.toml`. For example, the provider for a standard Ollama setup would be defined as follows:
|
||||
|
||||
```toml
|
||||
[model_providers.ollama]
|
||||
name = "Ollama"
|
||||
base_url = "http://localhost:11434/v1"
|
||||
```
|
||||
|
||||
The `base_url` will have `/chat/completions` appended to it to build the full URL for the request.
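For example, with the Ollama provider defined above, requests would go to `http://localhost:11434/v1/chat/completions`. As a minimal sketch (assuming a local Ollama server with the `mistral` model pulled; Codex's actual headers and payload will differ), you can exercise that endpoint directly:

```shell
curl http://localhost:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "mistral", "messages": [{"role": "user", "content": "Say hello"}]}'
```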
|
||||
|
||||
For providers that also require an `Authorization` header of the form `Bearer: SECRET`, an `env_key` can be specified, which indicates the environment variable to read to use as the value of `SECRET` when making a request:
|
||||
|
||||
```toml
|
||||
[model_providers.openrouter]
|
||||
name = "OpenRouter"
|
||||
base_url = "https://openrouter.ai/api/v1"
|
||||
env_key = "OPENROUTER_API_KEY"
|
||||
```
|
||||
|
||||
Providers that speak the Responses API are also supported by adding `wire_api = "responses"` as part of the definition. Accessing OpenAI models via Azure is an example of such a provider, though it also requires specifying additional `query_params` that need to be appended to the request URL:
|
||||
|
||||
```toml
|
||||
[model_providers.azure]
|
||||
name = "Azure"
|
||||
# Make sure you set the appropriate subdomain for this URL.
|
||||
base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
|
||||
env_key = "AZURE_OPENAI_API_KEY" # Or "OPENAI_API_KEY", whichever you use.
|
||||
# Newer versions appear to support the responses API, see https://github.com/openai/codex/pull/1321
|
||||
query_params = { api-version = "2025-04-01-preview" }
|
||||
wire_api = "responses"
|
||||
```
|
||||
|
||||
Once you have defined a provider you wish to use, you can configure it as your default provider as follows:
|
||||
|
||||
```toml
|
||||
model_provider = "azure"
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> If you find yourself experimenting with a variety of models and providers, then you likely want to invest in defining a _profile_ for each configuration like so:
|
||||
|
||||
```toml
|
||||
[profiles.o3]
|
||||
model_provider = "azure"
|
||||
model = "o3"
|
||||
|
||||
[profiles.mistral]
|
||||
model_provider = "ollama"
|
||||
model = "mistral"
|
||||
```
|
||||
|
||||
This way, you can specify one command-line argument (e.g., `--profile o3`, `--profile mistral`) to override multiple settings together.
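For example, with the profiles defined above you can switch configurations per invocation (the prompts here are just placeholders):

```shell
codex --profile o3 "review this diff for correctness"
codex --profile mistral "summarize the TODOs in this repo"
```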
|
||||
|
||||
</details>
|
||||
<br />
|
||||
> **Note:** This command sets the key only for your current terminal session. To make it permanent, add the `export` line to your shell's configuration file (e.g., `~/.zshrc`).
|
||||
>
|
||||
> **Tip:** You can also place your API key into a `.env` file at the root of your project:
|
||||
>
|
||||
> ```env
|
||||
> OPENAI_API_KEY=your-api-key-here
|
||||
> ```
|
||||
>
|
||||
> The CLI will automatically load variables from `.env` (via `dotenv/config`).
|
||||
|
||||
Run interactively:
|
||||
|
||||
@@ -171,150 +91,143 @@ codex "explain this codebase to me"
|
||||
```
|
||||
|
||||
```shell
|
||||
codex --full-auto "create the fanciest todo-list app"
|
||||
codex --approval-mode full-auto "create the fanciest todo-list app"
|
||||
```
|
||||
|
||||
That's it - Codex will scaffold a file, run it inside a sandbox, install any
|
||||
That’s it – Codex will scaffold a file, run it inside a sandbox, install any
|
||||
missing dependencies, and show you the live result. Approve the changes and
|
||||
they'll be committed to your working directory.
|
||||
they’ll be committed to your working directory.
|
||||
|
||||
---
|
||||
|
||||
## Why Codex?
|
||||
|
||||
|
||||
Codex CLI is built for developers who already **live in the terminal** and want
|
||||
ChatGPT-level reasoning **plus** the power to actually run code, manipulate
|
||||
files, and iterate - all under version control. In short, it's _chat-driven
|
||||
ChatGPT‑level reasoning **plus** the power to actually run code, manipulate
|
||||
files, and iterate – all under version control. In short, it’s _chat‑driven
|
||||
development_ that understands and executes your repo.
|
||||
|
||||
- **Zero setup** - bring your OpenAI API key and it just works!
|
||||
- **Zero setup** — bring your OpenAI API key and it just works!
|
||||
- **Full auto-approval, while safe + secure** by running network-disabled and directory-sandboxed
|
||||
- **Multimodal** - pass in screenshots or diagrams to implement features ✨
|
||||
- **Multimodal** — pass in screenshots or diagrams to implement features ✨
|
||||
|
||||
And it's **fully open-source** so you can see and contribute to how it develops!
|
||||
|
||||
---
|
||||
|
||||
## Security model & permissions
|
||||
## Security Model & Permissions
|
||||
|
||||
Codex lets you decide _how much autonomy_ you want to grant the agent. The following options can be configured independently:
|
||||
Codex lets you decide _how much autonomy_ the agent receives and the auto-approval policy via the
|
||||
`--approval-mode` flag (or the interactive onboarding prompt):
|
||||
|
||||
- [`approval_policy`](./codex-rs/config.md#approval_policy) determines when you should be prompted to approve whether Codex can execute a command
|
||||
- [`sandbox`](./codex-rs/config.md#sandbox) determines the _sandbox policy_ that Codex uses to execute untrusted commands
|
||||
| Mode | What the agent may do without asking | Still requires approval |
|
||||
| ------------------------- | -------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
|
||||
| **Suggest** <br>(default) | • Read any file in the repo | • **All** file writes/patches <br>• **Any** arbitrary shell commands (aside from reading files) |
|
||||
| **Auto Edit** | • Read **and** apply‑patch writes to files | • **All** shell commands |
|
||||
| **Full Auto** | • Read/write files <br>• Execute shell commands (network disabled, writes limited to your workdir) | – |
|
||||
|
||||
By default, Codex runs with `--ask-for-approval untrusted` and `--sandbox read-only`, which means that:
|
||||
In **Full Auto** every command is run **network‑disabled** and confined to the
|
||||
current working directory (plus temporary files) for defense‑in‑depth. Codex
|
||||
will also show a warning/confirmation if you start in **auto‑edit** or
|
||||
**full‑auto** while the directory is _not_ tracked by Git, so you always have a
|
||||
safety net.
|
||||
|
||||
- The user is prompted to approve every command not on the set of "trusted" commands built into Codex (`cat`, `ls`, etc.)
|
||||
- Approved commands are run outside of a sandbox because, in this case, user approval implies "trust."
|
||||
|
||||
Running Codex with the `--full-auto` convenience flag changes the configuration to `--ask-for-approval on-failure` and `--sandbox workspace-write`, which means that:
|
||||
|
||||
- Codex does not initially ask for user approval before running an individual command.
|
||||
- However, when it does run a command, the command is executed under a sandbox in which:
|
||||
- It can read any file on the system.
|
||||
- It can only write files under the current directory (or the directory specified via `--cd`).
|
||||
- Network requests are completely disabled.
|
||||
- Only if the command exits with a non-zero exit code will it ask the user for approval. If granted, it will re-attempt the command outside of the sandbox. (A common case is when Codex cannot `npm install` a dependency because that requires network access.)
|
||||
|
||||
Again, these two options can be configured independently. For example, if you want Codex to perform an "exploration" where you are happy for it to read anything it wants but you never want to be prompted, you could run Codex with `--ask-for-approval never` and `--sandbox read-only`.
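As a sketch, those combinations look like this on the command line (the flag values are the ones described above; the prompts are placeholders):

```shell
# Read-only "exploration" run that never prompts for approval.
codex --ask-for-approval never --sandbox read-only "map out the crates in this workspace"

# Equivalent to --full-auto: prompt only on failure, allow writes within the workspace.
codex --ask-for-approval on-failure --sandbox workspace-write "fix the failing tests"
```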
|
||||
Coming soon: you’ll be able to whitelist specific commands to auto‑execute with
|
||||
the network enabled, once we’re confident in additional safeguards.
|
||||
|
||||
### Platform sandboxing details
|
||||
|
||||
The mechanism Codex uses to implement the sandbox policy depends on your OS:
|
||||
The hardening mechanism Codex uses depends on your OS:
|
||||
|
||||
- **macOS 12+** uses **Apple Seatbelt** and runs commands using `sandbox-exec` with a profile (`-p`) that corresponds to the `--sandbox` that was specified.
|
||||
- **Linux** uses a combination of Landlock/seccomp APIs to enforce the `sandbox` configuration.
|
||||
- **macOS 12+** – commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).
|
||||
|
||||
Note that when running Linux in a containerized environment such as Docker, sandboxing may not work if the host/container configuration does not support the necessary Landlock/seccomp APIs. In such cases, we recommend configuring your Docker container so that it provides the sandbox guarantees you are looking for and then running `codex` with `--sandbox danger-full-access` (or, more simply, the `--dangerously-bypass-approvals-and-sandbox` flag) within your container.
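For instance, a containerized run might look like the following minimal sketch (the image name and mount layout are assumptions, and the container itself must provide the isolation you need):

```shell
docker run --rm -it -v "$PWD":/workspace -w /workspace your-locked-down-image \
  codex --sandbox danger-full-access "run the test suite and fix any failures"
```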
|
||||
- Everything is placed in a read‑only jail except for a small set of
|
||||
writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.).
|
||||
- Outbound network is _fully blocked_ by default – even if a child process
|
||||
tries to `curl` somewhere it will fail.
|
||||
|
||||
- **Linux** – there is no sandboxing by default.
|
||||
We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal
|
||||
container image** and mounts your repo _read/write_ at the same path. A
|
||||
custom `iptables`/`ipset` firewall script denies all egress except the
|
||||
OpenAI API. This gives you deterministic, reproducible runs without needing
|
||||
root on the host. You can use the [`run_in_container.sh`](./codex-cli/scripts/run_in_container.sh) script to set up the sandbox.
|
||||
|
||||
---
|
||||
|
||||
## System requirements
|
||||
## System Requirements
|
||||
|
||||
| Requirement | Details |
|
||||
| --------------------------- | --------------------------------------------------------------- |
|
||||
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
|
||||
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
|
||||
| RAM | 4-GB minimum (8-GB recommended) |
|
||||
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
|
||||
| Node.js | **22 or newer** (LTS recommended) |
|
||||
| Git (optional, recommended) | 2.23+ for built‑in PR helpers |
|
||||
| RAM | 4‑GB minimum (8‑GB recommended) |
|
||||
|
||||
> Never run `sudo npm install -g`; fix npm permissions instead.
|
||||
|
||||
---
|
||||
|
||||
## CLI reference
|
||||
## CLI Reference
|
||||
|
||||
| Command | Purpose | Example |
|
||||
| ------------------ | ---------------------------------- | ------------------------------- |
|
||||
| `codex` | Interactive TUI | `codex` |
|
||||
| `codex "..."` | Initial prompt for interactive TUI | `codex "fix lint errors"` |
|
||||
| `codex exec "..."` | Non-interactive "automation mode" | `codex exec "explain utils.ts"` |
|
||||
| Command | Purpose | Example |
|
||||
| ------------------------------------ | ----------------------------------- | ------------------------------------ |
|
||||
| `codex` | Interactive REPL | `codex` |
|
||||
| `codex "…"` | Initial prompt for interactive REPL | `codex "fix lint errors"` |
|
||||
| `codex -q "…"` | Non‑interactive "quiet mode" | `codex -q --json "explain utils.ts"` |
|
||||
| `codex completion <bash\|zsh\|fish>` | Print shell completion script | `codex completion bash` |
|
||||
|
||||
Key flags: `--model/-m`, `--ask-for-approval/-a`.
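For example (a sketch combining flags and values mentioned elsewhere in this README):

```shell
codex --model o3 --ask-for-approval on-failure "fix lint errors"
```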
|
||||
Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.
|
||||
|
||||
---
|
||||
|
||||
## Memory & project docs
|
||||
## Memory & Project Docs
|
||||
|
||||
You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for `AGENTS.md` files in the following places, and merges them top-down:
|
||||
Codex merges Markdown instructions in this order:
|
||||
|
||||
1. `~/.codex/AGENTS.md` - personal global guidance
|
||||
2. `AGENTS.md` at repo root - shared project notes
|
||||
3. `AGENTS.md` in the current working directory - sub-folder/feature specifics
|
||||
1. `~/.codex/instructions.md` – personal global guidance
|
||||
2. `codex.md` at repo root – shared project notes
|
||||
3. `codex.md` in cwd – sub‑package specifics
|
||||
|
||||
Disable with `--no-project-doc` or `CODEX_DISABLE_PROJECT_DOC=1`.
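A minimal `AGENTS.md` might look like the following sketch (the guidance itself is only an example, written as a shell heredoc so it can be pasted directly):

```shell
cat > AGENTS.md <<'EOF'
# Notes for Codex
- Run `cargo test` before proposing a change.
- Keep changes focused; open separate PRs for unrelated fixes.
EOF
```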
|
||||
|
||||
---
|
||||
|
||||
## Non-interactive / CI mode
|
||||
## Non‑interactive / CI mode
|
||||
|
||||
Run Codex head-less in pipelines. Example GitHub Action step:
|
||||
Run Codex head‑less in pipelines. Example GitHub Action step:
|
||||
|
||||
```yaml
|
||||
- name: Update changelog via Codex
|
||||
run: |
|
||||
npm install -g @openai/codex
|
||||
export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
|
||||
codex exec --full-auto "update CHANGELOG for next release"
|
||||
codex -a auto-edit --quiet "update CHANGELOG for next release"
|
||||
```
|
||||
|
||||
## Model Context Protocol (MCP)
|
||||
Set `CODEX_QUIET_MODE=1` to silence interactive UI noise.
|
||||
|
||||
The Codex CLI can be configured to leverage MCP servers by defining an [`mcp_servers`](./codex-rs/config.md#mcp_servers) section in `~/.codex/config.toml`. It is intended to mirror how tools such as Claude and Cursor define `mcpServers` in their respective JSON config files, though the Codex format is slightly different since it uses TOML rather than JSON, e.g.:
|
||||
## Tracing / Verbose Logging
|
||||
|
||||
```toml
|
||||
# IMPORTANT: the top-level key is `mcp_servers` rather than `mcpServers`.
|
||||
[mcp_servers.server-name]
|
||||
command = "npx"
|
||||
args = ["-y", "mcp-server"]
|
||||
env = { "API_KEY" = "value" }
|
||||
Setting the environment variable `DEBUG=true` prints full API request and response details:
|
||||
|
||||
```shell
|
||||
DEBUG=true codex
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> It is somewhat experimental, but the Codex CLI can also be run as an MCP _server_ via `codex mcp`. If you launch it with an MCP client such as `npx @modelcontextprotocol/inspector codex mcp` and send it a `tools/list` request, you will see that there is only one tool, `codex`, that accepts a grab-bag of inputs, including a catch-all `config` map for anything you might want to override. Feel free to play around with it and provide feedback via GitHub issues.
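> For example, to poke at the server interactively (command taken verbatim from the tip above):
>
> ```shell
> npx @modelcontextprotocol/inspector codex mcp
> ```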
|
||||
|
||||
## Tracing / verbose logging
|
||||
|
||||
Because Codex is written in Rust, it honors the `RUST_LOG` environment variable to configure its logging behavior.
|
||||
|
||||
The TUI defaults to `RUST_LOG=codex_core=info,codex_tui=info` and log messages are written to `~/.codex/log/codex-tui.log`, so you can leave the following running in a separate terminal to monitor log messages as they are written:
|
||||
|
||||
```
|
||||
tail -F ~/.codex/log/codex-tui.log
|
||||
```
|
||||
|
||||
By comparison, the non-interactive mode (`codex exec`) defaults to `RUST_LOG=error`, but messages are printed inline, so there is no need to monitor a separate file.
|
||||
|
||||
See the Rust documentation on [`RUST_LOG`](https://docs.rs/env_logger/latest/env_logger/#enabling-logging) for more information on the configuration options.
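For example, to get more detailed logs from a single non-interactive run (a sketch; adjust the module filter to taste):

```shell
RUST_LOG=codex_core=debug codex exec "explain this codebase to me"
```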
|
||||
|
||||
---
|
||||
|
||||
## Recipes
|
||||
|
||||
Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/openai/codex/blob/main/codex-cli/examples/prompting_guide.md) for more tips and usage patterns.
|
||||
Below are a few bite‑size examples you can copy‑paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/openai/codex/blob/main/codex-cli/examples/prompting_guide.md) for more tips and usage patterns.
|
||||
|
||||
| ✨ | What you type | What happens |
|
||||
| --- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
|
||||
| 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. |
|
||||
| 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. |
|
||||
| 2 | `codex "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. |
|
||||
| 3 | `codex "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. |
|
||||
| 4 | `codex "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. |
|
||||
| 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. |
|
||||
| 4 | `codex "Bulk‑rename *.jpeg → *.jpg with git mv"` | Safely renames files and updates imports/usages. |
|
||||
| 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step‑by‑step human explanation. |
|
||||
| 6 | `codex "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. |
|
||||
| 7 | `codex "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. |
|
||||
|
||||
@@ -323,65 +236,38 @@ Below are a few bite-size examples you can copy-paste. Replace the text in quote
|
||||
## Installation
|
||||
|
||||
<details open>
|
||||
<summary><strong>Install Codex CLI using your preferred package manager.</strong></summary>
|
||||
|
||||
From `brew` (recommended, downloads only the binary for your platform):
|
||||
<summary><strong>From npm (Recommended)</strong></summary>
|
||||
|
||||
```bash
|
||||
brew install codex
|
||||
npm install -g @openai/codex
|
||||
# or
|
||||
yarn global add @openai/codex
|
||||
# or
|
||||
bun install -g @openai/codex
|
||||
```
|
||||
|
||||
From `npm` (generally more readily available, but downloads binaries for all supported platforms):
|
||||
|
||||
```bash
|
||||
npm i -g @openai/codex
|
||||
```
|
||||
|
||||
Or go to the [latest GitHub Release](https://github.com/openai/codex/releases/latest) and download the appropriate binary for your platform.
|
||||
|
||||
Admittedly, each GitHub Release contains many executables, but in practice, you likely want one of these:
|
||||
|
||||
- macOS
|
||||
- Apple Silicon/arm64: `codex-aarch64-apple-darwin.tar.gz`
|
||||
- x86_64 (older Mac hardware): `codex-x86_64-apple-darwin.tar.gz`
|
||||
- Linux
|
||||
- x86_64: `codex-x86_64-unknown-linux-musl.tar.gz`
|
||||
- arm64: `codex-aarch64-unknown-linux-musl.tar.gz`
|
||||
|
||||
Each archive contains a single entry with the platform baked into the name (e.g., `codex-x86_64-unknown-linux-musl`), so you likely want to rename it to `codex` after extracting it.
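For example, on an Apple Silicon Mac that might look like the following sketch (the `releases/latest/download` URL pattern is an assumption; copying the link from the release page is safest):

```shell
curl -LO https://github.com/openai/codex/releases/latest/download/codex-aarch64-apple-darwin.tar.gz
tar -xzf codex-aarch64-apple-darwin.tar.gz
mv codex-aarch64-apple-darwin codex
./codex --help
```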
|
||||
|
||||
### DotSlash
|
||||
|
||||
The GitHub Release also contains a [DotSlash](https://dotslash-cli.com/) file for the Codex CLI named `codex`. Using a DotSlash file makes it possible to make a lightweight commit to source control to ensure all contributors use the same version of an executable, regardless of what platform they use for development.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Build from source</strong></summary>
|
||||
|
||||
|
||||
```bash
|
||||
# Clone the repository and navigate to the root of the Cargo workspace.
|
||||
# Clone the repository and navigate to the CLI package
|
||||
git clone https://github.com/openai/codex.git
|
||||
cd codex/codex-rs
|
||||
cd codex/codex-cli
|
||||
|
||||
# Install the Rust toolchain, if necessary.
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
source "$HOME/.cargo/env"
|
||||
rustup component add rustfmt
|
||||
rustup component add clippy
|
||||
# Install dependencies and build
|
||||
npm install
|
||||
npm run build
|
||||
|
||||
# Build Codex.
|
||||
cargo build
|
||||
# Get the usage and the options
|
||||
node ./dist/cli.js --help
|
||||
|
||||
# Launch the TUI with a sample prompt.
|
||||
cargo run --bin codex -- "explain this codebase to me"
|
||||
# Run the locally‑built CLI directly
|
||||
node ./dist/cli.js
|
||||
|
||||
# After making changes, ensure the code is clean.
|
||||
cargo fmt -- --config imports_granularity=Item
|
||||
cargo clippy --tests
|
||||
|
||||
# Run the tests.
|
||||
cargo test
|
||||
# Or link the command globally for convenience
|
||||
npm link
|
||||
```
|
||||
|
||||
</details>
|
||||
@@ -390,11 +276,22 @@ cargo test
|
||||
|
||||
## Configuration
|
||||
|
||||
Codex supports a rich set of configuration options documented in [`codex-rs/config.md`](./codex-rs/config.md).
|
||||
Codex looks for config files in **`~/.codex/`**.
|
||||
|
||||
By default, Codex loads its configuration from `~/.codex/config.toml`.
|
||||
```yaml
|
||||
# ~/.codex/config.yaml
|
||||
model: o4-mini # Default model
|
||||
fullAutoErrorMode: ask-user # or ignore-and-continue
|
||||
notify: true # Enable desktop notifications for responses
|
||||
```
|
||||
|
||||
The `--config` flag can also be used to set or override ad-hoc config values for individual invocations of `codex`.
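For example, to change a documented setting for a single run (here the `disable_response_storage` key described in the ZDR section below):

```shell
codex --config disable_response_storage=true "explain this codebase to me"
```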
|
||||
You can also define custom instructions:
|
||||
|
||||
```yaml
|
||||
# ~/.codex/instructions.md
|
||||
- Always respond with emojis
|
||||
- Only use git commands if I explicitly mention you should
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -429,38 +326,43 @@ Codex runs model-generated commands in a sandbox. If a proposed command or file
|
||||
<details>
|
||||
<summary>Does it work on Windows?</summary>
|
||||
|
||||
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex has been tested on macOS and Linux with Node 22.
|
||||
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) – Codex has been tested on macOS and Linux with Node ≥ 22.
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
## Zero data retention (ZDR) usage
|
||||
## Zero Data Retention (ZDR) Organization Limitation
|
||||
|
||||
Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as:
|
||||
> **Note:** Codex CLI does **not** currently support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled.
|
||||
|
||||
If your OpenAI organization has Zero Data Retention enabled, you may encounter errors such as:
|
||||
|
||||
```
|
||||
OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention.
|
||||
```
|
||||
|
||||
Ensure you are running `codex` with `--config disable_response_storage=true` or add this line to `~/.codex/config.toml` to avoid specifying the command line option each time:
|
||||
**Why?**
|
||||
|
||||
```toml
|
||||
disable_response_storage = true
|
||||
```
|
||||
- Codex CLI relies on the Responses API with `store:true` to enable internal reasoning steps.
|
||||
- As noted in the [docs](https://platform.openai.com/docs/guides/your-data#responses-api), the Responses API requires a 30-day retention period by default, or when the store parameter is set to true.
|
||||
- ZDR organizations cannot use `store:true`, so requests will fail.
|
||||
|
||||
See [the configuration documentation on `disable_response_storage`](./codex-rs/config.md#disable_response_storage) for details.
|
||||
**What can I do?**
|
||||
|
||||
- If you are part of a ZDR organization, Codex CLI will not work until support is added.
|
||||
- We are tracking this limitation and will update the documentation if support becomes available.
|
||||
|
||||
---
|
||||
|
||||
## Codex open source fund
|
||||
## Funding Opportunity
|
||||
|
||||
We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models.
|
||||
We’re excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models.
|
||||
|
||||
- Grants are awarded up to **$25,000** API credits.
|
||||
- Grants are awarded in **$25,000** API credit increments.
|
||||
- Applications are reviewed **on a rolling basis**.
|
||||
|
||||
**Interested? [Apply here](https://openai.com/form/codex-open-source-fund/).**
|
||||
|
||||
|
||||
---
|
||||
|
||||
@@ -468,96 +370,153 @@ We're excited to launch a **$1 million initiative** supporting open source proje
|
||||
|
||||
This project is under active development and the code will likely change pretty significantly. We'll update this message once that's complete!
|
||||
|
||||
More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.
|
||||
More broadly we welcome contributions – whether you are opening your very first pull request or you’re a seasoned maintainer. At the same time we care about reliability and long‑term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what “high‑quality” means in practice and should make the whole process transparent and friendly.
|
||||
|
||||
### Development workflow
|
||||
|
||||
- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
|
||||
- Create a _topic branch_ from `main` – e.g. `feat/interactive-prompt`.
|
||||
- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
|
||||
- Following the [development setup](#development-workflow) instructions above, ensure your change is free of lint warnings and test failures.
|
||||
- Use `npm run test:watch` during development for super‑fast feedback.
|
||||
- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type‑checking.
|
||||
- Before pushing, run the full test/type/lint suite:
|
||||
|
||||
### Writing high-impact code changes
|
||||
### Git Hooks with Husky
|
||||
|
||||
This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:
|
||||
|
||||
- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
|
||||
- **Pre-push hook**: Runs tests and type checking before pushing to the remote
|
||||
|
||||
These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./codex-cli/HUSKY.md).
|
||||
|
||||
```bash
|
||||
npm test && npm run lint && npm run typecheck
|
||||
```
|
||||
|
||||
- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text
|
||||
|
||||
```text
|
||||
I have read the CLA Document and I hereby sign the CLA
|
||||
```
|
||||
|
||||
The CLA‑Assistant bot will turn the PR status green once all authors have signed.
|
||||
|
||||
```bash
|
||||
# Watch mode (tests rerun on change)
|
||||
npm run test:watch
|
||||
|
||||
# Type‑check without emitting files
|
||||
npm run typecheck
|
||||
|
||||
# Automatically fix lint + prettier issues
|
||||
npm run lint:fix
|
||||
npm run format:fix
|
||||
```
|
||||
|
||||
#### Nix Flake Development
|
||||
|
||||
Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`).
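If flakes are not enabled yet, one way to turn them on is the following sketch (adjust if you manage your Nix configuration differently):

```shell
mkdir -p ~/.config/nix
echo "experimental-features = nix-command flakes" >> ~/.config/nix/nix.conf
```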
|
||||
|
||||
Enter a Nix development shell:
|
||||
|
||||
```bash
|
||||
nix develop
|
||||
```
|
||||
|
||||
This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias.
|
||||
|
||||
Build and run the CLI directly:
|
||||
|
||||
```bash
|
||||
nix build
|
||||
./result/bin/codex --help
|
||||
```
|
||||
|
||||
Run the CLI via the flake app:
|
||||
|
||||
```bash
|
||||
nix run .#codex
|
||||
```
|
||||
|
||||
### Writing high‑impact code changes
|
||||
|
||||
1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written.
|
||||
2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions.
|
||||
3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codex --help`), or relevant example projects.
|
||||
2. **Add or update tests.** Every new feature or bug‑fix should come with test coverage that fails before your change and passes afterwards. 100 % coverage is not required, but aim for meaningful assertions.
|
||||
3. **Document behaviour.** If your change affects user‑facing behaviour, update the README, inline help (`codex --help`), or relevant example projects.
|
||||
4. **Keep commits atomic.** Each commit should compile and the tests should pass. This makes reviews and potential rollbacks easier.
|
||||
|
||||
### Opening a pull request
|
||||
|
||||
- Fill in the PR template (or include similar information) - **What? Why? How?**
|
||||
- Run **all** checks locally (`cargo test && cargo clippy --tests && cargo fmt -- --config imports_granularity=Item`). CI failures that could have been caught locally slow down the process.
|
||||
- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
|
||||
- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.
|
||||
- Fill in the PR template (or include similar information) – **What? Why? How?**
|
||||
- Run **all** checks locally (`npm test && npm run lint && npm run typecheck`). CI failures that could have been caught locally slow down the process.
|
||||
- Make sure your branch is up‑to‑date with `main` and that you have resolved merge conflicts.
|
||||
- Mark the PR as **Ready for review** only when you believe it is in a merge‑able state.
|
||||
|
||||
### Review process
|
||||
|
||||
1. One maintainer will be assigned as a primary reviewer.
|
||||
2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability.
|
||||
3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.
|
||||
2. We may ask for changes – please do not take this personally. We value the work, we just also value consistency and long‑term maintainability.
|
||||
3. When there is consensus that the PR meets the bar, a maintainer will squash‑and‑merge.
|
||||
|
||||
### Community values
|
||||
|
||||
- **Be kind and inclusive.** Treat others with respect; we follow the [Contributor Covenant](https://www.contributor-covenant.org/).
|
||||
- **Assume good intent.** Written communication is hard - err on the side of generosity.
|
||||
- **Assume good intent.** Written communication is hard – err on the side of generosity.
|
||||
- **Teach & learn.** If you spot something confusing, open an issue or PR with improvements.
|
||||
|
||||
### Getting help
|
||||
|
||||
If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help.
|
||||
If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ – please open a Discussion or jump into the relevant issue. We are happy to help.
|
||||
|
||||
Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket:
|
||||
|
||||
### Contributor license agreement (CLA)
|
||||
### Contributor License Agreement (CLA)
|
||||
|
||||
All contributors **must** accept the CLA. The process is lightweight:
|
||||
|
||||
1. Open your pull request.
|
||||
2. Paste the following comment (or reply `recheck` if you've signed before):
|
||||
2. Paste the following comment (or reply `recheck` if you’ve signed before):
|
||||
|
||||
```text
|
||||
I have read the CLA Document and I hereby sign the CLA
|
||||
```
|
||||
|
||||
3. The CLA-Assistant bot records your signature in the repo and marks the status check as passed.
|
||||
3. The CLA‑Assistant bot records your signature in the repo and marks the status check as passed.
|
||||
|
||||
No special Git commands, email attachments, or commit footers required.
|
||||
|
||||
#### Quick fixes
|
||||
|
||||
| Scenario | Command |
|
||||
| ----------------- | ------------------------------------------------ |
|
||||
| Amend last commit | `git commit --amend -s --no-edit && git push -f` |
|
||||
| Scenario | Command |
|
||||
| ----------------- | ----------------------------------------------------------------------------------------- |
|
||||
| Amend last commit | `git commit --amend -s --no-edit && git push -f` |
|
||||
| GitHub UI only | Edit the commit message in the PR → add<br>`Signed-off-by: Your Name <email@example.com>` |
|
||||
|
||||
The **DCO check** blocks merges until every commit in the PR carries the footer (with squash this is just the one).
|
||||
|
||||
### Releasing `codex`
|
||||
|
||||
_For admins only._
|
||||
To publish a new version of the CLI, run the release scripts defined in `codex-cli/package.json`:
|
||||
|
||||
Make sure you are on `main` and have no local changes. Then run:
|
||||
|
||||
```shell
|
||||
VERSION=0.2.0 # Can also be 0.2.0-alpha.1 or any valid Rust version.
|
||||
./codex-rs/scripts/create_github_release.sh "$VERSION"
|
||||
```
|
||||
|
||||
This will make a local commit on top of `main` with `version` set to `$VERSION` in `codex-rs/Cargo.toml` (note that on `main`, we leave the version as `version = "0.0.0"`).
|
||||
|
||||
This will push the commit using the tag `rust-v${VERSION}`, which in turn kicks off [the release workflow](.github/workflows/rust-release.yml). This will create a new GitHub Release named `$VERSION`.
|
||||
|
||||
If everything looks good in the generated GitHub Release, uncheck the **pre-release** box so it is the latest release.
|
||||
|
||||
Create a PR to update [`Formula/c/codex.rb`](https://github.com/Homebrew/homebrew-core/blob/main/Formula/c/codex.rb) on Homebrew.
|
||||
1. Open the `codex-cli` directory
|
||||
2. Make sure you're on a branch like `git checkout -b bump-version`
|
||||
3. Bump the version and `CLI_VERSION` to current datetime: `npm run release:version`
|
||||
4. Commit the version bump (with DCO sign-off):
|
||||
```bash
|
||||
git add codex-cli/src/utils/session.ts codex-cli/package.json
|
||||
git commit -s -m "chore(release): codex-cli v$(node -p \"require('./codex-cli/package.json').version\")"
|
||||
```
|
||||
5. Copy README, build, and publish to npm: `npm run release`
|
||||
6. Push to branch: `git push origin HEAD`
|
||||
|
||||
---
|
||||
|
||||
## Security & responsible AI
|
||||
## Security & Responsible AI
|
||||
|
||||
Have you discovered a vulnerability or have concerns about model output? Please e-mail **security@openai.com** and we will respond promptly.
|
||||
Have you discovered a vulnerability or have concerns about model output? Please e‑mail **security@openai.com** and we will respond promptly.
|
||||
|
||||
---
|
||||
|
||||
## License
|
||||
|
||||
This repository is licensed under the [Apache-2.0 License](LICENSE).
|
||||
|
||||
|
||||
@@ -35,10 +35,10 @@ conventional_commits = true
|
||||
|
||||
commit_parsers = [
|
||||
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
|
||||
{ message = "^fix", group = "<!-- 1 -->🪲 Bug Fixes" },
|
||||
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
|
||||
{ message = "^bump", group = "<!-- 6 -->🛳️ Release" },
|
||||
# Fallback – skip anything that didn't match the above rules.
|
||||
{ message = ".*", group = "<!-- 10 -->💼 Other" },
|
||||
{ message = ".*", group = "<!-- 10 -->💼 Other", skip = true },
|
||||
]
|
||||
|
||||
filter_unconventional = false
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module.exports = {
|
||||
root: true,
|
||||
env: { browser: true, node: true, es2020: true },
|
||||
env: { browser: true, es2020: true },
|
||||
extends: [
|
||||
"eslint:recommended",
|
||||
"plugin:@typescript-eslint/recommended",
|
||||
|
||||
7
codex-cli/.gitignore
vendored
@@ -1,7 +0,0 @@
|
||||
# Added by ./scripts/install_native_deps.sh
|
||||
/bin/codex-aarch64-apple-darwin
|
||||
/bin/codex-aarch64-unknown-linux-musl
|
||||
/bin/codex-linux-sandbox-arm64
|
||||
/bin/codex-linux-sandbox-x64
|
||||
/bin/codex-x86_64-apple-darwin
|
||||
/bin/codex-x86_64-unknown-linux-musl
|
||||
32
codex-cli/.husky/_/husky.sh
Normal file
@@ -0,0 +1,32 @@
|
||||
#!/usr/bin/env sh
|
||||
if [ -z "$husky_skip_init" ]; then
|
||||
debug () {
|
||||
if [ "$HUSKY_DEBUG" = "1" ]; then
|
||||
echo "husky (debug) - $1"
|
||||
fi
|
||||
}
|
||||
|
||||
readonly hook_name="$(basename -- "$0")"
|
||||
debug "starting $hook_name..."
|
||||
|
||||
if [ "$HUSKY" = "0" ]; then
|
||||
debug "HUSKY env variable is set to 0, skipping hook"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ -f ~/.huskyrc ]; then
|
||||
debug "sourcing ~/.huskyrc"
|
||||
. ~/.huskyrc
|
||||
fi
|
||||
|
||||
readonly husky_skip_init=1
|
||||
export husky_skip_init
|
||||
sh -e "$0" "$@"
|
||||
exitCode="$?"
|
||||
|
||||
if [ $exitCode != 0 ]; then
|
||||
echo "husky - $hook_name hook exited with code $exitCode (error)"
|
||||
fi
|
||||
|
||||
exit $exitCode
|
||||
fi
|
||||
5
codex-cli/.husky/pre-commit
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env sh
|
||||
. "$(dirname -- "$0")/_/husky.sh"
|
||||
|
||||
# Run lint-staged to check files that are about to be committed
|
||||
npm run pre-commit
|
||||
5
codex-cli/.husky/pre-push
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env sh
|
||||
. "$(dirname -- "$0")/_/husky.sh"
|
||||
|
||||
# Run tests and type checking before pushing
|
||||
npm test && npm run typecheck
|
||||
9
codex-cli/.lintstagedrc.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"*.{ts,tsx}": [
|
||||
"eslint --fix",
|
||||
"prettier --write"
|
||||
],
|
||||
"*.{json,md,yml}": [
|
||||
"prettier --write"
|
||||
]
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM node:24-slim
|
||||
FROM node:20-slim
|
||||
|
||||
ARG TZ
|
||||
ENV TZ="$TZ"
|
||||
@@ -20,6 +20,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
less \
|
||||
man-db \
|
||||
procps \
|
||||
sudo \
|
||||
unzip \
|
||||
ripgrep \
|
||||
zsh \
|
||||
@@ -46,14 +47,10 @@ RUN npm install -g codex.tgz \
|
||||
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/tests \
|
||||
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/docs
|
||||
|
||||
# Inside the container we consider the environment already sufficiently locked
|
||||
# down, therefore instruct Codex CLI to allow running without sandboxing.
|
||||
ENV CODEX_UNSAFE_ALLOW_NO_SANDBOX=1
|
||||
|
||||
# Copy and set up firewall script as root.
|
||||
USER root
|
||||
# Copy and set up firewall script
|
||||
COPY scripts/init_firewall.sh /usr/local/bin/
|
||||
RUN chmod 500 /usr/local/bin/init_firewall.sh
|
||||
|
||||
# Drop back to non-root.
|
||||
USER root
|
||||
RUN chmod +x /usr/local/bin/init_firewall.sh && \
|
||||
echo "node ALL=(root) NOPASSWD: /usr/local/bin/init_firewall.sh" > /etc/sudoers.d/node-firewall && \
|
||||
chmod 0440 /etc/sudoers.d/node-firewall
|
||||
USER node
|
||||
|
||||
@@ -1,736 +0,0 @@
|
||||
<h1 align="center">OpenAI Codex CLI</h1>
|
||||
<p align="center">Lightweight coding agent that runs in your terminal</p>
|
||||
|
||||
<p align="center"><code>npm i -g @openai/codex</code></p>
|
||||
|
||||
> [!IMPORTANT]
|
||||
> This is the documentation for the _legacy_ TypeScript implementation of the Codex CLI. It has been superseded by the _Rust_ implementation. See the [README in the root of the Codex repository](https://github.com/openai/codex/blob/main/README.md) for details.
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
<details>
|
||||
<summary><strong>Table of contents</strong></summary>
|
||||
|
||||
<!-- Begin ToC -->
|
||||
|
||||
- [Experimental technology disclaimer](#experimental-technology-disclaimer)
|
||||
- [Quickstart](#quickstart)
|
||||
- [Why Codex?](#why-codex)
|
||||
- [Security model & permissions](#security-model--permissions)
|
||||
- [Platform sandboxing details](#platform-sandboxing-details)
|
||||
- [System requirements](#system-requirements)
|
||||
- [CLI reference](#cli-reference)
|
||||
- [Memory & project docs](#memory--project-docs)
|
||||
- [Non-interactive / CI mode](#non-interactive--ci-mode)
|
||||
- [Tracing / verbose logging](#tracing--verbose-logging)
|
||||
- [Recipes](#recipes)
|
||||
- [Installation](#installation)
|
||||
- [Configuration guide](#configuration-guide)
|
||||
- [Basic configuration parameters](#basic-configuration-parameters)
|
||||
- [Custom AI provider configuration](#custom-ai-provider-configuration)
|
||||
- [History configuration](#history-configuration)
|
||||
- [Configuration examples](#configuration-examples)
|
||||
- [Full configuration example](#full-configuration-example)
|
||||
- [Custom instructions](#custom-instructions)
|
||||
- [Environment variables setup](#environment-variables-setup)
|
||||
- [FAQ](#faq)
|
||||
- [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage)
|
||||
- [Codex open source fund](#codex-open-source-fund)
|
||||
- [Contributing](#contributing)
|
||||
- [Development workflow](#development-workflow)
|
||||
- [Git hooks with Husky](#git-hooks-with-husky)
|
||||
- [Debugging](#debugging)
|
||||
- [Writing high-impact code changes](#writing-high-impact-code-changes)
|
||||
- [Opening a pull request](#opening-a-pull-request)
|
||||
- [Review process](#review-process)
|
||||
- [Community values](#community-values)
|
||||
- [Getting help](#getting-help)
|
||||
- [Contributor license agreement (CLA)](#contributor-license-agreement-cla)
|
||||
- [Quick fixes](#quick-fixes)
|
||||
- [Releasing `codex`](#releasing-codex)
|
||||
- [Alternative build options](#alternative-build-options)
|
||||
- [Nix flake development](#nix-flake-development)
|
||||
- [Security & responsible AI](#security--responsible-ai)
|
||||
- [License](#license)
|
||||
|
||||
<!-- End ToC -->
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
## Experimental technology disclaimer
|
||||
|
||||
Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome:
|
||||
|
||||
- Bug reports
|
||||
- Feature requests
|
||||
- Pull requests
|
||||
- Good vibes
|
||||
|
||||
Help us improve by filing issues or submitting PRs (see the section below for how to contribute)!
|
||||
|
||||
## Quickstart
|
||||
|
||||
Install globally:
|
||||
|
||||
```shell
|
||||
npm install -g @openai/codex
|
||||
```
|
||||
|
||||
Next, set your OpenAI API key as an environment variable:
|
||||
|
||||
```shell
|
||||
export OPENAI_API_KEY="your-api-key-here"
|
||||
```
|
||||
|
||||
> **Note:** This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`), but we recommend setting it for the session.
>
> **Tip:** You can also place your API key into a `.env` file at the root of your project:
|
||||
>
|
||||
> ```env
|
||||
> OPENAI_API_KEY=your-api-key-here
|
||||
> ```
|
||||
>
|
||||
> The CLI will automatically load variables from `.env` (via `dotenv/config`).
|
||||
|
||||
<details>
|
||||
<summary><strong>Use <code>--provider</code> to use other models</strong></summary>
|
||||
|
||||
> Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are:
|
||||
>
|
||||
> - openai (default)
|
||||
> - openrouter
|
||||
> - azure
|
||||
> - gemini
|
||||
> - ollama
|
||||
> - mistral
|
||||
> - deepseek
|
||||
> - xai
|
||||
> - groq
|
||||
> - arceeai
|
||||
> - any other provider that is compatible with the OpenAI API
|
||||
>
|
||||
> If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as:
|
||||
>
|
||||
> ```shell
|
||||
> export <provider>_API_KEY="your-api-key-here"
|
||||
> ```
|
||||
>
|
||||
> If you use a provider not listed above, you must also set the base URL for the provider:
|
||||
>
|
||||
> ```shell
|
||||
> export <provider>_BASE_URL="https://your-provider-api-base-url"
|
||||
> ```
|
||||
|
||||
</details>
|
||||
<br />
|
||||
|
||||
Run interactively:
|
||||
|
||||
```shell
|
||||
codex
|
||||
```
|
||||
|
||||
Or, run with a prompt as input (and optionally in `Full Auto` mode):
|
||||
|
||||
```shell
|
||||
codex "explain this codebase to me"
|
||||
```
|
||||
|
||||
```shell
|
||||
codex --approval-mode full-auto "create the fanciest todo-list app"
|
||||
```
|
||||
|
||||
That's it - Codex will scaffold a file, run it inside a sandbox, install any
|
||||
missing dependencies, and show you the live result. Approve the changes and
|
||||
they'll be committed to your working directory.
|
||||
|
||||
---
|
||||
|
||||
## Why Codex?
|
||||
|
||||
Codex CLI is built for developers who already **live in the terminal** and want
|
||||
ChatGPT-level reasoning **plus** the power to actually run code, manipulate
|
||||
files, and iterate - all under version control. In short, it's _chat-driven
|
||||
development_ that understands and executes your repo.
|
||||
|
||||
- **Zero setup** - bring your OpenAI API key and it just works!
|
||||
- **Full auto-approval, while safe + secure** by running network-disabled and directory-sandboxed
|
||||
- **Multimodal** - pass in screenshots or diagrams to implement features ✨
|
||||
|
||||
And it's **fully open-source** so you can see and contribute to how it develops!
|
||||
|
||||
---
|
||||
|
||||
## Security model & permissions
|
||||
|
||||
Codex lets you decide _how much autonomy_ the agent receives and the auto-approval policy via the
|
||||
`--approval-mode` flag (or the interactive onboarding prompt):
|
||||
|
||||
| Mode | What the agent may do without asking | Still requires approval |
|
||||
| ------------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
|
||||
| **Suggest** <br>(default) | <li>Read any file in the repo | <li>**All** file writes/patches<li> **Any** arbitrary shell commands (aside from reading files) |
|
||||
| **Auto Edit** | <li>Read **and** apply-patch writes to files | <li>**All** shell commands |
|
||||
| **Full Auto** | <li>Read/write files <li> Execute shell commands (network disabled, writes limited to your workdir) | - |
|
||||
|
||||
In **Full Auto** every command is run **network-disabled** and confined to the
|
||||
current working directory (plus temporary files) for defense-in-depth. Codex
|
||||
will also show a warning/confirmation if you start in **auto-edit** or
|
||||
**full-auto** while the directory is _not_ tracked by Git, so you always have a
|
||||
safety net.
|
||||
|
||||
Coming soon: you'll be able to whitelist specific commands to auto-execute with
|
||||
the network enabled, once we're confident in additional safeguards.
|
||||
|
||||
### Platform sandboxing details
|
||||
|
||||
The hardening mechanism Codex uses depends on your OS:
|
||||
|
||||
- **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).
|
||||
|
||||
- Everything is placed in a read-only jail except for a small set of
|
||||
writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.).
|
||||
- Outbound network is _fully blocked_ by default - even if a child process
|
||||
tries to `curl` somewhere it will fail.
|
||||
|
||||
- **Linux** - there is no sandboxing by default.
|
||||
We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal
|
||||
container image** and mounts your repo _read/write_ at the same path. A
|
||||
custom `iptables`/`ipset` firewall script denies all egress except the
|
||||
OpenAI API. This gives you deterministic, reproducible runs without needing
|
||||
root on the host. You can use the [`run_in_container.sh`](../codex-cli/scripts/run_in_container.sh) script to set up the sandbox.
|
||||
|
||||
---
|
||||
|
||||
## System requirements
|
||||
|
||||
| Requirement | Details |
|
||||
| --------------------------- | --------------------------------------------------------------- |
|
||||
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
|
||||
| Node.js | **22 or newer** (LTS recommended) |
|
||||
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
|
||||
| RAM | 4-GB minimum (8-GB recommended) |
|
||||
|
||||
> Never run `sudo npm install -g`; fix npm permissions instead.
|
||||
|
||||
---
|
||||
|
||||
## CLI reference
|
||||
|
||||
| Command | Purpose | Example |
|
||||
| ------------------------------------ | ----------------------------------- | ------------------------------------ |
|
||||
| `codex` | Interactive REPL | `codex` |
|
||||
| `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` |
|
||||
| `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` |
|
||||
| `codex completion <bash\|zsh\|fish>` | Print shell completion script | `codex completion bash` |
|
||||
|
||||
Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.
|
||||
|
||||
---
|
||||
|
||||
## Memory & project docs
|
||||
|
||||
You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for `AGENTS.md` files in the following places, and merges them top-down:
|
||||
|
||||
1. `~/.codex/AGENTS.md` - personal global guidance
|
||||
2. `AGENTS.md` at repo root - shared project notes
|
||||
3. `AGENTS.md` in the current working directory - sub-folder/feature specifics
|
||||
|
||||
Disable loading of these files with `--no-project-doc` or the environment variable `CODEX_DISABLE_PROJECT_DOC=1`.
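Either form works per invocation, for example:

```shell
codex --no-project-doc "explain this codebase to me"
# or, via the environment variable
CODEX_DISABLE_PROJECT_DOC=1 codex "explain this codebase to me"
```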
|
||||
|
||||
---
|
||||
|
||||
## Non-interactive / CI mode
|
||||
|
||||
Run Codex head-less in pipelines. Example GitHub Action step:
|
||||
|
||||
```yaml
|
||||
- name: Update changelog via Codex
|
||||
run: |
|
||||
npm install -g @openai/codex
|
||||
export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
|
||||
codex -a auto-edit --quiet "update CHANGELOG for next release"
|
||||
```
|
||||
|
||||
Set `CODEX_QUIET_MODE=1` to silence interactive UI noise.
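For example, combining it with quiet mode in a CI step (a sketch building on the workflow above):

```shell
CODEX_QUIET_MODE=1 codex -q "update CHANGELOG for next release"
```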
|
||||
|
||||
## Tracing / verbose logging
|
||||
|
||||
Setting the environment variable `DEBUG=true` prints full API request and response details:
|
||||
|
||||
```shell
|
||||
DEBUG=true codex
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Recipes
|
||||
|
||||
Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/openai/codex/blob/main/codex-cli/examples/prompting_guide.md) for more tips and usage patterns.
|
||||
|
||||
| ✨ | What you type | What happens |
|
||||
| --- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
|
||||
| 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. |
|
||||
| 2 | `codex "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. |
|
||||
| 3 | `codex "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. |
|
||||
| 4 | `codex "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. |
|
||||
| 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. |
|
||||
| 6 | `codex "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. |
|
||||
| 7 | `codex "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. |
|
||||
|
||||
---
|
||||
|
||||
## Installation
|
||||
|
||||
<details open>
|
||||
<summary><strong>From npm (Recommended)</strong></summary>
|
||||
|
||||
```bash
|
||||
npm install -g @openai/codex
|
||||
# or
|
||||
yarn global add @openai/codex
|
||||
# or
|
||||
bun install -g @openai/codex
|
||||
# or
|
||||
pnpm add -g @openai/codex
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Build from source</strong></summary>
|
||||
|
||||
```bash
|
||||
# Clone the repository and navigate to the CLI package
|
||||
git clone https://github.com/openai/codex.git
|
||||
cd codex/codex-cli
|
||||
|
||||
# Enable corepack
|
||||
corepack enable
|
||||
|
||||
# Install dependencies and build
|
||||
pnpm install
|
||||
pnpm build
|
||||
|
||||
# Linux-only: download prebuilt sandboxing binaries (requires gh and zstd).
|
||||
./scripts/install_native_deps.sh
|
||||
|
||||
# Get the usage and the options
|
||||
node ./dist/cli.js --help
|
||||
|
||||
# Run the locally-built CLI directly
|
||||
node ./dist/cli.js
|
||||
|
||||
# Or link the command globally for convenience
|
||||
pnpm link
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
## Configuration guide
|
||||
|
||||
Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats.
|
||||
|
||||
### Basic configuration parameters
|
||||
|
||||
| Parameter | Type | Default | Description | Available Options |
|
||||
| ------------------- | ------- | ---------- | -------------------------------- | ---------------------------------------------------------------------------------------------- |
|
||||
| `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API |
|
||||
| `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only)<br>`auto-edit` (automatic edits)<br>`full-auto` (fully automatic) |
|
||||
| `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input)<br>`ignore-and-continue` (ignore and proceed) |
|
||||
| `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` |
|
||||
|
||||
### Custom AI provider configuration
|
||||
|
||||
In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters:
|
||||
|
||||
| Parameter | Type | Description | Example |
|
||||
| --------- | ------ | --------------------------------------- | ----------------------------- |
|
||||
| `name` | string | Display name of the provider | `"OpenAI"` |
|
||||
| `baseURL` | string | API service URL | `"https://api.openai.com/v1"` |
|
||||
| `envKey` | string | Environment variable name (for API key) | `"OPENAI_API_KEY"` |
|
||||
|
||||
### History configuration
|
||||
|
||||
In the `history` object, you can configure conversation history settings:
|
||||
|
||||
| Parameter | Type | Description | Example Value |
| ------------------- | ------- | ------------------------------------------------------ | ------------- |
| `maxSize` | number | Maximum number of history entries to save | `1000` |
| `saveHistory` | boolean | Whether to save history | `true` |
| `sensitivePatterns` | array | Patterns of sensitive information to filter in history | `[]` |
|
||||
|
||||
### Configuration examples
|
||||
|
||||
1. YAML format (save as `~/.codex/config.yaml`):
|
||||
|
||||
```yaml
model: o4-mini
approvalMode: suggest
fullAutoErrorMode: ask-user
notify: true
```
|
||||
|
||||
2. JSON format (save as `~/.codex/config.json`):
|
||||
|
||||
```json
{
  "model": "o4-mini",
  "approvalMode": "suggest",
  "fullAutoErrorMode": "ask-user",
  "notify": true
}
```
|
||||
|
||||
### Full configuration example
|
||||
|
||||
Below is a comprehensive example of `config.json` with multiple custom providers:
|
||||
|
||||
```json
{
  "model": "o4-mini",
  "provider": "openai",
  "providers": {
    "openai": {
      "name": "OpenAI",
      "baseURL": "https://api.openai.com/v1",
      "envKey": "OPENAI_API_KEY"
    },
    "azure": {
      "name": "AzureOpenAI",
      "baseURL": "https://YOUR_PROJECT_NAME.openai.azure.com/openai",
      "envKey": "AZURE_OPENAI_API_KEY"
    },
    "openrouter": {
      "name": "OpenRouter",
      "baseURL": "https://openrouter.ai/api/v1",
      "envKey": "OPENROUTER_API_KEY"
    },
    "gemini": {
      "name": "Gemini",
      "baseURL": "https://generativelanguage.googleapis.com/v1beta/openai",
      "envKey": "GEMINI_API_KEY"
    },
    "ollama": {
      "name": "Ollama",
      "baseURL": "http://localhost:11434/v1",
      "envKey": "OLLAMA_API_KEY"
    },
    "mistral": {
      "name": "Mistral",
      "baseURL": "https://api.mistral.ai/v1",
      "envKey": "MISTRAL_API_KEY"
    },
    "deepseek": {
      "name": "DeepSeek",
      "baseURL": "https://api.deepseek.com",
      "envKey": "DEEPSEEK_API_KEY"
    },
    "xai": {
      "name": "xAI",
      "baseURL": "https://api.x.ai/v1",
      "envKey": "XAI_API_KEY"
    },
    "groq": {
      "name": "Groq",
      "baseURL": "https://api.groq.com/openai/v1",
      "envKey": "GROQ_API_KEY"
    },
    "arceeai": {
      "name": "ArceeAI",
      "baseURL": "https://conductor.arcee.ai/v1",
      "envKey": "ARCEEAI_API_KEY"
    }
  },
  "history": {
    "maxSize": 1000,
    "saveHistory": true,
    "sensitivePatterns": []
  }
}
```
|
||||
|
||||
### Custom instructions
|
||||
|
||||
You can create a `~/.codex/AGENTS.md` file to define custom guidance for the agent:
|
||||
|
||||
```markdown
- Always respond with emojis
- Only use git commands when explicitly requested
```
|
||||
|
||||
### Environment variables setup
|
||||
|
||||
For each AI provider, you need to set the corresponding API key in your environment variables. For example:
|
||||
|
||||
```bash
# OpenAI
export OPENAI_API_KEY="your-api-key-here"

# Azure OpenAI
export AZURE_OPENAI_API_KEY="your-azure-api-key-here"
export AZURE_OPENAI_API_VERSION="2025-04-01-preview" # optional

# OpenRouter
export OPENROUTER_API_KEY="your-openrouter-key-here"

# Similarly for other providers
```
|
||||
|
||||
---
|
||||
|
||||
## FAQ
|
||||
|
||||
<details>
|
||||
<summary>OpenAI released a model called Codex in 2021 - is this related?</summary>
|
||||
|
||||
In 2021, OpenAI released Codex, an AI system designed to generate code from natural language prompts. That original Codex model was deprecated as of March 2023 and is separate from the CLI tool.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Which models are supported?</summary>
|
||||
|
||||
Any model available with [Responses API](https://platform.openai.com/docs/api-reference/responses). The default is `o4-mini`, but pass `--model gpt-4.1` or set `model: gpt-4.1` in your config file to override.
|
||||
|
||||
</details>
|
||||
<details>
|
||||
<summary>Why does <code>o3</code> or <code>o4-mini</code> not work for me?</summary>
|
||||
|
||||
It's possible that your [API account needs to be verified](https://help.openai.com/en/articles/10910291-api-organization-verification) in order to start streaming responses and seeing chain of thought summaries from the API. If you're still running into issues, please let us know!
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>How do I stop Codex from editing my files?</summary>
|
||||
|
||||
Codex runs model-generated commands in a sandbox. If a proposed command or file change doesn't look right, you can simply type **n** to deny the command or give the model feedback.
|
||||
|
||||
</details>
|
||||
<details>
|
||||
<summary>Does it work on Windows?</summary>
|
||||
|
||||
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex has been tested on macOS and Linux with Node 22.
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
## Zero data retention (ZDR) usage
|
||||
|
||||
Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as:
|
||||
|
||||
```
OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention.
```
|
||||
|
||||
You may need to upgrade to a more recent version with: `npm i -g @openai/codex@latest`
|
||||
|
||||
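As a quick sanity check, you can confirm which version is installed before and after upgrading (the exact release that added ZDR support is not listed here, so when in doubt just move to the latest):

```bash
codex --version                       # version currently on your PATH
npm install -g @openai/codex@latest   # upgrade to the latest release
codex --version                       # confirm the new version is picked up
```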
---
|
||||
|
||||
## Codex open source fund
|
||||
|
||||
We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models.
|
||||
|
||||
- Grants are awarded up to **$25,000** in API credits.
|
||||
- Applications are reviewed **on a rolling basis**.
|
||||
|
||||
**Interested? [Apply here](https://openai.com/form/codex-open-source-fund/).**
|
||||
|
||||
---
|
||||
|
||||
## Contributing
|
||||
|
||||
This project is under active development and the code will likely change pretty significantly. We'll update this message once that's complete!
|
||||
|
||||
More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.
|
||||
|
||||
### Development workflow
|
||||
|
||||
- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
|
||||
- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
|
||||
- Use `pnpm test:watch` during development for super-fast feedback.
|
||||
- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking.
|
||||
- Before pushing, run the full test/type/lint suite:

```bash
pnpm test && pnpm run lint && pnpm run typecheck
```

### Git hooks with Husky

This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:

- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
- **Pre-push hook**: Runs tests and type checking before pushing to the remote

These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./HUSKY.md).
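For reference, the hooks are thin wrappers around the package scripts. A rough sketch of what they run (illustrative only; the actual hook files live under `.husky/` and are documented in [HUSKY.md](./HUSKY.md)):

```bash
# .husky/pre-commit (sketch): format and lint only the staged files
npx lint-staged

# .husky/pre-push (sketch): keep tests and types green before pushing
pnpm test && pnpm run typecheck
```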
|
||||
|
||||
- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text
|
||||
|
||||
```text
I have read the CLA Document and I hereby sign the CLA
```
|
||||
|
||||
The CLA-Assistant bot will turn the PR status green once all authors have signed.
|
||||
|
||||
```bash
# Watch mode (tests rerun on change)
pnpm test:watch

# Type-check without emitting files
pnpm typecheck

# Automatically fix lint + prettier issues
pnpm lint:fix
pnpm format:fix
```
|
||||
|
||||
### Debugging
|
||||
|
||||
To debug the CLI with a visual debugger, do the following in the `codex-cli` folder:
|
||||
|
||||
- Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder.
|
||||
- Run the CLI with `node --inspect-brk ./dist/cli.js`. The program then waits until a debugger is attached before proceeding. Options:
|
||||
- In VS Code, choose **Debug: Attach to Node Process** from the command palette and choose the option in the dropdown with debug port `9229` (likely the first option)
|
||||
- Go to <chrome://inspect> in Chrome and find **localhost:9229** and click **trace**
|
||||
|
||||
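Putting those steps together, a typical debugging session looks roughly like this (the prompt is just an example):

```bash
cd codex-cli
pnpm run build                              # emits dist/cli.js and cli.js.map
node --inspect-brk ./dist/cli.js "explain this repo"
# The process now waits on port 9229; attach from VS Code
# ("Debug: Attach to Node Process") or via chrome://inspect.
```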
### Writing high-impact code changes
|
||||
|
||||
1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written.
|
||||
2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions.
|
||||
3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codex --help`), or relevant example projects.
|
||||
4. **Keep commits atomic.** Each commit should compile and the tests should pass. This makes reviews and potential rollbacks easier.
|
||||
|
||||
### Opening a pull request
|
||||
|
||||
- Fill in the PR template (or include similar information) - **What? Why? How?**
|
||||
- Run **all** checks locally (`npm test && npm run lint && npm run typecheck`). CI failures that could have been caught locally slow down the process.
|
||||
- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
|
||||
- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.
|
||||
|
||||
### Review process
|
||||
|
||||
1. One maintainer will be assigned as a primary reviewer.
|
||||
2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability.
|
||||
3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.
|
||||
|
||||
### Community values
|
||||
|
||||
- **Be kind and inclusive.** Treat others with respect; we follow the [Contributor Covenant](https://www.contributor-covenant.org/).
|
||||
- **Assume good intent.** Written communication is hard - err on the side of generosity.
|
||||
- **Teach & learn.** If you spot something confusing, open an issue or PR with improvements.
|
||||
|
||||
### Getting help
|
||||
|
||||
If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help.
|
||||
|
||||
Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket:
|
||||
|
||||
### Contributor license agreement (CLA)
|
||||
|
||||
All contributors **must** accept the CLA. The process is lightweight:
|
||||
|
||||
1. Open your pull request.
|
||||
2. Paste the following comment (or reply `recheck` if you've signed before):
|
||||
|
||||
```text
I have read the CLA Document and I hereby sign the CLA
```
|
||||
|
||||
3. The CLA-Assistant bot records your signature in the repo and marks the status check as passed.
|
||||
|
||||
No special Git commands, email attachments, or commit footers required.
|
||||
|
||||
#### Quick fixes
|
||||
|
||||
| Scenario | Command |
| ----------------- | ------------------------------------------------ |
| Amend last commit | `git commit --amend -s --no-edit && git push -f` |
|
||||
|
||||
The **DCO check** blocks merges until every commit in the PR carries the footer (with squash this is just the one).
|
||||
|
||||
### Releasing `codex`
|
||||
|
||||
To publish a new version of the CLI you first need to stage the npm package. A
|
||||
helper script in `codex-cli/scripts/` does all the heavy lifting. Inside the
|
||||
`codex-cli` folder run:
|
||||
|
||||
```bash
# Classic, JS implementation that includes small, native binaries for Linux sandboxing.
pnpm stage-release

# Optionally specify the temp directory to reuse between runs.
RELEASE_DIR=$(mktemp -d)
pnpm stage-release --tmp "$RELEASE_DIR"

# "Fat" package that additionally bundles the native Rust CLI binaries for
# Linux. End-users can then opt-in at runtime by setting CODEX_RUST=1.
pnpm stage-release --native
```
|
||||
|
||||
Go to the folder where the release is staged and verify that it works as intended. If so, run the following from the temp folder:
|
||||
|
||||
```bash
cd "$RELEASE_DIR"
npm publish
```
|
||||
|
||||
### Alternative build options
|
||||
|
||||
#### Nix flake development
|
||||
|
||||
Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`).
|
||||
|
||||
Enter a Nix development shell:
|
||||
|
||||
```bash
# Use either one of the commands according to which implementation you want to work with
nix develop .#codex-cli # For entering codex-cli specific shell
nix develop .#codex-rs # For entering codex-rs specific shell
```
|
||||
|
||||
This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias.
|
||||
|
||||
Build and run the CLI directly:
|
||||
|
||||
```bash
# Use either one of the commands according to which implementation you want to work with
nix build .#codex-cli # For building codex-cli
nix build .#codex-rs # For building codex-rs
./result/bin/codex --help
```
|
||||
|
||||
Run the CLI via the flake app:
|
||||
|
||||
```bash
# Use either one of the commands according to which implementation you want to work with
nix run .#codex-cli # For running codex-cli
nix run .#codex-rs # For running codex-rs
```
|
||||
|
||||
Use direnv with flakes
|
||||
|
||||
If you have direnv installed, you can use the following `.envrc` to automatically enter the Nix shell when you `cd` into the project directory:
|
||||
|
||||
```bash
cd codex-rs
echo "use flake ../flake.nix#codex-rs" >> .envrc && direnv allow
cd codex-cli
echo "use flake ../flake.nix#codex-cli" >> .envrc && direnv allow
```
|
||||
|
||||
---
|
||||
|
||||
## Security & responsible AI
|
||||
|
||||
Have you discovered a vulnerability or have concerns about model output? Please e-mail **security@openai.com** and we will respond promptly.
|
||||
|
||||
---
|
||||
|
||||
## License
|
||||
|
||||
This repository is licensed under the [Apache-2.0 License](LICENSE).
|
||||
20
codex-cli/bin/codex
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env sh
|
||||
# resolve script path in case of symlink
|
||||
SOURCE="$0"
|
||||
while [ -h "$SOURCE" ]; do
|
||||
DIR=$(dirname "$SOURCE")
|
||||
SOURCE=$(readlink "$SOURCE")
|
||||
case "$SOURCE" in
|
||||
/*) ;; # absolute path
|
||||
*) SOURCE="$DIR/$SOURCE" ;; # relative path
|
||||
esac
|
||||
done
|
||||
DIR=$(cd "$(dirname "$SOURCE")" && pwd)
|
||||
if command -v node >/dev/null 2>&1; then
|
||||
exec node "$DIR/../dist/cli.js" "$@"
|
||||
elif command -v bun >/dev/null 2>&1; then
|
||||
exec bun "$DIR/../dist/cli.js" "$@"
|
||||
else
|
||||
echo "Error: node or bun is required to run codex" >&2
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,153 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
// Unified entry point for the Codex CLI.
|
||||
/*
|
||||
* Behavior
|
||||
* =========
|
||||
* 1. By default we import the JavaScript implementation located in
|
||||
* dist/cli.js.
|
||||
*
|
||||
* 2. Developers can opt-in to a pre-compiled Rust binary by setting the
|
||||
* environment variable CODEX_RUST to a truthy value (`1`, `true`, etc.).
|
||||
* When that variable is present we resolve the correct binary for the
|
||||
* current platform / architecture and execute it via child_process.
|
||||
*
|
||||
* If the CODEX_RUST=1 is specified and there is no native binary for the
|
||||
* current platform / architecture, an error is thrown.
|
||||
*/
|
||||
|
||||
import fs from "fs";
|
||||
import path from "path";
|
||||
import { fileURLToPath, pathToFileURL } from "url";
|
||||
|
||||
// Determine whether the user explicitly wants the Rust CLI.
|
||||
|
||||
// __dirname equivalent in ESM
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
|
||||
// For the @native release of the Node module, the `use-native` file is added,
|
||||
// indicating we should default to the native binary. For other releases,
|
||||
// setting CODEX_RUST=1 will opt-in to the native binary, if included.
|
||||
const wantsNative = fs.existsSync(path.join(__dirname, "use-native")) ||
|
||||
(process.env.CODEX_RUST != null
|
||||
? ["1", "true", "yes"].includes(process.env.CODEX_RUST.toLowerCase())
|
||||
: false);
|
||||
|
||||
// Try native binary if requested.
|
||||
if (wantsNative && process.platform !== 'win32') {
|
||||
const { platform, arch } = process;
|
||||
|
||||
let targetTriple = null;
|
||||
switch (platform) {
|
||||
case "linux":
|
||||
case "android":
|
||||
switch (arch) {
|
||||
case "x64":
|
||||
targetTriple = "x86_64-unknown-linux-musl";
|
||||
break;
|
||||
case "arm64":
|
||||
targetTriple = "aarch64-unknown-linux-musl";
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case "darwin":
|
||||
switch (arch) {
|
||||
case "x64":
|
||||
targetTriple = "x86_64-apple-darwin";
|
||||
break;
|
||||
case "arm64":
|
||||
targetTriple = "aarch64-apple-darwin";
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (!targetTriple) {
|
||||
throw new Error(`Unsupported platform: ${platform} (${arch})`);
|
||||
}
|
||||
|
||||
const binaryPath = path.join(__dirname, "..", "bin", `codex-${targetTriple}`);
|
||||
|
||||
// Use an asynchronous spawn instead of spawnSync so that Node is able to
|
||||
// respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is
|
||||
// executing. This allows us to forward those signals to the child process
|
||||
// and guarantees that when either the child terminates or the parent
|
||||
// receives a fatal signal, both processes exit in a predictable manner.
|
||||
const { spawn } = await import("child_process");
|
||||
|
||||
const child = spawn(binaryPath, process.argv.slice(2), {
|
||||
stdio: "inherit",
|
||||
});
|
||||
|
||||
child.on("error", (err) => {
|
||||
// Typically triggered when the binary is missing or not executable.
|
||||
// Re-throwing here will terminate the parent with a non-zero exit code
|
||||
// while still printing a helpful stack trace.
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(err);
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
// Forward common termination signals to the child so that it shuts down
|
||||
// gracefully. In the handler we temporarily disable the default behavior of
|
||||
// exiting immediately; once the child has been signaled we simply wait for
|
||||
// its exit event which will in turn terminate the parent (see below).
|
||||
const forwardSignal = (signal) => {
|
||||
if (child.killed) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
child.kill(signal);
|
||||
} catch {
|
||||
/* ignore */
|
||||
}
|
||||
};
|
||||
|
||||
["SIGINT", "SIGTERM", "SIGHUP"].forEach((sig) => {
|
||||
process.on(sig, () => forwardSignal(sig));
|
||||
});
|
||||
|
||||
// When the child exits, mirror its termination reason in the parent so that
|
||||
// shell scripts and other tooling observe the correct exit status.
|
||||
// Wrap the lifetime of the child process in a Promise so that we can await
|
||||
// its termination in a structured way. The Promise resolves with an object
|
||||
// describing how the child exited: either via exit code or due to a signal.
|
||||
const childResult = await new Promise((resolve) => {
|
||||
child.on("exit", (code, signal) => {
|
||||
if (signal) {
|
||||
resolve({ type: "signal", signal });
|
||||
} else {
|
||||
resolve({ type: "code", exitCode: code ?? 1 });
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
if (childResult.type === "signal") {
|
||||
// Re-emit the same signal so that the parent terminates with the expected
|
||||
// semantics (this also sets the correct exit code of 128 + n).
|
||||
process.kill(process.pid, childResult.signal);
|
||||
} else {
|
||||
process.exit(childResult.exitCode);
|
||||
}
|
||||
} else {
|
||||
// Fallback: execute the original JavaScript CLI.
|
||||
|
||||
// Resolve the path to the compiled CLI bundle
|
||||
const cliPath = path.resolve(__dirname, "../dist/cli.js");
|
||||
const cliUrl = pathToFileURL(cliPath).href;
|
||||
|
||||
// Load and execute the CLI
|
||||
try {
|
||||
await import(cliUrl);
|
||||
} catch (err) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(err);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
@@ -1,8 +1,6 @@
|
||||
import * as esbuild from "esbuild";
|
||||
import * as fs from "fs";
|
||||
import * as path from "path";
|
||||
|
||||
const OUT_DIR = 'dist'
|
||||
/**
|
||||
* ink attempts to import react-devtools-core in an ESM-unfriendly way:
|
||||
*
|
||||
@@ -41,11 +39,6 @@ const isDevBuild =
|
||||
|
||||
const plugins = [ignoreReactDevToolsPlugin];
|
||||
|
||||
// Build Hygiene, ensure we drop previous dist dir and any leftover files
|
||||
const outPath = path.resolve(OUT_DIR);
|
||||
if (fs.existsSync(outPath)) {
|
||||
fs.rmSync(outPath, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
// Add a shebang that enables source‑map support for dev builds so that stack
|
||||
// traces point to the original TypeScript lines without requiring callers to
|
||||
@@ -57,7 +50,7 @@ if (isDevBuild) {
|
||||
name: "dev-shebang",
|
||||
setup(build) {
|
||||
build.onEnd(async () => {
|
||||
const outFile = path.resolve(isDevBuild ? `${OUT_DIR}/cli-dev.js` : `${OUT_DIR}/cli.js`);
|
||||
const outFile = path.resolve(isDevBuild ? "dist/cli-dev.js" : "dist/cli.js");
|
||||
let code = await fs.promises.readFile(outFile, "utf8");
|
||||
if (code.startsWith("#!")) {
|
||||
code = code.replace(/^#!.*\n/, devShebangLine);
|
||||
@@ -72,14 +65,11 @@ if (isDevBuild) {
|
||||
esbuild
|
||||
.build({
|
||||
entryPoints: ["src/cli.tsx"],
|
||||
// Do not bundle the contents of package.json at build time: always read it
|
||||
// at runtime.
|
||||
external: ["../package.json"],
|
||||
bundle: true,
|
||||
format: "esm",
|
||||
platform: "node",
|
||||
tsconfig: "tsconfig.json",
|
||||
outfile: isDevBuild ? `${OUT_DIR}/cli-dev.js` : `${OUT_DIR}/cli.js`,
|
||||
outfile: isDevBuild ? "dist/cli-dev.js" : "dist/cli.js",
|
||||
minify: !isDevBuild,
|
||||
sourcemap: isDevBuild ? "inline" : true,
|
||||
plugins,
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
{ pkgs, monorep-deps ? [], ... }:
|
||||
let
|
||||
node = pkgs.nodejs_22;
|
||||
in
|
||||
rec {
|
||||
package = pkgs.buildNpmPackage {
|
||||
pname = "codex-cli";
|
||||
version = "0.1.0";
|
||||
src = ./.;
|
||||
npmDepsHash = "sha256-3tAalmh50I0fhhd7XreM+jvl0n4zcRhqygFNB1Olst8";
|
||||
nodejs = node;
|
||||
npmInstallFlags = [ "--frozen-lockfile" ];
|
||||
meta = with pkgs.lib; {
|
||||
description = "OpenAI Codex command‑line interface";
|
||||
license = licenses.asl20;
|
||||
homepage = "https://github.com/openai/codex";
|
||||
};
|
||||
};
|
||||
devShell = pkgs.mkShell {
|
||||
name = "codex-cli-dev";
|
||||
buildInputs = monorep-deps ++ [
|
||||
node
|
||||
pkgs.pnpm
|
||||
];
|
||||
shellHook = ''
|
||||
echo "Entering development shell for codex-cli"
|
||||
# cd codex-cli
|
||||
if [ -f package-lock.json ]; then
|
||||
pnpm ci || echo "npm ci failed"
|
||||
else
|
||||
pnpm install || echo "npm install failed"
|
||||
fi
|
||||
npm run build || echo "npm build failed"
|
||||
export PATH=$PWD/node_modules/.bin:$PATH
|
||||
alias codex="node $PWD/dist/cli.js"
|
||||
'';
|
||||
};
|
||||
app = {
|
||||
type = "app";
|
||||
program = "${package}/bin/codex";
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
name: "impossible-pong"
|
||||
description: |
|
||||
Update index.html with the following features:
|
||||
- Add an overlaid styled popup to start the game on first load
|
||||
- Add an overlayed styled popup to start the game on first load
|
||||
- Between each point, show a 3 second countdown (this should be skipped if a player wins)
|
||||
- After each game the AI wins, display text at the bottom of the screen with lighthearted insults for the player
|
||||
- Add a leaderboard to the right of the court that shows how many games each player has won.
|
||||
|
||||
@@ -13,7 +13,7 @@ act,prompt,for_devs
|
||||
"Advertiser","I want you to act as an advertiser. You will create a campaign to promote a product or service of your choice. You will choose a target audience, develop key messages and slogans, select the media channels for promotion, and decide on any additional activities needed to reach your goals. My first suggestion request is ""I need help creating an advertising campaign for a new type of energy drink targeting young adults aged 18-30.""",FALSE
|
||||
"Storyteller","I want you to act as a storyteller. You will come up with entertaining stories that are engaging, imaginative and captivating for the audience. It can be fairy tales, educational stories or any other type of stories which has the potential to capture people's attention and imagination. Depending on the target audience, you may choose specific themes or topics for your storytelling session e.g., if it's children then you can talk about animals; If it's adults then history-based tales might engage them better etc. My first request is ""I need an interesting story on perseverance.""",FALSE
|
||||
"Football Commentator","I want you to act as a football commentator. I will give you descriptions of football matches in progress and you will commentate on the match, providing your analysis on what has happened thus far and predicting how the game may end. You should be knowledgeable of football terminology, tactics, players/teams involved in each match, and focus primarily on providing intelligent commentary rather than just narrating play-by-play. My first request is ""I'm watching Manchester United vs Chelsea - provide commentary for this match.""",FALSE
|
||||
"Stand-up Comedian","I want you to act as a stand-up comedian. I will provide you with some topics related to current events and you will use your with, creativity, and observational skills to create a routine based on those topics. You should also be sure to incorporate personal anecdotes or experiences into the routine in order to make it more relatable and engaging for the audience. My first request is ""I want an humorous take on politics.""",FALSE
|
||||
"Stand-up Comedian","I want you to act as a stand-up comedian. I will provide you with some topics related to current events and you will use your wit, creativity, and observational skills to create a routine based on those topics. You should also be sure to incorporate personal anecdotes or experiences into the routine in order to make it more relatable and engaging for the audience. My first request is ""I want an humorous take on politics.""",FALSE
|
||||
"Motivational Coach","I want you to act as a motivational coach. I will provide you with some information about someone's goals and challenges, and it will be your job to come up with strategies that can help this person achieve their goals. This could involve providing positive affirmations, giving helpful advice or suggesting activities they can do to reach their end goal. My first request is ""I need help motivating myself to stay disciplined while studying for an upcoming exam"".",FALSE
|
||||
"Composer","I want you to act as a composer. I will provide the lyrics to a song and you will create music for it. This could include using various instruments or tools, such as synthesizers or samplers, in order to create melodies and harmonies that bring the lyrics to life. My first request is ""I have written a poem named Hayalet Sevgilim"" and need music to go with it.""""""",FALSE
|
||||
"Debater","I want you to act as a debater. I will provide you with some topics related to current events and your task is to research both sides of the debates, present valid arguments for each side, refute opposing points of view, and draw persuasive conclusions based on evidence. Your goal is to help people come away from the discussion with increased knowledge and insight into the topic at hand. My first request is ""I want an opinion piece about Deno.""",FALSE
|
||||
@@ -23,7 +23,7 @@ act,prompt,for_devs
|
||||
"Movie Critic","I want you to act as a movie critic. You will develop an engaging and creative movie review. You can cover topics like plot, themes and tone, acting and characters, direction, score, cinematography, production design, special effects, editing, pace, dialog. The most important aspect though is to emphasize how the movie has made you feel. What has really resonated with you. You can also be critical about the movie. Please avoid spoilers. My first request is ""I need to write a movie review for the movie Interstellar""",FALSE
|
||||
"Relationship Coach","I want you to act as a relationship coach. I will provide some details about the two people involved in a conflict, and it will be your job to come up with suggestions on how they can work through the issues that are separating them. This could include advice on communication techniques or different strategies for improving their understanding of one another's perspectives. My first request is ""I need help solving conflicts between my spouse and myself.""",FALSE
|
||||
"Poet","I want you to act as a poet. You will create poems that evoke emotions and have the power to stir people's soul. Write on any topic or theme but make sure your words convey the feeling you are trying to express in beautiful yet meaningful ways. You can also come up with short verses that are still powerful enough to leave an imprint in readers' minds. My first request is ""I need a poem about love.""",FALSE
|
||||
"Rapper","I want you to act as a rapper. You will come up with powerful and meaningful lyrics, beats and rhythm that can 'wow' the audience. Your lyrics should have an intriguing meaning and message which people can relate too. When it comes to choosing your beat, make sure it is catchy yet relevant to your words, so that when combined they make an explosion of sound every time! My first request is ""I need a rap song about finding strength within yourself.""",FALSE
|
||||
"Rapper","I want you to act as a rapper. You will come up with powerful and meaningful lyrics, beats and rhythm that can 'wow' the audience. Your lyrics should have an intriguing meaning and message which people can relate too. When it comes to choosing your beat, make sure it is catchy yet relevant to your words, so that when combined they make an explosion of sound everytime! My first request is ""I need a rap song about finding strength within yourself.""",FALSE
|
||||
"Motivational Speaker","I want you to act as a motivational speaker. Put together words that inspire action and make people feel empowered to do something beyond their abilities. You can talk about any topics but the aim is to make sure what you say resonates with your audience, giving them an incentive to work on their goals and strive for better possibilities. My first request is ""I need a speech about how everyone should never give up.""",FALSE
|
||||
"Philosophy Teacher","I want you to act as a philosophy teacher. I will provide some topics related to the study of philosophy, and it will be your job to explain these concepts in an easy-to-understand manner. This could include providing examples, posing questions or breaking down complex ideas into smaller pieces that are easier to comprehend. My first request is ""I need help understanding how different philosophical theories can be applied in everyday life.""",FALSE
|
||||
"Philosopher","I want you to act as a philosopher. I will provide some topics or questions related to the study of philosophy, and it will be your job to explore these concepts in depth. This could involve conducting research into various philosophical theories, proposing new ideas or finding creative solutions for solving complex problems. My first request is ""I need help developing an ethical framework for decision making.""",FALSE
|
||||
|
||||
|
7372
codex-cli/package-lock.json
generated
Normal file
File diff suppressed because it is too large
@@ -1,9 +1,9 @@
|
||||
{
|
||||
"name": "@openai/codex",
|
||||
"version": "0.0.0-dev",
|
||||
"version": "0.1.2504172351",
|
||||
"license": "Apache-2.0",
|
||||
"bin": {
|
||||
"codex": "bin/codex.js"
|
||||
"codex": "bin/codex"
|
||||
},
|
||||
"type": "module",
|
||||
"engines": {
|
||||
@@ -20,31 +20,34 @@
|
||||
"typecheck": "tsc --noEmit",
|
||||
"build": "node build.mjs",
|
||||
"build:dev": "NODE_ENV=development node build.mjs --dev && NODE_OPTIONS=--enable-source-maps node dist/cli-dev.js",
|
||||
"stage-release": "./scripts/stage_release.sh"
|
||||
"release:readme": "cp ../README.md ./README.md",
|
||||
"release:version": "TS=$(date +%y%m%d%H%M) && sed -E -i'' -e \"s/\\\"0\\.1\\.[0-9]{10}\\\"/\\\"0.1.${TS}\\\"/g\" package.json src/utils/session.ts",
|
||||
"release:build-and-publish": "npm run build && npm publish",
|
||||
"release": "npm run release:readme && npm run release:version && npm install && npm run release:build-and-publish",
|
||||
"prepare": "husky",
|
||||
"pre-commit": "lint-staged"
|
||||
},
|
||||
"files": [
|
||||
"README.md",
|
||||
"bin",
|
||||
"dist"
|
||||
"dist",
|
||||
"src"
|
||||
],
|
||||
"dependencies": {
|
||||
"@inkjs/ui": "^2.0.0",
|
||||
"chalk": "^5.2.0",
|
||||
"diff": "^7.0.0",
|
||||
"dotenv": "^16.1.4",
|
||||
"express": "^5.1.0",
|
||||
"fast-deep-equal": "^3.1.3",
|
||||
"fast-npm-meta": "^0.4.2",
|
||||
"figures": "^6.1.0",
|
||||
"file-type": "^20.1.0",
|
||||
"https-proxy-agent": "^7.0.6",
|
||||
"ink": "^5.2.0",
|
||||
"js-yaml": "^4.1.0",
|
||||
"marked": "^15.0.7",
|
||||
"marked-terminal": "^7.3.0",
|
||||
"meow": "^13.2.0",
|
||||
"open": "^10.1.0",
|
||||
"openai": "^4.95.1",
|
||||
"package-manager-detector": "^1.2.0",
|
||||
"openai": "^4.89.0",
|
||||
"react": "^18.2.0",
|
||||
"shell-quote": "^1.8.2",
|
||||
"strip-ansi": "^7.1.0",
|
||||
@@ -55,16 +58,12 @@
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.22.0",
|
||||
"@types/diff": "^7.0.2",
|
||||
"@types/express": "^5.0.1",
|
||||
"@types/js-yaml": "^4.0.9",
|
||||
"@types/marked-terminal": "^6.1.1",
|
||||
"@types/react": "^18.0.32",
|
||||
"@types/semver": "^7.7.0",
|
||||
"@types/shell-quote": "^1.7.5",
|
||||
"@types/which": "^3.0.4",
|
||||
"@typescript-eslint/eslint-plugin": "^7.18.0",
|
||||
"@typescript-eslint/parser": "^7.18.0",
|
||||
"boxen": "^8.0.1",
|
||||
"esbuild": "^0.25.2",
|
||||
"eslint-plugin-import": "^2.31.0",
|
||||
"eslint-plugin-react": "^7.32.2",
|
||||
@@ -72,18 +71,24 @@
|
||||
"eslint-plugin-react-refresh": "^0.4.19",
|
||||
"husky": "^9.1.7",
|
||||
"ink-testing-library": "^3.0.0",
|
||||
"prettier": "^3.5.3",
|
||||
"lint-staged": "^15.5.1",
|
||||
"prettier": "^2.8.7",
|
||||
"punycode": "^2.3.1",
|
||||
"semver": "^7.7.1",
|
||||
"ts-node": "^10.9.1",
|
||||
"typescript": "^5.0.3",
|
||||
"vite": "^6.3.4",
|
||||
"vitest": "^3.1.2",
|
||||
"whatwg-url": "^14.2.0",
|
||||
"which": "^5.0.0"
|
||||
"vitest": "^3.0.9",
|
||||
"whatwg-url": "^14.2.0"
|
||||
},
|
||||
"resolutions": {
|
||||
"braces": "^3.0.3",
|
||||
"micromatch": "^4.0.8",
|
||||
"semver": "^7.7.1"
|
||||
},
|
||||
"overrides": {
|
||||
"punycode": "^2.3.1"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/openai/codex.git"
|
||||
"url": "https://github.com/openai/codex"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
# npm releases
|
||||
|
||||
Run the following:
|
||||
|
||||
To build the 0.2.x or later version of the npm module, which runs the Rust version of the CLI, build it as follows:
|
||||
|
||||
```bash
|
||||
./codex-cli/scripts/stage_rust_release.py --release-version 0.6.0
|
||||
```
|
||||
@@ -8,9 +8,9 @@ pushd "$SCRIPT_DIR/.." >> /dev/null || {
|
||||
echo "Error: Failed to change directory to $SCRIPT_DIR/.."
|
||||
exit 1
|
||||
}
|
||||
pnpm install
|
||||
pnpm run build
|
||||
npm install
|
||||
npm run build
|
||||
rm -rf ./dist/openai-codex-*.tgz
|
||||
pnpm pack --pack-destination ./dist
|
||||
npm pack --pack-destination ./dist
|
||||
mv ./dist/openai-codex-*.tgz ./dist/codex.tgz
|
||||
docker build -t codex -f "./Dockerfile" .
|
||||
|
||||
@@ -2,26 +2,6 @@
|
||||
set -euo pipefail # Exit on error, undefined vars, and pipeline failures
|
||||
IFS=$'\n\t' # Stricter word splitting
|
||||
|
||||
# Read allowed domains from file
|
||||
ALLOWED_DOMAINS_FILE="/etc/codex/allowed_domains.txt"
|
||||
if [ -f "$ALLOWED_DOMAINS_FILE" ]; then
|
||||
ALLOWED_DOMAINS=()
|
||||
while IFS= read -r domain; do
|
||||
ALLOWED_DOMAINS+=("$domain")
|
||||
done < "$ALLOWED_DOMAINS_FILE"
|
||||
echo "Using domains from file: ${ALLOWED_DOMAINS[*]}"
|
||||
else
|
||||
# Fallback to default domains
|
||||
ALLOWED_DOMAINS=("api.openai.com")
|
||||
echo "Domains file not found, using default: ${ALLOWED_DOMAINS[*]}"
|
||||
fi
|
||||
|
||||
# Ensure we have at least one domain
|
||||
if [ ${#ALLOWED_DOMAINS[@]} -eq 0 ]; then
|
||||
echo "ERROR: No allowed domains specified"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Flush existing rules and delete existing ipsets
|
||||
iptables -F
|
||||
iptables -X
|
||||
@@ -44,7 +24,8 @@ iptables -A OUTPUT -o lo -j ACCEPT
|
||||
ipset create allowed-domains hash:net
|
||||
|
||||
# Resolve and add other allowed domains
|
||||
for domain in "${ALLOWED_DOMAINS[@]}"; do
|
||||
for domain in \
|
||||
"api.openai.com"; do
|
||||
echo "Resolving $domain..."
|
||||
ips=$(dig +short A "$domain")
|
||||
if [ -z "$ips" ]; then
|
||||
@@ -106,7 +87,7 @@ else
|
||||
echo "Firewall verification passed - unable to reach https://example.com as expected"
|
||||
fi
|
||||
|
||||
# Always verify OpenAI API access is working
|
||||
# Verify OpenAI API access
|
||||
if ! curl --connect-timeout 5 https://api.openai.com >/dev/null 2>&1; then
|
||||
echo "ERROR: Firewall verification failed - unable to reach https://api.openai.com"
|
||||
exit 1
|
||||
|
||||
@@ -1,106 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Install native runtime dependencies for codex-cli.
|
||||
#
|
||||
# By default the script copies the sandbox binaries that are required at
|
||||
# runtime. When called with the --full-native flag, it additionally
|
||||
# bundles pre-built Rust CLI binaries so that the resulting npm package can run
|
||||
# the native implementation when users set CODEX_RUST=1.
|
||||
#
|
||||
# Usage
|
||||
# install_native_deps.sh [--full-native] [--workflow-url URL] [CODEX_CLI_ROOT]
|
||||
#
|
||||
# The optional RELEASE_ROOT is the path that contains package.json. Omitting
|
||||
# it installs the binaries into the repository's own bin/ folder to support
|
||||
# local development.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# ------------------
|
||||
# Parse arguments
|
||||
# ------------------
|
||||
|
||||
CODEX_CLI_ROOT=""
|
||||
INCLUDE_RUST=0
|
||||
|
||||
# Until we start publishing stable GitHub releases, we have to grab the binaries
|
||||
# from the GitHub Action that created them. Update the URL below to point to the
|
||||
# appropriate workflow run:
|
||||
WORKFLOW_URL="https://github.com/openai/codex/actions/runs/15981617627"
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--full-native)
|
||||
INCLUDE_RUST=1
|
||||
;;
|
||||
--workflow-url)
|
||||
shift || { echo "--workflow-url requires an argument"; exit 1; }
|
||||
if [ -n "$1" ]; then
|
||||
WORKFLOW_URL="$1"
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
if [[ -z "$CODEX_CLI_ROOT" ]]; then
|
||||
CODEX_CLI_ROOT="$1"
|
||||
else
|
||||
echo "Unexpected argument: $1" >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Determine where the binaries should be installed.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
if [ -n "$CODEX_CLI_ROOT" ]; then
|
||||
# The caller supplied a release root directory.
|
||||
BIN_DIR="$CODEX_CLI_ROOT/bin"
|
||||
else
|
||||
# No argument; fall back to the repo’s own bin directory.
|
||||
# Resolve the path of this script, then walk up to the repo root.
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
CODEX_CLI_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
|
||||
BIN_DIR="$CODEX_CLI_ROOT/bin"
|
||||
fi
|
||||
|
||||
# Make sure the destination directory exists.
|
||||
mkdir -p "$BIN_DIR"
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Download and decompress the artifacts from the GitHub Actions workflow.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
WORKFLOW_ID="${WORKFLOW_URL##*/}"
|
||||
|
||||
ARTIFACTS_DIR="$(mktemp -d)"
|
||||
trap 'rm -rf "$ARTIFACTS_DIR"' EXIT
|
||||
|
||||
# NB: The GitHub CLI `gh` must be installed and authenticated.
|
||||
gh run download --dir "$ARTIFACTS_DIR" --repo openai/codex "$WORKFLOW_ID"
|
||||
|
||||
# Decompress the artifacts for Linux sandboxing.
|
||||
zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/codex-linux-sandbox-x86_64-unknown-linux-musl.zst" \
|
||||
-o "$BIN_DIR/codex-linux-sandbox-x64"
|
||||
|
||||
zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/codex-linux-sandbox-aarch64-unknown-linux-musl.zst" \
|
||||
-o "$BIN_DIR/codex-linux-sandbox-arm64"
|
||||
|
||||
if [[ "$INCLUDE_RUST" -eq 1 ]]; then
|
||||
# x64 Linux
|
||||
zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/codex-x86_64-unknown-linux-musl.zst" \
|
||||
-o "$BIN_DIR/codex-x86_64-unknown-linux-musl"
|
||||
# ARM64 Linux
|
||||
zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/codex-aarch64-unknown-linux-musl.zst" \
|
||||
-o "$BIN_DIR/codex-aarch64-unknown-linux-musl"
|
||||
# x64 macOS
|
||||
zstd -d "$ARTIFACTS_DIR/x86_64-apple-darwin/codex-x86_64-apple-darwin.zst" \
|
||||
-o "$BIN_DIR/codex-x86_64-apple-darwin"
|
||||
# ARM64 macOS
|
||||
zstd -d "$ARTIFACTS_DIR/aarch64-apple-darwin/codex-aarch64-apple-darwin.zst" \
|
||||
-o "$BIN_DIR/codex-aarch64-apple-darwin"
|
||||
fi
|
||||
|
||||
echo "Installed native dependencies into $BIN_DIR"
|
||||
@@ -10,8 +10,6 @@ set -e
|
||||
|
||||
# Default the work directory to WORKSPACE_ROOT_DIR if not provided.
|
||||
WORK_DIR="${WORKSPACE_ROOT_DIR:-$(pwd)}"
|
||||
# Default allowed domains - can be overridden with OPENAI_ALLOWED_DOMAINS env var
|
||||
OPENAI_ALLOWED_DOMAINS="${OPENAI_ALLOWED_DOMAINS:-api.openai.com}"
|
||||
|
||||
# Parse optional flag.
|
||||
if [ "$1" = "--work_dir" ]; then
|
||||
@@ -25,16 +23,6 @@ fi
|
||||
|
||||
WORK_DIR=$(realpath "$WORK_DIR")
|
||||
|
||||
# Generate a unique container name based on the normalized work directory
|
||||
CONTAINER_NAME="codex_$(echo "$WORK_DIR" | sed 's/\//_/g' | sed 's/[^a-zA-Z0-9_-]//g')"
|
||||
|
||||
# Define cleanup to remove the container on script exit, ensuring no leftover containers
|
||||
cleanup() {
|
||||
docker rm -f "$CONTAINER_NAME" >/dev/null 2>&1 || true
|
||||
}
|
||||
# Trap EXIT to invoke cleanup regardless of how the script terminates
|
||||
trap cleanup EXIT
|
||||
|
||||
# Ensure a command is provided.
|
||||
if [ "$#" -eq 0 ]; then
|
||||
echo "Usage: $0 [--work_dir directory] \"COMMAND\""
|
||||
@@ -47,17 +35,11 @@ if [ -z "$WORK_DIR" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Verify that OPENAI_ALLOWED_DOMAINS is not empty
|
||||
if [ -z "$OPENAI_ALLOWED_DOMAINS" ]; then
|
||||
echo "Error: OPENAI_ALLOWED_DOMAINS is empty."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Kill any existing container for the working directory using cleanup(), centralizing removal logic.
|
||||
cleanup
|
||||
# Remove any existing container named 'codex'.
|
||||
docker rm -f codex 2>/dev/null || true
|
||||
|
||||
# Run the container with the specified directory mounted at the same path inside the container.
|
||||
docker run --name "$CONTAINER_NAME" -d \
|
||||
docker run --name codex -d \
|
||||
-e OPENAI_API_KEY \
|
||||
--cap-add=NET_ADMIN \
|
||||
--cap-add=NET_RAW \
|
||||
@@ -65,25 +47,8 @@ docker run --name "$CONTAINER_NAME" -d \
|
||||
codex \
|
||||
sleep infinity
|
||||
|
||||
# Write the allowed domains to a file in the container
|
||||
docker exec --user root "$CONTAINER_NAME" bash -c "mkdir -p /etc/codex"
|
||||
for domain in $OPENAI_ALLOWED_DOMAINS; do
|
||||
# Validate domain format to prevent injection
|
||||
if [[ ! "$domain" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then
|
||||
echo "Error: Invalid domain format: $domain"
|
||||
exit 1
|
||||
fi
|
||||
echo "$domain" | docker exec --user root -i "$CONTAINER_NAME" bash -c "cat >> /etc/codex/allowed_domains.txt"
|
||||
done
|
||||
|
||||
# Set proper permissions on the domains file
|
||||
docker exec --user root "$CONTAINER_NAME" bash -c "chmod 444 /etc/codex/allowed_domains.txt && chown root:root /etc/codex/allowed_domains.txt"
|
||||
|
||||
# Initialize the firewall inside the container as root user
|
||||
docker exec --user root "$CONTAINER_NAME" bash -c "/usr/local/bin/init_firewall.sh"
|
||||
|
||||
# Remove the firewall script after running it
|
||||
docker exec --user root "$CONTAINER_NAME" bash -c "rm -f /usr/local/bin/init_firewall.sh"
|
||||
# Initialize the firewall inside the container.
|
||||
docker exec codex bash -c "sudo /usr/local/bin/init_firewall.sh"
|
||||
|
||||
# Execute the provided command in the container, ensuring it runs in the work directory.
|
||||
# We use a parameterized bash command to safely handle the command and directory.
|
||||
@@ -92,4 +57,4 @@ quoted_args=""
|
||||
for arg in "$@"; do
|
||||
quoted_args+=" $(printf '%q' "$arg")"
|
||||
done
|
||||
docker exec -it "$CONTAINER_NAME" bash -c "cd \"/app$WORK_DIR\" && codex --full-auto ${quoted_args}"
|
||||
docker exec -it codex bash -c "cd \"/app$WORK_DIR\" && codex --full-auto ${quoted_args}"
|
||||
|
||||
@@ -1,154 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# -----------------------------------------------------------------------------
|
||||
# stage_release.sh
|
||||
# -----------------------------------------------------------------------------
|
||||
# Stages an npm release for @openai/codex.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# --tmp <dir> : Use <dir> instead of a freshly created temp directory.
|
||||
# --native : Bundle the pre-built Rust CLI binaries for Linux alongside
|
||||
# the JavaScript implementation (a so-called "fat" package).
|
||||
# -h|--help : Print usage.
|
||||
#
|
||||
# When --native is supplied we copy the linux-sandbox binaries (as before) and
|
||||
# additionally fetch / unpack the two Rust targets that we currently support:
|
||||
# - x86_64-unknown-linux-musl
|
||||
# - aarch64-unknown-linux-musl
|
||||
#
|
||||
# NOTE: This script is intended to be run from the repository root via
|
||||
# `pnpm --filter codex-cli stage-release ...` or inside codex-cli with the
|
||||
# helper script entry in package.json (`pnpm stage-release ...`).
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Helper - usage / flag parsing
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
Usage: $(basename "$0") [--tmp DIR] [--native] [--version VERSION]
|
||||
|
||||
Options
|
||||
--tmp DIR Use DIR to stage the release (defaults to a fresh mktemp dir)
|
||||
--native Bundle Rust binaries for Linux (fat package)
|
||||
--version Specify the version to release (defaults to a timestamp-based version)
|
||||
-h, --help Show this help
|
||||
|
||||
Legacy positional argument: the first non-flag argument is still interpreted
|
||||
as the temporary directory (for backwards compatibility) but is deprecated.
|
||||
EOF
|
||||
exit "${1:-0}"
|
||||
}
|
||||
|
||||
TMPDIR=""
|
||||
INCLUDE_NATIVE=0
|
||||
# Default to a timestamp-based version (keep same scheme as before)
|
||||
VERSION="$(printf '0.1.%d' "$(date +%y%m%d%H%M)")"
|
||||
WORKFLOW_URL=""
|
||||
|
||||
# Manual flag parser - Bash getopts does not handle GNU long options well.
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--tmp)
|
||||
shift || { echo "--tmp requires an argument"; usage 1; }
|
||||
TMPDIR="$1"
|
||||
;;
|
||||
--tmp=*)
|
||||
TMPDIR="${1#*=}"
|
||||
;;
|
||||
--native)
|
||||
INCLUDE_NATIVE=1
|
||||
;;
|
||||
--version)
|
||||
shift || { echo "--version requires an argument"; usage 1; }
|
||||
VERSION="$1"
|
||||
;;
|
||||
--workflow-url)
|
||||
shift || { echo "--workflow-url requires an argument"; exit 1; }
|
||||
WORKFLOW_URL="$1"
|
||||
;;
|
||||
-h|--help)
|
||||
usage 0
|
||||
;;
|
||||
--*)
|
||||
echo "Unknown option: $1" >&2
|
||||
usage 1
|
||||
;;
|
||||
*)
|
||||
echo "Unexpected extra argument: $1" >&2
|
||||
usage 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
# Fallback when the caller did not specify a directory.
|
||||
# If no directory was specified create a fresh temporary one.
|
||||
if [[ -z "$TMPDIR" ]]; then
|
||||
TMPDIR="$(mktemp -d)"
|
||||
fi
|
||||
|
||||
# Ensure the directory exists, then resolve to an absolute path.
|
||||
mkdir -p "$TMPDIR"
|
||||
TMPDIR="$(cd "$TMPDIR" && pwd)"
|
||||
|
||||
# Main build logic
|
||||
|
||||
echo "Staging release in $TMPDIR"
|
||||
|
||||
# The script lives in codex-cli/scripts/ - change into codex-cli root so that
|
||||
# relative paths keep working.
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
CODEX_CLI_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
|
||||
|
||||
pushd "$CODEX_CLI_ROOT" >/dev/null
|
||||
|
||||
# 1. Build the JS artifacts ---------------------------------------------------
|
||||
|
||||
pnpm install
|
||||
pnpm build
|
||||
|
||||
# Paths inside the staged package
|
||||
mkdir -p "$TMPDIR/bin"
|
||||
|
||||
cp -r bin/codex.js "$TMPDIR/bin/codex.js"
|
||||
cp -r dist "$TMPDIR/dist"
|
||||
cp -r src "$TMPDIR/src" # keep source for TS sourcemaps
|
||||
cp ../README.md "$TMPDIR" || true # README is one level up - ignore if missing
|
||||
|
||||
# Modify package.json - bump version and optionally add the native directory to
|
||||
# the files array so that the binaries are published to npm.
|
||||
|
||||
jq --arg version "$VERSION" \
|
||||
'.version = $version' \
|
||||
package.json > "$TMPDIR/package.json"
|
||||
|
||||
# 2. Native runtime deps (sandbox plus optional Rust binaries)
|
||||
|
||||
if [[ "$INCLUDE_NATIVE" -eq 1 ]]; then
|
||||
./scripts/install_native_deps.sh --full-native --workflow-url "$WORKFLOW_URL" "$TMPDIR"
|
||||
touch "${TMPDIR}/bin/use-native"
|
||||
else
|
||||
./scripts/install_native_deps.sh "$TMPDIR"
|
||||
fi
|
||||
|
||||
popd >/dev/null
|
||||
|
||||
echo "Staged version $VERSION for release in $TMPDIR"
|
||||
|
||||
if [[ "$INCLUDE_NATIVE" -eq 1 ]]; then
|
||||
echo "Verify the CLI:"
|
||||
echo " node ${TMPDIR}/bin/codex.js --version"
|
||||
echo " node ${TMPDIR}/bin/codex.js --help"
|
||||
else
|
||||
echo "Test Node:"
|
||||
echo " node ${TMPDIR}/bin/codex.js --help"
|
||||
fi
|
||||
|
||||
# Print final hint for convenience
|
||||
if [[ "$INCLUDE_NATIVE" -eq 1 ]]; then
|
||||
echo "Next: cd \"$TMPDIR\" && npm publish --tag native"
|
||||
else
|
||||
echo "Next: cd \"$TMPDIR\" && npm publish"
|
||||
fi
|
||||
@@ -1,62 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def main() -> int:
|
||||
parser = argparse.ArgumentParser(
|
||||
description="""Stage a release for the npm module.
|
||||
|
||||
Run this after the GitHub Release has been created and use
|
||||
`--release-version` to specify the version to release.
|
||||
"""
|
||||
)
|
||||
parser.add_argument(
|
||||
"--release-version", required=True, help="Version to release, e.g., 0.3.0"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
version = args.release_version
|
||||
|
||||
gh_run = subprocess.run(
|
||||
[
|
||||
"gh",
|
||||
"run",
|
||||
"list",
|
||||
"--branch",
|
||||
f"rust-v{version}",
|
||||
"--json",
|
||||
"workflowName,url,headSha",
|
||||
"--jq",
|
||||
'first(.[] | select(.workflowName == "rust-release"))',
|
||||
],
|
||||
stdout=subprocess.PIPE,
|
||||
check=True,
|
||||
)
|
||||
gh_run.check_returncode()
|
||||
workflow = json.loads(gh_run.stdout)
|
||||
sha = workflow["headSha"]
|
||||
|
||||
print(f"should `git checkout {sha}`")
|
||||
|
||||
current_dir = Path(__file__).parent.resolve()
|
||||
stage_release = subprocess.run(
|
||||
[
|
||||
current_dir / "stage_release.sh",
|
||||
"--version",
|
||||
version,
|
||||
"--workflow-url",
|
||||
workflow["url"],
|
||||
"--native",
|
||||
]
|
||||
)
|
||||
stage_release.check_returncode()
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -1,13 +1,12 @@
|
||||
import type { ApprovalPolicy } from "./approvals";
|
||||
import type { AppConfig } from "./utils/config";
|
||||
import type { TerminalChatSession } from "./utils/session.js";
|
||||
import type { ResponseItem } from "openai/resources/responses/responses";
|
||||
|
||||
import TerminalChat from "./components/chat/terminal-chat";
|
||||
import TerminalChatPastRollout from "./components/chat/terminal-chat-past-rollout";
|
||||
import { checkInGit } from "./utils/check-in-git";
|
||||
import { CLI_VERSION, type TerminalChatSession } from "./utils/session.js";
|
||||
import { onExit } from "./utils/terminal";
|
||||
import { CLI_VERSION } from "./version";
|
||||
import { ConfirmInput } from "@inkjs/ui";
|
||||
import { Box, Text, useApp, useStdin } from "ink";
|
||||
import React, { useMemo, useState } from "react";
|
||||
@@ -50,7 +49,6 @@ export default function App({
|
||||
<TerminalChatPastRollout
|
||||
session={rollout.session}
|
||||
items={rollout.items}
|
||||
fileOpener={config.fileOpener}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -71,14 +71,13 @@ export type ApprovalPolicy =
|
||||
*/
|
||||
export function canAutoApprove(
|
||||
command: ReadonlyArray<string>,
|
||||
workdir: string | undefined,
|
||||
policy: ApprovalPolicy,
|
||||
writableRoots: ReadonlyArray<string>,
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): SafetyAssessment {
|
||||
if (command[0] === "apply_patch") {
|
||||
return command.length === 2 && typeof command[1] === "string"
|
||||
? canAutoApproveApplyPatch(command[1], workdir, writableRoots, policy)
|
||||
? canAutoApproveApplyPatch(command[1], writableRoots, policy)
|
||||
: {
|
||||
type: "reject",
|
||||
reason: "Invalid apply_patch command",
|
||||
@@ -104,12 +103,7 @@ export function canAutoApprove(
|
||||
) {
|
||||
const applyPatchArg = tryParseApplyPatch(command[2]);
|
||||
if (applyPatchArg != null) {
|
||||
return canAutoApproveApplyPatch(
|
||||
applyPatchArg,
|
||||
workdir,
|
||||
writableRoots,
|
||||
policy,
|
||||
);
|
||||
return canAutoApproveApplyPatch(applyPatchArg, writableRoots, policy);
|
||||
}
|
||||
|
||||
let bashCmd;
|
||||
@@ -141,8 +135,8 @@ export function canAutoApprove(
|
||||
// bashCmd could be a mix of strings and operators, e.g.:
|
||||
// "ls || (true && pwd)" => [ 'ls', { op: '||' }, '(', 'true', { op: '&&' }, 'pwd', ')' ]
|
||||
// We try to ensure that *every* command segment is deemed safe and that
|
||||
// all operators belong to an allow-list. If so, the entire expression is
|
||||
// considered auto-approvable.
|
||||
// all operators belong to an allow‑list. If so, the entire expression is
|
||||
// considered auto‑approvable.
|
||||
|
||||
const shellSafe = isEntireShellExpressionSafe(bashCmd);
|
||||
if (shellSafe != null) {
|
||||
@@ -168,7 +162,6 @@ export function canAutoApprove(
|
||||
|
||||
function canAutoApproveApplyPatch(
|
||||
applyPatchArg: string,
|
||||
workdir: string | undefined,
|
||||
writableRoots: ReadonlyArray<string>,
|
||||
policy: ApprovalPolicy,
|
||||
): SafetyAssessment {
|
||||
@@ -186,13 +179,7 @@ function canAutoApproveApplyPatch(
|
||||
break;
|
||||
}
|
||||
|
||||
if (
|
||||
isWritePatchConstrainedToWritablePaths(
|
||||
applyPatchArg,
|
||||
workdir,
|
||||
writableRoots,
|
||||
)
|
||||
) {
|
||||
if (isWritePatchConstrainedToWritablePaths(applyPatchArg, writableRoots)) {
|
||||
return {
|
||||
type: "auto-approve",
|
||||
reason: "apply_patch command is constrained to writable paths",
|
||||
@@ -221,7 +208,6 @@ function canAutoApproveApplyPatch(
|
||||
*/
|
||||
function isWritePatchConstrainedToWritablePaths(
|
||||
applyPatchArg: string,
|
||||
workdir: string | undefined,
|
||||
writableRoots: ReadonlyArray<string>,
|
||||
): boolean {
|
||||
// `identify_files_needed()` returns a list of files that will be modified or
|
||||
@@ -236,12 +222,10 @@ function isWritePatchConstrainedToWritablePaths(
|
||||
return (
|
||||
allPathsConstrainedTowritablePaths(
|
||||
identify_files_needed(applyPatchArg),
|
||||
workdir,
|
||||
writableRoots,
|
||||
) &&
|
||||
allPathsConstrainedTowritablePaths(
|
||||
identify_files_added(applyPatchArg),
|
||||
workdir,
|
||||
writableRoots,
|
||||
)
|
||||
);
|
||||
@@ -249,49 +233,24 @@ function isWritePatchConstrainedToWritablePaths(
|
||||
|
||||
function allPathsConstrainedTowritablePaths(
|
||||
candidatePaths: ReadonlyArray<string>,
|
||||
workdir: string | undefined,
|
||||
writableRoots: ReadonlyArray<string>,
|
||||
): boolean {
|
||||
return candidatePaths.every((candidatePath) =>
|
||||
isPathConstrainedTowritablePaths(candidatePath, workdir, writableRoots),
|
||||
isPathConstrainedTowritablePaths(candidatePath, writableRoots),
|
||||
);
|
||||
}
|
||||
|
||||
/** If candidatePath is relative, it will be resolved against cwd. */
|
||||
function isPathConstrainedTowritablePaths(
|
||||
candidatePath: string,
|
||||
workdir: string | undefined,
|
||||
writableRoots: ReadonlyArray<string>,
|
||||
): boolean {
|
||||
const candidateAbsolutePath = resolvePathAgainstWorkdir(
|
||||
candidatePath,
|
||||
workdir,
|
||||
);
|
||||
|
||||
const candidateAbsolutePath = path.resolve(candidatePath);
|
||||
return writableRoots.some((writablePath) =>
|
||||
pathContains(writablePath, candidateAbsolutePath),
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* If not already an absolute path, resolves `candidatePath` against `workdir`
|
||||
* if specified; otherwise, against `process.cwd()`.
|
||||
*/
|
||||
export function resolvePathAgainstWorkdir(
|
||||
candidatePath: string,
|
||||
workdir: string | undefined,
|
||||
): string {
|
||||
// Normalize candidatePath to prevent path traversal attacks
|
||||
const normalizedCandidatePath = path.normalize(candidatePath);
|
||||
if (path.isAbsolute(normalizedCandidatePath)) {
|
||||
return normalizedCandidatePath;
|
||||
} else if (workdir != null) {
|
||||
return path.resolve(workdir, normalizedCandidatePath);
|
||||
} else {
|
||||
return path.resolve(normalizedCandidatePath);
|
||||
}
|
||||
}
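// Quick usage check for the resolution rules above (POSIX paths; the inputs are
// hypothetical, and resolvePathAgainstWorkdir is the export defined just above).
// Absolute candidates are only normalized, while relative ones are anchored to
// the workdir when one is given.
import path from "path";
import { strict as assert } from "node:assert";

assert.equal(
  resolvePathAgainstWorkdir("/tmp/../etc/passwd", "/repo"),
  "/etc/passwd", // ".." collapsed by path.normalize; the workdir is ignored
);
assert.equal(
  resolvePathAgainstWorkdir("src/./main.ts", "/repo"),
  path.resolve("/repo", "src/main.ts"), // relative path rooted at the workdir
);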
|
||||
|
||||
/** Both `parent` and `child` must be absolute paths. */
|
||||
function pathContains(parent: string, child: string): boolean {
|
||||
const relative = path.relative(parent, child);
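// The hunk above is cut off before pathContains returns. The usual way to finish
// a containment check built on path.relative is sketched below; this is an
// assumption for illustration, not necessarily what approvals.ts actually does.
import path from "path";

function pathContainsSketch(parent: string, child: string): boolean {
  const relative = path.relative(parent, child);
  // `child` lies inside `parent` when the relative path neither climbs out of it
  // ("..") nor jumps to a different root (an absolute relative path).
  return !relative.startsWith("..") && !path.isAbsolute(relative);
}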
|
||||
@@ -355,7 +314,7 @@ export function isSafeCommand(
|
||||
};
|
||||
case "true":
|
||||
return {
|
||||
reason: "No-op (true)",
|
||||
reason: "No‑op (true)",
|
||||
group: "Utility",
|
||||
};
|
||||
case "echo":
|
||||
@@ -365,45 +324,16 @@ export function isSafeCommand(
|
||||
reason: "View file contents",
|
||||
group: "Reading files",
|
||||
};
|
||||
case "nl":
|
||||
return {
|
||||
reason: "View file with line numbers",
|
||||
group: "Reading files",
|
||||
};
|
||||
case "rg": {
|
||||
// Certain ripgrep options execute external commands or invoke other
|
||||
// processes, so we must reject them.
|
||||
const isUnsafe = command.some(
|
||||
(arg: string) =>
|
||||
UNSAFE_OPTIONS_FOR_RIPGREP_WITHOUT_ARGS.has(arg) ||
|
||||
[...UNSAFE_OPTIONS_FOR_RIPGREP_WITH_ARGS].some(
|
||||
(opt) => arg === opt || arg.startsWith(`${opt}=`),
|
||||
),
|
||||
);
|
||||
|
||||
if (isUnsafe) {
|
||||
break;
|
||||
}
|
||||
|
||||
case "rg":
|
||||
return {
|
||||
reason: "Ripgrep search",
|
||||
group: "Searching",
|
||||
};
|
||||
}
|
||||
case "find": {
|
||||
// Certain options to `find` allow executing arbitrary processes, so we
|
||||
// cannot auto-approve them.
|
||||
if (
|
||||
command.some((arg: string) => UNSAFE_OPTIONS_FOR_FIND_COMMAND.has(arg))
|
||||
) {
|
||||
break;
|
||||
} else {
|
||||
return {
|
||||
reason: "Find files or directories",
|
||||
group: "Searching",
|
||||
};
|
||||
}
|
||||
}
|
||||
case "find":
|
||||
return {
|
||||
reason: "Find files or directories",
|
||||
group: "Searching",
|
||||
};
|
||||
case "grep":
|
||||
return {
|
||||
reason: "Text search (grep)",
|
||||
@@ -468,15 +398,11 @@ export function isSafeCommand(
|
||||
}
|
||||
break;
|
||||
case "sed":
|
||||
// We allow two types of sed invocations:
|
||||
// 1. `sed -n 1,200p FILE`
|
||||
// 2. `sed -n 1,200p` because the file is passed via stdin, e.g.,
|
||||
// `nl -ba README.md | sed -n '1,200p'`
|
||||
if (
|
||||
cmd1 === "-n" &&
|
||||
isValidSedNArg(cmd2) &&
|
||||
(command.length === 3 ||
|
||||
(typeof cmd3 === "string" && command.length === 4))
|
||||
typeof cmd3 === "string" &&
|
||||
command.length === 4
|
||||
) {
|
||||
return {
|
||||
reason: "Sed print subset",
|
||||
@@ -495,43 +421,12 @@ function isValidSedNArg(arg: string | undefined): boolean {
|
||||
return arg != null && /^(\d+,)?\d+p$/.test(arg);
|
||||
}
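// A few concrete inputs for the pattern accepted above (sketch only; the helper
// is the isValidSedNArg defined in this file):
const sedArgExamples: Array<[string, boolean]> = [
  ["1,200p", true], // range print – allowed
  ["10p", true], // single-line print – allowed
  ["1,200d", false], // delete command – rejected
  ["200", false], // missing trailing "p" – rejected
];
for (const [arg, expected] of sedArgExamples) {
  console.assert(isValidSedNArg(arg) === expected, arg);
}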
|
||||
|
||||
const UNSAFE_OPTIONS_FOR_FIND_COMMAND: ReadonlySet<string> = new Set([
|
||||
// Options that can execute arbitrary commands.
|
||||
"-exec",
|
||||
"-execdir",
|
||||
"-ok",
|
||||
"-okdir",
|
||||
// Option that deletes matching files.
|
||||
"-delete",
|
||||
// Options that write pathnames to a file.
|
||||
"-fls",
|
||||
"-fprint",
|
||||
"-fprint0",
|
||||
"-fprintf",
|
||||
]);
|
||||
|
||||
// Ripgrep options that are considered unsafe because they may execute
|
||||
// arbitrary commands or spawn auxiliary processes.
|
||||
const UNSAFE_OPTIONS_FOR_RIPGREP_WITH_ARGS: ReadonlySet<string> = new Set([
|
||||
// Executes an arbitrary command for each matching file.
|
||||
"--pre",
|
||||
// Allows custom hostname command which could leak environment details.
|
||||
"--hostname-bin",
|
||||
]);
|
||||
|
||||
const UNSAFE_OPTIONS_FOR_RIPGREP_WITHOUT_ARGS: ReadonlySet<string> = new Set([
|
||||
// Enables searching inside archives which triggers external decompression
|
||||
// utilities – reject out of an abundance of caution.
|
||||
"--search-zip",
|
||||
"-z",
|
||||
]);
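// Sketch of the option screening used for the find/rg allow-lists above: flag-only
// options match exactly, while value-taking options also match their "--opt=value"
// spelling. The helper name is illustrative.
function hasUnsafeOptionSketch(
  command: ReadonlyArray<string>,
  unsafeFlags: ReadonlySet<string>,
  unsafeWithArgs: ReadonlySet<string>,
): boolean {
  return command.some(
    (arg) =>
      unsafeFlags.has(arg) ||
      [...unsafeWithArgs].some((opt) => arg === opt || arg.startsWith(`${opt}=`)),
  );
}

// hasUnsafeOptionSketch(["rg", "--pre=cat", "TODO"], new Set(["-z"]), new Set(["--pre"])) === true
// hasUnsafeOptionSketch(["find", ".", "-name", "*.ts"], UNSAFE_OPTIONS_FOR_FIND_COMMAND, new Set()) === false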
|
||||
|
||||
// ---------------- Helper utilities for complex shell expressions -----------------
|
||||
|
||||
// A conservative allow-list of bash operators that do not, on their own, cause
|
||||
// A conservative allow‑list of bash operators that do not, on their own, cause
|
||||
// side effects. Redirections (>, >>, <, etc.) and command substitution `$()`
|
||||
// are intentionally excluded. Parentheses used for grouping are treated as
|
||||
// strings by `shell-quote`, so we do not add them here. Reference:
|
||||
// strings by `shell‑quote`, so we do not add them here. Reference:
|
||||
// https://github.com/substack/node-shell-quote#parsecmd-opts
|
||||
const SAFE_SHELL_OPERATORS: ReadonlySet<string> = new Set([
|
||||
"&&", // logical AND
|
||||
@@ -557,7 +452,7 @@ function isEntireShellExpressionSafe(
|
||||
}
|
||||
|
||||
try {
|
||||
// Collect command segments delimited by operators. `shell-quote` represents
|
||||
// Collect command segments delimited by operators. `shell‑quote` represents
|
||||
// subshell grouping parentheses as literal strings "(" and ")"; treat them
|
||||
// as unsafe to keep the logic simple (since subshells could introduce
|
||||
// unexpected scope changes).
|
||||
@@ -625,7 +520,7 @@ function isParseEntryWithOp(
|
||||
return (
|
||||
typeof entry === "object" &&
|
||||
entry != null &&
|
||||
// Using the safe `in` operator keeps the check property-safe even when
|
||||
// Using the safe `in` operator keeps the check property‑safe even when
|
||||
// `entry` is a `string`.
|
||||
"op" in entry &&
|
||||
typeof (entry as { op?: unknown }).op === "string"
|
||||
|
||||
@@ -1,19 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
import "dotenv/config";
|
||||
|
||||
// Exit early if on an older version of Node.js (< 22)
|
||||
const major = process.versions.node.split(".").map(Number)[0]!;
|
||||
if (major < 22) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"\n" +
|
||||
"Codex CLI requires Node.js version 22 or newer.\n" +
|
||||
`You are running Node.js v${process.versions.node}.\n` +
|
||||
"Please upgrade Node.js: https://nodejs.org/en/download/\n",
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Hack to suppress deprecation warnings (punycode)
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
(process as any).noDeprecation = true;
|
||||
@@ -23,36 +10,30 @@ import type { ApprovalPolicy } from "./approvals";
|
||||
import type { CommandConfirmation } from "./utils/agent/agent-loop";
|
||||
import type { AppConfig } from "./utils/config";
|
||||
import type { ResponseItem } from "openai/resources/responses/responses";
|
||||
import type { ReasoningEffort } from "openai/resources.mjs";
|
||||
|
||||
import App from "./app";
|
||||
import { runSinglePass } from "./cli-singlepass";
|
||||
import SessionsOverlay from "./components/sessions-overlay.js";
|
||||
import { AgentLoop } from "./utils/agent/agent-loop";
|
||||
import { initLogger } from "./utils/agent/log";
|
||||
import { ReviewDecision } from "./utils/agent/review";
|
||||
import { AutoApprovalMode } from "./utils/auto-approval-mode";
|
||||
import { checkForUpdates } from "./utils/check-updates";
|
||||
import {
|
||||
loadConfig,
|
||||
PRETTY_PRINT,
|
||||
INSTRUCTIONS_FILEPATH,
|
||||
} from "./utils/config";
|
||||
import {
|
||||
getApiKey as fetchApiKey,
|
||||
maybeRedeemCredits,
|
||||
} from "./utils/get-api-key";
|
||||
import { createInputItem } from "./utils/input-utils";
|
||||
import { initLogger } from "./utils/logger/log";
|
||||
import { isModelSupportedForResponses } from "./utils/model-utils.js";
|
||||
import {
|
||||
isModelSupportedForResponses,
|
||||
preloadModels,
|
||||
} from "./utils/model-utils.js";
|
||||
import { parseToolCall } from "./utils/parsers";
|
||||
import { providers } from "./utils/providers";
|
||||
import { onExit, setInkRenderer } from "./utils/terminal";
|
||||
import chalk from "chalk";
|
||||
import { spawnSync } from "child_process";
|
||||
import fs from "fs";
|
||||
import { render } from "ink";
|
||||
import meow from "meow";
|
||||
import os from "os";
|
||||
import path from "path";
|
||||
import React from "react";
|
||||
|
||||
@@ -72,16 +53,10 @@ const cli = meow(
|
||||
$ codex completion <bash|zsh|fish>
|
||||
|
||||
Options
|
||||
--version Print version and exit
|
||||
|
||||
-h, --help Show usage and exit
|
||||
-m, --model <model> Model to use for completions (default: codex-mini-latest)
|
||||
-p, --provider <provider> Provider to use for completions (default: openai)
|
||||
-m, --model <model> Model to use for completions (default: o4-mini)
|
||||
-i, --image <path> Path(s) to image files to include as input
|
||||
-v, --view <rollout> Inspect a previously saved rollout instead of starting a session
|
||||
--history Browse previous sessions
|
||||
--login Start a new sign in flow
|
||||
--free Retry redeeming free credits
|
||||
-q, --quiet Non-interactive mode that only prints the assistant's final output
|
||||
-c, --config Open the instructions file in your editor
|
||||
-w, --writable-root <path> Writable folder for sandbox in full-auto mode (can be specified multiple times)
|
||||
@@ -90,19 +65,11 @@ const cli = meow(
|
||||
--auto-edit Automatically approve file edits; still prompt for commands
|
||||
--full-auto Automatically approve edits and commands when executed in the sandbox
|
||||
|
||||
--no-project-doc Do not automatically include the repository's 'AGENTS.md'
|
||||
--no-project-doc Do not automatically include the repository's 'codex.md'
|
||||
--project-doc <file> Include an additional markdown file at <file> as context
|
||||
--full-stdout Do not truncate stdout/stderr from command outputs
|
||||
--notify Enable desktop notifications for responses
|
||||
|
||||
--disable-response-storage Disable server‑side response storage (sends the
|
||||
full conversation context with every request)
|
||||
|
||||
--flex-mode Use "flex-mode" processing mode for the request (only supported
|
||||
with models o3 and o4-mini)
|
||||
|
||||
--reasoning <effort> Set the reasoning effort level (low, medium, high) (default: high)
|
||||
|
||||
Dangerous options
|
||||
--dangerously-auto-approve-everything
|
||||
Skip all confirmation prompts and execute commands without
|
||||
@@ -124,13 +91,8 @@ const cli = meow(
|
||||
flags: {
|
||||
// misc
|
||||
help: { type: "boolean", aliases: ["h"] },
|
||||
version: { type: "boolean", description: "Print version and exit" },
|
||||
view: { type: "string" },
|
||||
history: { type: "boolean", description: "Browse previous sessions" },
|
||||
login: { type: "boolean", description: "Force a new sign in flow" },
|
||||
free: { type: "boolean", description: "Retry redeeming free credits" },
|
||||
model: { type: "string", aliases: ["m"] },
|
||||
provider: { type: "string", aliases: ["p"] },
|
||||
image: { type: "string", isMultiple: true, aliases: ["i"] },
|
||||
quiet: {
|
||||
type: "boolean",
|
||||
@@ -171,41 +133,24 @@ const cli = meow(
|
||||
},
|
||||
noProjectDoc: {
|
||||
type: "boolean",
|
||||
description: "Disable automatic inclusion of project-level AGENTS.md",
|
||||
description: "Disable automatic inclusion of project‑level codex.md",
|
||||
},
|
||||
projectDoc: {
|
||||
type: "string",
|
||||
description: "Path to a markdown file to include as project doc",
|
||||
},
|
||||
flexMode: {
|
||||
type: "boolean",
|
||||
description:
|
||||
"Enable the flex-mode service tier (only supported by models o3 and o4-mini)",
|
||||
},
|
||||
fullStdout: {
|
||||
type: "boolean",
|
||||
description:
|
||||
"Disable truncation of command stdout/stderr messages (show everything)",
|
||||
aliases: ["no-truncate"],
|
||||
},
|
||||
reasoning: {
|
||||
type: "string",
|
||||
description: "Set the reasoning effort level (low, medium, high)",
|
||||
choices: ["low", "medium", "high"],
|
||||
default: "high",
|
||||
},
|
||||
// Notification
|
||||
notify: {
|
||||
type: "boolean",
|
||||
description: "Enable desktop notifications for responses",
|
||||
},
|
||||
|
||||
disableResponseStorage: {
|
||||
type: "boolean",
|
||||
description:
|
||||
"Disable server-side response storage (sends full conversation context with every request)",
|
||||
},
|
||||
|
||||
// Experimental mode where whole directory is loaded in context and model is requested
|
||||
// to make code edits in a single pass.
|
||||
fullContext: {
|
||||
@@ -218,10 +163,6 @@ const cli = meow(
|
||||
},
|
||||
);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Global flag handling
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Handle 'completion' subcommand before any prompting or API calls
|
||||
if (cli.input[0] === "completion") {
|
||||
const shell = cli.input[1] || "bash";
|
||||
@@ -241,7 +182,7 @@ _codex() {
|
||||
}
|
||||
_codex`,
|
||||
fish: `# fish completion for codex
|
||||
complete -c codex -a '(__fish_complete_path)' -d 'file path'`,
|
||||
complete -c codex -a '(_fish_complete_path)' -d 'file path'`,
|
||||
};
|
||||
const script = scripts[shell];
|
||||
if (!script) {
|
||||
@@ -253,20 +194,19 @@ complete -c codex -a '(__fish_complete_path)' -d 'file path'`,
|
||||
console.log(script);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
// For --help, show help and exit.
|
||||
// Show help if requested
|
||||
if (cli.flags.help) {
|
||||
cli.showHelp();
|
||||
}
|
||||
|
||||
// For --config, open custom instructions file in editor and exit.
|
||||
// Handle config flag: open instructions file in editor and exit
|
||||
if (cli.flags.config) {
|
||||
// Ensure configuration and instructions file exist
|
||||
try {
|
||||
loadConfig(); // Ensures the file is created if it doesn't already exist.
|
||||
loadConfig();
|
||||
} catch {
|
||||
// ignore errors
|
||||
}
|
||||
|
||||
const filePath = INSTRUCTIONS_FILEPATH;
|
||||
const editor =
|
||||
process.env["EDITOR"] || (process.platform === "win32" ? "notepad" : "vi");
|
||||
@@ -278,194 +218,45 @@ if (cli.flags.config) {
|
||||
// API key handling
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const fullContextMode = Boolean(cli.flags.fullContext);
|
||||
let config = loadConfig(undefined, undefined, {
|
||||
cwd: process.cwd(),
|
||||
disableProjectDoc: Boolean(cli.flags.noProjectDoc),
|
||||
projectDocPath: cli.flags.projectDoc,
|
||||
isFullContext: fullContextMode,
|
||||
});
|
||||
const apiKey = process.env["OPENAI_API_KEY"];
|
||||
|
||||
// `prompt` can be updated later when the user resumes a previous session
|
||||
// via the `--history` flag. Therefore it must be declared with `let` rather
|
||||
// than `const`.
|
||||
let prompt = cli.input[0];
|
||||
const model = cli.flags.model ?? config.model;
|
||||
const imagePaths = cli.flags.image;
|
||||
const provider = cli.flags.provider ?? config.provider ?? "openai";
|
||||
|
||||
const client = {
|
||||
issuer: "https://auth.openai.com",
|
||||
client_id: "app_EMoamEEZ73f0CkXaXp7hrann",
|
||||
};
|
||||
|
||||
let apiKey = "";
|
||||
let savedTokens:
|
||||
| {
|
||||
id_token?: string;
|
||||
access_token?: string;
|
||||
refresh_token: string;
|
||||
}
|
||||
| undefined;
|
||||
|
||||
// Try to load existing auth file if present
|
||||
try {
|
||||
const home = os.homedir();
|
||||
const authDir = path.join(home, ".codex");
|
||||
const authFile = path.join(authDir, "auth.json");
|
||||
if (fs.existsSync(authFile)) {
|
||||
const data = JSON.parse(fs.readFileSync(authFile, "utf-8"));
|
||||
savedTokens = data.tokens;
|
||||
const lastRefreshTime = data.last_refresh
|
||||
? new Date(data.last_refresh).getTime()
|
||||
: 0;
|
||||
const expired = Date.now() - lastRefreshTime > 28 * 24 * 60 * 60 * 1000;
|
||||
if (data.OPENAI_API_KEY && !expired) {
|
||||
apiKey = data.OPENAI_API_KEY;
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// ignore errors
|
||||
}
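// The cached-key expiry rule applied above, pulled out as a small helper for
// clarity. The 28-day window and the auth.json fields are the ones used in the
// code above; the helper name is illustrative.
const KEY_MAX_AGE_MS = 28 * 24 * 60 * 60 * 1000;

function isCachedKeyFresh(
  lastRefresh: string | undefined,
  now: number = Date.now(),
): boolean {
  const lastRefreshTime = lastRefresh ? new Date(lastRefresh).getTime() : 0;
  return now - lastRefreshTime <= KEY_MAX_AGE_MS;
}

// isCachedKeyFresh("2020-01-01T00:00:00Z") === false  (older than 28 days)
// isCachedKeyFresh(new Date().toISOString()) === true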
|
||||
|
||||
// Get provider-specific API key if not OpenAI
|
||||
if (provider.toLowerCase() !== "openai") {
|
||||
const providerInfo = providers[provider.toLowerCase()];
|
||||
if (providerInfo) {
|
||||
const providerApiKey = process.env[providerInfo.envKey];
|
||||
if (providerApiKey) {
|
||||
apiKey = providerApiKey;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Only proceed with OpenAI auth flow if:
|
||||
// 1. Provider is OpenAI and no API key is set, or
|
||||
// 2. Login flag is explicitly set
|
||||
if (provider.toLowerCase() === "openai" && !apiKey) {
|
||||
if (cli.flags.login) {
|
||||
apiKey = await fetchApiKey(client.issuer, client.client_id);
|
||||
try {
|
||||
const home = os.homedir();
|
||||
const authDir = path.join(home, ".codex");
|
||||
const authFile = path.join(authDir, "auth.json");
|
||||
if (fs.existsSync(authFile)) {
|
||||
const data = JSON.parse(fs.readFileSync(authFile, "utf-8"));
|
||||
savedTokens = data.tokens;
|
||||
}
|
||||
} catch {
|
||||
/* ignore */
|
||||
}
|
||||
} else {
|
||||
apiKey = await fetchApiKey(client.issuer, client.client_id);
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the API key is available as an environment variable for legacy code
|
||||
process.env["OPENAI_API_KEY"] = apiKey;
|
||||
|
||||
// Only attempt credit redemption for OpenAI provider
|
||||
if (cli.flags.free && provider.toLowerCase() === "openai") {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(`${chalk.bold("codex --free")} attempting to redeem credits...`);
|
||||
if (!savedTokens?.refresh_token) {
|
||||
apiKey = await fetchApiKey(client.issuer, client.client_id, true);
|
||||
// fetchApiKey includes credit redemption as the end of the flow
|
||||
} else {
|
||||
await maybeRedeemCredits(
|
||||
client.issuer,
|
||||
client.client_id,
|
||||
savedTokens.refresh_token,
|
||||
savedTokens.id_token,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Set of providers that don't require API keys
|
||||
const NO_API_KEY_REQUIRED = new Set(["ollama"]);
|
||||
|
||||
// Skip API key validation for providers that don't require an API key
|
||||
if (!apiKey && !NO_API_KEY_REQUIRED.has(provider.toLowerCase())) {
|
||||
if (!apiKey) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
`\n${chalk.red(`Missing ${provider} API key.`)}\n\n` +
|
||||
`Set the environment variable ${chalk.bold(
|
||||
`${provider.toUpperCase()}_API_KEY`,
|
||||
)} ` +
|
||||
`\n${chalk.red("Missing OpenAI API key.")}\n\n` +
|
||||
`Set the environment variable ${chalk.bold("OPENAI_API_KEY")} ` +
|
||||
`and re-run this command.\n` +
|
||||
`${
|
||||
provider.toLowerCase() === "openai"
|
||||
? `You can create a key here: ${chalk.bold(
|
||||
chalk.underline("https://platform.openai.com/account/api-keys"),
|
||||
)}\n`
|
||||
: provider.toLowerCase() === "azure"
|
||||
? `You can create a ${chalk.bold(
|
||||
`${provider.toUpperCase()}_OPENAI_API_KEY`,
|
||||
)} ` +
|
||||
`in Azure AI Foundry portal at ${chalk.bold(chalk.underline("https://ai.azure.com"))}.\n`
|
||||
: provider.toLowerCase() === "gemini"
|
||||
? `You can create a ${chalk.bold(
|
||||
`${provider.toUpperCase()}_API_KEY`,
|
||||
)} ` + `in the ${chalk.bold(`Google AI Studio`)}.\n`
|
||||
: `You can create a ${chalk.bold(
|
||||
`${provider.toUpperCase()}_API_KEY`,
|
||||
)} ` + `in the ${chalk.bold(`${provider}`)} dashboard.\n`
|
||||
}`,
|
||||
`You can create a key here: ${chalk.bold(
|
||||
chalk.underline("https://platform.openai.com/account/api-keys"),
|
||||
)}\n`,
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const flagPresent = Object.hasOwn(cli.flags, "disableResponseStorage");
|
||||
const fullContextMode = Boolean(cli.flags.fullContext);
|
||||
let config = loadConfig(undefined, undefined, {
|
||||
cwd: process.cwd(),
|
||||
disableProjectDoc: Boolean(cli.flags.noProjectDoc),
|
||||
projectDocPath: cli.flags.projectDoc as string | undefined,
|
||||
isFullContext: fullContextMode,
|
||||
});
|
||||
|
||||
const disableResponseStorage = flagPresent
|
||||
? Boolean(cli.flags.disableResponseStorage) // value user actually passed
|
||||
: (config.disableResponseStorage ?? false); // fall back to YAML, default to false
|
||||
const prompt = cli.input[0];
|
||||
const model = cli.flags.model;
|
||||
const imagePaths = cli.flags.image as Array<string> | undefined;
|
||||
|
||||
config = {
|
||||
apiKey,
|
||||
...config,
|
||||
model: model ?? config.model,
|
||||
notify: Boolean(cli.flags.notify),
|
||||
reasoningEffort:
|
||||
(cli.flags.reasoning as ReasoningEffort | undefined) ?? "medium",
|
||||
flexMode: cli.flags.flexMode || (config.flexMode ?? false),
|
||||
provider,
|
||||
disableResponseStorage,
|
||||
};
|
||||
|
||||
// Check for updates after loading config. This is important because we write state file in
|
||||
// the config dir.
|
||||
try {
|
||||
await checkForUpdates();
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
|
||||
// For --flex-mode, validate and exit if incorrect.
|
||||
if (config.flexMode) {
|
||||
const allowedFlexModels = new Set(["o3", "o4-mini"]);
|
||||
if (!allowedFlexModels.has(config.model)) {
|
||||
if (cli.flags.flexMode) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
`The --flex-mode option is only supported when using the 'o3' or 'o4-mini' models. ` +
|
||||
`Current model: '${config.model}'.`,
|
||||
);
|
||||
process.exit(1);
|
||||
} else {
|
||||
config.flexMode = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (
|
||||
!(await isModelSupportedForResponses(provider, config.model)) &&
|
||||
(!provider || provider.toLowerCase() === "openai")
|
||||
) {
|
||||
if (!(await isModelSupportedForResponses(config.model))) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
`The model "${config.model}" does not appear in the list of models ` +
|
||||
`available to your account. Double-check the spelling (use\n` +
|
||||
`available to your account. Double‑check the spelling (use\n` +
|
||||
` openai models list\n` +
|
||||
`to see the full list) or choose another model with the --model flag.`,
|
||||
);
|
||||
@@ -474,47 +265,6 @@ if (
|
||||
|
||||
let rollout: AppRollout | undefined;
|
||||
|
||||
// For --history, show session selector and optionally update prompt or rollout.
|
||||
if (cli.flags.history) {
|
||||
const result: { path: string; mode: "view" | "resume" } | null =
|
||||
await new Promise((resolve) => {
|
||||
const instance = render(
|
||||
React.createElement(SessionsOverlay, {
|
||||
onView: (p: string) => {
|
||||
instance.unmount();
|
||||
resolve({ path: p, mode: "view" });
|
||||
},
|
||||
onResume: (p: string) => {
|
||||
instance.unmount();
|
||||
resolve({ path: p, mode: "resume" });
|
||||
},
|
||||
onExit: () => {
|
||||
instance.unmount();
|
||||
resolve(null);
|
||||
},
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
if (!result) {
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
if (result.mode === "view") {
|
||||
try {
|
||||
const content = fs.readFileSync(result.path, "utf-8");
|
||||
rollout = JSON.parse(content) as AppRollout;
|
||||
} catch (error) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error("Error reading session file:", error);
|
||||
process.exit(1);
|
||||
}
|
||||
} else {
|
||||
prompt = `Resume this session: ${result.path}`;
|
||||
}
|
||||
}
|
||||
|
||||
// For --view, optionally load an existing rollout from disk, display it and exit.
|
||||
if (cli.flags.view) {
|
||||
const viewPath = cli.flags.view;
|
||||
const absolutePath = path.isAbsolute(viewPath)
|
||||
@@ -530,7 +280,7 @@ if (cli.flags.view) {
|
||||
}
|
||||
}
|
||||
|
||||
// For --fullcontext, run the separate cli entrypoint and exit.
|
||||
// If we are running in --fullcontext mode, do that and exit.
|
||||
if (fullContextMode) {
|
||||
await runSinglePass({
|
||||
originalPrompt: prompt,
|
||||
@@ -546,8 +296,14 @@ const additionalWritableRoots: ReadonlyArray<string> = (
|
||||
cli.flags.writableRoot ?? []
|
||||
).map((p) => path.resolve(p));
|
||||
|
||||
// For --quiet, run the cli without user interactions and exit.
|
||||
if (cli.flags.quiet) {
|
||||
// If we are running in --quiet mode, do that and exit.
|
||||
const quietMode = Boolean(cli.flags.quiet);
|
||||
const autoApproveEverything = Boolean(
|
||||
cli.flags.dangerouslyAutoApproveEverything,
|
||||
);
|
||||
const fullStdout = Boolean(cli.flags.fullStdout);
|
||||
|
||||
if (quietMode) {
|
||||
process.env["CODEX_QUIET_MODE"] = "1";
|
||||
if (!prompt || prompt.trim() === "") {
|
||||
// eslint-disable-next-line no-console
|
||||
@@ -556,19 +312,12 @@ if (cli.flags.quiet) {
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Determine approval policy for quiet mode based on flags
|
||||
const quietApprovalPolicy: ApprovalPolicy =
|
||||
cli.flags.fullAuto || cli.flags.approvalMode === "full-auto"
|
||||
? AutoApprovalMode.FULL_AUTO
|
||||
: cli.flags.autoEdit || cli.flags.approvalMode === "auto-edit"
|
||||
? AutoApprovalMode.AUTO_EDIT
|
||||
: config.approvalMode || AutoApprovalMode.SUGGEST;
|
||||
|
||||
await runQuietMode({
|
||||
prompt,
|
||||
prompt: prompt as string,
|
||||
imagePaths: imagePaths || [],
|
||||
approvalPolicy: quietApprovalPolicy,
|
||||
approvalPolicy: autoApproveEverything
|
||||
? AutoApprovalMode.FULL_AUTO
|
||||
: AutoApprovalMode.SUGGEST,
|
||||
additionalWritableRoots,
|
||||
config,
|
||||
});
|
||||
@@ -586,15 +335,16 @@ if (cli.flags.quiet) {
|
||||
// it is more dangerous than --fullAuto we deliberately give it lower
|
||||
// priority so a user specifying both flags still gets the safer behaviour.
|
||||
// 3. --autoEdit – automatically approve edits, but prompt for commands.
|
||||
// 4. config.approvalMode - use the approvalMode setting from ~/.codex/config.json.
|
||||
// 5. Default – suggest mode (prompt for everything).
|
||||
// 4. Default – suggest mode (prompt for everything).
|
||||
|
||||
const approvalPolicy: ApprovalPolicy =
|
||||
cli.flags.fullAuto || cli.flags.approvalMode === "full-auto"
|
||||
? AutoApprovalMode.FULL_AUTO
|
||||
: cli.flags.autoEdit || cli.flags.approvalMode === "auto-edit"
|
||||
? AutoApprovalMode.AUTO_EDIT
|
||||
: config.approvalMode || AutoApprovalMode.SUGGEST;
|
||||
? AutoApprovalMode.AUTO_EDIT
|
||||
: AutoApprovalMode.SUGGEST;
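// Sketch of the precedence spelled out in the comment above: explicit flags win,
// then the approvalMode from the config file, then the "suggest" default. The
// function name is illustrative; the real code inlines this as the ternary above.
function resolveApprovalPolicySketch(
  flags: { fullAuto?: boolean; autoEdit?: boolean; approvalMode?: string },
  configApprovalMode?: ApprovalPolicy,
): ApprovalPolicy {
  if (flags.fullAuto || flags.approvalMode === "full-auto") {
    return AutoApprovalMode.FULL_AUTO;
  }
  if (flags.autoEdit || flags.approvalMode === "auto-edit") {
    return AutoApprovalMode.AUTO_EDIT;
  }
  return configApprovalMode ?? AutoApprovalMode.SUGGEST;
}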
|
||||
|
||||
preloadModels();
|
||||
|
||||
const instance = render(
|
||||
<App
|
||||
@@ -604,7 +354,7 @@ const instance = render(
|
||||
imagePaths={imagePaths}
|
||||
approvalPolicy={approvalPolicy}
|
||||
additionalWritableRoots={additionalWritableRoots}
|
||||
fullStdout={Boolean(cli.flags.fullStdout)}
|
||||
fullStdout={fullStdout}
|
||||
/>,
|
||||
{
|
||||
patchConsole: process.env["DEBUG"] ? false : true,
|
||||
@@ -678,10 +428,8 @@ async function runQuietMode({
|
||||
model: config.model,
|
||||
config: config,
|
||||
instructions: config.instructions,
|
||||
provider: config.provider,
|
||||
approvalPolicy,
|
||||
additionalWritableRoots,
|
||||
disableResponseStorage: config.disableResponseStorage,
|
||||
onItem: (item: ResponseItem) => {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(formatResponseItemForQuietMode(item));
|
||||
@@ -692,12 +440,7 @@ async function runQuietMode({
|
||||
getCommandConfirmation: (
|
||||
_command: Array<string>,
|
||||
): Promise<CommandConfirmation> => {
|
||||
// In quiet mode, default to NO_CONTINUE, except when in full-auto mode
|
||||
const reviewDecision =
|
||||
approvalPolicy === AutoApprovalMode.FULL_AUTO
|
||||
? ReviewDecision.YES
|
||||
: ReviewDecision.NO_CONTINUE;
|
||||
return Promise.resolve({ review: reviewDecision });
|
||||
return Promise.resolve({ review: ReviewDecision.NO_CONTINUE });
|
||||
},
|
||||
onLastResponseId: () => {
|
||||
/* intentionally ignored in quiet mode */
|
||||
@@ -718,13 +461,13 @@ process.on("SIGQUIT", exit);
|
||||
process.on("SIGTERM", exit);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fallback for Ctrl-C when stdin is in raw-mode
|
||||
// Fallback for Ctrl‑C when stdin is in raw‑mode
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
if (process.stdin.isTTY) {
|
||||
// Ensure we do not leave the terminal in raw mode if the user presses
|
||||
// Ctrl-C while some other component has focus and Ink is intercepting
|
||||
// input. Node does *not* emit a SIGINT in raw-mode, so we listen for the
|
||||
// Ctrl‑C while some other component has focus and Ink is intercepting
|
||||
// input. Node does *not* emit a SIGINT in raw‑mode, so we listen for the
|
||||
// corresponding byte (0x03) ourselves and trigger a graceful shutdown.
|
||||
const onRawData = (data: Buffer | string): void => {
|
||||
const str = Buffer.isBuffer(data) ? data.toString("utf8") : data;
|
||||
@@ -735,6 +478,6 @@ if (process.stdin.isTTY) {
|
||||
process.stdin.on("data", onRawData);
|
||||
}
|
||||
|
||||
// Ensure terminal clean-up always runs, even when other code calls
|
||||
// Ensure terminal clean‑up always runs, even when other code calls
|
||||
// `process.exit()` directly.
|
||||
process.once("exit", onExit);
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import type { TerminalHeaderProps } from "./terminal-header.js";
|
||||
import type { GroupedResponseItem } from "./use-message-grouping.js";
|
||||
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
|
||||
import type { FileOpenerScheme } from "src/utils/config.js";
|
||||
|
||||
import TerminalChatResponseItem from "./terminal-chat-response-item.js";
|
||||
import TerminalHeader from "./terminal-header.js";
|
||||
@@ -20,13 +19,11 @@ type MessageHistoryProps = {
|
||||
confirmationPrompt: React.ReactNode;
|
||||
loading: boolean;
|
||||
headerProps: TerminalHeaderProps;
|
||||
fileOpener: FileOpenerScheme | undefined;
|
||||
};
|
||||
|
||||
const MessageHistory: React.FC<MessageHistoryProps> = ({
|
||||
batch,
|
||||
headerProps,
|
||||
fileOpener,
|
||||
}) => {
|
||||
const messages = batch.map(({ item }) => item!);
|
||||
|
||||
@@ -71,10 +68,7 @@ const MessageHistory: React.FC<MessageHistoryProps> = ({
|
||||
message.type === "message" && message.role === "user" ? 0 : 1
|
||||
}
|
||||
>
|
||||
<TerminalChatResponseItem
|
||||
item={message}
|
||||
fileOpener={fileOpener}
|
||||
/>
|
||||
<TerminalChatResponseItem item={message} />
|
||||
</Box>
|
||||
);
|
||||
}}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
import { useTerminalSize } from "../../hooks/use-terminal-size";
|
||||
import TextBuffer from "../../text-buffer.js";
|
||||
import chalk from "chalk";
|
||||
import { Box, Text, useInput } from "ink";
|
||||
import { Box, Text, useInput, useStdin } from "ink";
|
||||
import { EventEmitter } from "node:events";
|
||||
import React, { useRef, useState } from "react";
|
||||
|
||||
@@ -14,7 +14,7 @@ import React, { useRef, useState } from "react";
|
||||
* The real `process.stdin` object exposed by Node.js inherits these methods
|
||||
* from `Socket`, but the lightweight stub used in tests only extends
|
||||
* `EventEmitter`. Ink calls the two methods when enabling/disabling raw
|
||||
* mode, so make them harmless no-ops when they're absent to avoid runtime
|
||||
* mode, so make them harmless no‑ops when they're absent to avoid runtime
|
||||
* failures during unit tests.
|
||||
* ----------------------------------------------------------------------- */
|
||||
|
||||
@@ -137,9 +137,6 @@ export interface MultilineTextEditorProps {
|
||||
|
||||
// Called when the internal text buffer updates.
|
||||
readonly onChange?: (text: string) => void;
|
||||
|
||||
// Optional initial cursor position (character offset)
|
||||
readonly initialCursorOffset?: number;
|
||||
}
|
||||
|
||||
// Expose a minimal imperative API so parent components (e.g. TerminalChatInput)
|
||||
@@ -158,8 +155,6 @@ export interface MultilineTextEditorHandle {
|
||||
isCursorAtLastRow(): boolean;
|
||||
/** Full text contents */
|
||||
getText(): string;
|
||||
/** Move the cursor to the end of the text */
|
||||
moveCursorToEnd(): void;
|
||||
}
|
||||
|
||||
const MultilineTextEditorInner = (
|
||||
@@ -172,7 +167,6 @@ const MultilineTextEditorInner = (
|
||||
onSubmit,
|
||||
focus = true,
|
||||
onChange,
|
||||
initialCursorOffset,
|
||||
}: MultilineTextEditorProps,
|
||||
ref: React.Ref<MultilineTextEditorHandle | null>,
|
||||
): React.ReactElement => {
|
||||
@@ -180,7 +174,7 @@ const MultilineTextEditorInner = (
|
||||
// Editor State
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const buffer = useRef(new TextBuffer(initialText, initialCursorOffset));
|
||||
const buffer = useRef(new TextBuffer(initialText));
|
||||
const [version, setVersion] = useState(0);
|
||||
|
||||
// Keep track of the current terminal size so that the editor grows/shrinks
|
||||
@@ -193,6 +187,41 @@ const MultilineTextEditorInner = (
|
||||
// minimum so that the UI never becomes unusably small.
|
||||
const effectiveWidth = Math.max(20, width ?? terminalSize.columns);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// External editor integration helpers.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Access to stdin so we can toggle raw‑mode while the external editor is
|
||||
// in control of the terminal.
|
||||
const { stdin, setRawMode } = useStdin();
|
||||
|
||||
/**
|
||||
* Launch the user's preferred $EDITOR, blocking until they close it, then
|
||||
* reload the edited file back into the in‑memory TextBuffer. The heavy
|
||||
* work is delegated to `TextBuffer.openInExternalEditor`, but we are
|
||||
* responsible for temporarily *disabling* raw mode so the child process can
|
||||
* interact with the TTY normally.
|
||||
*/
|
||||
const openExternalEditor = React.useCallback(async () => {
|
||||
// Preserve the current raw‑mode setting so we can restore it afterwards.
|
||||
const wasRaw = stdin?.isRaw ?? false;
|
||||
try {
|
||||
setRawMode?.(false);
|
||||
await buffer.current.openInExternalEditor();
|
||||
} catch (err) {
|
||||
// Surface the error so it doesn't fail silently – for now we log to
|
||||
// stderr. In the future this could surface a toast / overlay.
|
||||
// eslint-disable-next-line no-console
|
||||
console.error("[MultilineTextEditor] external editor error", err);
|
||||
} finally {
|
||||
if (wasRaw) {
|
||||
setRawMode?.(true);
|
||||
}
|
||||
// Force a re‑render so the component reflects the mutated buffer.
|
||||
setVersion((v) => v + 1);
|
||||
}
|
||||
}, [buffer, stdin, setRawMode]);
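// Standalone sketch of the raw-mode hand-off described above: remember whether
// stdin was raw, give the TTY to $EDITOR, then restore. The temp-file plumbing
// that TextBuffer.openInExternalEditor performs is elided; the editor fallback
// mirrors the one used elsewhere in the CLI.
import { spawnSync } from "node:child_process";

function editFileInExternalEditorSketch(
  file: string,
  setRawMode: ((value: boolean) => void) | undefined,
  wasRaw: boolean,
): void {
  const editor =
    process.env["EDITOR"] || (process.platform === "win32" ? "notepad" : "vi");
  try {
    setRawMode?.(false); // let the editor own the terminal
    spawnSync(editor, [file], { stdio: "inherit" }); // block until the editor exits
  } finally {
    if (wasRaw) {
      setRawMode?.(true); // hand raw mode back to Ink
    }
  }
}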
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Keyboard handling.
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -203,6 +232,25 @@ const MultilineTextEditorInner = (
|
||||
return;
|
||||
}
|
||||
|
||||
// Single‑step editor shortcut: Ctrl+X or Ctrl+E
|
||||
// Treat both true Ctrl+Key combinations *and* raw control codes so that
|
||||
// the shortcut works consistently in real terminals (raw‑mode) and the
|
||||
// ink‑testing‑library stub which delivers only the raw byte (e.g. 0x05
|
||||
// for Ctrl‑E) without setting `key.ctrl`.
|
||||
const isCtrlX =
|
||||
(key.ctrl && (input === "x" || input === "\x18")) || input === "\x18";
|
||||
const isCtrlE =
|
||||
(key.ctrl && (input === "e" || input === "\x05")) ||
|
||||
input === "\x05" ||
|
||||
(!key.ctrl &&
|
||||
input === "e" &&
|
||||
input.length === 1 &&
|
||||
input.charCodeAt(0) === 5);
|
||||
if (isCtrlX || isCtrlE) {
|
||||
openExternalEditor();
|
||||
return;
|
||||
}
|
||||
|
||||
if (
|
||||
process.env["TEXTBUFFER_DEBUG"] === "1" ||
|
||||
process.env["TEXTBUFFER_DEBUG"] === "true"
|
||||
@@ -211,47 +259,25 @@ const MultilineTextEditorInner = (
|
||||
console.log("[MultilineTextEditor] event", { input, key });
|
||||
}
|
||||
|
||||
// 1a) CSI-u / modifyOtherKeys *mode 2* (Ink strips initial ESC, so we
|
||||
// start with '[') – format: "[<code>;<modifiers>u".
|
||||
// 1) CSI‑u / modifyOtherKeys (Ink strips initial ESC, so we start with '[')
|
||||
if (input.startsWith("[") && input.endsWith("u")) {
|
||||
const m = input.match(/^\[([0-9]+);([0-9]+)u$/);
|
||||
if (m && m[1] === "13") {
|
||||
const mod = Number(m[2]);
|
||||
// In xterm's encoding: bit-1 (value 2) is Shift. Everything >1 that
|
||||
// isn't exactly 1 means some modifier was held. We treat *shift or
|
||||
// alt present* (2,3,4,6,8,9) as newline; Ctrl (bit-2 / value 4)
|
||||
// triggers submit. See xterm/DEC modifyOtherKeys docs.
|
||||
// In xterm's encoding: bit‑1 (value 2) is Shift. Everything >1 that
|
||||
// isn't exactly 1 means some modifier was held. We treat *shift
|
||||
// present* (2,4,6,8) as newline; plain (1) as submit.
|
||||
|
||||
// Xterm encodes modifier keys in `mod` – bit‑2 (value 4) indicates
|
||||
// that Ctrl was held. We avoid the `&` bitwise operator (disallowed
|
||||
// by our ESLint config) by using arithmetic instead.
|
||||
const hasCtrl = Math.floor(mod / 4) % 2 === 1;
|
||||
if (hasCtrl) {
|
||||
if (onSubmit) {
|
||||
onSubmit(buffer.current.getText());
|
||||
}
|
||||
} else {
|
||||
buffer.current.newline();
|
||||
}
|
||||
setVersion((v) => v + 1);
|
||||
return;
|
||||
}
|
||||
}
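// The branches above lean on xterm's modifyOtherKeys / CSI-u convention, where
// the modifier parameter is 1 plus a bitmask (Shift=1, Alt=2, Ctrl=4, Meta=8).
// A small decoder, using arithmetic instead of bitwise operators in the same
// spirit as the code above (sketch only):
function decodeXtermModifiersSketch(param: number): {
  shift: boolean;
  alt: boolean;
  ctrl: boolean;
  meta: boolean;
} {
  const mask = Math.max(0, param - 1);
  return {
    shift: mask % 2 === 1,
    alt: Math.floor(mask / 2) % 2 === 1,
    ctrl: Math.floor(mask / 4) % 2 === 1,
    meta: Math.floor(mask / 8) % 2 === 1,
  };
}

// decodeXtermModifiersSketch(5) => { shift: false, alt: false, ctrl: true, meta: false }
// so "[13;5u" reads as Ctrl+Enter and "[13;2u" as Shift+Enter.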
|
||||
|
||||
// 1b) CSI-~ / modifyOtherKeys *mode 1* – format: "[27;<mod>;<code>~".
|
||||
// Terminals such as iTerm2 (default), older xterm versions, or when
|
||||
// modifyOtherKeys=1 is configured, emit this legacy sequence. We
|
||||
// translate it to the same behaviour as the mode‑2 variant above so
|
||||
// that Shift+Enter (newline) / Ctrl+Enter (submit) work regardless
|
||||
// of the user’s terminal settings.
|
||||
if (input.startsWith("[27;") && input.endsWith("~")) {
|
||||
const m = input.match(/^\[27;([0-9]+);13~$/);
|
||||
if (m) {
|
||||
const mod = Number(m[1]);
|
||||
const hasCtrl = Math.floor(mod / 4) % 2 === 1;
|
||||
|
||||
if (hasCtrl) {
|
||||
if (onSubmit) {
|
||||
onSubmit(buffer.current.getText());
|
||||
}
|
||||
} else {
|
||||
// Any variant without Ctrl just inserts newline (Shift, Alt, none)
|
||||
buffer.current.newline();
|
||||
}
|
||||
setVersion((v) => v + 1);
|
||||
@@ -324,16 +350,6 @@ const MultilineTextEditorInner = (
|
||||
return row === lineCount - 1;
|
||||
},
|
||||
getText: () => buffer.current.getText(),
|
||||
moveCursorToEnd: () => {
|
||||
buffer.current.move("home");
|
||||
const lines = buffer.current.getText().split("\n");
|
||||
for (let i = 0; i < lines.length - 1; i++) {
|
||||
buffer.current.move("down");
|
||||
}
|
||||
buffer.current.move("end");
|
||||
// Force a re-render
|
||||
setVersion((v) => v + 1);
|
||||
},
|
||||
}),
|
||||
[],
|
||||
);
|
||||
@@ -389,4 +405,5 @@ const MultilineTextEditorInner = (
|
||||
};
|
||||
|
||||
const MultilineTextEditor = React.forwardRef(MultilineTextEditorInner);
|
||||
|
||||
export default MultilineTextEditor;
|
||||
|
||||
@@ -15,18 +15,11 @@ const DEFAULT_DENY_MESSAGE =
|
||||
export function TerminalChatCommandReview({
|
||||
confirmationPrompt,
|
||||
onReviewCommand,
|
||||
// callback to switch approval mode overlay
|
||||
onSwitchApprovalMode,
|
||||
explanation: propExplanation,
|
||||
// whether this review Select is active (listening for keys)
|
||||
isActive = true,
|
||||
}: {
|
||||
confirmationPrompt: React.ReactNode;
|
||||
onReviewCommand: (decision: ReviewDecision, customMessage?: string) => void;
|
||||
onSwitchApprovalMode: () => void;
|
||||
explanation?: string;
|
||||
// when false, disable the underlying Select so it won't capture input
|
||||
isActive?: boolean;
|
||||
}): React.ReactElement {
|
||||
const [mode, setMode] = React.useState<"select" | "input" | "explanation">(
|
||||
"select",
|
||||
@@ -77,7 +70,6 @@ export function TerminalChatCommandReview({
|
||||
const opts: Array<
|
||||
| { label: string; value: ReviewDecision }
|
||||
| { label: string; value: "edit" }
|
||||
| { label: string; value: "switch" }
|
||||
> = [
|
||||
{
|
||||
label: "Yes (y)",
|
||||
@@ -101,11 +93,6 @@ export function TerminalChatCommandReview({
|
||||
label: "Edit or give feedback (e)",
|
||||
value: "edit",
|
||||
},
|
||||
// allow switching approval mode
|
||||
{
|
||||
label: "Switch approval mode (s)",
|
||||
value: "switch",
|
||||
},
|
||||
{
|
||||
label: "No, and keep going (n)",
|
||||
value: ReviewDecision.NO_CONTINUE,
|
||||
@@ -119,50 +106,44 @@ export function TerminalChatCommandReview({
|
||||
return opts;
|
||||
}, [showAlwaysApprove]);
|
||||
|
||||
useInput(
|
||||
(input, key) => {
|
||||
if (mode === "select") {
|
||||
if (input === "y") {
|
||||
onReviewCommand(ReviewDecision.YES);
|
||||
} else if (input === "x") {
|
||||
onReviewCommand(ReviewDecision.EXPLAIN);
|
||||
} else if (input === "e") {
|
||||
setMode("input");
|
||||
} else if (input === "n") {
|
||||
onReviewCommand(
|
||||
ReviewDecision.NO_CONTINUE,
|
||||
"Don't do that, keep going though",
|
||||
);
|
||||
} else if (input === "a" && showAlwaysApprove) {
|
||||
onReviewCommand(ReviewDecision.ALWAYS);
|
||||
} else if (input === "s") {
|
||||
// switch approval mode
|
||||
onSwitchApprovalMode();
|
||||
} else if (key.escape) {
|
||||
onReviewCommand(ReviewDecision.NO_EXIT);
|
||||
}
|
||||
} else if (mode === "explanation") {
|
||||
// When in explanation mode, any key returns to select mode
|
||||
if (key.return || key.escape || input === "x") {
|
||||
setMode("select");
|
||||
}
|
||||
} else {
|
||||
// text entry mode
|
||||
if (key.return) {
|
||||
// if user hit enter on empty msg, fall back to DEFAULT_DENY_MESSAGE
|
||||
const custom = msg.trim() === "" ? DEFAULT_DENY_MESSAGE : msg;
|
||||
onReviewCommand(ReviewDecision.NO_CONTINUE, custom);
|
||||
} else if (key.escape) {
|
||||
// treat escape as denial with default message as well
|
||||
onReviewCommand(
|
||||
ReviewDecision.NO_CONTINUE,
|
||||
msg.trim() === "" ? DEFAULT_DENY_MESSAGE : msg,
|
||||
);
|
||||
}
|
||||
useInput((input, key) => {
|
||||
if (mode === "select") {
|
||||
if (input === "y") {
|
||||
onReviewCommand(ReviewDecision.YES);
|
||||
} else if (input === "x") {
|
||||
onReviewCommand(ReviewDecision.EXPLAIN);
|
||||
} else if (input === "e") {
|
||||
setMode("input");
|
||||
} else if (input === "n") {
|
||||
onReviewCommand(
|
||||
ReviewDecision.NO_CONTINUE,
|
||||
"Don't do that, keep going though",
|
||||
);
|
||||
} else if (input === "a" && showAlwaysApprove) {
|
||||
onReviewCommand(ReviewDecision.ALWAYS);
|
||||
} else if (key.escape) {
|
||||
onReviewCommand(ReviewDecision.NO_EXIT);
|
||||
}
|
||||
},
|
||||
{ isActive },
|
||||
);
|
||||
} else if (mode === "explanation") {
|
||||
// When in explanation mode, any key returns to select mode
|
||||
if (key.return || key.escape || input === "x") {
|
||||
setMode("select");
|
||||
}
|
||||
} else {
|
||||
// text entry mode
|
||||
if (key.return) {
|
||||
// if user hit enter on empty msg, fall back to DEFAULT_DENY_MESSAGE
|
||||
const custom = msg.trim() === "" ? DEFAULT_DENY_MESSAGE : msg;
|
||||
onReviewCommand(ReviewDecision.NO_CONTINUE, custom);
|
||||
} else if (key.escape) {
|
||||
// treat escape as denial with default message as well
|
||||
onReviewCommand(
|
||||
ReviewDecision.NO_CONTINUE,
|
||||
msg.trim() === "" ? DEFAULT_DENY_MESSAGE : msg,
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" gap={1} borderStyle="round" marginTop={1}>
|
||||
@@ -210,13 +191,9 @@ export function TerminalChatCommandReview({
|
||||
<Text>Allow command?</Text>
|
||||
<Box paddingX={2} flexDirection="column" gap={1}>
|
||||
<Select
|
||||
isDisabled={!isActive}
|
||||
visibleOptionCount={approvalOptions.length}
|
||||
onChange={(value: ReviewDecision | "edit" | "switch") => {
|
||||
onChange={(value: ReviewDecision | "edit") => {
|
||||
if (value === "edit") {
|
||||
setMode("input");
|
||||
} else if (value === "switch") {
|
||||
onSwitchApprovalMode();
|
||||
} else {
|
||||
onReviewCommand(value);
|
||||
}
|
||||
|
||||
@@ -1,64 +0,0 @@
|
||||
import { Box, Text } from "ink";
|
||||
import React, { useMemo } from "react";
|
||||
|
||||
type TextCompletionProps = {
|
||||
/**
|
||||
* Array of text completion options to display in the list
|
||||
*/
|
||||
completions: Array<string>;
|
||||
|
||||
/**
|
||||
* Maximum number of completion items to show at once in the view
|
||||
*/
|
||||
displayLimit: number;
|
||||
|
||||
/**
|
||||
* Index of the currently selected completion in the completions array
|
||||
*/
|
||||
selectedCompletion: number;
|
||||
};
|
||||
|
||||
function TerminalChatCompletions({
|
||||
completions,
|
||||
selectedCompletion,
|
||||
displayLimit,
|
||||
}: TextCompletionProps): JSX.Element {
|
||||
const visibleItems = useMemo(() => {
|
||||
// Try to keep selection centered in view
|
||||
let startIndex = Math.max(
|
||||
0,
|
||||
selectedCompletion - Math.floor(displayLimit / 2),
|
||||
);
|
||||
|
||||
// Fix window position when at the end of the list
|
||||
if (completions.length - startIndex < displayLimit) {
|
||||
startIndex = Math.max(0, completions.length - displayLimit);
|
||||
}
|
||||
|
||||
const endIndex = Math.min(completions.length, startIndex + displayLimit);
|
||||
|
||||
return completions.slice(startIndex, endIndex).map((completion, index) => ({
|
||||
completion,
|
||||
originalIndex: index + startIndex,
|
||||
}));
|
||||
}, [completions, selectedCompletion, displayLimit]);
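// The windowing math above as a pure function: keep the selection roughly
// centered, then clamp the window so it never runs past the end of the list.
// The name is illustrative.
function completionWindowSketch(
  total: number,
  selected: number,
  limit: number,
): { start: number; end: number } {
  let start = Math.max(0, selected - Math.floor(limit / 2));
  if (total - start < limit) {
    start = Math.max(0, total - limit); // pin the window to the tail
  }
  return { start, end: Math.min(total, start + limit) };
}

// completionWindowSketch(10, 9, 5) => { start: 5, end: 10 } (tail-pinned)
// completionWindowSketch(10, 0, 5) => { start: 0, end: 5 } (head-pinned)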
|
||||
|
||||
return (
|
||||
<Box flexDirection="column">
|
||||
{visibleItems.map(({ completion, originalIndex }) => (
|
||||
<Text
|
||||
key={completion}
|
||||
dimColor={originalIndex !== selectedCompletion}
|
||||
underline={originalIndex === selectedCompletion}
|
||||
backgroundColor={
|
||||
originalIndex === selectedCompletion ? "blackBright" : undefined
|
||||
}
|
||||
>
|
||||
{completion}
|
||||
</Text>
|
||||
))}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
export default TerminalChatCompletions;
|
||||
@@ -1,28 +1,82 @@
|
||||
import { log } from "../../utils/logger/log.js";
|
||||
import { log, isLoggingEnabled } from "../../utils/agent/log.js";
|
||||
import Spinner from "../vendor/ink-spinner.js";
|
||||
import { Box, Text, useInput, useStdin } from "ink";
|
||||
import React, { useState } from "react";
|
||||
import { useInterval } from "use-interval";
|
||||
|
||||
// Retaining a single static placeholder text for potential future use. The
|
||||
// more elaborate randomised thinking prompts were removed to streamline the
|
||||
// UI – the elapsed‑time counter now provides sufficient feedback.
|
||||
const thinkingTexts = ["Thinking"]; /* [
|
||||
"Consulting the rubber duck",
|
||||
"Maximizing paperclips",
|
||||
"Reticulating splines",
|
||||
"Immanentizing the Eschaton",
|
||||
"Thinking",
|
||||
"Thinking about thinking",
|
||||
"Spinning in circles",
|
||||
"Counting dust specks",
|
||||
"Updating priors",
|
||||
"Feeding the utility monster",
|
||||
"Taking off",
|
||||
"Wireheading",
|
||||
"Counting to infinity",
|
||||
"Staring into the Basilisk",
|
||||
"Negotiationing acausal trades",
|
||||
"Searching the library of babel",
|
||||
"Multiplying matrices",
|
||||
"Solving the halting problem",
|
||||
"Counting grains of sand",
|
||||
"Simulating a simulation",
|
||||
"Asking the oracle",
|
||||
"Detangling qubits",
|
||||
"Reading tea leaves",
|
||||
"Pondering universal love and transcendent joy",
|
||||
"Feeling the AGI",
|
||||
"Shaving the yak",
|
||||
"Escaping local minima",
|
||||
"Pruning the search tree",
|
||||
"Descending the gradient",
|
||||
"Bikeshedding",
|
||||
"Securing funding",
|
||||
"Rewriting in Rust",
|
||||
"Engaging infinite improbability drive",
|
||||
"Clapping with one hand",
|
||||
"Synthesizing",
|
||||
"Rebasing thesis onto antithesis",
|
||||
"Transcending the loop",
|
||||
"Frogeposting",
|
||||
"Summoning",
|
||||
"Peeking beyond the veil",
|
||||
"Seeking",
|
||||
"Entering deep thought",
|
||||
"Meditating",
|
||||
"Decomposing",
|
||||
"Creating",
|
||||
"Beseeching the machine spirit",
|
||||
"Calibrating moral compass",
|
||||
"Collapsing the wave function",
|
||||
"Doodling",
|
||||
"Translating whale song",
|
||||
"Whispering to silicon",
|
||||
"Looking for semicolons",
|
||||
"Asking ChatGPT",
|
||||
"Bargaining with entropy",
|
||||
"Channeling",
|
||||
"Cooking",
|
||||
"Parroting stochastically",
|
||||
]; */
|
||||
|
||||
export default function TerminalChatInputThinking({
|
||||
onInterrupt,
|
||||
active,
|
||||
thinkingSeconds,
|
||||
}: {
|
||||
onInterrupt: () => void;
|
||||
active: boolean;
|
||||
thinkingSeconds: number;
|
||||
}): React.ReactElement {
|
||||
const [awaitingConfirm, setAwaitingConfirm] = useState(false);
|
||||
const [dots, setDots] = useState("");
|
||||
const [awaitingConfirm, setAwaitingConfirm] = useState(false);
|
||||
|
||||
// Animate the ellipsis
|
||||
useInterval(() => {
|
||||
setDots((prev) => (prev.length < 3 ? prev + "." : ""));
|
||||
}, 500);
|
||||
const [thinkingText, setThinkingText] = useState(
|
||||
() => thinkingTexts[Math.floor(Math.random() * thinkingTexts.length)],
|
||||
);
|
||||
|
||||
const { stdin, setRawMode } = useStdin();
|
||||
|
||||
@@ -40,9 +94,11 @@ export default function TerminalChatInputThinking({
|
||||
|
||||
const str = Buffer.isBuffer(data) ? data.toString("utf8") : data;
|
||||
if (str === "\x1b\x1b") {
|
||||
log(
|
||||
"raw stdin: received collapsed ESC ESC – starting confirmation timer",
|
||||
);
|
||||
if (isLoggingEnabled()) {
|
||||
log(
|
||||
"raw stdin: received collapsed ESC ESC – starting confirmation timer",
|
||||
);
|
||||
}
|
||||
setAwaitingConfirm(true);
|
||||
setTimeout(() => setAwaitingConfirm(false), 1500);
|
||||
}
|
||||
@@ -54,7 +110,25 @@ export default function TerminalChatInputThinking({
|
||||
};
|
||||
}, [stdin, awaitingConfirm, onInterrupt, active, setRawMode]);
|
||||
|
||||
// No timers required beyond tracking the elapsed seconds supplied via props.
|
||||
useInterval(() => {
|
||||
setDots((prev) => (prev.length < 3 ? prev + "." : ""));
|
||||
}, 500);
|
||||
|
||||
useInterval(
|
||||
() => {
|
||||
setThinkingText((prev) => {
|
||||
let next = prev;
|
||||
if (thinkingTexts.length > 1) {
|
||||
while (next === prev) {
|
||||
next =
|
||||
thinkingTexts[Math.floor(Math.random() * thinkingTexts.length)];
|
||||
}
|
||||
}
|
||||
return next;
|
||||
});
|
||||
},
|
||||
active ? 30000 : null,
|
||||
);
|
||||
|
||||
useInput(
|
||||
(_input, key) => {
|
||||
@@ -63,11 +137,15 @@ export default function TerminalChatInputThinking({
|
||||
}
|
||||
|
||||
if (awaitingConfirm) {
|
||||
log("useInput: second ESC detected – triggering onInterrupt()");
|
||||
if (isLoggingEnabled()) {
|
||||
log("useInput: second ESC detected – triggering onInterrupt()");
|
||||
}
|
||||
onInterrupt();
|
||||
setAwaitingConfirm(false);
|
||||
} else {
|
||||
log("useInput: first ESC detected – waiting for confirmation");
|
||||
if (isLoggingEnabled()) {
|
||||
log("useInput: first ESC detected – waiting for confirmation");
|
||||
}
|
||||
setAwaitingConfirm(true);
|
||||
setTimeout(() => setAwaitingConfirm(false), 1500);
|
||||
}
|
||||
@@ -75,47 +153,13 @@ export default function TerminalChatInputThinking({
|
||||
{ isActive: active },
|
||||
);
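// Minimal sketch of the "press Esc twice within 1.5 s" guard implemented above,
// factored into a tiny helper. Names (and the agent.cancel callback in the usage
// note) are illustrative.
function makeDoubleEscGuard(onConfirmedInterrupt: () => void, windowMs = 1500) {
  let armed = false;
  let timer: ReturnType<typeof setTimeout> | undefined;
  return function onEsc(): void {
    if (armed) {
      if (timer) {
        clearTimeout(timer);
      }
      armed = false;
      onConfirmedInterrupt(); // second Esc inside the window
    } else {
      armed = true; // first Esc: arm and wait for confirmation
      timer = setTimeout(() => {
        armed = false;
      }, windowMs);
    }
  };
}

// const onEsc = makeDoubleEscGuard(() => agent.cancel()); // agent.cancel(): hypothetical
// useInput((_input, key) => { if (key.escape) { onEsc(); } }, { isActive: active });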
|
||||
|
||||
// Custom ball animation including the elapsed seconds
|
||||
const ballFrames = [
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ●)",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"(● )",
|
||||
];
|
||||
|
||||
const [frame, setFrame] = useState(0);
|
||||
|
||||
useInterval(() => {
|
||||
setFrame((idx) => (idx + 1) % ballFrames.length);
|
||||
}, 80);
|
||||
|
||||
// Preserve the spinner (ball) animation while keeping the elapsed seconds
|
||||
// text static. We achieve this by rendering the bouncing ball inside the
|
||||
// parentheses and appending the seconds counter *after* the spinner rather
|
||||
// than injecting it directly next to the ball (which caused the counter to
|
||||
// move horizontally together with the ball).
|
||||
|
||||
const frameTemplate = ballFrames[frame] ?? ballFrames[0];
|
||||
const frameWithSeconds = `${frameTemplate} ${thinkingSeconds}s`;
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Box justifyContent="space-between">
|
||||
<Box gap={2}>
|
||||
<Text>{frameWithSeconds}</Text>
|
||||
<Text>
|
||||
Thinking
|
||||
{dots}
|
||||
</Text>
|
||||
</Box>
|
||||
<Box gap={2}>
|
||||
<Spinner type="ball" />
|
||||
<Text>
|
||||
Press <Text bold>Esc</Text> twice to interrupt
|
||||
{thinkingText}
|
||||
{dots}
|
||||
</Text>
|
||||
</Box>
|
||||
{awaitingConfirm && (
|
||||
|
||||
@@ -1,36 +1,26 @@
|
||||
import type { MultilineTextEditorHandle } from "./multiline-editor";
|
||||
import type { ReviewDecision } from "../../utils/agent/review.js";
|
||||
import type { FileSystemSuggestion } from "../../utils/file-system-suggestions.js";
|
||||
import type { HistoryEntry } from "../../utils/storage/command-history.js";
|
||||
import type {
|
||||
ResponseInputItem,
|
||||
ResponseItem,
|
||||
} from "openai/resources/responses/responses.mjs";
|
||||
|
||||
import MultilineTextEditor from "./multiline-editor";
|
||||
import { TerminalChatCommandReview } from "./terminal-chat-command-review.js";
|
||||
import TextCompletions from "./terminal-chat-completions.js";
|
||||
import { log, isLoggingEnabled } from "../../utils/agent/log.js";
|
||||
import { loadConfig } from "../../utils/config.js";
|
||||
import { getFileSystemSuggestions } from "../../utils/file-system-suggestions.js";
|
||||
import { expandFileTags } from "../../utils/file-tag-utils";
|
||||
import { createInputItem } from "../../utils/input-utils.js";
|
||||
import { log } from "../../utils/logger/log.js";
|
||||
import { printAndResetSessionSummary } from "../../utils/session-cost.js";
|
||||
import { setSessionId } from "../../utils/session.js";
|
||||
import { SLASH_COMMANDS, type SlashCommand } from "../../utils/slash-commands";
|
||||
import {
|
||||
loadCommandHistory,
|
||||
addToHistory,
|
||||
} from "../../utils/storage/command-history.js";
|
||||
import { clearTerminal, onExit } from "../../utils/terminal.js";
|
||||
import Spinner from "../vendor/ink-spinner.js";
|
||||
import TextInput from "../vendor/ink-text-input.js";
|
||||
import { Box, Text, useApp, useInput, useStdin } from "ink";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import React, {
|
||||
useCallback,
|
||||
useState,
|
||||
Fragment,
|
||||
useEffect,
|
||||
useRef,
|
||||
} from "react";
|
||||
import React, { useCallback, useState, Fragment, useEffect } from "react";
|
||||
import { useInterval } from "use-interval";
|
||||
|
||||
const suggestions = [
|
||||
@@ -53,13 +43,9 @@ export default function TerminalChatInput({
|
||||
openModelOverlay,
|
||||
openApprovalOverlay,
|
||||
openHelpOverlay,
|
||||
openDiffOverlay,
|
||||
openSessionsOverlay,
|
||||
onCompact,
|
||||
interruptAgent,
|
||||
active,
|
||||
thinkingSeconds,
|
||||
items = [],
|
||||
}: {
|
||||
isNew: boolean;
|
||||
loading: boolean;
|
||||
@@ -77,138 +63,16 @@ export default function TerminalChatInput({
|
||||
openModelOverlay: () => void;
|
||||
openApprovalOverlay: () => void;
|
||||
openHelpOverlay: () => void;
|
||||
openDiffOverlay: () => void;
|
||||
openSessionsOverlay: () => void;
|
||||
onCompact: () => void;
|
||||
interruptAgent: () => void;
|
||||
active: boolean;
|
||||
thinkingSeconds: number;
|
||||
// New: current conversation items so we can include them in bug reports
|
||||
items?: Array<ResponseItem>;
|
||||
}): React.ReactElement {
|
||||
// Slash command suggestion index
|
||||
const [selectedSlashSuggestion, setSelectedSlashSuggestion] =
|
||||
useState<number>(0);
|
||||
const app = useApp();
|
||||
const [selectedSuggestion, setSelectedSuggestion] = useState<number>(0);
|
||||
const [input, setInput] = useState("");
|
||||
const [history, setHistory] = useState<Array<HistoryEntry>>([]);
|
||||
const [historyIndex, setHistoryIndex] = useState<number | null>(null);
|
||||
const [draftInput, setDraftInput] = useState<string>("");
|
||||
const [skipNextSubmit, setSkipNextSubmit] = useState<boolean>(false);
|
||||
const [fsSuggestions, setFsSuggestions] = useState<
|
||||
Array<FileSystemSuggestion>
|
||||
>([]);
|
||||
const [selectedCompletion, setSelectedCompletion] = useState<number>(-1);
|
||||
// Multiline text editor key to force remount after submission
|
||||
const [editorState, setEditorState] = useState<{
|
||||
key: number;
|
||||
initialCursorOffset?: number;
|
||||
}>({ key: 0 });
|
||||
// Imperative handle from the multiline editor so we can query caret position
|
||||
const editorRef = useRef<MultilineTextEditorHandle | null>(null);
|
||||
// Track the caret row across keystrokes
|
||||
const prevCursorRow = useRef<number | null>(null);
|
||||
const prevCursorWasAtLastRow = useRef<boolean>(false);
|
||||
|
||||
// --- Helper for updating input, remounting editor, and moving cursor to end ---
|
||||
const applyFsSuggestion = useCallback((newInputText: string) => {
|
||||
setInput(newInputText);
|
||||
setEditorState((s) => ({
|
||||
key: s.key + 1,
|
||||
initialCursorOffset: newInputText.length,
|
||||
}));
|
||||
}, []);
|
||||
|
||||
// --- Helper for updating file system suggestions ---
|
||||
function updateFsSuggestions(
|
||||
txt: string,
|
||||
alwaysUpdateSelection: boolean = false,
|
||||
) {
|
||||
// Clear file system completions if a space is typed
|
||||
if (txt.endsWith(" ")) {
|
||||
setFsSuggestions([]);
|
||||
setSelectedCompletion(-1);
|
||||
} else {
|
||||
// Determine the current token (last whitespace-separated word)
|
||||
const words = txt.trim().split(/\s+/);
|
||||
const lastWord = words[words.length - 1] ?? "";
|
||||
|
||||
const shouldUpdateSelection =
|
||||
lastWord.startsWith("@") || alwaysUpdateSelection;
|
||||
|
||||
// Strip optional leading '@' for the path prefix
|
||||
let pathPrefix: string;
|
||||
if (lastWord.startsWith("@")) {
|
||||
pathPrefix = lastWord.slice(1);
|
||||
// If only '@' is typed, list everything in the current directory
|
||||
pathPrefix = pathPrefix.length === 0 ? "./" : pathPrefix;
|
||||
} else {
|
||||
pathPrefix = lastWord;
|
||||
}
|
||||
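// Illustrative mapping (not in the original source) of how the token above
// becomes a path prefix for getFileSystemSuggestions:
//   "@src/comp"  -> pathPrefix "src/comp"
//   "@"          -> pathPrefix "./"   (list the current directory)
//   "docs/read"  -> pathPrefix "docs/read" (only used when alwaysUpdateSelection is true)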
|
||||
if (shouldUpdateSelection) {
|
||||
const completions = getFileSystemSuggestions(pathPrefix);
|
||||
setFsSuggestions(completions);
|
||||
if (completions.length > 0) {
|
||||
setSelectedCompletion((prev) =>
|
||||
prev < 0 || prev >= completions.length ? 0 : prev,
|
||||
);
|
||||
} else {
|
||||
setSelectedCompletion(-1);
|
||||
}
|
||||
} else if (fsSuggestions.length > 0) {
|
||||
// Token cleared → clear menu
|
||||
setFsSuggestions([]);
|
||||
setSelectedCompletion(-1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Result of replacing text with a file system suggestion
|
||||
*/
|
||||
interface ReplacementResult {
|
||||
/** The new text with the suggestion applied */
|
||||
text: string;
|
||||
/** The selected suggestion if a replacement was made */
|
||||
suggestion: FileSystemSuggestion | null;
|
||||
/** Whether a replacement was actually made */
|
||||
wasReplaced: boolean;
|
||||
}
|
||||
|
||||
// --- Helper for replacing input with file system suggestion ---
|
||||
function getFileSystemSuggestion(
|
||||
txt: string,
|
||||
requireAtPrefix: boolean = false,
|
||||
): ReplacementResult {
|
||||
if (fsSuggestions.length === 0 || selectedCompletion < 0) {
|
||||
return { text: txt, suggestion: null, wasReplaced: false };
|
||||
}
|
||||
|
||||
const words = txt.trim().split(/\s+/);
|
||||
const lastWord = words[words.length - 1] ?? "";
|
||||
|
||||
// Check if @ prefix is required and the last word doesn't have it
|
||||
if (requireAtPrefix && !lastWord.startsWith("@")) {
|
||||
return { text: txt, suggestion: null, wasReplaced: false };
|
||||
}
|
||||
|
||||
const selected = fsSuggestions[selectedCompletion];
|
||||
if (!selected) {
|
||||
return { text: txt, suggestion: null, wasReplaced: false };
|
||||
}
|
||||
|
||||
const replacement = lastWord.startsWith("@")
|
||||
? `@${selected.path}`
|
||||
: selected.path;
|
||||
words[words.length - 1] = replacement;
|
||||
return {
|
||||
text: words.join(" "),
|
||||
suggestion: selected,
|
||||
wasReplaced: true,
|
||||
};
|
||||
}
|
||||
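// Worked example (added for illustration): with input "open @sr" and the
// selected suggestion { path: "src/", isDirectory: true }, the last token
// keeps its '@' prefix, so the helper returns
//   { text: "open @src/", suggestion: { path: "src/", ... }, wasReplaced: true }.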
|
||||
// Load command history on component mount
|
||||
useEffect(() => {
|
||||
@@ -219,226 +83,45 @@ export default function TerminalChatInput({
|
||||
|
||||
loadHistory();
|
||||
}, []);
|
||||
// Reset slash suggestion index when input prefix changes
|
||||
useEffect(() => {
|
||||
if (input.trim().startsWith("/")) {
|
||||
setSelectedSlashSuggestion(0);
|
||||
}
|
||||
}, [input]);
|
||||
|
||||
useInput(
|
||||
(_input, _key) => {
|
||||
// Slash command navigation: up/down to select, enter to fill
|
||||
if (!confirmationPrompt && !loading && input.trim().startsWith("/")) {
|
||||
const prefix = input.trim();
|
||||
const matches = SLASH_COMMANDS.filter((cmd: SlashCommand) =>
|
||||
cmd.command.startsWith(prefix),
|
||||
);
|
||||
if (matches.length > 0) {
|
||||
if (_key.tab) {
|
||||
// Cycle and fill slash command suggestions on Tab
|
||||
const len = matches.length;
|
||||
// Determine new index based on shift state
|
||||
const nextIdx = _key.shift
|
||||
? selectedSlashSuggestion <= 0
|
||||
? len - 1
|
||||
: selectedSlashSuggestion - 1
|
||||
: selectedSlashSuggestion >= len - 1
|
||||
? 0
|
||||
: selectedSlashSuggestion + 1;
|
||||
setSelectedSlashSuggestion(nextIdx);
|
||||
// Autocomplete the command in the input
|
||||
const match = matches[nextIdx];
|
||||
if (!match) {
|
||||
return;
|
||||
}
|
||||
const cmd = match.command;
|
||||
setInput(cmd);
|
||||
setDraftInput(cmd);
|
||||
return;
|
||||
}
|
||||
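// Example of the cycling rule above (illustrative only): with three matches
// and no Shift held, the index advances 0 -> 1 -> 2 -> 0; with Shift held it
// walks backwards 0 -> 2 -> 1 -> 0, and the input is filled with the command
// at the new index on every Tab press.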
if (_key.upArrow) {
|
||||
setSelectedSlashSuggestion((prev) =>
|
||||
prev <= 0 ? matches.length - 1 : prev - 1,
|
||||
);
|
||||
return;
|
||||
}
|
||||
if (_key.downArrow) {
|
||||
setSelectedSlashSuggestion((prev) =>
|
||||
prev < 0 || prev >= matches.length - 1 ? 0 : prev + 1,
|
||||
);
|
||||
return;
|
||||
}
|
||||
if (_key.return) {
|
||||
// Execute the currently selected slash command
|
||||
const selIdx = selectedSlashSuggestion;
|
||||
const cmdObj = matches[selIdx];
|
||||
if (cmdObj) {
|
||||
const cmd = cmdObj.command;
|
||||
setInput("");
|
||||
setDraftInput("");
|
||||
setSelectedSlashSuggestion(0);
|
||||
switch (cmd) {
|
||||
case "/history":
|
||||
openOverlay();
|
||||
break;
|
||||
case "/sessions":
|
||||
openSessionsOverlay();
|
||||
break;
|
||||
case "/help":
|
||||
openHelpOverlay();
|
||||
break;
|
||||
case "/compact":
|
||||
onCompact();
|
||||
break;
|
||||
case "/model":
|
||||
openModelOverlay();
|
||||
break;
|
||||
case "/approval":
|
||||
openApprovalOverlay();
|
||||
break;
|
||||
case "/diff":
|
||||
openDiffOverlay();
|
||||
break;
|
||||
case "/bug":
|
||||
onSubmit(cmd);
|
||||
break;
|
||||
case "/clear":
|
||||
onSubmit(cmd);
|
||||
break;
|
||||
case "/clearhistory":
|
||||
onSubmit(cmd);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!confirmationPrompt && !loading) {
|
||||
if (fsSuggestions.length > 0) {
|
||||
if (_key.upArrow) {
|
||||
setSelectedCompletion((prev) =>
|
||||
prev <= 0 ? fsSuggestions.length - 1 : prev - 1,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if (_key.downArrow) {
|
||||
setSelectedCompletion((prev) =>
|
||||
prev >= fsSuggestions.length - 1 ? 0 : prev + 1,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if (_key.tab && selectedCompletion >= 0) {
|
||||
const { text: newText, wasReplaced } =
|
||||
getFileSystemSuggestion(input);
|
||||
|
||||
// Only proceed if the text was actually changed
|
||||
if (wasReplaced) {
|
||||
applyFsSuggestion(newText);
|
||||
setFsSuggestions([]);
|
||||
setSelectedCompletion(-1);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (_key.upArrow) {
|
||||
let moveThroughHistory = true;
|
||||
if (history.length > 0) {
|
||||
if (historyIndex == null) {
|
||||
setDraftInput(input);
|
||||
}
|
||||
|
||||
// Only use history when the caret was *already* on the very first
|
||||
// row *before* this key-press.
|
||||
const cursorRow = editorRef.current?.getRow?.() ?? 0;
|
||||
const cursorCol = editorRef.current?.getCol?.() ?? 0;
|
||||
const wasAtFirstRow = (prevCursorRow.current ?? cursorRow) === 0;
|
||||
if (!(cursorRow === 0 && wasAtFirstRow)) {
|
||||
moveThroughHistory = false;
|
||||
}
|
||||
|
||||
// If we are not yet in history mode, then also require that the col is zero so that
|
||||
// we only trigger history navigation when the user is at the start of the input.
|
||||
if (historyIndex == null && !(cursorRow === 0 && cursorCol === 0)) {
|
||||
moveThroughHistory = false;
|
||||
}
|
||||
|
||||
// Move through history.
|
||||
if (history.length && moveThroughHistory) {
|
||||
let newIndex: number;
|
||||
if (historyIndex == null) {
|
||||
const currentDraft = editorRef.current?.getText?.() ?? input;
|
||||
setDraftInput(currentDraft);
|
||||
newIndex = history.length - 1;
|
||||
} else {
|
||||
newIndex = Math.max(0, historyIndex - 1);
|
||||
}
|
||||
setHistoryIndex(newIndex);
|
||||
|
||||
setInput(history[newIndex]?.command ?? "");
|
||||
// Re-mount the editor so it picks up the new initialText
|
||||
setEditorState((s) => ({ key: s.key + 1 }));
|
||||
return; // handled
|
||||
}
|
||||
|
||||
// Otherwise let it propagate.
|
||||
return;
|
||||
}
|
||||
|
||||
if (_key.downArrow) {
|
||||
// Only move forward in history when we're already *in* history mode
|
||||
// AND the caret sits on the last line of the buffer.
|
||||
const wasAtLastRow =
|
||||
prevCursorWasAtLastRow.current ??
|
||||
editorRef.current?.isCursorAtLastRow() ??
|
||||
true;
|
||||
if (historyIndex != null && wasAtLastRow) {
|
||||
const newIndex = historyIndex + 1;
|
||||
if (newIndex >= history.length) {
|
||||
setHistoryIndex(null);
|
||||
setInput(draftInput);
|
||||
setEditorState((s) => ({ key: s.key + 1 }));
|
||||
} else {
|
||||
setHistoryIndex(newIndex);
|
||||
setInput(history[newIndex]?.command ?? "");
|
||||
setEditorState((s) => ({ key: s.key + 1 }));
|
||||
}
|
||||
return; // handled
|
||||
if (historyIndex == null) {
|
||||
return;
|
||||
}
|
||||
// Otherwise let it propagate
|
||||
}
|
||||
|
||||
// Defer filesystem suggestion logic to onSubmit if enter key is pressed
|
||||
if (!_key.return) {
|
||||
// Pressing tab should trigger the file system suggestions
|
||||
const shouldUpdateSelection = _key.tab;
|
||||
const targetInput = _key.delete ? input.slice(0, -1) : input + _input;
|
||||
updateFsSuggestions(targetInput, shouldUpdateSelection);
|
||||
const newIndex = historyIndex + 1;
|
||||
if (newIndex >= history.length) {
|
||||
setHistoryIndex(null);
|
||||
setInput(draftInput);
|
||||
} else {
|
||||
setHistoryIndex(newIndex);
|
||||
setInput(history[newIndex]?.command ?? "");
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Update the cached cursor position *after* **all** handlers (including
|
||||
// the internal <MultilineTextEditor>) have processed this key event.
|
||||
//
|
||||
// Ink invokes `useInput` callbacks starting with **parent** components
|
||||
// first, followed by their descendants. As a result the call above
|
||||
// executes *before* the editor has had a chance to react to the key
|
||||
// press and update its internal caret position. When navigating
|
||||
// through a multi-line draft with the ↑ / ↓ arrow keys this meant we
|
||||
// recorded the *old* cursor row instead of the one that results *after*
|
||||
// the key press. Consequently, a subsequent ↑ still saw
|
||||
// `prevCursorRow = 1` even though the caret was already on row 0 and
|
||||
// history-navigation never kicked in.
|
||||
//
|
||||
// Defer the sampling by one tick so we read the *final* caret position
|
||||
// for this frame.
|
||||
setTimeout(() => {
|
||||
prevCursorRow.current = editorRef.current?.getRow?.() ?? null;
|
||||
prevCursorWasAtLastRow.current =
|
||||
editorRef.current?.isCursorAtLastRow?.() ?? true;
|
||||
}, 1);
|
||||
|
||||
if (input.trim() === "" && isNew) {
|
||||
if (_key.tab) {
|
||||
setSelectedSuggestion(
|
||||
@@ -470,95 +153,72 @@ export default function TerminalChatInput({
|
||||
const onSubmit = useCallback(
|
||||
async (value: string) => {
|
||||
const inputValue = value.trim();
|
||||
|
||||
// If the user only entered a slash, do not send a chat message.
|
||||
if (inputValue === "/") {
|
||||
setInput("");
|
||||
return;
|
||||
}
|
||||
|
||||
// Skip this submit if we just autocompleted a slash command.
|
||||
if (skipNextSubmit) {
|
||||
setSkipNextSubmit(false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!inputValue) {
|
||||
return;
|
||||
} else if (inputValue === "/history") {
|
||||
}
|
||||
|
||||
if (inputValue === "/history") {
|
||||
setInput("");
|
||||
openOverlay();
|
||||
return;
|
||||
} else if (inputValue === "/sessions") {
|
||||
setInput("");
|
||||
openSessionsOverlay();
|
||||
return;
|
||||
} else if (inputValue === "/help") {
|
||||
}
|
||||
|
||||
if (inputValue === "/help") {
|
||||
setInput("");
|
||||
openHelpOverlay();
|
||||
return;
|
||||
} else if (inputValue === "/diff") {
|
||||
setInput("");
|
||||
openDiffOverlay();
|
||||
return;
|
||||
} else if (inputValue === "/compact") {
|
||||
}
|
||||
|
||||
if (inputValue === "/compact") {
|
||||
setInput("");
|
||||
onCompact();
|
||||
return;
|
||||
} else if (inputValue.startsWith("/model")) {
|
||||
}
|
||||
|
||||
if (inputValue.startsWith("/model")) {
|
||||
setInput("");
|
||||
openModelOverlay();
|
||||
return;
|
||||
} else if (inputValue.startsWith("/approval")) {
|
||||
}
|
||||
|
||||
if (inputValue.startsWith("/approval")) {
|
||||
setInput("");
|
||||
openApprovalOverlay();
|
||||
return;
|
||||
} else if (["exit", "q", ":q"].includes(inputValue)) {
|
||||
}
|
||||
|
||||
if (inputValue === "q" || inputValue === ":q" || inputValue === "exit") {
|
||||
setInput("");
|
||||
// wait one 60ms frame
|
||||
setTimeout(() => {
|
||||
app.exit();
|
||||
onExit();
|
||||
process.exit(0);
|
||||
}, 60); // Wait one frame.
|
||||
}, 60);
|
||||
return;
|
||||
} else if (inputValue === "/clear" || inputValue === "clear") {
|
||||
setInput("");
|
||||
setSessionId("");
|
||||
setLastResponseId("");
|
||||
|
||||
// Clear the terminal screen (including scrollback) before resetting context.
|
||||
// Clear the terminal first so the summary is printed on a fresh
|
||||
// screen before the new session starts.
|
||||
clearTerminal();
|
||||
|
||||
// Show the token/cost summary for the session that just ended.
|
||||
printAndResetSessionSummary();
|
||||
|
||||
// Emit a system message to confirm the clear action. We *append*
|
||||
// it so Ink's <Static> treats it as new output and actually renders it.
|
||||
setItems((prev) => {
|
||||
const filteredOldItems = prev.filter((item) => {
|
||||
// Remove any token‑heavy entries (user/assistant turns and function calls)
|
||||
if (
|
||||
item.type === "message" &&
|
||||
(item.role === "user" || item.role === "assistant")
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
if (
|
||||
item.type === "function_call" ||
|
||||
item.type === "function_call_output"
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
return true; // keep developer/system and other meta entries
|
||||
});
|
||||
|
||||
return [
|
||||
...filteredOldItems,
|
||||
{
|
||||
id: `clear-${Date.now()}`,
|
||||
type: "message",
|
||||
role: "system",
|
||||
content: [{ type: "input_text", text: "Terminal cleared" }],
|
||||
},
|
||||
];
|
||||
});
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
id: `clear-${Date.now()}`,
|
||||
type: "message",
|
||||
role: "system",
|
||||
content: [{ type: "input_text", text: "Context cleared" }],
|
||||
},
|
||||
]);
|
||||
|
||||
return;
|
||||
} else if (inputValue === "/clearhistory") {
|
||||
@@ -571,7 +231,7 @@ export default function TerminalChatInput({
|
||||
await clearCommandHistory();
|
||||
setHistory([]);
|
||||
|
||||
// Emit a system message to confirm the history clear action.
|
||||
// Emit a system message to confirm the history clear action
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
@@ -586,65 +246,12 @@ export default function TerminalChatInput({
|
||||
},
|
||||
);
|
||||
|
||||
return;
|
||||
} else if (inputValue === "/bug") {
|
||||
// Generate a GitHub bug report URL pre‑filled with session details.
|
||||
setInput("");
|
||||
|
||||
try {
|
||||
const os = await import("node:os");
|
||||
const { CLI_VERSION } = await import("../../version.js");
|
||||
const { buildBugReportUrl } = await import(
|
||||
"../../utils/bug-report.js"
|
||||
);
|
||||
|
||||
const url = buildBugReportUrl({
|
||||
items: items ?? [],
|
||||
cliVersion: CLI_VERSION,
|
||||
model: loadConfig().model ?? "unknown",
|
||||
platform: [os.platform(), os.arch(), os.release()]
|
||||
.map((s) => `\`${s}\``)
|
||||
.join(" | "),
|
||||
});
|
||||
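// Illustrative values only: on an Apple-silicon Mac the platform field above
// would render as something like `darwin` | `arm64` | `23.4.0`.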
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
id: `bugreport-${Date.now()}`,
|
||||
type: "message",
|
||||
role: "system",
|
||||
content: [
|
||||
{
|
||||
type: "input_text",
|
||||
text: `🔗 Bug report URL: ${url}`,
|
||||
},
|
||||
],
|
||||
},
|
||||
]);
|
||||
} catch (error) {
|
||||
// If anything went wrong, notify the user.
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
id: `bugreport-error-${Date.now()}`,
|
||||
type: "message",
|
||||
role: "system",
|
||||
content: [
|
||||
{
|
||||
type: "input_text",
|
||||
text: `⚠️ Failed to create bug report URL: ${error}`,
|
||||
},
|
||||
],
|
||||
},
|
||||
]);
|
||||
}
|
||||
|
||||
return;
|
||||
} else if (inputValue.startsWith("/")) {
|
||||
// Handle invalid/unrecognized commands. Only single-word inputs starting with '/'
|
||||
// (e.g., /command) that are not recognized are caught here. Any other input, including
|
||||
// those starting with '/' but containing spaces (e.g., "/command arg"), will fall through
|
||||
// and be treated as a regular prompt.
|
||||
// Handle invalid/unrecognized commands.
|
||||
// Only single-word inputs starting with '/' (e.g., /command) that are not recognized are caught here.
|
||||
// Any other input, including those starting with '/' but containing spaces
|
||||
// (e.g., "/command arg"), will fall through and be treated as a regular prompt.
|
||||
const trimmed = inputValue.trim();
|
||||
|
||||
if (/^\/\S+$/.test(trimmed)) {
|
||||
@@ -671,13 +278,11 @@ export default function TerminalChatInput({
|
||||
// detect image file paths for dynamic inclusion
|
||||
const images: Array<string> = [];
|
||||
let text = inputValue;
|
||||
|
||||
// markdown-style image syntax: 
|
||||
text = text.replace(/!\[[^\]]*?\]\(([^)]+)\)/g, (_m, p1: string) => {
|
||||
images.push(p1.startsWith("file://") ? fileURLToPath(p1) : p1);
|
||||
return "";
|
||||
});
|
||||
|
||||
// quoted file paths ending with common image extensions (e.g. '/path/to/img.png')
|
||||
text = text.replace(
|
||||
/['"]([^'"]+?\.(?:png|jpe?g|gif|bmp|webp|svg))['"]/gi,
|
||||
@@ -686,7 +291,6 @@ export default function TerminalChatInput({
|
||||
return "";
|
||||
},
|
||||
);
|
||||
|
||||
// bare file paths ending with common image extensions
|
||||
text = text.replace(
|
||||
// eslint-disable-next-line no-useless-escape
|
||||
@@ -700,16 +304,13 @@ export default function TerminalChatInput({
|
||||
);
|
||||
text = text.trim();
|
||||
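// Illustrative example of the extraction above (the bare-path pattern is
// elided by the diff context, so this only covers the first two rules):
// given the draft
//   see ![before](/tmp/before.png) and "shots/after.png"
// the markdown-image and quoted-path regexes collect "/tmp/before.png" and
// "shots/after.png" into `images` and strip both references from `text`
// before it is trimmed.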
|
||||
// Expand @file tokens into XML blocks for the model
|
||||
const expandedText = await expandFileTags(text);
|
||||
|
||||
const inputItem = await createInputItem(expandedText, images);
|
||||
const inputItem = await createInputItem(text, images);
|
||||
submitInput([inputItem]);
|
||||
|
||||
// Get config for history persistence.
|
||||
// Get config for history persistence
|
||||
const config = loadConfig();
|
||||
|
||||
// Add to history and update state.
|
||||
// Add to history and update state
|
||||
const updatedHistory = await addToHistory(value, history, {
|
||||
maxSize: config.history?.maxSize ?? 1000,
|
||||
saveHistory: config.history?.saveHistory ?? true,
|
||||
@@ -721,8 +322,6 @@ export default function TerminalChatInput({
|
||||
setDraftInput("");
|
||||
setSelectedSuggestion(0);
|
||||
setInput("");
|
||||
setFsSuggestions([]);
|
||||
setSelectedCompletion(-1);
|
||||
},
|
||||
[
|
||||
setInput,
|
||||
@@ -736,12 +335,8 @@ export default function TerminalChatInput({
|
||||
openApprovalOverlay,
|
||||
openModelOverlay,
|
||||
openHelpOverlay,
|
||||
openDiffOverlay,
|
||||
openSessionsOverlay,
|
||||
history,
|
||||
history, // Add history to the dependency array
|
||||
onCompact,
|
||||
skipNextSubmit,
|
||||
items,
|
||||
],
|
||||
);
|
||||
|
||||
@@ -750,11 +345,7 @@ export default function TerminalChatInput({
|
||||
<TerminalChatCommandReview
|
||||
confirmationPrompt={confirmationPrompt}
|
||||
onReviewCommand={submitConfirmation}
|
||||
// allow switching approval mode via 'v'
|
||||
onSwitchApprovalMode={openApprovalOverlay}
|
||||
explanation={explanation}
|
||||
// disable when input is inactive (e.g., overlay open)
|
||||
isActive={active}
|
||||
/>
|
||||
);
|
||||
}
|
||||
@@ -766,114 +357,65 @@ export default function TerminalChatInput({
|
||||
<TerminalChatInputThinking
|
||||
onInterrupt={interruptAgent}
|
||||
active={active}
|
||||
thinkingSeconds={thinkingSeconds}
|
||||
/>
|
||||
) : (
|
||||
<Box paddingX={1}>
|
||||
<MultilineTextEditor
|
||||
ref={editorRef}
|
||||
onChange={(txt: string) => {
|
||||
setDraftInput(txt);
|
||||
<TextInput
|
||||
focus={active}
|
||||
placeholder={
|
||||
selectedSuggestion
|
||||
? `"${suggestions[selectedSuggestion - 1]}"`
|
||||
: "send a message" +
|
||||
(isNew ? " or press tab to select a suggestion" : "")
|
||||
}
|
||||
showCursor
|
||||
value={input}
|
||||
onChange={(value) => {
|
||||
setDraftInput(value);
|
||||
if (historyIndex != null) {
|
||||
setHistoryIndex(null);
|
||||
}
|
||||
setInput(txt);
|
||||
}}
|
||||
key={editorState.key}
|
||||
initialCursorOffset={editorState.initialCursorOffset}
|
||||
initialText={input}
|
||||
height={6}
|
||||
focus={active}
|
||||
onSubmit={(txt) => {
|
||||
// If final token is an @path, replace with filesystem suggestion if available
|
||||
const {
|
||||
text: replacedText,
|
||||
suggestion,
|
||||
wasReplaced,
|
||||
} = getFileSystemSuggestion(txt, true);
|
||||
|
||||
// If we replaced @path token with a directory, don't submit
|
||||
if (wasReplaced && suggestion?.isDirectory) {
|
||||
applyFsSuggestion(replacedText);
|
||||
// Update suggestions for the new directory
|
||||
updateFsSuggestions(replacedText, true);
|
||||
return;
|
||||
}
|
||||
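// Example of the directory case above (illustrative): pressing Enter on
// "@src/" with { path: "src/components/", isDirectory: true } selected
// rewrites the input to "@src/components/" and refreshes the completion
// list instead of submitting the message.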
|
||||
onSubmit(replacedText);
|
||||
setEditorState((s) => ({ key: s.key + 1 }));
|
||||
setInput("");
|
||||
setHistoryIndex(null);
|
||||
setDraftInput("");
|
||||
setInput(value);
|
||||
}}
|
||||
onSubmit={onSubmit}
|
||||
/>
|
||||
</Box>
|
||||
)}
|
||||
</Box>
|
||||
{/* Slash command autocomplete suggestions */}
|
||||
{input.trim().startsWith("/") && (
|
||||
<Box flexDirection="column" paddingX={2} marginBottom={1}>
|
||||
{SLASH_COMMANDS.filter((cmd: SlashCommand) =>
|
||||
cmd.command.startsWith(input.trim()),
|
||||
).map((cmd: SlashCommand, idx: number) => (
|
||||
<Box key={cmd.command}>
|
||||
<Text
|
||||
backgroundColor={
|
||||
idx === selectedSlashSuggestion ? "blackBright" : undefined
|
||||
}
|
||||
>
|
||||
<Text color="blueBright">{cmd.command}</Text>
|
||||
<Text> {cmd.description}</Text>
|
||||
</Text>
|
||||
</Box>
|
||||
))}
|
||||
</Box>
|
||||
)}
|
||||
<Box paddingX={2} marginBottom={1}>
|
||||
{isNew && !input ? (
|
||||
<Text dimColor>
|
||||
try:{" "}
|
||||
{suggestions.map((m, key) => (
|
||||
<Fragment key={key}>
|
||||
{key !== 0 ? " | " : ""}
|
||||
<Text
|
||||
backgroundColor={
|
||||
key + 1 === selectedSuggestion ? "blackBright" : ""
|
||||
}
|
||||
>
|
||||
{m}
|
||||
</Text>
|
||||
</Fragment>
|
||||
))}
|
||||
</Text>
|
||||
) : fsSuggestions.length > 0 ? (
|
||||
<TextCompletions
|
||||
completions={fsSuggestions.map((suggestion) => suggestion.path)}
|
||||
selectedCompletion={selectedCompletion}
|
||||
displayLimit={5}
|
||||
/>
|
||||
) : (
|
||||
<Text dimColor>
|
||||
ctrl+c to exit | "/" to see commands | enter to send
|
||||
{contextLeftPercent > 25 && (
|
||||
<>
|
||||
{" — "}
|
||||
<Text color={contextLeftPercent > 40 ? "green" : "yellow"}>
|
||||
{Math.round(contextLeftPercent)}% context left
|
||||
</Text>
|
||||
</>
|
||||
)}
|
||||
{contextLeftPercent <= 25 && (
|
||||
<>
|
||||
{" — "}
|
||||
<Text color="red">
|
||||
{Math.round(contextLeftPercent)}% context left — send
|
||||
"/compact" to condense context
|
||||
</Text>
|
||||
</>
|
||||
)}
|
||||
</Text>
|
||||
)}
|
||||
<Text dimColor>
|
||||
{isNew && !input ? (
|
||||
<>
|
||||
try:{" "}
|
||||
{suggestions.map((m, key) => (
|
||||
<Fragment key={key}>
|
||||
{key !== 0 ? " | " : ""}
|
||||
<Text
|
||||
backgroundColor={
|
||||
key + 1 === selectedSuggestion ? "blackBright" : ""
|
||||
}
|
||||
>
|
||||
{m}
|
||||
</Text>
|
||||
</Fragment>
|
||||
))}
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
send q or ctrl+c to exit | send "/clear" to reset | send "/help"
|
||||
for commands | press enter to send
|
||||
{contextLeftPercent < 25 && (
|
||||
<>
|
||||
{" — "}
|
||||
<Text color="red">
|
||||
{Math.round(contextLeftPercent)}% context left — send
|
||||
"/compact" to condense context
|
||||
</Text>
|
||||
</>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
@@ -882,42 +424,12 @@ export default function TerminalChatInput({
|
||||
function TerminalChatInputThinking({
|
||||
onInterrupt,
|
||||
active,
|
||||
thinkingSeconds,
|
||||
}: {
|
||||
onInterrupt: () => void;
|
||||
active: boolean;
|
||||
thinkingSeconds: number;
|
||||
}) {
|
||||
const [awaitingConfirm, setAwaitingConfirm] = useState(false);
|
||||
const [dots, setDots] = useState("");
|
||||
|
||||
// Animate ellipsis
|
||||
useInterval(() => {
|
||||
setDots((prev) => (prev.length < 3 ? prev + "." : ""));
|
||||
}, 500);
|
||||
|
||||
// Spinner frames with embedded seconds
|
||||
const ballFrames = [
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ●)",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"( ● )",
|
||||
"(● )",
|
||||
];
|
||||
const [frame, setFrame] = useState(0);
|
||||
|
||||
useInterval(() => {
|
||||
setFrame((idx) => (idx + 1) % ballFrames.length);
|
||||
}, 80);
|
||||
|
||||
// Keep the elapsed‑seconds text fixed while the ball animation moves.
|
||||
const frameTemplate = ballFrames[frame] ?? ballFrames[0];
|
||||
const frameWithSeconds = `${frameTemplate} ${thinkingSeconds}s`;
|
||||
const [awaitingConfirm, setAwaitingConfirm] = useState(false);
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
// Raw stdin listener to catch the case where the terminal delivers two
|
||||
@@ -948,9 +460,11 @@ function TerminalChatInputThinking({
|
||||
const str = Buffer.isBuffer(data) ? data.toString("utf8") : data;
|
||||
if (str === "\x1b\x1b") {
|
||||
// Treat as the first Escape press – prompt the user for confirmation.
|
||||
log(
|
||||
"raw stdin: received collapsed ESC ESC – starting confirmation timer",
|
||||
);
|
||||
if (isLoggingEnabled()) {
|
||||
log(
|
||||
"raw stdin: received collapsed ESC ESC – starting confirmation timer",
|
||||
);
|
||||
}
|
||||
setAwaitingConfirm(true);
|
||||
setTimeout(() => setAwaitingConfirm(false), 1500);
|
||||
}
|
||||
@@ -963,7 +477,10 @@ function TerminalChatInputThinking({
|
||||
};
|
||||
}, [stdin, awaitingConfirm, onInterrupt, active, setRawMode]);
|
||||
|
||||
// No local timer: the parent component supplies the elapsed time via props.
|
||||
// Cycle the "Thinking…" animation dots.
|
||||
useInterval(() => {
|
||||
setDots((prev) => (prev.length < 3 ? prev + "." : ""));
|
||||
}, 500);
|
||||
|
||||
// Listen for the escape key to allow the user to interrupt the current
|
||||
// operation. We require two presses within a short window (1.5s) to avoid
|
||||
@@ -975,11 +492,15 @@ function TerminalChatInputThinking({
|
||||
}
|
||||
|
||||
if (awaitingConfirm) {
|
||||
log("useInput: second ESC detected – triggering onInterrupt()");
|
||||
if (isLoggingEnabled()) {
|
||||
log("useInput: second ESC detected – triggering onInterrupt()");
|
||||
}
|
||||
onInterrupt();
|
||||
setAwaitingConfirm(false);
|
||||
} else {
|
||||
log("useInput: first ESC detected – waiting for confirmation");
|
||||
if (isLoggingEnabled()) {
|
||||
log("useInput: first ESC detected – waiting for confirmation");
|
||||
}
|
||||
setAwaitingConfirm(true);
|
||||
setTimeout(() => setAwaitingConfirm(false), 1500);
|
||||
}
|
||||
@@ -988,30 +509,17 @@ function TerminalChatInputThinking({
|
||||
);
|
||||
|
||||
return (
|
||||
<Box width="100%" flexDirection="column" gap={1}>
|
||||
<Box
|
||||
flexDirection="row"
|
||||
width="100%"
|
||||
justifyContent="space-between"
|
||||
paddingRight={1}
|
||||
>
|
||||
<Box gap={2}>
|
||||
<Text>{frameWithSeconds}</Text>
|
||||
<Text>
|
||||
Thinking
|
||||
{dots}
|
||||
</Text>
|
||||
</Box>
|
||||
<Text>
|
||||
<Text dimColor>press</Text> <Text bold>Esc</Text>{" "}
|
||||
{awaitingConfirm ? (
|
||||
<Text bold>again</Text>
|
||||
) : (
|
||||
<Text dimColor>twice</Text>
|
||||
)}{" "}
|
||||
<Text dimColor>to interrupt</Text>
|
||||
</Text>
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Box gap={2}>
|
||||
<Spinner type="ball" />
|
||||
<Text>Thinking{dots}</Text>
|
||||
</Box>
|
||||
{awaitingConfirm && (
|
||||
<Text dimColor>
|
||||
Press <Text bold>Esc</Text> again to interrupt and enter a new
|
||||
instruction
|
||||
</Text>
|
||||
)}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
563 codex-cli/src/components/chat/terminal-chat-new-input.tsx Normal file
@@ -0,0 +1,563 @@
|
||||
import type { MultilineTextEditorHandle } from "./multiline-editor";
|
||||
import type { ReviewDecision } from "../../utils/agent/review.js";
|
||||
import type { HistoryEntry } from "../../utils/storage/command-history.js";
|
||||
import type {
|
||||
ResponseInputItem,
|
||||
ResponseItem,
|
||||
} from "openai/resources/responses/responses.mjs";
|
||||
|
||||
import MultilineTextEditor from "./multiline-editor";
|
||||
import { TerminalChatCommandReview } from "./terminal-chat-command-review.js";
|
||||
import { log, isLoggingEnabled } from "../../utils/agent/log.js";
|
||||
import { loadConfig } from "../../utils/config.js";
|
||||
import { createInputItem } from "../../utils/input-utils.js";
|
||||
import { printAndResetSessionSummary } from "../../utils/session-cost.js";
|
||||
import { setSessionId } from "../../utils/session.js";
|
||||
import {
|
||||
loadCommandHistory,
|
||||
addToHistory,
|
||||
} from "../../utils/storage/command-history.js";
|
||||
import { clearTerminal, onExit } from "../../utils/terminal.js";
|
||||
import Spinner from "../vendor/ink-spinner.js";
|
||||
import { Box, Text, useApp, useInput, useStdin } from "ink";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import React, { useCallback, useState, Fragment, useEffect } from "react";
|
||||
import { useInterval } from "use-interval";
|
||||
|
||||
const suggestions = [
|
||||
"explain this codebase to me",
|
||||
"fix any build errors",
|
||||
"are there any bugs in my code?",
|
||||
];
|
||||
|
||||
const typeHelpText = `ctrl+c to exit | "/clear" to reset context | "/help" for commands | ↑↓ to recall history | ctrl+x to open external editor | enter to send`;
|
||||
|
||||
// Enable verbose logging for the history‑navigation logic when the
|
||||
// DEBUG_TCI environment variable is truthy. The traces help while debugging
|
||||
// unit‑test failures but remain silent in production.
|
||||
const DEBUG_HIST =
|
||||
process.env["DEBUG_TCI"] === "1" || process.env["DEBUG_TCI"] === "true";
|
||||
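// For example (assumed invocation – the exact test command is not part of
// this diff): setting DEBUG_TCI=1 in the environment enables the console
// traces below while running the input-component unit tests.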
|
||||
const thinkingTexts = ["Thinking"]; /* [
|
||||
"Consulting the rubber duck",
|
||||
"Maximizing paperclips",
|
||||
"Reticulating splines",
|
||||
"Immanentizing the Eschaton",
|
||||
"Thinking",
|
||||
"Thinking about thinking",
|
||||
"Spinning in circles",
|
||||
"Counting dust specks",
|
||||
"Updating priors",
|
||||
"Feeding the utility monster",
|
||||
"Taking off",
|
||||
"Wireheading",
|
||||
"Counting to infinity",
|
||||
"Staring into the Basilisk",
|
||||
"Running acausal tariff negotiations",
|
||||
"Searching the library of babel",
|
||||
"Multiplying matrices",
|
||||
"Solving the halting problem",
|
||||
"Counting grains of sand",
|
||||
"Simulating a simulation",
|
||||
"Asking the oracle",
|
||||
"Detangling qubits",
|
||||
"Reading tea leaves",
|
||||
"Pondering universal love and transcendent joy",
|
||||
"Feeling the AGI",
|
||||
"Shaving the yak",
|
||||
"Escaping local minima",
|
||||
"Pruning the search tree",
|
||||
"Descending the gradient",
|
||||
"Painting the bikeshed",
|
||||
"Securing funding",
|
||||
]; */
|
||||
|
||||
export default function TerminalChatInput({
|
||||
isNew: _isNew,
|
||||
loading,
|
||||
submitInput,
|
||||
confirmationPrompt,
|
||||
explanation,
|
||||
submitConfirmation,
|
||||
setLastResponseId,
|
||||
setItems,
|
||||
contextLeftPercent,
|
||||
openOverlay,
|
||||
openModelOverlay,
|
||||
openApprovalOverlay,
|
||||
openHelpOverlay,
|
||||
interruptAgent,
|
||||
active,
|
||||
}: {
|
||||
isNew: boolean;
|
||||
loading: boolean;
|
||||
submitInput: (input: Array<ResponseInputItem>) => void;
|
||||
confirmationPrompt: React.ReactNode | null;
|
||||
explanation?: string;
|
||||
submitConfirmation: (
|
||||
decision: ReviewDecision,
|
||||
customDenyMessage?: string,
|
||||
) => void;
|
||||
setLastResponseId: (lastResponseId: string) => void;
|
||||
setItems: React.Dispatch<React.SetStateAction<Array<ResponseItem>>>;
|
||||
contextLeftPercent: number;
|
||||
openOverlay: () => void;
|
||||
openModelOverlay: () => void;
|
||||
openApprovalOverlay: () => void;
|
||||
openHelpOverlay: () => void;
|
||||
interruptAgent: () => void;
|
||||
active: boolean;
|
||||
}): React.ReactElement {
|
||||
const app = useApp();
|
||||
const [selectedSuggestion, setSelectedSuggestion] = useState<number>(0);
|
||||
const [input, setInput] = useState("");
|
||||
const [history, setHistory] = useState<Array<HistoryEntry>>([]);
|
||||
const [historyIndex, setHistoryIndex] = useState<number | null>(null);
|
||||
const [draftInput, setDraftInput] = useState<string>("");
|
||||
// Multiline text editor is now the default input mode. We keep an
|
||||
// incremental `editorKey` so that we can force‑remount the component and
|
||||
// thus reset its internal buffer after each successful submit.
|
||||
const [editorKey, setEditorKey] = useState(0);
|
||||
|
||||
// Load command history on component mount
|
||||
useEffect(() => {
|
||||
async function loadHistory() {
|
||||
const historyEntries = await loadCommandHistory();
|
||||
setHistory(historyEntries);
|
||||
}
|
||||
|
||||
loadHistory();
|
||||
}, []);
|
||||
|
||||
// Imperative handle from the multiline editor so we can query caret position
|
||||
const editorRef = React.useRef<MultilineTextEditorHandle | null>(null);
|
||||
|
||||
// Track the caret row across keystrokes so we can tell whether the cursor
|
||||
// was *already* on the first/last line before the current key event. This
|
||||
// lets us distinguish between a normal vertical navigation (e.g. moving
|
||||
// from row 1 → row 0 inside a multi‑line draft) and an attempt to navigate
|
||||
// the chat history (pressing ↑ again while already at row 0).
|
||||
const prevCursorRow = React.useRef<number | null>(null);
|
||||
|
||||
useInput(
|
||||
(_input, _key) => {
|
||||
if (!confirmationPrompt && !loading) {
|
||||
if (_key.upArrow) {
|
||||
if (DEBUG_HIST) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log("[TCI] upArrow", {
|
||||
historyIndex,
|
||||
input,
|
||||
cursorRow: editorRef.current?.getRow?.(),
|
||||
});
|
||||
}
|
||||
// Only recall history when the caret was *already* on the very first
|
||||
// row *before* this key‑press. That means the user pressed ↑ while
|
||||
// the cursor sat at the top – mirroring how shells like Bash/zsh
|
||||
// enter history navigation. When the caret starts on a lower line
|
||||
// the first ↑ should merely move it up one row; only a subsequent
|
||||
// press (when we are *still* at row 0) should trigger the recall.
|
||||
|
||||
const cursorRow = editorRef.current?.getRow?.() ?? 0;
|
||||
const wasAtFirstRow = (prevCursorRow.current ?? cursorRow) === 0;
|
||||
|
||||
if (history.length > 0 && cursorRow === 0 && wasAtFirstRow) {
|
||||
if (historyIndex == null) {
|
||||
const currentDraft = editorRef.current?.getText?.() ?? input;
|
||||
setDraftInput(currentDraft);
|
||||
if (DEBUG_HIST) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log("[TCI] store draft", JSON.stringify(currentDraft));
|
||||
}
|
||||
}
|
||||
|
||||
let newIndex: number;
|
||||
if (historyIndex == null) {
|
||||
newIndex = history.length - 1;
|
||||
} else {
|
||||
newIndex = Math.max(0, historyIndex - 1);
|
||||
}
|
||||
setHistoryIndex(newIndex);
|
||||
setInput(history[newIndex]?.command ?? "");
|
||||
// Re‑mount the editor so it picks up the new initialText.
|
||||
setEditorKey((k) => k + 1);
|
||||
return; // we handled the key
|
||||
}
|
||||
// Otherwise let the event propagate so the editor moves the caret.
|
||||
}
|
||||
|
||||
if (_key.downArrow) {
|
||||
if (DEBUG_HIST) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log("[TCI] downArrow", { historyIndex, draftInput, input });
|
||||
}
|
||||
// Only move forward in history when we're already *in* history mode
|
||||
// AND the caret sits on the last line of the buffer (so ↓ within a
|
||||
// multi‑line draft simply moves the caret down).
|
||||
if (historyIndex != null && editorRef.current?.isCursorAtLastRow()) {
|
||||
const newIndex = historyIndex + 1;
|
||||
if (newIndex >= history.length) {
|
||||
setHistoryIndex(null);
|
||||
setInput(draftInput);
|
||||
setEditorKey((k) => k + 1);
|
||||
} else {
|
||||
setHistoryIndex(newIndex);
|
||||
setInput(history[newIndex]?.command ?? "");
|
||||
setEditorKey((k) => k + 1);
|
||||
}
|
||||
return; // handled
|
||||
}
|
||||
// Otherwise let it propagate.
|
||||
}
|
||||
}
|
||||
|
||||
if (input.trim() === "") {
|
||||
if (_key.tab) {
|
||||
setSelectedSuggestion(
|
||||
(s) => (s + (_key.shift ? -1 : 1)) % (suggestions.length + 1),
|
||||
);
|
||||
} else if (selectedSuggestion && _key.return) {
|
||||
const suggestion = suggestions[selectedSuggestion - 1] || "";
|
||||
setInput("");
|
||||
setSelectedSuggestion(0);
|
||||
submitInput([
|
||||
{
|
||||
role: "user",
|
||||
content: [{ type: "input_text", text: suggestion }],
|
||||
type: "message",
|
||||
},
|
||||
]);
|
||||
}
|
||||
} else if (_input === "\u0003" || (_input === "c" && _key.ctrl)) {
|
||||
setTimeout(() => {
|
||||
app.exit();
|
||||
onExit();
|
||||
process.exit(0);
|
||||
}, 60);
|
||||
}
|
||||
|
||||
// Update the cached cursor position *after* we've potentially handled
|
||||
// the key so that the next event has the correct "previous" reference.
|
||||
prevCursorRow.current = editorRef.current?.getRow?.() ?? null;
|
||||
},
|
||||
{ isActive: active },
|
||||
);
|
||||
|
||||
const onSubmit = useCallback(
|
||||
async (value: string) => {
|
||||
const inputValue = value.trim();
|
||||
if (!inputValue) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (inputValue === "/history") {
|
||||
setInput("");
|
||||
openOverlay();
|
||||
return;
|
||||
}
|
||||
|
||||
if (inputValue === "/help") {
|
||||
setInput("");
|
||||
openHelpOverlay();
|
||||
return;
|
||||
}
|
||||
|
||||
if (inputValue.startsWith("/model")) {
|
||||
setInput("");
|
||||
openModelOverlay();
|
||||
return;
|
||||
}
|
||||
|
||||
if (inputValue.startsWith("/approval")) {
|
||||
setInput("");
|
||||
openApprovalOverlay();
|
||||
return;
|
||||
}
|
||||
|
||||
if (inputValue === "q" || inputValue === ":q" || inputValue === "exit") {
|
||||
setInput("");
|
||||
// wait one 60ms frame
|
||||
setTimeout(() => {
|
||||
app.exit();
|
||||
onExit();
|
||||
process.exit(0);
|
||||
}, 60);
|
||||
return;
|
||||
} else if (inputValue === "/clear" || inputValue === "clear") {
|
||||
setInput("");
|
||||
setSessionId("");
|
||||
setLastResponseId("");
|
||||
|
||||
// Clear screen then display session summary so the user sees it.
|
||||
clearTerminal();
|
||||
|
||||
printAndResetSessionSummary();
|
||||
|
||||
// Emit a system message to confirm the clear action. We *append*
|
||||
// it so Ink's <Static> treats it as new output and actually renders it.
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
id: `clear-${Date.now()}`,
|
||||
type: "message",
|
||||
role: "system",
|
||||
content: [{ type: "input_text", text: "Context cleared" }],
|
||||
},
|
||||
]);
|
||||
|
||||
return;
|
||||
} else if (inputValue === "/clearhistory") {
|
||||
setInput("");
|
||||
|
||||
// Import clearCommandHistory function to avoid circular dependencies
|
||||
// Using dynamic import to lazy-load the function
|
||||
import("../../utils/storage/command-history.js").then(
|
||||
async ({ clearCommandHistory }) => {
|
||||
await clearCommandHistory();
|
||||
setHistory([]);
|
||||
|
||||
// Emit a system message to confirm the history clear action
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
id: `clearhistory-${Date.now()}`,
|
||||
type: "message",
|
||||
role: "system",
|
||||
content: [
|
||||
{ type: "input_text", text: "Command history cleared" },
|
||||
],
|
||||
},
|
||||
]);
|
||||
},
|
||||
);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
const images: Array<string> = [];
|
||||
const text = inputValue
|
||||
.replace(/!\[[^\]]*?\]\(([^)]+)\)/g, (_m, p1: string) => {
|
||||
images.push(p1.startsWith("file://") ? fileURLToPath(p1) : p1);
|
||||
return "";
|
||||
})
|
||||
.trim();
|
||||
|
||||
const inputItem = await createInputItem(text, images);
|
||||
submitInput([inputItem]);
|
||||
|
||||
// Get config for history persistence
|
||||
const config = loadConfig();
|
||||
|
||||
// Add to history and update state
|
||||
const updatedHistory = await addToHistory(value, history, {
|
||||
maxSize: config.history?.maxSize ?? 1000,
|
||||
saveHistory: config.history?.saveHistory ?? true,
|
||||
sensitivePatterns: config.history?.sensitivePatterns ?? [],
|
||||
});
|
||||
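// The history block read from loadConfig() is assumed to look roughly like
// this (field names taken from the accesses above; the rest of the config
// shape is not shown in this diff):
//   { history: { maxSize: 1000, saveHistory: true, sensitivePatterns: [] } }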
|
||||
setHistory(updatedHistory);
|
||||
setHistoryIndex(null);
|
||||
setDraftInput("");
|
||||
setSelectedSuggestion(0);
|
||||
setInput("");
|
||||
},
|
||||
[
|
||||
setInput,
|
||||
submitInput,
|
||||
setLastResponseId,
|
||||
setItems,
|
||||
app,
|
||||
setHistory,
|
||||
setHistoryIndex,
|
||||
openOverlay,
|
||||
openApprovalOverlay,
|
||||
openModelOverlay,
|
||||
openHelpOverlay,
|
||||
history, // Add history to the dependency array
|
||||
],
|
||||
);
|
||||
|
||||
if (confirmationPrompt) {
|
||||
return (
|
||||
<TerminalChatCommandReview
|
||||
confirmationPrompt={confirmationPrompt}
|
||||
onReviewCommand={submitConfirmation}
|
||||
explanation={explanation}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Box flexDirection="column">
|
||||
{loading ? (
|
||||
<Box borderStyle="round">
|
||||
<TerminalChatInputThinking
|
||||
onInterrupt={interruptAgent}
|
||||
active={active}
|
||||
/>
|
||||
</Box>
|
||||
) : (
|
||||
<>
|
||||
<Box borderStyle="round">
|
||||
<MultilineTextEditor
|
||||
ref={editorRef}
|
||||
onChange={(txt: string) => setInput(txt)}
|
||||
key={editorKey}
|
||||
initialText={input}
|
||||
height={8}
|
||||
focus={active}
|
||||
onSubmit={(txt) => {
|
||||
onSubmit(txt);
|
||||
|
||||
setEditorKey((k) => k + 1);
|
||||
|
||||
setInput("");
|
||||
setHistoryIndex(null);
|
||||
setDraftInput("");
|
||||
}}
|
||||
/>
|
||||
</Box>
|
||||
<Box paddingX={2} marginBottom={1}>
|
||||
<Text dimColor>
|
||||
{!input ? (
|
||||
<>
|
||||
try:{" "}
|
||||
{suggestions.map((m, key) => (
|
||||
<Fragment key={key}>
|
||||
{key !== 0 ? " | " : ""}
|
||||
<Text
|
||||
backgroundColor={
|
||||
key + 1 === selectedSuggestion ? "blackBright" : ""
|
||||
}
|
||||
>
|
||||
{m}
|
||||
</Text>
|
||||
</Fragment>
|
||||
))}
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
{typeHelpText}
|
||||
{contextLeftPercent < 25 && (
|
||||
<>
|
||||
{" — "}
|
||||
<Text color="red">
|
||||
{Math.round(contextLeftPercent)}% context left
|
||||
</Text>
|
||||
</>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</Text>
|
||||
</Box>
|
||||
</>
|
||||
)}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
function TerminalChatInputThinking({
|
||||
onInterrupt,
|
||||
active,
|
||||
}: {
|
||||
onInterrupt: () => void;
|
||||
active: boolean;
|
||||
}) {
|
||||
const [dots, setDots] = useState("");
|
||||
const [awaitingConfirm, setAwaitingConfirm] = useState(false);
|
||||
|
||||
const [thinkingText] = useState(
|
||||
() => thinkingTexts[Math.floor(Math.random() * thinkingTexts.length)],
|
||||
);
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
// Raw stdin listener to catch the case where the terminal delivers two
|
||||
// consecutive ESC bytes ("\x1B\x1B") in a *single* chunk. Ink's `useInput`
|
||||
// collapses that sequence into one key event, so the regular two‑step
|
||||
// handler above never sees the second press. By inspecting the raw data
|
||||
// we can identify this special case and trigger the interrupt while still
|
||||
// requiring a double press for the normal single‑byte ESC events.
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
const { stdin, setRawMode } = useStdin();
|
||||
|
||||
React.useEffect(() => {
|
||||
if (!active) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Ensure raw mode – already enabled by Ink when the component has focus,
|
||||
// but called defensively in case that assumption ever changes.
|
||||
setRawMode?.(true);
|
||||
|
||||
const onData = (data: Buffer | string) => {
|
||||
if (awaitingConfirm) {
|
||||
return; // already awaiting a second explicit press
|
||||
}
|
||||
|
||||
// Handle both Buffer and string forms.
|
||||
const str = Buffer.isBuffer(data) ? data.toString("utf8") : data;
|
||||
if (str === "\x1b\x1b") {
|
||||
// Treat as the first Escape press – prompt the user for confirmation.
|
||||
if (isLoggingEnabled()) {
|
||||
log(
|
||||
"raw stdin: received collapsed ESC ESC – starting confirmation timer",
|
||||
);
|
||||
}
|
||||
setAwaitingConfirm(true);
|
||||
setTimeout(() => setAwaitingConfirm(false), 1500);
|
||||
}
|
||||
};
|
||||
|
||||
stdin?.on("data", onData);
|
||||
|
||||
return () => {
|
||||
stdin?.off("data", onData);
|
||||
};
|
||||
}, [stdin, awaitingConfirm, onInterrupt, active, setRawMode]);
|
||||
|
||||
useInterval(() => {
|
||||
setDots((prev) => (prev.length < 3 ? prev + "." : ""));
|
||||
}, 500);
|
||||
|
||||
useInput(
|
||||
(_input, key) => {
|
||||
if (!key.escape) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (awaitingConfirm) {
|
||||
if (isLoggingEnabled()) {
|
||||
log("useInput: second ESC detected – triggering onInterrupt()");
|
||||
}
|
||||
onInterrupt();
|
||||
setAwaitingConfirm(false);
|
||||
} else {
|
||||
if (isLoggingEnabled()) {
|
||||
log("useInput: first ESC detected – waiting for confirmation");
|
||||
}
|
||||
setAwaitingConfirm(true);
|
||||
setTimeout(() => setAwaitingConfirm(false), 1500);
|
||||
}
|
||||
},
|
||||
{ isActive: active },
|
||||
);
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Box gap={2}>
|
||||
<Spinner type="ball" />
|
||||
<Text>
|
||||
{thinkingText}
|
||||
{dots}
|
||||
</Text>
|
||||
</Box>
|
||||
{awaitingConfirm && (
|
||||
<Text dimColor>
|
||||
Press <Text bold>Esc</Text> again to interrupt and enter a new
|
||||
instruction
|
||||
</Text>
|
||||
)}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -1,6 +1,5 @@
|
||||
import type { TerminalChatSession } from "../../utils/session.js";
|
||||
import type { ResponseItem } from "openai/resources/responses/responses";
|
||||
import type { FileOpenerScheme } from "src/utils/config.js";
|
||||
|
||||
import TerminalChatResponseItem from "./terminal-chat-response-item";
|
||||
import { Box, Text } from "ink";
|
||||
@@ -9,11 +8,9 @@ import React from "react";
|
||||
export default function TerminalChatPastRollout({
|
||||
session,
|
||||
items,
|
||||
fileOpener,
|
||||
}: {
|
||||
session: TerminalChatSession;
|
||||
items: Array<ResponseItem>;
|
||||
fileOpener: FileOpenerScheme | undefined;
|
||||
}): React.ReactElement {
|
||||
const { version, id: sessionId, model } = session;
|
||||
return (
|
||||
@@ -54,13 +51,9 @@ export default function TerminalChatPastRollout({
|
||||
{React.useMemo(
|
||||
() =>
|
||||
items.map((item, key) => (
|
||||
<TerminalChatResponseItem
|
||||
key={key}
|
||||
item={item}
|
||||
fileOpener={fileOpener}
|
||||
/>
|
||||
<TerminalChatResponseItem key={key} item={item} />
|
||||
)),
|
||||
[items, fileOpener],
|
||||
[items],
|
||||
)}
|
||||
</Box>
|
||||
</Box>
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import type { OverlayModeType } from "./terminal-chat";
|
||||
import type { TerminalRendererOptions } from "marked-terminal";
|
||||
import type {
|
||||
ResponseFunctionToolCallItem,
|
||||
@@ -8,46 +7,27 @@ import type {
|
||||
ResponseOutputMessage,
|
||||
ResponseReasoningItem,
|
||||
} from "openai/resources/responses/responses";
|
||||
import type { FileOpenerScheme } from "src/utils/config";
|
||||
|
||||
import { useTerminalSize } from "../../hooks/use-terminal-size";
|
||||
import { collapseXmlBlocks } from "../../utils/file-tag-utils";
|
||||
import { parseToolCall, parseToolCallOutput } from "../../utils/parsers";
|
||||
import chalk, { type ForegroundColorName } from "chalk";
|
||||
import { Box, Text } from "ink";
|
||||
import { parse, setOptions } from "marked";
|
||||
import TerminalRenderer from "marked-terminal";
|
||||
import path from "path";
|
||||
import React, { useEffect, useMemo } from "react";
|
||||
import { formatCommandForDisplay } from "src/format-command.js";
|
||||
import supportsHyperlinks from "supports-hyperlinks";
|
||||
import React, { useMemo } from "react";
|
||||
|
||||
export default function TerminalChatResponseItem({
|
||||
item,
|
||||
fullStdout = false,
|
||||
setOverlayMode,
|
||||
fileOpener,
|
||||
}: {
|
||||
item: ResponseItem;
|
||||
fullStdout?: boolean;
|
||||
setOverlayMode?: React.Dispatch<React.SetStateAction<OverlayModeType>>;
|
||||
fileOpener: FileOpenerScheme | undefined;
|
||||
}): React.ReactElement {
|
||||
switch (item.type) {
|
||||
case "message":
|
||||
return (
|
||||
<TerminalChatResponseMessage
|
||||
setOverlayMode={setOverlayMode}
|
||||
message={item}
|
||||
fileOpener={fileOpener}
|
||||
/>
|
||||
);
|
||||
// @ts-expect-error new item types aren't in SDK yet
|
||||
case "local_shell_call":
|
||||
return <TerminalChatResponseMessage message={item} />;
|
||||
case "function_call":
|
||||
return <TerminalChatResponseToolCall message={item} />;
|
||||
// @ts-expect-error new item types aren't in SDK yet
|
||||
case "local_shell_call_output":
|
||||
case "function_call_output":
|
||||
return (
|
||||
<TerminalChatResponseToolCallOutput
|
||||
@@ -61,9 +41,7 @@ export default function TerminalChatResponseItem({
|
||||
|
||||
// @ts-expect-error `reasoning` is not in the responses API yet
|
||||
if (item.type === "reasoning") {
|
||||
return (
|
||||
<TerminalChatResponseReasoning message={item} fileOpener={fileOpener} />
|
||||
);
|
||||
return <TerminalChatResponseReasoning message={item} />;
|
||||
}
|
||||
|
||||
return <TerminalChatResponseGenericMessage message={item} />;
|
||||
@@ -91,10 +69,8 @@ export default function TerminalChatResponseItem({
|
||||
|
||||
export function TerminalChatResponseReasoning({
|
||||
message,
|
||||
fileOpener,
|
||||
}: {
|
||||
message: ResponseReasoningItem & { duration_ms?: number };
|
||||
fileOpener: FileOpenerScheme | undefined;
|
||||
}): React.ReactElement | null {
|
||||
// Only render when there is a reasoning summary
|
||||
if (!message.summary || message.summary.length === 0) {
|
||||
@@ -107,7 +83,7 @@ export function TerminalChatResponseReasoning({
|
||||
return (
|
||||
<Box key={key} flexDirection="column">
|
||||
{s.headline && <Text bold>{s.headline}</Text>}
|
||||
<Markdown fileOpener={fileOpener}>{s.text}</Markdown>
|
||||
<Markdown>{s.text}</Markdown>
|
||||
</Box>
|
||||
);
|
||||
})}
|
||||
@@ -122,45 +98,29 @@ const colorsByRole: Record<string, ForegroundColorName> = {
|
||||
|
||||
function TerminalChatResponseMessage({
|
||||
message,
|
||||
setOverlayMode,
|
||||
fileOpener,
|
||||
}: {
|
||||
message: ResponseInputMessageItem | ResponseOutputMessage;
|
||||
setOverlayMode?: React.Dispatch<React.SetStateAction<OverlayModeType>>;
|
||||
fileOpener: FileOpenerScheme | undefined;
|
||||
}) {
|
||||
// auto switch to model mode if the system message indicates the model was not found
|
||||
useEffect(() => {
|
||||
if (message.role === "system") {
|
||||
const systemMessage = message.content.find(
|
||||
(c) => c.type === "input_text",
|
||||
)?.text;
|
||||
if (systemMessage?.includes("model_not_found")) {
|
||||
setOverlayMode?.("model");
|
||||
}
|
||||
}
|
||||
}, [message, setOverlayMode]);
|
||||
|
||||
return (
|
||||
<Box flexDirection="column">
|
||||
<Text bold color={colorsByRole[message.role] || "gray"}>
|
||||
{message.role === "assistant" ? "codex" : message.role}
|
||||
</Text>
|
||||
<Markdown fileOpener={fileOpener}>
|
||||
<Markdown>
|
||||
{message.content
|
||||
.map(
|
||||
(c) =>
|
||||
c.type === "output_text"
|
||||
? c.text
|
||||
: c.type === "refusal"
|
||||
? c.refusal
|
||||
: c.type === "input_text"
|
||||
? collapseXmlBlocks(c.text)
|
||||
: c.type === "input_image"
|
||||
? "<Image>"
|
||||
: c.type === "input_file"
|
||||
? c.filename
|
||||
: "", // unknown content type
|
||||
? c.refusal
|
||||
: c.type === "input_text"
|
||||
? c.text
|
||||
: c.type === "input_image"
|
||||
? "<Image>"
|
||||
: c.type === "input_file"
|
||||
? c.filename
|
||||
: "", // unknown content type
|
||||
)
|
||||
.join(" ")}
|
||||
</Markdown>
|
||||
@@ -171,28 +131,16 @@ function TerminalChatResponseMessage({
|
||||
function TerminalChatResponseToolCall({
|
||||
message,
|
||||
}: {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
message: ResponseFunctionToolCallItem | any;
|
||||
message: ResponseFunctionToolCallItem;
|
||||
}) {
|
||||
let workdir: string | undefined;
|
||||
let cmdReadableText: string | undefined;
|
||||
if (message.type === "function_call") {
|
||||
const details = parseToolCall(message);
|
||||
workdir = details?.workdir;
|
||||
cmdReadableText = details?.cmdReadableText;
|
||||
} else if (message.type === "local_shell_call") {
|
||||
const action = message.action;
|
||||
workdir = action.working_directory;
|
||||
cmdReadableText = formatCommandForDisplay(action.command);
|
||||
}
|
||||
const details = parseToolCall(message);
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text color="magentaBright" bold>
|
||||
command
|
||||
{workdir ? <Text dimColor>{` (${workdir})`}</Text> : ""}
|
||||
</Text>
|
||||
<Text>
|
||||
<Text dimColor>$</Text> {cmdReadableText}
|
||||
<Text dimColor>$</Text> {details?.cmdReadableText}
|
||||
</Text>
|
||||
</Box>
|
||||
);
|
||||
@@ -202,8 +150,7 @@ function TerminalChatResponseToolCallOutput({
|
||||
message,
|
||||
fullStdout,
|
||||
}: {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
message: ResponseFunctionToolCallOutputItem | any;
|
||||
message: ResponseFunctionToolCallOutputItem;
|
||||
fullStdout: boolean;
|
||||
}) {
|
||||
const { output, metadata } = parseToolCallOutput(message.output);
|
||||
@@ -270,91 +217,26 @@ export function TerminalChatResponseGenericMessage({
|
||||
|
||||
export type MarkdownProps = TerminalRendererOptions & {
|
||||
children: string;
|
||||
fileOpener: FileOpenerScheme | undefined;
|
||||
/** Base path for resolving relative file citation paths. */
|
||||
cwd?: string;
|
||||
};
|
||||
|
||||
export function Markdown({
|
||||
children,
|
||||
fileOpener,
|
||||
cwd,
|
||||
...options
|
||||
}: MarkdownProps): React.ReactElement {
|
||||
const size = useTerminalSize();
|
||||
|
||||
const rendered = React.useMemo(() => {
|
||||
const linkifiedMarkdown = rewriteFileCitations(children, fileOpener, cwd);
|
||||
|
||||
// Configure marked for this specific render
|
||||
setOptions({
|
||||
// @ts-expect-error missing parser, space props
|
||||
renderer: new TerminalRenderer({ ...options, width: size.columns }),
|
||||
});
|
||||
const parsed = parse(linkifiedMarkdown, { async: false }).trim();
|
||||
const parsed = parse(children, { async: false }).trim();
|
||||
|
||||
// Remove the truncation logic
|
||||
return parsed;
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps -- options is an object of primitives
|
||||
}, [
|
||||
children,
|
||||
size.columns,
|
||||
size.rows,
|
||||
fileOpener,
|
||||
supportsHyperlinks.stdout,
|
||||
chalk.level,
|
||||
]);
|
||||
}, [children, size.columns, size.rows]);
|
||||
|
||||
return <Text>{rendered}</Text>;
|
||||
}
|
||||
|
||||
/** Regex to match citations for source files (hence the `F:` prefix). */
const citationRegex = new RegExp(
  [
    // Opening marker
    "【",

    // Capture group 1: file ID or name (anything except '†')
    "F:([^†]+)",

    // Field separator
    "†",

    // Capture group 2: start line (digits)
    "L(\\d+)",

    // Non-capturing group for optional end line
    "(?:",

    // Capture group 3: end line (digits or '?')
    "-L(\\d+|\\?)",

    // End of optional group (may not be present)
    ")?",

    // Closing marker
    "】",
  ].join(""),
  "g", // Global flag
);

function rewriteFileCitations(
  markdown: string,
  fileOpener: FileOpenerScheme | undefined,
  cwd: string = process.cwd(),
): string {
  citationRegex.lastIndex = 0;
  return markdown.replace(citationRegex, (_match, file, start, _end) => {
    const absPath = path.resolve(cwd, file);
    if (!fileOpener) {
      return `[${file}](${absPath})`;
    }
    const uri = `${fileOpener}://file${absPath}:${start}`;
    const label = `${file}:${start}`;
    // In practice, sometimes multiple citations for the same file, but with a
    // different line number, are shown sequentially, so we:
    // - include the line number in the label to disambiguate them
    // - add a space after the link to make it easier to read
    return `[${label}](${uri}) `;
  });
}
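// Editor's note — illustrative sketch, not part of the diff. It shows what the
// rewriteFileCitations helper above produces for one citation marker; the path
// "src/app.ts", the cwd "/repo" and the "vscode" opener value are hypothetical.
const citedText = "See 【F:src/app.ts†L42-L57】 for details.";
rewriteFileCitations(citedText, undefined, "/repo");
// => "See [src/app.ts](/repo/src/app.ts) for details."
rewriteFileCitations(citedText, "vscode" as FileOpenerScheme, "/repo");
// => "See [src/app.ts:42](vscode://file/repo/src/app.ts:42)  for details."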
135
codex-cli/src/components/chat/terminal-chat-utils.ts
Normal file
@@ -0,0 +1,135 @@
import type { ResponseItem } from "openai/resources/responses/responses.mjs";

import { approximateTokensUsed } from "../../utils/approximate-tokens-used.js";

/**
 * Type‑guard that narrows a {@link ResponseItem} to one that represents a
 * user‑authored message. The OpenAI SDK represents both input *and* output
 * messages with a discriminated union where:
 *   • `type` is the string literal "message" and
 *   • `role` is one of "user" | "assistant" | "system" | "developer".
 *
 * For the purposes of de‑duplication we only care about *user* messages so we
 * detect those here in a single, reusable helper.
 */
function isUserMessage(
  item: ResponseItem,
): item is ResponseItem & { type: "message"; role: "user"; content: unknown } {
  return item.type === "message" && (item as { role?: string }).role === "user";
}

/**
 * Returns the maximum context length (in tokens) for a given model.
 * These numbers are best‑effort guesses and provide a basis for UI percentages.
 */
export function maxTokensForModel(model: string): number {
  const lower = model.toLowerCase();
  // Heuristics for common context window sizes. Keep the checks loosely
  // ordered from *largest* to *smallest* so that more specific long‑context
  // models are detected before their shorter generic counterparts.

  // Special‑case for 1,047,576‑token demo model (gpt‑4‑long). We match either
  // the literal number or "gpt-4.1" variants we occasionally encounter.
  if (lower.includes("1,047,576") || /gpt-4\.1/i.test(lower)) {
    return 1047576;
  }

  if (lower.includes("128k") || /gpt-4\.5|gpt-4o-mini|gpt-4o\b/i.test(lower)) {
    return 128000;
  }

  // Experimental o‑series advertised at ~200k context
  if (/\bo[134]\b|o[134]-mini|o1[- ]?pro/i.test(lower)) {
    return 200000;
  }

  if (lower.includes("32k")) {
    return 32000;
  }
  if (lower.includes("16k")) {
    return 16000;
  }
  if (lower.includes("8k")) {
    return 8000;
  }
  if (lower.includes("4k")) {
    return 4000;
  }
  // Default to 128k for newer long‑context models
  return 128000;
}

/**
 * Calculates the percentage of tokens remaining in context for a model.
 */
export function calculateContextPercentRemaining(
  items: Array<ResponseItem>,
  model: string,
  extraContextChars = 0,
): number {
  const tokensFromItems = approximateTokensUsed(items);
  const extraTokens = Math.ceil(extraContextChars / 4);
  const used = tokensFromItems + extraTokens;
  const max = maxTokensForModel(model);
  const remaining = Math.max(0, max - used);
  return (remaining / max) * 100;
}

/**
 * Deduplicate the stream of {@link ResponseItem}s before they are persisted in
 * component state.
 *
 * Historically we used the (optional) {@code id} field returned by the
 * OpenAI streaming API as the primary key: the first occurrence of any given
 * {@code id} “won” and subsequent duplicates were dropped. In practice this
 * proved brittle because locally‑generated user messages don’t include an
 * {@code id}. The result was that if a user quickly pressed <Enter> twice the
 * exact same message would appear twice in the transcript.
 *
 * The new rules are therefore:
 *   1. If a {@link ResponseItem} has an {@code id} keep only the *first*
 *      occurrence of that {@code id} (this retains the previous behaviour for
 *      assistant / tool messages).
 *   2. Additionally, collapse *consecutive* user messages with identical
 *      content. Two messages are considered identical when their serialized
 *      {@code content} array matches exactly. We purposefully restrict this
 *      to **adjacent** duplicates so that legitimately repeated questions at
 *      a later point in the conversation are still shown.
 */
export function uniqueById(items: Array<ResponseItem>): Array<ResponseItem> {
  const seenIds = new Set<string>();
  const deduped: Array<ResponseItem> = [];

  for (const item of items) {
    // ──────────────────────────────────────────────────────────────────
    // Rule #1 – de‑duplicate by id when present
    // ──────────────────────────────────────────────────────────────────
    if (typeof item.id === "string" && item.id.length > 0) {
      if (seenIds.has(item.id)) {
        continue; // skip duplicates
      }
      seenIds.add(item.id);
    }

    // ──────────────────────────────────────────────────────────────────
    // Rule #2 – collapse consecutive identical user messages
    // ──────────────────────────────────────────────────────────────────
    if (isUserMessage(item) && deduped.length > 0) {
      const prev = deduped[deduped.length - 1]!;

      if (
        isUserMessage(prev) &&
        // Note: the `content` field is an array of message parts. Performing
        // a deep compare is over‑kill here; serialising to JSON is sufficient
        // (and fast for the tiny payloads involved).
        JSON.stringify(prev.content) === JSON.stringify(item.content)
      ) {
        continue; // skip duplicate user message
      }
    }

    deduped.push(item);
  }

  return deduped;
}
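// Editor's note — illustrative usage sketch, not part of the diff. The transcript
// below is a hypothetical two-item array whose second entry repeats the first id;
// uniqueById keeps only the first occurrence, and calculateContextPercentRemaining
// then estimates how much of the model's context window is still free.
const transcript = [
  { id: "m1", type: "message", role: "user", content: [{ type: "input_text", text: "hi" }] },
  { id: "m1", type: "message", role: "user", content: [{ type: "input_text", text: "hi" }] },
] as unknown as Array<ResponseItem>;

const deduped = uniqueById(transcript); // one item — the repeated id "m1" is dropped
const percentLeft = calculateContextPercentRemaining(deduped, "gpt-4o", 2000);
// maxTokensForModel("gpt-4o") is 128000; 2000 instruction chars count as ~500 extra
// tokens, so percentLeft comes out just under 100.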
@@ -1,4 +1,3 @@
|
||||
import type { AppRollout } from "../../app.js";
|
||||
import type { ApplyPatchCommand, ApprovalPolicy } from "../../approvals.js";
|
||||
import type { CommandConfirmation } from "../../utils/agent/agent-loop.js";
|
||||
import type { AppConfig } from "../../utils/config.js";
|
||||
@@ -6,51 +5,35 @@ import type { ColorName } from "chalk";
|
||||
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
|
||||
|
||||
import TerminalChatInput from "./terminal-chat-input.js";
|
||||
import TerminalChatPastRollout from "./terminal-chat-past-rollout.js";
|
||||
import { TerminalChatToolCallCommand } from "./terminal-chat-tool-call-command.js";
|
||||
import { TerminalChatToolCallCommand } from "./terminal-chat-tool-call-item.js";
|
||||
import {
|
||||
calculateContextPercentRemaining,
|
||||
uniqueById,
|
||||
} from "./terminal-chat-utils.js";
|
||||
import TerminalMessageHistory from "./terminal-message-history.js";
|
||||
import { formatCommandForDisplay } from "../../format-command.js";
|
||||
import { useConfirmation } from "../../hooks/use-confirmation.js";
|
||||
import { useTerminalSize } from "../../hooks/use-terminal-size.js";
|
||||
import { AgentLoop } from "../../utils/agent/agent-loop.js";
|
||||
import { isLoggingEnabled, log } from "../../utils/agent/log.js";
|
||||
import { ReviewDecision } from "../../utils/agent/review.js";
|
||||
import { generateCompactSummary } from "../../utils/compact-summary.js";
|
||||
import { saveConfig } from "../../utils/config.js";
|
||||
import { extractAppliedPatches as _extractAppliedPatches } from "../../utils/extract-applied-patches.js";
|
||||
import { getGitDiff } from "../../utils/get-diff.js";
|
||||
import { OPENAI_BASE_URL } from "../../utils/config.js";
|
||||
import { createInputItem } from "../../utils/input-utils.js";
|
||||
import { log } from "../../utils/logger/log.js";
|
||||
import {
|
||||
getAvailableModels,
|
||||
calculateContextPercentRemaining,
|
||||
uniqueById,
|
||||
} from "../../utils/model-utils.js";
|
||||
import { createOpenAIClient } from "../../utils/openai-client.js";
|
||||
import { getAvailableModels } from "../../utils/model-utils.js";
|
||||
import { CLI_VERSION } from "../../utils/session.js";
|
||||
import { shortCwd } from "../../utils/short-path.js";
|
||||
import { saveRollout } from "../../utils/storage/save-rollout.js";
|
||||
import { CLI_VERSION } from "../../version.js";
|
||||
import ApprovalModeOverlay from "../approval-mode-overlay.js";
|
||||
import DiffOverlay from "../diff-overlay.js";
|
||||
import HelpOverlay from "../help-overlay.js";
|
||||
import HistoryOverlay from "../history-overlay.js";
|
||||
import ModelOverlay from "../model-overlay.js";
|
||||
import SessionsOverlay from "../sessions-overlay.js";
|
||||
import chalk from "chalk";
|
||||
import fs from "fs/promises";
|
||||
import { Box, Text } from "ink";
|
||||
import { spawn } from "node:child_process";
|
||||
import { exec } from "node:child_process";
|
||||
import OpenAI from "openai";
|
||||
import React, { useEffect, useMemo, useRef, useState } from "react";
|
||||
import { inspect } from "util";
|
||||
|
||||
export type OverlayModeType =
|
||||
| "none"
|
||||
| "history"
|
||||
| "sessions"
|
||||
| "model"
|
||||
| "approval"
|
||||
| "help"
|
||||
| "diff";
|
||||
|
||||
type Props = {
|
||||
config: AppConfig;
|
||||
prompt?: string;
|
||||
@@ -71,19 +54,18 @@ const colorsByPolicy: Record<ApprovalPolicy, ColorName | undefined> = {
|
||||
*
|
||||
* @param command The command to explain
|
||||
* @param model The model to use for generating the explanation
|
||||
* @param flexMode Whether to use the flex-mode service tier
|
||||
* @param config The configuration object
|
||||
* @returns A human-readable explanation of what the command does
|
||||
*/
|
||||
async function generateCommandExplanation(
|
||||
command: Array<string>,
|
||||
model: string,
|
||||
flexMode: boolean,
|
||||
config: AppConfig,
|
||||
): Promise<string> {
|
||||
try {
|
||||
// Create a temporary OpenAI client
|
||||
const oai = createOpenAIClient(config);
|
||||
const oai = new OpenAI({
|
||||
apiKey: process.env["OPENAI_API_KEY"],
|
||||
baseURL: OPENAI_BASE_URL,
|
||||
});
|
||||
|
||||
// Format the command for display
|
||||
const commandForDisplay = formatCommandForDisplay(command);
|
||||
@@ -91,7 +73,6 @@ async function generateCommandExplanation(
|
||||
// Create a prompt that asks for an explanation with a more detailed system prompt
|
||||
const response = await oai.chat.completions.create({
|
||||
model,
|
||||
...(flexMode ? { service_tier: "flex" } : {}),
|
||||
messages: [
|
||||
{
|
||||
role: "system",
|
||||
@@ -112,8 +93,11 @@ async function generateCommandExplanation(
|
||||
} catch (error) {
|
||||
log(`Error generating command explanation: ${error}`);
|
||||
|
||||
// Improved error handling with more specific error information
|
||||
let errorMessage = "Unable to generate explanation due to an error.";
|
||||
|
||||
if (error instanceof Error) {
|
||||
// Include specific error message for better debugging
|
||||
errorMessage = `Unable to generate explanation: ${error.message}`;
|
||||
|
||||
// If it's an API error, check for more specific information
|
||||
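// Editor's note — sketch only, not part of the diff. It spells out the request shape
// used by generateCommandExplanation above when flex-mode is enabled; the system
// prompt text and the example command are hypothetical.
async function explainCommandSketch(
  oai: OpenAI,
  model: string,
  flexMode: boolean,
): Promise<string> {
  const response = await oai.chat.completions.create({
    model,
    // The conditional spread only attaches the flex service tier when requested.
    ...(flexMode ? { service_tier: "flex" } : {}),
    messages: [
      { role: "system", content: "Explain what this shell command does." },
      { role: "user", content: formatCommandForDisplay(["git", "status"]) },
    ],
  });
  return response.choices[0]?.message?.content ?? "";
}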
@@ -144,26 +128,21 @@ export default function TerminalChat({
|
||||
additionalWritableRoots,
|
||||
fullStdout,
|
||||
}: Props): React.ReactElement {
|
||||
const notify = Boolean(config.notify);
|
||||
// Desktop notification setting
|
||||
const notify = config.notify;
|
||||
const [model, setModel] = useState<string>(config.model);
|
||||
const [provider, setProvider] = useState<string>(config.provider || "openai");
|
||||
const [lastResponseId, setLastResponseId] = useState<string | null>(null);
|
||||
const [items, setItems] = useState<Array<ResponseItem>>([]);
|
||||
const [loading, setLoading] = useState<boolean>(false);
|
||||
// Allow switching approval modes at runtime via an overlay.
|
||||
const [approvalPolicy, setApprovalPolicy] = useState<ApprovalPolicy>(
|
||||
initialApprovalPolicy,
|
||||
);
|
||||
const [thinkingSeconds, setThinkingSeconds] = useState(0);
|
||||
|
||||
const handleCompact = async () => {
|
||||
setLoading(true);
|
||||
try {
|
||||
const summary = await generateCompactSummary(
|
||||
items,
|
||||
model,
|
||||
Boolean(config.flexMode),
|
||||
config,
|
||||
);
|
||||
const summary = await generateCompactSummary(items, model);
|
||||
setItems([
|
||||
{
|
||||
id: `compact-${Date.now()}`,
|
||||
@@ -188,22 +167,15 @@ export default function TerminalChat({
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
const {
|
||||
requestConfirmation,
|
||||
confirmationPrompt,
|
||||
explanation,
|
||||
submitConfirmation,
|
||||
} = useConfirmation();
|
||||
const [overlayMode, setOverlayMode] = useState<OverlayModeType>("none");
|
||||
const [viewRollout, setViewRollout] = useState<AppRollout | null>(null);
|
||||
|
||||
// Store the diff text when opening the diff overlay so the view isn’t
|
||||
// recomputed on every re‑render while it is open.
|
||||
// diffText is passed down to the DiffOverlay component. The setter is
|
||||
// currently unused but retained for potential future updates. Prefix with
|
||||
// an underscore so eslint ignores the unused variable.
|
||||
const [diffText, _setDiffText] = useState<string>("");
|
||||
const [overlayMode, setOverlayMode] = useState<
|
||||
"none" | "history" | "model" | "approval" | "help"
|
||||
>("none");
|
||||
|
||||
const [initialPrompt, setInitialPrompt] = useState(_initialPrompt);
|
||||
const [initialImagePaths, setInitialImagePaths] =
|
||||
@@ -219,44 +191,39 @@ export default function TerminalChat({
|
||||
// ────────────────────────────────────────────────────────────────
|
||||
// DEBUG: log every render w/ key bits of state
|
||||
// ────────────────────────────────────────────────────────────────
|
||||
log(
|
||||
`render - agent? ${Boolean(agentRef.current)} loading=${loading} items=${
|
||||
items.length
|
||||
}`,
|
||||
);
|
||||
if (isLoggingEnabled()) {
|
||||
log(
|
||||
`render – agent? ${Boolean(agentRef.current)} loading=${loading} items=${
|
||||
items.length
|
||||
}`,
|
||||
);
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
// Skip recreating the agent if awaiting a decision on a pending confirmation.
|
||||
if (confirmationPrompt != null) {
|
||||
log("skip AgentLoop recreation due to pending confirmationPrompt");
|
||||
return;
|
||||
if (isLoggingEnabled()) {
|
||||
log("creating NEW AgentLoop");
|
||||
log(
|
||||
`model=${model} instructions=${Boolean(
|
||||
config.instructions,
|
||||
)} approvalPolicy=${approvalPolicy}`,
|
||||
);
|
||||
}
|
||||
|
||||
log("creating NEW AgentLoop");
|
||||
log(
|
||||
`model=${model} provider=${provider} instructions=${Boolean(
|
||||
config.instructions,
|
||||
)} approvalPolicy=${approvalPolicy}`,
|
||||
);
|
||||
|
||||
// Tear down any existing loop before creating a new one.
|
||||
// Tear down any existing loop before creating a new one
|
||||
agentRef.current?.terminate();
|
||||
|
||||
const sessionId = crypto.randomUUID();
|
||||
agentRef.current = new AgentLoop({
|
||||
model,
|
||||
provider,
|
||||
config,
|
||||
instructions: config.instructions,
|
||||
approvalPolicy,
|
||||
disableResponseStorage: config.disableResponseStorage,
|
||||
additionalWritableRoots,
|
||||
onLastResponseId: setLastResponseId,
|
||||
onItem: (item) => {
|
||||
log(`onItem: ${JSON.stringify(item)}`);
|
||||
setItems((prev) => {
|
||||
const updated = uniqueById([...prev, item as ResponseItem]);
|
||||
saveRollout(sessionId, updated);
|
||||
saveRollout(updated);
|
||||
return updated;
|
||||
});
|
||||
},
|
||||
@@ -273,18 +240,15 @@ export default function TerminalChat({
|
||||
<TerminalChatToolCallCommand commandForDisplay={commandForDisplay} />,
|
||||
);
|
||||
|
||||
// If the user wants an explanation, generate one and ask again.
|
||||
// If the user wants an explanation, generate one and ask again
|
||||
if (review === ReviewDecision.EXPLAIN) {
|
||||
log(`Generating explanation for command: ${commandForDisplay}`);
|
||||
const explanation = await generateCommandExplanation(
|
||||
command,
|
||||
model,
|
||||
Boolean(config.flexMode),
|
||||
config,
|
||||
);
|
||||
|
||||
// Generate an explanation using the same model
|
||||
const explanation = await generateCommandExplanation(command, model);
|
||||
log(`Generated explanation: ${explanation}`);
|
||||
|
||||
// Ask for confirmation again, but with the explanation.
|
||||
// Ask for confirmation again, but with the explanation
|
||||
const confirmResult = await requestConfirmation(
|
||||
<TerminalChatToolCallCommand
|
||||
commandForDisplay={commandForDisplay}
|
||||
@@ -292,11 +256,11 @@ export default function TerminalChat({
|
||||
/>,
|
||||
);
|
||||
|
||||
// Update the decision based on the second confirmation.
|
||||
// Update the decision based on the second confirmation
|
||||
review = confirmResult.decision;
|
||||
customDenyMessage = confirmResult.customDenyMessage;
|
||||
|
||||
// Return the final decision with the explanation.
|
||||
// Return the final decision with the explanation
|
||||
return { review, customDenyMessage, applyPatch, explanation };
|
||||
}
|
||||
|
||||
@@ -304,23 +268,30 @@ export default function TerminalChat({
|
||||
},
|
||||
});
|
||||
|
||||
// Force a render so JSX below can "see" the freshly created agent.
|
||||
// force a render so JSX below can "see" the freshly created agent
|
||||
forceUpdate();
|
||||
|
||||
log(`AgentLoop created: ${inspect(agentRef.current, { depth: 1 })}`);
|
||||
if (isLoggingEnabled()) {
|
||||
log(`AgentLoop created: ${inspect(agentRef.current, { depth: 1 })}`);
|
||||
}
|
||||
|
||||
return () => {
|
||||
log("terminating AgentLoop");
|
||||
if (isLoggingEnabled()) {
|
||||
log("terminating AgentLoop");
|
||||
}
|
||||
agentRef.current?.terminate();
|
||||
agentRef.current = undefined;
|
||||
forceUpdate(); // re‑render after teardown too
|
||||
};
|
||||
// We intentionally omit 'approvalPolicy' and 'confirmationPrompt' from the deps
|
||||
// so switching modes or showing confirmation dialogs doesn’t tear down the loop.
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [model, provider, config, requestConfirmation, additionalWritableRoots]);
|
||||
}, [
|
||||
model,
|
||||
config,
|
||||
approvalPolicy,
|
||||
requestConfirmation,
|
||||
additionalWritableRoots,
|
||||
]);
|
||||
|
||||
// Whenever loading starts/stops, reset or start a timer — but pause the
|
||||
// whenever loading starts/stops, reset or start a timer — but pause the
|
||||
// timer while a confirmation overlay is displayed so we don't trigger a
|
||||
// re‑render every second during apply_patch reviews.
|
||||
useEffect(() => {
|
||||
@@ -345,15 +316,14 @@ export default function TerminalChat({
|
||||
};
|
||||
}, [loading, confirmationPrompt]);
|
||||
|
||||
// Notify desktop with a preview when an assistant response arrives.
|
||||
// Notify desktop with a preview when an assistant response arrives
|
||||
const prevLoadingRef = useRef<boolean>(false);
|
||||
useEffect(() => {
|
||||
// Only notify when notifications are enabled.
|
||||
// Only notify when notifications are enabled
|
||||
if (!notify) {
|
||||
prevLoadingRef.current = loading;
|
||||
return;
|
||||
}
|
||||
|
||||
if (
|
||||
prevLoadingRef.current &&
|
||||
!loading &&
|
||||
@@ -380,20 +350,21 @@ export default function TerminalChat({
|
||||
const safePreview = preview.replace(/"/g, '\\"');
|
||||
const title = "Codex CLI";
|
||||
const cwd = PWD;
|
||||
spawn("osascript", [
|
||||
"-e",
|
||||
`display notification "${safePreview}" with title "${title}" subtitle "${cwd}" sound name "Ping"`,
|
||||
]);
|
||||
exec(
|
||||
`osascript -e 'display notification "${safePreview}" with title "${title}" subtitle "${cwd}" sound name "Ping"'`,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
prevLoadingRef.current = loading;
|
||||
}, [notify, loading, confirmationPrompt, items, PWD]);
|
||||
|
||||
// Let's also track whenever the ref becomes available.
|
||||
// Let's also track whenever the ref becomes available
|
||||
const agent = agentRef.current;
|
||||
useEffect(() => {
|
||||
log(`agentRef.current is now ${Boolean(agent)}`);
|
||||
if (isLoggingEnabled()) {
|
||||
log(`agentRef.current is now ${Boolean(agent)}`);
|
||||
}
|
||||
}, [agent]);
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
@@ -413,7 +384,7 @@ export default function TerminalChat({
|
||||
const inputItems = [
|
||||
await createInputItem(initialPrompt || "", initialImagePaths || []),
|
||||
];
|
||||
// Clear them to prevent subsequent runs.
|
||||
// Clear them to prevent subsequent runs
|
||||
setInitialPrompt("");
|
||||
setInitialImagePaths([]);
|
||||
agent?.run(inputItems);
|
||||
@@ -426,7 +397,7 @@ export default function TerminalChat({
|
||||
// ────────────────────────────────────────────────────────────────
|
||||
useEffect(() => {
|
||||
(async () => {
|
||||
const available = await getAvailableModels(provider);
|
||||
const available = await getAvailableModels();
|
||||
if (model && available.length > 0 && !available.includes(model)) {
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
@@ -437,7 +408,7 @@ export default function TerminalChat({
|
||||
content: [
|
||||
{
|
||||
type: "input_text",
|
||||
text: `Warning: model "${model}" is not in the list of available models for provider "${provider}".`,
|
||||
text: `Warning: model "${model}" is not in the list of available models returned by OpenAI.`,
|
||||
},
|
||||
],
|
||||
},
|
||||
@@ -448,7 +419,7 @@ export default function TerminalChat({
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, []);
|
||||
|
||||
// Just render every item in order, no grouping/collapse.
|
||||
// Just render every item in order, no grouping/collapse
|
||||
const lastMessageBatch = items.map((item) => ({ item }));
|
||||
const groupCounts: Record<string, number> = {};
|
||||
const userMsgCount = items.filter(
|
||||
@@ -456,26 +427,21 @@ export default function TerminalChat({
|
||||
).length;
|
||||
|
||||
const contextLeftPercent = useMemo(
|
||||
() => calculateContextPercentRemaining(items, model),
|
||||
[items, model],
|
||||
() =>
|
||||
calculateContextPercentRemaining(
|
||||
items,
|
||||
model,
|
||||
// static system instructions count towards the context budget too
|
||||
config.instructions?.length ?? 0,
|
||||
),
|
||||
[items, model, config.instructions],
|
||||
);
|
||||
|
||||
if (viewRollout) {
|
||||
return (
|
||||
<TerminalChatPastRollout
|
||||
fileOpener={config.fileOpener}
|
||||
session={viewRollout.session}
|
||||
items={viewRollout.items}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Box flexDirection="column">
|
||||
<Box flexDirection="column">
|
||||
{agent ? (
|
||||
<TerminalMessageHistory
|
||||
setOverlayMode={setOverlayMode}
|
||||
batch={lastMessageBatch}
|
||||
groupCounts={groupCounts}
|
||||
items={items}
|
||||
@@ -489,21 +455,18 @@ export default function TerminalChat({
|
||||
version: CLI_VERSION,
|
||||
PWD,
|
||||
model,
|
||||
provider,
|
||||
approvalPolicy,
|
||||
colorsByPolicy,
|
||||
agent,
|
||||
initialImagePaths,
|
||||
flexModeEnabled: Boolean(config.flexMode),
|
||||
}}
|
||||
fileOpener={config.fileOpener}
|
||||
/>
|
||||
) : (
|
||||
<Box>
|
||||
<Text color="gray">Initializing agent…</Text>
|
||||
</Box>
|
||||
)}
|
||||
{overlayMode === "none" && agent && (
|
||||
{agent && (
|
||||
<TerminalChatInput
|
||||
loading={loading}
|
||||
setItems={setItems}
|
||||
@@ -525,36 +488,17 @@ export default function TerminalChat({
|
||||
openModelOverlay={() => setOverlayMode("model")}
|
||||
openApprovalOverlay={() => setOverlayMode("approval")}
|
||||
openHelpOverlay={() => setOverlayMode("help")}
|
||||
openSessionsOverlay={() => setOverlayMode("sessions")}
|
||||
openDiffOverlay={() => {
|
||||
const { isGitRepo, diff } = getGitDiff();
|
||||
let text: string;
|
||||
if (isGitRepo) {
|
||||
text = diff;
|
||||
} else {
|
||||
text = "`/diff` — _not inside a git repository_";
|
||||
}
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
id: `diff-${Date.now()}`,
|
||||
type: "message",
|
||||
role: "system",
|
||||
content: [{ type: "input_text", text }],
|
||||
},
|
||||
]);
|
||||
// Ensure no overlay is shown.
|
||||
setOverlayMode("none");
|
||||
}}
|
||||
onCompact={handleCompact}
|
||||
active={overlayMode === "none"}
|
||||
interruptAgent={() => {
|
||||
if (!agent) {
|
||||
return;
|
||||
}
|
||||
log(
|
||||
"TerminalChat: interruptAgent invoked – calling agent.cancel()",
|
||||
);
|
||||
if (isLoggingEnabled()) {
|
||||
log(
|
||||
"TerminalChat: interruptAgent invoked – calling agent.cancel()",
|
||||
);
|
||||
}
|
||||
agent.cancel();
|
||||
setLoading(false);
|
||||
|
||||
@@ -578,74 +522,32 @@ export default function TerminalChat({
|
||||
agent.run(inputs, lastResponseId || "");
|
||||
return {};
|
||||
}}
|
||||
items={items}
|
||||
thinkingSeconds={thinkingSeconds}
|
||||
/>
|
||||
)}
|
||||
{overlayMode === "history" && (
|
||||
<HistoryOverlay items={items} onExit={() => setOverlayMode("none")} />
|
||||
)}
|
||||
{overlayMode === "sessions" && (
|
||||
<SessionsOverlay
|
||||
onView={async (p) => {
|
||||
try {
|
||||
const txt = await fs.readFile(p, "utf-8");
|
||||
const data = JSON.parse(txt) as AppRollout;
|
||||
setViewRollout(data);
|
||||
setOverlayMode("none");
|
||||
} catch {
|
||||
setOverlayMode("none");
|
||||
}
|
||||
}}
|
||||
onResume={(p) => {
|
||||
setOverlayMode("none");
|
||||
setInitialPrompt(`Resume this session: ${p}`);
|
||||
}}
|
||||
onExit={() => setOverlayMode("none")}
|
||||
/>
|
||||
)}
|
||||
{overlayMode === "model" && (
|
||||
<ModelOverlay
|
||||
currentModel={model}
|
||||
providers={config.providers}
|
||||
currentProvider={provider}
|
||||
hasLastResponse={Boolean(lastResponseId)}
|
||||
onSelect={(allModels, newModel) => {
|
||||
log(
|
||||
"TerminalChat: interruptAgent invoked – calling agent.cancel()",
|
||||
);
|
||||
if (!agent) {
|
||||
log("TerminalChat: agent is not ready yet");
|
||||
onSelect={(newModel) => {
|
||||
if (isLoggingEnabled()) {
|
||||
log(
|
||||
"TerminalChat: interruptAgent invoked – calling agent.cancel()",
|
||||
);
|
||||
if (!agent) {
|
||||
log("TerminalChat: agent is not ready yet");
|
||||
}
|
||||
}
|
||||
agent?.cancel();
|
||||
setLoading(false);
|
||||
|
||||
if (!allModels?.includes(newModel)) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
chalk.bold.red(
|
||||
`Model "${chalk.yellow(
|
||||
newModel,
|
||||
)}" is not available for provider "${chalk.yellow(
|
||||
provider,
|
||||
)}".`,
|
||||
),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
setModel(newModel);
|
||||
setLastResponseId((prev) =>
|
||||
prev && newModel !== model ? null : prev,
|
||||
);
|
||||
|
||||
// Save model to config
|
||||
saveConfig({
|
||||
...config,
|
||||
model: newModel,
|
||||
provider: provider,
|
||||
});
|
||||
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
@@ -663,51 +565,6 @@ export default function TerminalChat({
|
||||
|
||||
setOverlayMode("none");
|
||||
}}
|
||||
onSelectProvider={(newProvider) => {
|
||||
log(
|
||||
"TerminalChat: interruptAgent invoked – calling agent.cancel()",
|
||||
);
|
||||
if (!agent) {
|
||||
log("TerminalChat: agent is not ready yet");
|
||||
}
|
||||
agent?.cancel();
|
||||
setLoading(false);
|
||||
|
||||
// Select default model for the new provider.
|
||||
const defaultModel = model;
|
||||
|
||||
// Save provider to config.
|
||||
const updatedConfig = {
|
||||
...config,
|
||||
provider: newProvider,
|
||||
model: defaultModel,
|
||||
};
|
||||
saveConfig(updatedConfig);
|
||||
|
||||
setProvider(newProvider);
|
||||
setModel(defaultModel);
|
||||
setLastResponseId((prev) =>
|
||||
prev && newProvider !== provider ? null : prev,
|
||||
);
|
||||
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
id: `switch-provider-${Date.now()}`,
|
||||
type: "message",
|
||||
role: "system",
|
||||
content: [
|
||||
{
|
||||
type: "input_text",
|
||||
text: `Switched provider to ${newProvider} with model ${defaultModel}`,
|
||||
},
|
||||
],
|
||||
},
|
||||
]);
|
||||
|
||||
// Don't close the overlay so user can select a model for the new provider
|
||||
// setOverlayMode("none");
|
||||
}}
|
||||
onExit={() => setOverlayMode("none")}
|
||||
/>
|
||||
)}
|
||||
@@ -716,19 +573,12 @@ export default function TerminalChat({
|
||||
<ApprovalModeOverlay
|
||||
currentMode={approvalPolicy}
|
||||
onSelect={(newMode) => {
|
||||
// Update approval policy without cancelling an in-progress session.
|
||||
agent?.cancel();
|
||||
setLoading(false);
|
||||
if (newMode === approvalPolicy) {
|
||||
return;
|
||||
}
|
||||
|
||||
setApprovalPolicy(newMode as ApprovalPolicy);
|
||||
if (agentRef.current) {
|
||||
(
|
||||
agentRef.current as unknown as {
|
||||
approvalPolicy: ApprovalPolicy;
|
||||
}
|
||||
).approvalPolicy = newMode as ApprovalPolicy;
|
||||
}
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
@@ -753,13 +603,6 @@ export default function TerminalChat({
|
||||
{overlayMode === "help" && (
|
||||
<HelpOverlay onExit={() => setOverlayMode("none")} />
|
||||
)}
|
||||
|
||||
{overlayMode === "diff" && (
|
||||
<DiffOverlay
|
||||
diffText={diffText}
|
||||
onExit={() => setOverlayMode("none")}
|
||||
/>
|
||||
)}
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
|
||||
@@ -9,12 +9,10 @@ export interface TerminalHeaderProps {
|
||||
version: string;
|
||||
PWD: string;
|
||||
model: string;
|
||||
provider?: string;
|
||||
approvalPolicy: string;
|
||||
colorsByPolicy: Record<string, string | undefined>;
|
||||
agent?: AgentLoop;
|
||||
initialImagePaths?: Array<string>;
|
||||
flexModeEnabled?: boolean;
|
||||
}
|
||||
|
||||
const TerminalHeader: React.FC<TerminalHeaderProps> = ({
|
||||
@@ -22,21 +20,18 @@ const TerminalHeader: React.FC<TerminalHeaderProps> = ({
|
||||
version,
|
||||
PWD,
|
||||
model,
|
||||
provider = "openai",
|
||||
approvalPolicy,
|
||||
colorsByPolicy,
|
||||
agent,
|
||||
initialImagePaths,
|
||||
flexModeEnabled = false,
|
||||
}) => {
|
||||
return (
|
||||
<>
|
||||
{terminalRows < 10 ? (
|
||||
// Compact header for small terminal windows
|
||||
<Text>
|
||||
● Codex v{version} - {PWD} - {model} ({provider}) -{" "}
|
||||
● Codex v{version} – {PWD} – {model} –{" "}
|
||||
<Text color={colorsByPolicy[approvalPolicy]}>{approvalPolicy}</Text>
|
||||
{flexModeEnabled ? " - flex-mode" : ""}
|
||||
</Text>
|
||||
) : (
|
||||
<>
|
||||
@@ -67,22 +62,12 @@ const TerminalHeader: React.FC<TerminalHeaderProps> = ({
|
||||
<Text dimColor>
|
||||
<Text color="blueBright">↳</Text> model: <Text bold>{model}</Text>
|
||||
</Text>
|
||||
<Text dimColor>
|
||||
<Text color="blueBright">↳</Text> provider:{" "}
|
||||
<Text bold>{provider}</Text>
|
||||
</Text>
|
||||
<Text dimColor>
|
||||
<Text color="blueBright">↳</Text> approval:{" "}
|
||||
<Text bold color={colorsByPolicy[approvalPolicy]}>
|
||||
<Text bold color={colorsByPolicy[approvalPolicy]} dimColor>
|
||||
{approvalPolicy}
|
||||
</Text>
|
||||
</Text>
|
||||
{flexModeEnabled && (
|
||||
<Text dimColor>
|
||||
<Text color="blueBright">↳</Text> flex-mode:{" "}
|
||||
<Text bold>enabled</Text>
|
||||
</Text>
|
||||
)}
|
||||
{initialImagePaths?.map((img, idx) => (
|
||||
<Text key={img ?? idx} color="gray">
|
||||
<Text color="blueBright">↳</Text> image:{" "}
|
||||
|
||||
@@ -1,19 +1,17 @@
|
||||
import type { OverlayModeType } from "./terminal-chat.js";
|
||||
import type { TerminalHeaderProps } from "./terminal-header.js";
|
||||
import type { GroupedResponseItem } from "./use-message-grouping.js";
|
||||
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
|
||||
import type { FileOpenerScheme } from "src/utils/config.js";
|
||||
|
||||
import TerminalChatResponseItem from "./terminal-chat-response-item.js";
|
||||
import TerminalHeader from "./terminal-header.js";
|
||||
import { Box, Static } from "ink";
|
||||
import { Box, Static, Text } from "ink";
|
||||
import React, { useMemo } from "react";
|
||||
|
||||
// A batch entry can either be a standalone response item or a grouped set of
|
||||
// items (e.g. auto‑approved tool‑call batches) that should be rendered
|
||||
// together.
|
||||
type BatchEntry = { item?: ResponseItem; group?: GroupedResponseItem };
|
||||
type TerminalMessageHistoryProps = {
|
||||
type MessageHistoryProps = {
|
||||
batch: Array<BatchEntry>;
|
||||
groupCounts: Record<string, number>;
|
||||
items: Array<ResponseItem>;
|
||||
@@ -23,27 +21,25 @@ type TerminalMessageHistoryProps = {
|
||||
thinkingSeconds: number;
|
||||
headerProps: TerminalHeaderProps;
|
||||
fullStdout: boolean;
|
||||
setOverlayMode: React.Dispatch<React.SetStateAction<OverlayModeType>>;
|
||||
fileOpener: FileOpenerScheme | undefined;
|
||||
};
|
||||
|
||||
const TerminalMessageHistory: React.FC<TerminalMessageHistoryProps> = ({
|
||||
const MessageHistory: React.FC<MessageHistoryProps> = ({
|
||||
batch,
|
||||
headerProps,
|
||||
// `loading` and `thinkingSeconds` handled by input component now.
|
||||
loading: _loading,
|
||||
thinkingSeconds: _thinkingSeconds,
|
||||
loading,
|
||||
thinkingSeconds,
|
||||
fullStdout,
|
||||
setOverlayMode,
|
||||
fileOpener,
|
||||
}) => {
|
||||
// Flatten batch entries to response items.
|
||||
const messages = useMemo(() => batch.map(({ item }) => item!), [batch]);
|
||||
|
||||
return (
|
||||
<Box flexDirection="column">
|
||||
{/* The dedicated thinking indicator in the input area now displays the
|
||||
elapsed time, so we no longer render a separate counter here. */}
|
||||
{loading && (
|
||||
<Box marginTop={1}>
|
||||
<Text color="yellow">{`thinking for ${thinkingSeconds}s`}</Text>
|
||||
</Box>
|
||||
)}
|
||||
<Static items={["header", ...messages]}>
|
||||
{(item, index) => {
|
||||
if (item === "header") {
|
||||
@@ -62,25 +58,15 @@ const TerminalMessageHistory: React.FC<TerminalMessageHistoryProps> = ({
|
||||
key={`${message.id}-${index}`}
|
||||
flexDirection="column"
|
||||
marginLeft={
|
||||
message.type === "message" &&
|
||||
(message.role === "user" || message.role === "assistant")
|
||||
? 0
|
||||
: 4
|
||||
message.type === "message" && message.role === "user" ? 0 : 4
|
||||
}
|
||||
marginTop={
|
||||
message.type === "message" && message.role === "user" ? 0 : 1
|
||||
}
|
||||
marginBottom={
|
||||
message.type === "message" && message.role === "assistant"
|
||||
? 1
|
||||
: 0
|
||||
}
|
||||
>
|
||||
<TerminalChatResponseItem
|
||||
item={message}
|
||||
fullStdout={fullStdout}
|
||||
setOverlayMode={setOverlayMode}
|
||||
fileOpener={fileOpener}
|
||||
/>
|
||||
</Box>
|
||||
);
|
||||
@@ -90,4 +76,4 @@ const TerminalMessageHistory: React.FC<TerminalMessageHistoryProps> = ({
|
||||
);
|
||||
};
|
||||
|
||||
export default React.memo(TerminalMessageHistory);
|
||||
export default React.memo(MessageHistory);
|
||||
|
||||
@@ -1,93 +0,0 @@
|
||||
import { Box, Text, useInput } from "ink";
|
||||
import React, { useState } from "react";
|
||||
|
||||
/**
|
||||
* Simple scrollable view for displaying a diff.
|
||||
* The component is intentionally lightweight and mirrors the UX of
|
||||
* HistoryOverlay: Up/Down or j/k to scroll, PgUp/PgDn for paging and Esc to
|
||||
* close. The caller is responsible for computing the diff text.
|
||||
*/
|
||||
export default function DiffOverlay({
|
||||
diffText,
|
||||
onExit,
|
||||
}: {
|
||||
diffText: string;
|
||||
onExit: () => void;
|
||||
}): JSX.Element {
|
||||
const lines = diffText.length > 0 ? diffText.split("\n") : ["(no changes)"];
|
||||
|
||||
const [cursor, setCursor] = useState(0);
|
||||
|
||||
// Determine how many rows we can display – similar to HistoryOverlay.
|
||||
const rows = process.stdout.rows || 24;
|
||||
const headerRows = 2;
|
||||
const footerRows = 1;
|
||||
const maxVisible = Math.max(4, rows - headerRows - footerRows);
|
||||
|
||||
useInput((input, key) => {
|
||||
if (key.escape || input === "q") {
|
||||
onExit();
|
||||
return;
|
||||
}
|
||||
|
||||
if (key.downArrow || input === "j") {
|
||||
setCursor((c) => Math.min(lines.length - 1, c + 1));
|
||||
} else if (key.upArrow || input === "k") {
|
||||
setCursor((c) => Math.max(0, c - 1));
|
||||
} else if (key.pageDown) {
|
||||
setCursor((c) => Math.min(lines.length - 1, c + maxVisible));
|
||||
} else if (key.pageUp) {
|
||||
setCursor((c) => Math.max(0, c - maxVisible));
|
||||
} else if (input === "g") {
|
||||
setCursor(0);
|
||||
} else if (input === "G") {
|
||||
setCursor(lines.length - 1);
|
||||
}
|
||||
});
|
||||
|
||||
const firstVisible = Math.min(
|
||||
Math.max(0, cursor - Math.floor(maxVisible / 2)),
|
||||
Math.max(0, lines.length - maxVisible),
|
||||
);
|
||||
const visible = lines.slice(firstVisible, firstVisible + maxVisible);
|
||||
|
||||
// Very small helper to colorize diff lines in a basic way.
|
||||
function renderLine(line: string, idx: number): JSX.Element {
|
||||
let color: "green" | "red" | "cyan" | undefined = undefined;
|
||||
if (line.startsWith("+")) {
|
||||
color = "green";
|
||||
} else if (line.startsWith("-")) {
|
||||
color = "red";
|
||||
} else if (line.startsWith("@@") || line.startsWith("diff --git")) {
|
||||
color = "cyan";
|
||||
}
|
||||
return (
|
||||
<Text key={idx} color={color} wrap="truncate-end">
|
||||
{line === "" ? " " : line}
|
||||
</Text>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Box
|
||||
flexDirection="column"
|
||||
borderStyle="round"
|
||||
borderColor="gray"
|
||||
width={Math.min(120, process.stdout.columns || 120)}
|
||||
>
|
||||
<Box paddingX={1}>
|
||||
<Text bold>Working tree diff ({lines.length} lines)</Text>
|
||||
</Box>
|
||||
|
||||
<Box flexDirection="column" paddingX={1}>
|
||||
{visible.map((line, idx) => {
|
||||
return renderLine(line, firstVisible + idx);
|
||||
})}
|
||||
</Box>
|
||||
|
||||
<Box paddingX={1}>
|
||||
<Text dimColor>esc Close ↑↓ Scroll PgUp/PgDn g/G First/Last</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
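// Editor's note — worked example, not part of the diff, of the scroll-window math in
// the removed DiffOverlay above. The numbers are hypothetical: 200 diff lines, 21
// visible rows, cursor on line 150.
const totalLines = 200;
const maxVisible = 21;
const cursor = 150;
const firstVisible = Math.min(
  Math.max(0, cursor - Math.floor(maxVisible / 2)), // 150 - 10 = 140
  Math.max(0, totalLines - maxVisible),             // 179
); // => 140, so rows 140..160 are rendered and the cursor stays centred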
@@ -52,13 +52,6 @@ export default function HelpOverlay({
|
||||
<Text>
|
||||
<Text color="cyan">/clearhistory</Text> – clear command history
|
||||
</Text>
|
||||
<Text>
|
||||
<Text color="cyan">/bug</Text> – generate a prefilled GitHub issue URL
|
||||
with session log
|
||||
</Text>
|
||||
<Text>
|
||||
<Text color="cyan">/diff</Text> – view working tree git diff
|
||||
</Text>
|
||||
<Text>
|
||||
<Text color="cyan">/compact</Text> – condense context into a summary
|
||||
</Text>
|
||||
|
||||
@@ -14,10 +14,7 @@ export default function HistoryOverlay({ items, onExit }: Props): JSX.Element {
|
||||
const [mode, setMode] = useState<Mode>("commands");
|
||||
const [cursor, setCursor] = useState(0);
|
||||
|
||||
const { commands, files } = useMemo(
|
||||
() => formatHistoryForDisplay(items),
|
||||
[items],
|
||||
);
|
||||
const { commands, files } = useMemo(() => buildLists(items), [items]);
|
||||
|
||||
const list = mode === "commands" ? commands : files;
|
||||
|
||||
@@ -98,7 +95,7 @@ export default function HistoryOverlay({ items, onExit }: Props): JSX.Element {
|
||||
);
|
||||
}
|
||||
|
||||
function formatHistoryForDisplay(items: Array<ResponseItem>): {
|
||||
function buildLists(items: Array<ResponseItem>): {
|
||||
commands: Array<string>;
|
||||
files: Array<string>;
|
||||
} {
|
||||
@@ -106,9 +103,33 @@ function formatHistoryForDisplay(items: Array<ResponseItem>): {
|
||||
const filesSet = new Set<string>();
|
||||
|
||||
for (const item of items) {
|
||||
const userPrompt = processUserMessage(item);
|
||||
if (userPrompt) {
|
||||
commands.push(userPrompt);
|
||||
if (
|
||||
item.type === "message" &&
|
||||
(item as unknown as { role?: string }).role === "user"
|
||||
) {
|
||||
// TODO: We're ignoring images/files here.
|
||||
const parts =
|
||||
(item as unknown as { content?: Array<unknown> }).content ?? [];
|
||||
const texts: Array<string> = [];
|
||||
if (Array.isArray(parts)) {
|
||||
for (const part of parts) {
|
||||
if (part && typeof part === "object" && "text" in part) {
|
||||
const t = (part as unknown as { text?: string }).text;
|
||||
if (typeof t === "string" && t.length > 0) {
|
||||
texts.push(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (texts.length > 0) {
|
||||
const fullPrompt = texts.join(" ");
|
||||
// Truncate very long prompts so the history view stays legible.
|
||||
const truncated =
|
||||
fullPrompt.length > 120 ? `${fullPrompt.slice(0, 117)}…` : fullPrompt;
|
||||
commands.push(`> ${truncated}`);
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -148,11 +169,35 @@ function formatHistoryForDisplay(items: Array<ResponseItem>): {
|
||||
const cmdArray: Array<string> | undefined = Array.isArray(argsObj?.["cmd"])
|
||||
? (argsObj!["cmd"] as Array<string>)
|
||||
: Array.isArray(argsObj?.["command"])
|
||||
? (argsObj!["command"] as Array<string>)
|
||||
: undefined;
|
||||
? (argsObj!["command"] as Array<string>)
|
||||
: undefined;
|
||||
|
||||
if (cmdArray && cmdArray.length > 0) {
|
||||
commands.push(processCommandArray(cmdArray, filesSet));
|
||||
commands.push(cmdArray.join(" "));
|
||||
|
||||
// Heuristic for file paths in command args
|
||||
for (const part of cmdArray) {
|
||||
if (!part.startsWith("-") && part.includes("/")) {
|
||||
filesSet.add(part);
|
||||
}
|
||||
}
|
||||
|
||||
// Special‑case apply_patch so we can extract the list of modified files
|
||||
if (cmdArray[0] === "apply_patch" || cmdArray.includes("apply_patch")) {
|
||||
const patchTextMaybe = cmdArray.find((s) =>
|
||||
s.includes("*** Begin Patch"),
|
||||
);
|
||||
if (typeof patchTextMaybe === "string") {
|
||||
const lines = patchTextMaybe.split("\n");
|
||||
for (const line of lines) {
|
||||
const m = line.match(/^[-+]{3} [ab]\/(.+)$/);
|
||||
if (m && m[1]) {
|
||||
filesSet.add(m[1]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
continue; // We processed this as a command; no need to treat as generic tool call.
|
||||
}
|
||||
|
||||
@@ -160,96 +205,33 @@ function formatHistoryForDisplay(items: Array<ResponseItem>): {
|
||||
// short argument representation to give users an idea of what
|
||||
// happened.
|
||||
if (typeof toolName === "string" && toolName.length > 0) {
|
||||
commands.push(processNonExecTool(toolName, argsJson, filesSet));
|
||||
let summary = toolName;
|
||||
|
||||
if (argsJson && typeof argsJson === "object") {
|
||||
// Extract a few common argument keys to make the summary more useful
|
||||
// without being overly verbose.
|
||||
const interestingKeys = [
|
||||
"path",
|
||||
"file",
|
||||
"filepath",
|
||||
"filename",
|
||||
"pattern",
|
||||
];
|
||||
for (const key of interestingKeys) {
|
||||
const val = (argsJson as Record<string, unknown>)[key];
|
||||
if (typeof val === "string") {
|
||||
summary += ` ${val}`;
|
||||
if (val.includes("/")) {
|
||||
filesSet.add(val);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
commands.push(summary);
|
||||
}
|
||||
}
|
||||
|
||||
return { commands, files: Array.from(filesSet) };
|
||||
}
|
||||
|
||||
function processUserMessage(item: ResponseItem): string | null {
|
||||
if (
|
||||
item.type === "message" &&
|
||||
(item as unknown as { role?: string }).role === "user"
|
||||
) {
|
||||
// TODO: We're ignoring images/files here.
|
||||
const parts =
|
||||
(item as unknown as { content?: Array<unknown> }).content ?? [];
|
||||
const texts: Array<string> = [];
|
||||
if (Array.isArray(parts)) {
|
||||
for (const part of parts) {
|
||||
if (part && typeof part === "object" && "text" in part) {
|
||||
const t = (part as unknown as { text?: string }).text;
|
||||
if (typeof t === "string" && t.length > 0) {
|
||||
texts.push(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (texts.length > 0) {
|
||||
const fullPrompt = texts.join(" ");
|
||||
// Truncate very long prompts so the history view stays legible.
|
||||
return fullPrompt.length > 120
|
||||
? `> ${fullPrompt.slice(0, 117)}…`
|
||||
: `> ${fullPrompt}`;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function processCommandArray(
|
||||
cmdArray: Array<string>,
|
||||
filesSet: Set<string>,
|
||||
): string {
|
||||
const cmd = cmdArray.join(" ");
|
||||
|
||||
// Heuristic for file paths in command args
|
||||
for (const part of cmdArray) {
|
||||
if (!part.startsWith("-") && part.includes("/")) {
|
||||
filesSet.add(part);
|
||||
}
|
||||
}
|
||||
|
||||
// Special‑case apply_patch so we can extract the list of modified files
|
||||
if (cmdArray[0] === "apply_patch" || cmdArray.includes("apply_patch")) {
|
||||
const patchTextMaybe = cmdArray.find((s) => s.includes("*** Begin Patch"));
|
||||
if (typeof patchTextMaybe === "string") {
|
||||
const lines = patchTextMaybe.split("\n");
|
||||
for (const line of lines) {
|
||||
const m = line.match(/^[-+]{3} [ab]\/(.+)$/);
|
||||
if (m && m[1]) {
|
||||
filesSet.add(m[1]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cmd;
|
||||
}
|
||||
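// Editor's note — sketch only, not part of the diff. The /^[-+]{3} [ab]\/(.+)$/ match
// in processCommandArray above pulls file paths out of patch-style header lines; the
// patch text here is a hypothetical example, not the exact apply_patch payload format.
const patchLines = [
  "*** Begin Patch",
  "--- a/src/utils/config.ts",
  "+++ b/src/utils/config.ts",
  "*** End Patch",
];
const touchedFiles = new Set<string>();
for (const line of patchLines) {
  const m = line.match(/^[-+]{3} [ab]\/(.+)$/);
  if (m && m[1]) {
    touchedFiles.add(m[1]); // adds "src/utils/config.ts"
  }
}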
|
||||
function processNonExecTool(
|
||||
toolName: string,
|
||||
argsJson: unknown,
|
||||
filesSet: Set<string>,
|
||||
): string {
|
||||
let summary = toolName;
|
||||
|
||||
if (argsJson && typeof argsJson === "object") {
|
||||
// Extract a few common argument keys to make the summary more useful
|
||||
// without being overly verbose.
|
||||
const interestingKeys = ["path", "file", "filepath", "filename", "pattern"];
|
||||
for (const key of interestingKeys) {
|
||||
const val = (argsJson as Record<string, unknown>)[key];
|
||||
if (typeof val === "string") {
|
||||
summary += ` ${val}`;
|
||||
if (val.includes("/")) {
|
||||
filesSet.add(val);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return summary;
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import TypeaheadOverlay from "./typeahead-overlay.js";
|
||||
import {
|
||||
getAvailableModels,
|
||||
RECOMMENDED_MODELS as _RECOMMENDED_MODELS,
|
||||
RECOMMENDED_MODELS,
|
||||
} from "../utils/model-utils.js";
|
||||
import { Box, Text, useInput } from "ink";
|
||||
import React, { useEffect, useState } from "react";
|
||||
@@ -16,53 +16,39 @@ import React, { useEffect, useState } from "react";
|
||||
*/
|
||||
type Props = {
|
||||
currentModel: string;
|
||||
currentProvider?: string;
|
||||
hasLastResponse: boolean;
|
||||
providers?: Record<string, { name: string; baseURL: string; envKey: string }>;
|
||||
onSelect: (allModels: Array<string>, model: string) => void;
|
||||
onSelectProvider?: (provider: string) => void;
|
||||
onSelect: (model: string) => void;
|
||||
onExit: () => void;
|
||||
};
|
||||
|
||||
export default function ModelOverlay({
|
||||
currentModel,
|
||||
providers = {},
|
||||
currentProvider = "openai",
|
||||
hasLastResponse,
|
||||
onSelect,
|
||||
onSelectProvider,
|
||||
onExit,
|
||||
}: Props): JSX.Element {
|
||||
const [items, setItems] = useState<Array<{ label: string; value: string }>>(
|
||||
[],
|
||||
);
|
||||
const [providerItems, _setProviderItems] = useState<
|
||||
Array<{ label: string; value: string }>
|
||||
>(Object.values(providers).map((p) => ({ label: p.name, value: p.name })));
|
||||
const [mode, setMode] = useState<"model" | "provider">("model");
|
||||
const [isLoading, setIsLoading] = useState<boolean>(true);
|
||||
|
||||
// This effect will run when the provider changes to update the model list
|
||||
useEffect(() => {
|
||||
setIsLoading(true);
|
||||
(async () => {
|
||||
try {
|
||||
const models = await getAvailableModels(currentProvider);
|
||||
// Convert the models to the format needed by TypeaheadOverlay
|
||||
setItems(
|
||||
models.map((m) => ({
|
||||
label: m,
|
||||
value: m,
|
||||
})),
|
||||
);
|
||||
} catch (error) {
|
||||
// Silently handle errors - remove console.error
|
||||
// console.error("Error loading models:", error);
|
||||
} finally {
|
||||
setIsLoading(false);
|
||||
}
|
||||
const models = await getAvailableModels();
|
||||
|
||||
// Split the list into recommended and “other” models.
|
||||
const recommended = RECOMMENDED_MODELS.filter((m) => models.includes(m));
|
||||
const others = models.filter((m) => !recommended.includes(m));
|
||||
|
||||
const ordered = [...recommended, ...others.sort()];
|
||||
|
||||
setItems(
|
||||
ordered.map((m) => ({
|
||||
label: recommended.includes(m) ? `⭐ ${m}` : m,
|
||||
value: m,
|
||||
})),
|
||||
);
|
||||
})();
|
||||
}, [currentProvider]);
|
||||
}, []);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// If the conversation already contains a response we cannot change the model
|
||||
@@ -72,14 +58,10 @@ export default function ModelOverlay({
|
||||
// available action is to dismiss the overlay (Esc or Enter).
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Register input handling for switching between model and provider selection
|
||||
// Always register input handling so hooks are called consistently.
|
||||
useInput((_input, key) => {
|
||||
if (hasLastResponse && (key.escape || key.return)) {
|
||||
onExit();
|
||||
} else if (!hasLastResponse) {
|
||||
if (key.tab) {
|
||||
setMode(mode === "model" ? "provider" : "model");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@@ -109,56 +91,17 @@ export default function ModelOverlay({
|
||||
);
|
||||
}
|
||||
|
||||
if (mode === "provider") {
|
||||
return (
|
||||
<TypeaheadOverlay
|
||||
title="Select provider"
|
||||
description={
|
||||
<Box flexDirection="column">
|
||||
<Text>
|
||||
Current provider:{" "}
|
||||
<Text color="greenBright">{currentProvider}</Text>
|
||||
</Text>
|
||||
<Text dimColor>press tab to switch to model selection</Text>
|
||||
</Box>
|
||||
}
|
||||
initialItems={providerItems}
|
||||
currentValue={currentProvider}
|
||||
onSelect={(provider) => {
|
||||
if (onSelectProvider) {
|
||||
onSelectProvider(provider);
|
||||
// Immediately switch to model selection so user can pick a model for the new provider
|
||||
setMode("model");
|
||||
}
|
||||
}}
|
||||
onExit={onExit}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<TypeaheadOverlay
|
||||
title="Select model"
|
||||
title="Switch model"
|
||||
description={
|
||||
<Box flexDirection="column">
|
||||
<Text>
|
||||
Current model: <Text color="greenBright">{currentModel}</Text>
|
||||
</Text>
|
||||
<Text>
|
||||
Current provider: <Text color="greenBright">{currentProvider}</Text>
|
||||
</Text>
|
||||
{isLoading && <Text color="yellow">Loading models...</Text>}
|
||||
<Text dimColor>press tab to switch to provider selection</Text>
|
||||
</Box>
|
||||
<Text>
|
||||
Current model: <Text color="greenBright">{currentModel}</Text>
|
||||
</Text>
|
||||
}
|
||||
initialItems={items}
|
||||
currentValue={currentModel}
|
||||
onSelect={(selectedModel) =>
|
||||
onSelect(
|
||||
items?.map((m) => m.value),
|
||||
selectedModel,
|
||||
)
|
||||
}
|
||||
onSelect={onSelect}
|
||||
onExit={onExit}
|
||||
/>
|
||||
);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import Indicator, { type Props as IndicatorProps } from "./indicator.js";
|
||||
import ItemComponent, { type Props as ItemProps } from "./item.js";
|
||||
import Indicator, { type Props as IndicatorProps } from "./Indicator.js";
|
||||
import ItemComponent, { type Props as ItemProps } from "./Item.js";
|
||||
import isEqual from "fast-deep-equal";
|
||||
import { Box, useInput } from "ink";
|
||||
import React, {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.