Compare commits

..

3 Commits

Author               SHA1        Message                               Date
lukeqin-oai          ee193c9ec1  Remove intentionally failing CI test  2025-12-17 23:17:53 -08:00
Alexander Embiricos  012bf21679  chore: intentionally fail tests       2025-12-17 22:45:24 -08:00
Alexander Embiricos  55edfc386a  temp                                  2025-12-17 22:32:35 -08:00
2312 changed files with 87180 additions and 210233 deletions

View File

@@ -1,4 +0,0 @@
# Without this, Bazel will consider BUILD.bazel files in
# .git/sl/origbackups (which can be populated by Sapling SCM).
.git
codex-rs/target

View File

@@ -1,51 +0,0 @@
common --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
common --repo_env=BAZEL_NO_APPLE_CPP_TOOLCHAIN=1
# Dummy xcode config so we don't need to build xcode_locator in repo rule.
common --xcode_version_config=//:disable_xcode
common --disk_cache=~/.cache/bazel-disk-cache
common --repo_contents_cache=~/.cache/bazel-repo-contents-cache
common --repository_cache=~/.cache/bazel-repo-cache
common --remote_cache_compression
startup --experimental_remote_repo_contents_cache
common --experimental_platform_in_output_dir
# Runfiles strategy rationale: codex-rs/utils/cargo-bin/README.md
common --noenable_runfiles
common --enable_platform_specific_config
# TODO(zbarsky): We need to untangle these libc constraints to get linux remote builds working.
common:linux --host_platform=//:local
common --@rules_cc//cc/toolchains/args/archiver_flags:use_libtool_on_macos=False
common --@toolchains_llvm_bootstrapped//config:experimental_stub_libgcc_s
# We need to use the sh toolchain on windows so we don't send host bash paths to the linux executor.
common:windows --@rules_rust//rust/settings:experimental_use_sh_toolchain_for_bootstrap_process_wrapper
# TODO(zbarsky): rules_rust doesn't implement this flag properly with remote exec...
# common --@rules_rust//rust/settings:pipelined_compilation
common --incompatible_strict_action_env
# Not ideal, but We need to allow dotslash to be found
common --test_env=PATH=/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
common --test_output=errors
common --bes_results_url=https://app.buildbuddy.io/invocation/
common --bes_backend=grpcs://remote.buildbuddy.io
common --remote_cache=grpcs://remote.buildbuddy.io
common --remote_download_toplevel
common --nobuild_runfile_links
common --remote_timeout=3600
common --noexperimental_throttle_remote_action_building
common --experimental_remote_execution_keepalive
common --grpc_keepalive_time=30s
# This limits both in-flight executions and concurrent downloads. Even with high number
# of jobs execution will still be limited by CPU cores, so this just pays a bit of
# memory in exchange for higher download concurrency.
common --jobs=30
common:remote --extra_execution_platforms=//:rbe
common:remote --remote_executor=grpcs://remote.buildbuddy.io
common:remote --jobs=800

View File

@@ -1 +0,0 @@
9.0.0

View File

@@ -1,3 +1,2 @@
iTerm
iTerm2
psuedo

View File

@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm
ignore-words-list = ratatui,ser

View File

@@ -40,18 +40,11 @@ body:
description: |
For MacOS and Linux: copy the output of `uname -mprs`
For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
- type: input
id: terminal
attributes:
label: What terminal emulator and version are you using (if applicable)?
description: Also note any multiplexer in use (screen / tmux / zellij)
description: |
E.g., VSCode, Terminal.app, iTerm2, Ghostty, Windows Terminal (WSL / PowerShell)
- type: textarea
id: actual
attributes:
label: What issue are you seeing?
description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
validations:
required: true
- type: textarea

View File

@@ -4,14 +4,6 @@ inputs:
target:
description: Rust compilation target triple (e.g. aarch64-apple-darwin).
required: true
sign-binaries:
description: Whether to sign and notarize the macOS binaries.
required: false
default: "true"
sign-dmg:
description: Whether to sign and notarize the macOS dmg.
required: false
default: "true"
apple-certificate:
description: Base64-encoded Apple signing certificate (P12).
required: true
@@ -115,7 +107,6 @@ runs:
echo "::add-mask::$APPLE_CODESIGN_IDENTITY"
- name: Sign macOS binaries
if: ${{ inputs.sign-binaries == 'true' }}
shell: bash
run: |
set -euo pipefail
@@ -136,7 +127,6 @@ runs:
done
- name: Notarize macOS binaries
if: ${{ inputs.sign-binaries == 'true' }}
shell: bash
env:
APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
@@ -159,8 +149,6 @@ runs:
}
trap cleanup_notary EXIT
source "$GITHUB_ACTION_PATH/notary_helpers.sh"
notarize_binary() {
local binary="$1"
local source_path="codex-rs/target/${{ inputs.target }}/release/${binary}"
@@ -174,54 +162,32 @@ runs:
rm -f "$archive_path"
ditto -c -k --keepParent "$source_path" "$archive_path"
notarize_submission "$binary" "$archive_path" "$notary_key_path"
submission_json=$(xcrun notarytool submit "$archive_path" \
--key "$notary_key_path" \
--key-id "$APPLE_NOTARIZATION_KEY_ID" \
--issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
--output-format json \
--wait)
status=$(printf '%s\n' "$submission_json" | jq -r '.status // "Unknown"')
submission_id=$(printf '%s\n' "$submission_json" | jq -r '.id // ""')
if [[ -z "$submission_id" ]]; then
echo "Failed to retrieve submission ID for $binary"
exit 1
fi
echo "::notice title=Notarization::$binary submission ${submission_id} completed with status ${status}"
if [[ "$status" != "Accepted" ]]; then
echo "Notarization failed for ${binary} (submission ${submission_id}, status ${status})"
exit 1
fi
}
notarize_binary "codex"
notarize_binary "codex-responses-api-proxy"
- name: Sign and notarize macOS dmg
if: ${{ inputs.sign-dmg == 'true' }}
shell: bash
env:
APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
APPLE_NOTARIZATION_KEY_ID: ${{ inputs.apple-notarization-key-id }}
APPLE_NOTARIZATION_ISSUER_ID: ${{ inputs.apple-notarization-issuer-id }}
run: |
set -euo pipefail
for var in APPLE_CODESIGN_IDENTITY APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
if [[ -z "${!var:-}" ]]; then
echo "$var is required"
exit 1
fi
done
notary_key_path="${RUNNER_TEMP}/notarytool.key.p8"
echo "$APPLE_NOTARIZATION_KEY_P8" | base64 -d > "$notary_key_path"
cleanup_notary() {
rm -f "$notary_key_path"
}
trap cleanup_notary EXIT
source "$GITHUB_ACTION_PATH/notary_helpers.sh"
dmg_path="codex-rs/target/${{ inputs.target }}/release/codex-${{ inputs.target }}.dmg"
if [[ ! -f "$dmg_path" ]]; then
echo "dmg $dmg_path not found"
exit 1
fi
keychain_args=()
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" && -f "${APPLE_CODESIGN_KEYCHAIN}" ]]; then
keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
fi
codesign --force --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$dmg_path"
notarize_submission "codex-${{ inputs.target }}.dmg" "$dmg_path" "$notary_key_path"
xcrun stapler staple "$dmg_path"
- name: Remove signing keychain
if: ${{ always() }}
shell: bash

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env bash
notarize_submission() {
local label="$1"
local path="$2"
local notary_key_path="$3"
if [[ -z "${APPLE_NOTARIZATION_KEY_ID:-}" || -z "${APPLE_NOTARIZATION_ISSUER_ID:-}" ]]; then
echo "APPLE_NOTARIZATION_KEY_ID and APPLE_NOTARIZATION_ISSUER_ID are required for notarization"
exit 1
fi
if [[ -z "$notary_key_path" || ! -f "$notary_key_path" ]]; then
echo "Notary key file $notary_key_path not found"
exit 1
fi
if [[ ! -f "$path" ]]; then
echo "Notarization payload $path not found"
exit 1
fi
local submission_json
submission_json=$(xcrun notarytool submit "$path" \
--key "$notary_key_path" \
--key-id "$APPLE_NOTARIZATION_KEY_ID" \
--issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
--output-format json \
--wait)
local status submission_id
status=$(printf '%s\n' "$submission_json" | jq -r '.status // "Unknown"')
submission_id=$(printf '%s\n' "$submission_json" | jq -r '.id // ""')
if [[ -z "$submission_id" ]]; then
echo "Failed to retrieve submission ID for $label"
exit 1
fi
echo "::notice title=Notarization::$label submission ${submission_id} completed with status ${status}"
if [[ "$status" != "Accepted" ]]; then
echo "Notarization failed for ${label} (submission ${submission_id}, status ${status})"
exit 1
fi
}

BIN  .github/codex-cli-login.png  vendored  Normal file
Binary file not shown (2.9 MiB).

BIN  .github/codex-cli-permissions.png  vendored  Normal file
Binary file not shown (408 KiB).

Binary file not shown (before: 818 KiB, after: 3.1 MiB).

View File

@@ -15,10 +15,10 @@ Things to look out for when doing the review:
## Code Organization
- Each crate in the Cargo workspace in `codex-rs` has a specific purpose: make a note if you believe new code is not introduced in the correct crate.
- Each create in the Cargo workspace in `codex-rs` has a specific purpose: make a note if you believe new code is not introduced in the correct crate.
- When possible, try to keep the `core` crate as small as possible. Non-core but shared logic is often a good candidate for `codex-rs/common`.
- Be wary of large files and offer suggestions for how to break things into more reasonably-sized files.
- Rust files should generally be organized such that the public parts of the API appear near the top of the file and helper functions go below. This is analogous to the "inverted pyramid" structure that is favored in journalism.
- Rust files should generally be organized such that the public parts of the API appear near the top of the file and helper functions go below. This is analagous to the "inverted pyramid" structure that is favored in journalism.
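As a minimal Rust sketch of that inverted-pyramid layout (the module and names here are hypothetical), the public entry point sits at the top of the file and the private helper follows:

```rust
/// Public API first: parse a simple `key=value` config string.
pub fn parse_config(input: &str) -> Vec<(String, String)> {
    input.lines().filter_map(parse_line).collect()
}

// Private helper below: handle one line, skipping blanks and comments.
fn parse_line(line: &str) -> Option<(String, String)> {
    let line = line.trim();
    if line.is_empty() || line.starts_with('#') {
        return None;
    }
    let (key, value) = line.split_once('=')?;
    Some((key.trim().to_string(), value.trim().to_string()))
}
```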
## Assertions in Tests

BIN  .github/demo.gif  vendored  Normal file
Binary file not shown (19 MiB).

View File

@@ -1,163 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
: "${TARGET:?TARGET environment variable is required}"
: "${GITHUB_ENV:?GITHUB_ENV environment variable is required}"
apt_update_args=()
if [[ -n "${APT_UPDATE_ARGS:-}" ]]; then
# shellcheck disable=SC2206
apt_update_args=(${APT_UPDATE_ARGS})
fi
apt_install_args=()
if [[ -n "${APT_INSTALL_ARGS:-}" ]]; then
# shellcheck disable=SC2206
apt_install_args=(${APT_INSTALL_ARGS})
fi
sudo apt-get update "${apt_update_args[@]}"
sudo apt-get install -y "${apt_install_args[@]}" musl-tools pkg-config g++ clang libc++-dev libc++abi-dev lld
case "${TARGET}" in
x86_64-unknown-linux-musl)
arch="x86_64"
;;
aarch64-unknown-linux-musl)
arch="aarch64"
;;
*)
echo "Unexpected musl target: ${TARGET}" >&2
exit 1
;;
esac
# Use the musl toolchain as the Rust linker to avoid Zig injecting its own CRT.
if command -v "${arch}-linux-musl-gcc" >/dev/null; then
musl_linker="$(command -v "${arch}-linux-musl-gcc")"
elif command -v musl-gcc >/dev/null; then
musl_linker="$(command -v musl-gcc)"
else
echo "musl gcc not found after install; arch=${arch}" >&2
exit 1
fi
zig_target="${TARGET/-unknown-linux-musl/-linux-musl}"
runner_temp="${RUNNER_TEMP:-/tmp}"
tool_root="${runner_temp}/codex-musl-tools-${TARGET}"
mkdir -p "${tool_root}"
sysroot=""
if command -v zig >/dev/null; then
zig_bin="$(command -v zig)"
cc="${tool_root}/zigcc"
cxx="${tool_root}/zigcxx"
cat >"${cc}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
for arg in "\$@"; do
if [[ "\${skip_next}" -eq 1 ]]; then
skip_next=0
continue
fi
case "\${arg}" in
--target)
skip_next=1
continue
;;
--target=*|-target=*|-target)
# Drop any explicit --target/-target flags. Zig expects -target and
# rejects Rust triples like *-unknown-linux-musl.
if [[ "\${arg}" == "-target" ]]; then
skip_next=1
fi
continue
;;
esac
args+=("\${arg}")
done
exec "${zig_bin}" cc -target "${zig_target}" "\${args[@]}"
EOF
cat >"${cxx}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
for arg in "\$@"; do
if [[ "\${skip_next}" -eq 1 ]]; then
skip_next=0
continue
fi
case "\${arg}" in
--target)
skip_next=1
continue
;;
--target=*|-target=*|-target)
if [[ "\${arg}" == "-target" ]]; then
skip_next=1
fi
continue
;;
esac
args+=("\${arg}")
done
exec "${zig_bin}" c++ -target "${zig_target}" "\${args[@]}"
EOF
chmod +x "${cc}" "${cxx}"
sysroot="$("${zig_bin}" cc -target "${zig_target}" -print-sysroot 2>/dev/null || true)"
else
cc="${musl_linker}"
if command -v "${arch}-linux-musl-g++" >/dev/null; then
cxx="$(command -v "${arch}-linux-musl-g++")"
elif command -v musl-g++ >/dev/null; then
cxx="$(command -v musl-g++)"
else
cxx="${cc}"
fi
fi
if [[ -n "${sysroot}" && "${sysroot}" != "/" ]]; then
echo "BORING_BSSL_SYSROOT=${sysroot}" >> "$GITHUB_ENV"
boring_sysroot_var="BORING_BSSL_SYSROOT_${TARGET}"
boring_sysroot_var="${boring_sysroot_var//-/_}"
echo "${boring_sysroot_var}=${sysroot}" >> "$GITHUB_ENV"
fi
cflags="-pthread"
cxxflags="-pthread"
if [[ "${TARGET}" == "aarch64-unknown-linux-musl" ]]; then
# BoringSSL enables -Wframe-larger-than=25344 under clang and treats warnings as errors.
cflags="${cflags} -Wno-error=frame-larger-than"
cxxflags="${cxxflags} -Wno-error=frame-larger-than"
fi
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
echo "CC=${cc}" >> "$GITHUB_ENV"
echo "TARGET_CC=${cc}" >> "$GITHUB_ENV"
target_cc_var="CC_${TARGET}"
target_cc_var="${target_cc_var//-/_}"
echo "${target_cc_var}=${cc}" >> "$GITHUB_ENV"
echo "CXX=${cxx}" >> "$GITHUB_ENV"
echo "TARGET_CXX=${cxx}" >> "$GITHUB_ENV"
target_cxx_var="CXX_${TARGET}"
target_cxx_var="${target_cxx_var//-/_}"
echo "${target_cxx_var}=${cxx}" >> "$GITHUB_ENV"
cargo_linker_var="CARGO_TARGET_${TARGET^^}_LINKER"
cargo_linker_var="${cargo_linker_var//-/_}"
echo "${cargo_linker_var}=${musl_linker}" >> "$GITHUB_ENV"
echo "CMAKE_C_COMPILER=${cc}" >> "$GITHUB_ENV"
echo "CMAKE_CXX_COMPILER=${cxx}" >> "$GITHUB_ENV"
echo "CMAKE_ARGS=-DCMAKE_HAVE_THREADS_LIBRARY=1 -DCMAKE_USE_PTHREADS_INIT=1 -DCMAKE_THREAD_LIBS_INIT=-pthread -DTHREADS_PREFER_PTHREAD_FLAG=ON" >> "$GITHUB_ENV"

View File

@@ -1,20 +0,0 @@
FROM ubuntu:24.04
# TODO(mbolin): Published to docker.io/mbolin491/codex-bazel:latest for
# initial debugging, but we should publish to a more proper location.
#
# docker buildx create --use
# docker buildx build --platform linux/amd64,linux/arm64 -f .github/workflows/Dockerfile.bazel -t mbolin491/codex-bazel:latest --push .
RUN apt-get update && \
apt-get install -y --no-install-recommends \
curl git python3 ca-certificates && \
rm -rf /var/lib/apt/lists/*
# Install dotslash.
RUN curl -LSfs "https://github.com/facebook/dotslash/releases/download/v0.5.8/dotslash-ubuntu-22.04.$(uname -m).tar.gz" | tar fxz - -C /usr/local/bin
# Ubuntu 24.04 ships with user 'ubuntu' already created with UID 1000.
USER ubuntu
WORKDIR /workspace

View File

@@ -1,110 +0,0 @@
name: Bazel (experimental)
# Note this workflow was originally derived from:
# https://github.com/cerisier/toolchains_llvm_bootstrapped/blob/main/.github/workflows/ci.yaml
on:
pull_request: {}
push:
branches:
- main
workflow_dispatch:
concurrency:
# Cancel previous actions from the same PR or branch except 'main' branch.
# See https://docs.github.com/en/actions/using-jobs/using-concurrency and https://docs.github.com/en/actions/learn-github-actions/contexts for more info.
group: concurrency-group::${{ github.workflow }}::${{ github.event.pull_request.number > 0 && format('pr-{0}', github.event.pull_request.number) || github.ref_name }}${{ github.ref_name == 'main' && format('::{0}', github.run_id) || ''}}
cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
test:
strategy:
fail-fast: false
matrix:
include:
# macOS
- os: macos-15-xlarge
target: aarch64-apple-darwin
- os: macos-15-xlarge
target: x86_64-apple-darwin
# Linux
- os: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
- os: ubuntu-24.04
target: x86_64-unknown-linux-gnu
- os: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
- os: ubuntu-24.04
target: x86_64-unknown-linux-musl
# TODO: Enable Windows once we fix the toolchain issues there.
#- os: windows-latest
# target: x86_64-pc-windows-gnullvm
runs-on: ${{ matrix.os }}
# Configure a human readable name for each job
name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}
steps:
- uses: actions/checkout@v6
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- name: Make DotSlash available in PATH (Unix)
if: runner.os != 'Windows'
run: cp "$(which dotslash)" /usr/local/bin
- name: Make DotSlash available in PATH (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: Copy-Item (Get-Command dotslash).Source -Destination "$env:LOCALAPPDATA\Microsoft\WindowsApps\dotslash.exe"
# Install Bazel via Bazelisk
- name: Set up Bazel
uses: bazelbuild/setup-bazelisk@v3
# TODO(mbolin): Bring this back once we have caching working. Currently,
# we never seem to get a cache hit but we still end up paying the cost of
# uploading at the end of the build, which takes over a minute!
#
# Cache build and external artifacts so that the next ci build is incremental.
# Because github action caches cannot be updated after a build, we need to
# store the contents of each build in a unique cache key, then fall back to loading
# it on the next ci run. We use hashFiles(...) in the key and restore-keys- with
# the prefix to load the most recent cache for the branch on a cache miss. You
# should customize the contents of hashFiles to capture any bazel input sources,
# although this doesn't need to be perfect. If none of the input sources change
# then a cache hit will load an existing cache and bazel won't have to do any work.
# In the case of a cache miss, you want the fallback cache to contain most of the
# previously built artifacts to minimize build time. The more precise you are with
# hashFiles sources the less work bazel will have to do.
# - name: Mount bazel caches
# uses: actions/cache@v5
# with:
# path: |
# ~/.cache/bazel-repo-cache
# ~/.cache/bazel-repo-contents-cache
# key: bazel-cache-${{ matrix.os }}-${{ hashFiles('**/BUILD.bazel', '**/*.bzl', 'MODULE.bazel') }}
# restore-keys: |
# bazel-cache-${{ matrix.os }}
- name: Configure Bazel startup args (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
# Use a very short path to reduce argv/path length issues.
"BAZEL_STARTUP_ARGS=--output_user_root=C:\" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: bazel test //...
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
bazel $BAZEL_STARTUP_ARGS --bazelrc=.github/workflows/ci.bazelrc test //... \
--build_metadata=REPO_URL=https://github.com/openai/codex.git \
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD) \
--build_metadata=ROLE=CI \
--build_metadata=VISIBILITY=PUBLIC \
"--remote_header=x-buildbuddy-api-key=$BUILDBUDDY_API_KEY"

View File

@@ -20,7 +20,7 @@ jobs:
uses: dtolnay/rust-toolchain@stable
- name: Run cargo-deny
uses: EmbarkStudios/cargo-deny-action@v2
uses: EmbarkStudios/cargo-deny-action@v1
with:
rust-version: stable
manifest-path: ./codex-rs/Cargo.toml

View File

@@ -1,20 +0,0 @@
common --remote_download_minimal
common --nobuild_runfile_links
common --keep_going
# We prefer to run the build actions entirely remotely so we can dial up the concurrency.
# We have platform-specific tests, so we want to execute the tests on all platforms using the strongest sandboxing available on each platform.
# On linux, we can do a full remote build/test, by targeting the right (x86/arm) runners, so we have coverage of both.
# Linux crossbuilds don't work until we untangle the libc constraint mess.
common:linux --config=remote
common:linux --strategy=remote
common:linux --platforms=//:rbe
# On mac, we can run all the build actions remotely but test actions locally.
common:macos --config=remote
common:macos --strategy=remote
common:macos --strategy=TestRunner=darwin-sandbox,local
common:windows --strategy=TestRunner=local

View File

@@ -37,7 +37,7 @@ jobs:
run: |
set -euo pipefail
# Use a rust-release version that includes all native binaries.
CODEX_VERSION=0.74.0
CODEX_VERSION=0.74.0-alpha.3
OUTPUT_DIR="${RUNNER_TEMP}"
python3 ./scripts/stage_npm_packages.py \
--release-version "$CODEX_VERSION" \

View File

@@ -12,8 +12,6 @@ permissions:
jobs:
close-stale-contributor-prs:
# Prevent scheduled runs on forks
if: github.repository == 'openai/codex'
runs-on: ubuntu-latest
steps:
- name: Close inactive PRs from contributors

View File

@@ -9,8 +9,7 @@ on:
jobs:
gather-duplicates:
name: Identify potential duplicates
# Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate'))
if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate') }}
runs-on: ubuntu-latest
permissions:
contents: read

View File

@@ -9,8 +9,7 @@ on:
jobs:
gather-labels:
name: Generate label suggestions
# Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label'))
if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label') }}
runs-on: ubuntu-latest
permissions:
contents: read
@@ -38,10 +37,9 @@ jobs:
- If applicable, add one of the following labels to specify which sub-product or product surface the issue relates to.
1. CLI — the Codex command line interface.
2. extension — VS Code (or other IDE) extension-specific issues.
3. app - Issues related to the Codex desktop application.
4. codex-web — Issues targeting the Codex web UI/Cloud experience.
5. github-action — Issues with the Codex GitHub action.
6. iOS — Issues with the Codex iOS app.
3. codex-web — Issues targeting the Codex web UI/Cloud experience.
4. github-action — Issues with the Codex GitHub action.
5. iOS — Issues with the Codex iOS app.
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).

View File

@@ -59,7 +59,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
components: rustfmt
- name: cargo fmt
@@ -77,7 +77,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-shear
@@ -88,7 +88,7 @@ jobs:
# --- CI to validate on different os/targets --------------------------------
lint_build:
name: Lint/Build — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
# Keep job-level if to avoid spinning up runners when not needed
@@ -106,102 +106,55 @@ jobs:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
profile: dev
- runner: macos-15-xlarge
- runner: macos-14
target: x86_64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
# Also run representative release builds on Mac and Linux because
# there could be release-only build errors we want to catch.
# Hopefully this also pre-populates the build cache to speed up
# releases.
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
profile: release
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: release
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: windows-x64
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
- name: Install UBSan runtime (musl)
if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
components: clippy
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
@@ -222,10 +175,6 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
@@ -268,14 +217,6 @@ jobs:
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Disable sccache wrapper (musl)
shell: bash
run: |
set -euo pipefail
echo "RUSTC_WRAPPER=" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Prepare APT cache directories (musl)
shell: bash
@@ -293,73 +234,15 @@ jobs:
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
DEBIAN_FRONTEND: noninteractive
TARGET: ${{ matrix.target }}
APT_UPDATE_ARGS: -o Acquire::Retries=3
APT_INSTALL_ARGS: --no-install-recommends
shell: bash
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
sudo apt-get -y update -o Acquire::Retries=3
sudo apt-get -y install --no-install-recommends musl-tools pkg-config
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
@@ -406,10 +289,6 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
@@ -457,7 +336,7 @@ jobs:
tests:
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
@@ -474,43 +353,46 @@ jobs:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
# We have been running out of space when running this job on Linux for
# x86_64-unknown-linux-gnu, so remove some unnecessary dependencies.
- name: Remove unnecessary dependencies to save space
if: ${{ startsWith(matrix.runner, 'ubuntu') }}
shell: bash
run: |
set -euo pipefail
sudo rm -rf \
/usr/local/lib/android \
/usr/share/dotnet \
/usr/local/share/boost \
/usr/local/lib/node_modules \
/opt/ghc
sudo apt-get remove -y docker.io docker-compose podman buildah
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}

View File

@@ -14,8 +14,6 @@ permissions:
jobs:
prepare:
# Prevent scheduled runs on forks (no secrets, wastes Actions minutes)
if: github.repository == 'openai/codex'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
@@ -43,7 +41,7 @@ jobs:
curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/core/models.json
- name: Open pull request (if changed)
uses: peter-evans/create-pull-request@v8
uses: peter-evans/create-pull-request@v7
with:
commit-message: "Update models.json"
title: "Update models.json"

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
@@ -45,20 +45,11 @@ jobs:
echo "✅ Tag and Cargo.toml agree (${tag_ver})"
echo "::endgroup::"
- name: Verify config schema fixture
shell: bash
working-directory: codex-rs
run: |
set -euo pipefail
echo "If this fails, run: just write-config-schema to overwrite fixture with intentional changes."
cargo run -p codex-core --bin codex-write-config-schema
git diff --exit-code core/config.schema.json
build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 60
timeout-minutes: 30
permissions:
contents: read
id-token: write
@@ -89,30 +80,10 @@ jobs:
steps:
- uses: actions/checkout@v6
- name: Install UBSan runtime (musl)
if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- uses: actions/cache@v5
with:
path: |
@@ -120,76 +91,14 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
- name: Cargo build
shell: bash
@@ -219,72 +128,11 @@ jobs:
account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT_NAME }}
certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME }}
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (binaries)
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
name: MacOS code signing
uses: ./.github/actions/macos-code-sign
with:
target: ${{ matrix.target }}
sign-binaries: "true"
sign-dmg: "false"
apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
apple-certificate-password: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
apple-notarization-key-p8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
apple-notarization-key-id: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
apple-notarization-issuer-id: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
- if: ${{ runner.os == 'macOS' }}
name: Build macOS dmg
shell: bash
run: |
set -euo pipefail
target="${{ matrix.target }}"
release_dir="target/${target}/release"
dmg_root="${RUNNER_TEMP}/codex-dmg-root"
volname="Codex (${target})"
dmg_path="${release_dir}/codex-${target}.dmg"
# The previous "MacOS code signing (binaries)" step signs + notarizes the
# built artifacts in `${release_dir}`. This step packages *those same*
# signed binaries into a dmg.
codex_binary_path="${release_dir}/codex"
proxy_binary_path="${release_dir}/codex-responses-api-proxy"
rm -rf "$dmg_root"
mkdir -p "$dmg_root"
if [[ ! -f "$codex_binary_path" ]]; then
echo "Binary $codex_binary_path not found"
exit 1
fi
if [[ ! -f "$proxy_binary_path" ]]; then
echo "Binary $proxy_binary_path not found"
exit 1
fi
ditto "$codex_binary_path" "${dmg_root}/codex"
ditto "$proxy_binary_path" "${dmg_root}/codex-responses-api-proxy"
rm -f "$dmg_path"
hdiutil create \
-volname "$volname" \
-srcfolder "$dmg_root" \
-format UDZO \
-ov \
"$dmg_path"
if [[ ! -f "$dmg_path" ]]; then
echo "dmg $dmg_path not found after build"
exit 1
fi
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (dmg)
uses: ./.github/actions/macos-code-sign
with:
target: ${{ matrix.target }}
sign-binaries: "false"
sign-dmg: "true"
apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
apple-certificate-password: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
apple-notarization-key-p8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
@@ -312,10 +160,6 @@ jobs:
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.sigstore "$dest/codex-responses-api-proxy-${{ matrix.target }}.sigstore"
fi
if [[ "${{ matrix.target }}" == *apple-darwin ]]; then
cp target/${{ matrix.target }}/release/codex-${{ matrix.target }}.dmg "$dest/codex-${{ matrix.target }}.dmg"
fi
- if: ${{ matrix.runner == 'windows-11-arm' }}
name: Install zstd
shell: powershell
@@ -327,7 +171,6 @@ jobs:
# Path that contains the uncompressed binaries for the current
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
repo_root=$PWD
# We want to ship the raw Windows executables in the GitHub Release
# in addition to the compressed archives. Keep the originals for
@@ -351,7 +194,7 @@ jobs:
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz || "$base" == *.zip || "$base" == *.dmg ]]; then
if [[ "$base" == *.tar.gz || "$base" == *.zip ]]; then
continue
fi
@@ -367,30 +210,7 @@ jobs:
# Must run from inside the dest dir so 7z won't
# embed the directory path inside the zip.
if [[ "${{ matrix.runner }}" == windows* ]]; then
if [[ "$base" == "codex-${{ matrix.target }}.exe" ]]; then
# Bundle the sandbox helper binaries into the main codex zip so
# WinGet installs include the required helpers next to codex.exe.
# Fall back to the single-binary zip if the helpers are missing
# to avoid breaking releases.
bundle_dir="$(mktemp -d)"
runner_src="$dest/codex-command-runner-${{ matrix.target }}.exe"
setup_src="$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
if [[ -f "$runner_src" && -f "$setup_src" ]]; then
cp "$dest/$base" "$bundle_dir/$base"
cp "$runner_src" "$bundle_dir/codex-command-runner.exe"
cp "$setup_src" "$bundle_dir/codex-windows-sandbox-setup.exe"
# Use an absolute path so bundle zips land in the real dist
# dir even when 7z runs from a temp directory.
(cd "$bundle_dir" && 7z a "$repo_root/$dest/${base}.zip" .)
else
echo "warning: missing sandbox binaries; falling back to single-binary zip"
echo "warning: expected $runner_src and $setup_src"
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
rm -rf "$bundle_dir"
else
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
# Also create .zst (existing behaviour) *and* remove the original
@@ -438,26 +258,6 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6
- name: Generate release notes from tag commit message
id: release_notes
shell: bash
run: |
set -euo pipefail
# On tag pushes, GITHUB_SHA may be a tag object for annotated tags;
# peel it to the underlying commit.
commit="$(git rev-parse "${GITHUB_SHA}^{commit}")"
notes_path="${RUNNER_TEMP}/release-notes.md"
# Use the commit message for the commit the tag points at (not the
# annotated tag message).
git log -1 --format=%B "${commit}" > "${notes_path}"
# Ensure trailing newline so GitHub's markdown renderer doesn't
# occasionally run the last line into subsequent content.
echo >> "${notes_path}"
echo "path=${notes_path}" >> "${GITHUB_OUTPUT}"
- uses: actions/download-artifact@v7
with:
path: dist
@@ -473,10 +273,6 @@ jobs:
ls -R dist/
- name: Add config schema release asset
run: |
cp codex-rs/core/config.schema.json dist/config-schema.json
- name: Define release name
id: release_name
run: |
@@ -534,7 +330,6 @@ jobs:
with:
name: ${{ steps.release_name.outputs.name }}
tag_name: ${{ github.ref_name }}
body_path: ${{ steps.release_notes.outputs.path }}
files: dist/**
# Mark as prerelease only when the version has a suffix after x.y.z
# (e.g. -alpha, -beta). Otherwise publish a normal release.
@@ -547,19 +342,6 @@ jobs:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json
- name: Trigger developers.openai.com deploy
# Only trigger the deploy if the release is not a pre-release.
# The deploy is used to update the developers.openai.com website with the new config schema json file.
if: ${{ !contains(steps.release_name.outputs.name, '-') }}
continue-on-error: true
env:
DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL: ${{ secrets.DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL }}
run: |
if ! curl -sS -f -o /dev/null -X POST "$DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL"; then
echo "::warning title=developers.openai.com deploy hook failed::Vercel deploy hook POST failed for ${GITHUB_REF_NAME}"
exit 1
fi
# Publish to npm using OIDC authentication.
# July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
# npm docs: https://docs.npmjs.com/trusted-publishers

View File

@@ -24,7 +24,7 @@ jobs:
node-version: 22
cache: pnpm
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
- name: build codex
run: cargo build --bin codex

View File

@@ -93,83 +93,15 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6
- name: Install UBSan runtime (musl)
if: ${{ matrix.install_musl }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
- if: ${{ matrix.install_musl }}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.install_musl }}
name: Install musl build dependencies
env:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.install_musl }}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.install_musl }}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
- name: Build exec server binaries
run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper
@@ -266,7 +198,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
@@ -308,7 +240,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
@@ -344,6 +276,7 @@ jobs:
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.8.1
run_install: false
- name: Setup Node.js
@@ -436,6 +369,12 @@ jobs:
id-token: write
contents: read
steps:
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.8.1
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v6
with:
@@ -443,7 +382,6 @@ jobs:
registry-url: https://registry.npmjs.org
scope: "@openai"
# Trusted publishing requires npm CLI version 11.5.1 or later.
- name: Update npm
run: npm install -g npm@latest

.gitignore  vendored  (6 changed lines)
View File

@@ -9,7 +9,6 @@ node_modules
# build
dist/
bazel-*
build/
out/
storybook-static/
@@ -86,8 +85,3 @@ CHANGELOG.ignore.md
# nix related
.direnv
.envrc
# Python bytecode files
__pycache__/
*.pyc

View File

@@ -1,6 +0,0 @@
config:
MD013:
line_length: 100
globs:
- "docs/tui-chat-composer.md"

View File

@@ -11,17 +11,14 @@ In the codex-rs folder where the rust code lives:
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
- When possible, make `match` statements exhaustive and avoid wildcard arms.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
- If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.
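A tiny sketch of the exhaustive-`match` guideline above (the enum and variants are hypothetical): naming every variant means a newly added variant becomes a compile error rather than silently falling into a wildcard arm.

```rust
enum ReviewOutcome {
    Approved,
    ChangesRequested,
    Dismissed,
}

fn describe(outcome: ReviewOutcome) -> &'static str {
    // Exhaustive match: no `_` arm, so adding a variant forces an update here.
    match outcome {
        ReviewOutcome::Approved => "approved",
        ReviewOutcome::ChangesRequested => "changes requested",
        ReviewOutcome::Dismissed => "dismissed",
    }
}
```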
Run `just fmt` (in `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests:
Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspacewide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
Before finalizing a large change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspacewide Clippy builds; only run `just fix` without `-p` if you changed shared crates.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
## TUI style conventions
@@ -80,12 +77,6 @@ If you dont have the tool:
- Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields.
- Avoid mutating process environment in tests; prefer passing environment-derived flags or dependencies from above.
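A small sketch of the whole-object comparison this prefers (the struct and values are hypothetical):

```rust
#[derive(Debug, PartialEq)]
struct SessionSummary {
    id: u32,
    title: String,
    message_count: usize,
}

#[test]
fn summarizes_session() {
    let actual = SessionSummary {
        id: 7,
        title: "demo".to_string(),
        message_count: 3,
    };
    // Preferred: one deep-equality assertion on the entire object,
    // rather than separate asserts on actual.id, actual.title, etc.
    let expected = SessionSummary {
        id: 7,
        title: "demo".to_string(),
        message_count: 3,
    };
    assert_eq!(actual, expected);
}
```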
### Spawning workspace binaries in tests (Cargo vs Bazel)
- Prefer `codex_utils_cargo_bin::cargo_bin("...")` over `assert_cmd::Command::cargo_bin(...)` or `escargot` when tests need to spawn first-party binaries.
- Under Bazel, binaries and resources may live under runfiles; use `codex_utils_cargo_bin::cargo_bin` to resolve absolute paths that remain stable after `chdir`.
- When locating fixture files or test resources under Bazel, avoid `env!("CARGO_MANIFEST_DIR")`. Prefer `codex_utils_cargo_bin::find_resource!` so paths resolve correctly under both Cargo and Bazel runfiles.
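A sketch of the preferred spawning pattern (assuming `codex_utils_cargo_bin::cargo_bin` returns an absolute path usable with `std::process::Command`; the `--version` flag is illustrative):

```rust
use std::process::Command;

#[test]
fn spawns_first_party_binary() {
    // Resolves an absolute path that stays valid under both Cargo and
    // Bazel runfiles, even if the test later changes directory.
    let codex = codex_utils_cargo_bin::cargo_bin("codex");
    let output = Command::new(codex)
        .arg("--version")
        .output()
        .expect("failed to spawn codex");
    assert!(output.status.success());
}
```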
### Integration tests (core)
- Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests.

View File

@@ -1,23 +0,0 @@
load("@apple_support//xcode:xcode_config.bzl", "xcode_config")
xcode_config(name = "disable_xcode")
# We mark the local platform as glibc-compatible so that rust can grab a toolchain for us.
# TODO(zbarsky): Upstream a better libc constraint into rules_rust.
# We only enable this on linux though for sanity, and because it breaks remote execution.
platform(
name = "local",
constraint_values = [
"@toolchains_llvm_bootstrapped//constraints/libc:gnu.2.28",
],
parents = [
"@platforms//host",
],
)
alias(
name = "rbe",
actual = "@rbe_platform",
)
exports_files(["AGENTS.md"])

EXEC_PLAN.md  Normal file  (152 lines added)
View File

@@ -0,0 +1,152 @@
# Codex Execution Plans (ExecPlans):
This document describes the requirements for an execution plan ("ExecPlan"), a design document that a coding agent can follow to deliver a working feature or system change. Treat the reader as a complete beginner to this repository: they have only the current working tree and the single ExecPlan file you provide. There is no memory of prior plans and no external context.
## How to use ExecPlans and PLANS.md
When authoring an executable specification (ExecPlan), follow PLANS.md _to the letter_. If it is not in your context, refresh your memory by reading the entire PLANS.md file. Be thorough in reading (and re-reading) source material to produce an accurate specification. When creating a spec, start from the skeleton and flesh it out as you do your research.
When implementing an executable specification (ExecPlan), do not prompt the user for "next steps"; simply proceed to the next milestone. Keep all sections up to date, add or split entries in the list at every stopping point to affirmatively state the progress made and next steps. Resolve ambiguities autonomously, and commit frequently.
When discussing an executable specification (ExecPlan), record decisions in a log in the spec for posterity; it should be unambiguously clear why any change to the specification was made. ExecPlans are living documents, and it should always be possible to restart from _only_ the ExecPlan and no other work.
When researching a design with challenging requirements or significant unknowns, use milestones to implement proof of concepts, "toy implementations", etc., that allow validating whether the user's proposal is feasible. Read the source code of libraries by finding or acquiring them, research deeply, and include prototypes to guide a fuller implementation.
## Requirements
NON-NEGOTIABLE REQUIREMENTS:
* Every ExecPlan must be fully self-contained. Self-contained means that in its current form it contains all knowledge and instructions needed for a novice to succeed.
* Every ExecPlan is a living document. Contributors are required to revise it as progress is made, as discoveries occur, and as design decisions are finalized. Each revision must remain fully self-contained.
* Every ExecPlan must enable a complete novice to implement the feature end-to-end without prior knowledge of this repo.
* Every ExecPlan must produce a demonstrably working behavior, not merely code changes to "meet a definition".
* Every ExecPlan must define every term of art in plain language or do not use it.
Purpose and intent come first. Begin by explaining, in a few sentences, why the work matters from a user's perspective: what someone can do after this change that they could not do before, and how to see it working. Then guide the reader through the exact steps to achieve that outcome, including what to edit, what to run, and what they should observe.
The agent executing your plan can list files, read files, search, run the project, and run tests. It does not know any prior context and cannot infer what you meant from earlier milestones. Repeat any assumption you rely on. Do not point to external blogs or docs; if knowledge is required, embed it in the plan itself in your own words. If an ExecPlan builds upon a prior ExecPlan and that file is checked in, incorporate it by reference. If it is not, you must include all relevant context from that plan.
## Formatting
Format and envelope are simple and strict. Each ExecPlan must be one single fenced code block labeled as `md` that begins and ends with triple backticks. Do not nest additional triple-backtick code fences inside; when you need to show commands, transcripts, diffs, or code, present them as indented blocks within that single fence. Use indentation for clarity rather than code fences inside an ExecPlan to avoid prematurely closing the ExecPlan's code fence. Use two newlines after every heading, use # and ## and so on, and correct syntax for ordered and unordered lists.
When writing an ExecPlan to a Markdown (.md) file where the content of the file *is only* the single ExecPlan, you should omit the triple backticks.
Write in plain prose. Prefer sentences over lists. Avoid checklists, tables, and long enumerations unless brevity would obscure meaning. Checklists are permitted only in the `Progress` section, where they are mandatory. Narrative sections must remain prose-first.
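For instance, a command transcript inside an ExecPlan (the crate name and output below are illustrative, not taken from this repository) appears as indented blocks rather than nested fences:

    ## Concrete Steps

    From the repository root, run the test suite:

        cargo test -p example-crate

    The expected tail of the output is:

        test result: ok. 12 passed; 0 failed; 0 ignored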
## Guidelines
Self-containment and plain language are paramount. If you introduce a phrase that is not ordinary English ("daemon", "middleware", "RPC gateway", "filter graph"), define it immediately and remind the reader how it manifests in this repository (for example, by naming the files or commands where it appears). Do not say "as defined previously" or "according to the architecture doc." Include the needed explanation here, even if you repeat yourself.
Avoid common failure modes. Do not rely on undefined jargon. Do not describe "the letter of a feature" so narrowly that the resulting code compiles but does nothing meaningful. Do not outsource key decisions to the reader. When ambiguity exists, resolve it in the plan itself and explain why you chose that path. Err on the side of over-explaining user-visible effects and under-specifying incidental implementation details.
Anchor the plan with observable outcomes. State what the user can do after implementation, the commands to run, and the outputs they should see. Acceptance should be phrased as behavior a human can verify ("after starting the server, navigating to [http://localhost:8080/health](http://localhost:8080/health) returns HTTP 200 with body OK") rather than internal attributes ("added a HealthCheck struct"). If a change is internal, explain how its impact can still be demonstrated (for example, by running tests that fail before and pass after, and by showing a scenario that uses the new behavior).
Specify repository context explicitly. Name files with full repository-relative paths, name functions and modules precisely, and describe where new files should be created. If touching multiple areas, include a short orientation paragraph that explains how those parts fit together so a novice can navigate confidently. When running commands, show the working directory and exact command line. When outcomes depend on environment, state the assumptions and provide alternatives when reasonable.
Be idempotent and safe. Write the steps so they can be run multiple times without causing damage or drift. If a step can fail halfway, include how to retry or adapt. If a migration or destructive operation is necessary, spell out backups or safe fallbacks. Prefer additive, testable changes that can be validated as you go.
Validation is not optional. Include instructions to run tests, to start the system if applicable, and to observe it doing something useful. Describe comprehensive testing for any new features or capabilities. Include expected outputs and error messages so a novice can tell success from failure. Where possible, show how to prove that the change is effective beyond compilation (for example, through a small end-to-end scenario, a CLI invocation, or an HTTP request/response transcript). State the exact test commands appropriate to the project's toolchain and how to interpret their results.
Capture evidence. When your steps produce terminal output, short diffs, or logs, include them inside the single fenced block as indented examples. Keep them concise and focused on what proves success. If you need to include a patch, prefer file-scoped diffs or small excerpts that a reader can recreate by following your instructions rather than pasting large blobs.
## Milestones
Milestones are narrative, not bureaucracy. If you break the work into milestones, introduce each with a brief paragraph that describes the scope, what will exist at the end of the milestone that did not exist before, the commands to run, and the acceptance you expect to observe. Keep it readable as a story: goal, work, result, proof. Progress and milestones are distinct: milestones tell the story, progress tracks granular work. Both must exist. Never abbreviate a milestone merely for the sake of brevity; do not leave out details that could be crucial to a future implementation.
Each milestone must be independently verifiable and incrementally implement the overall goal of the execution plan.
## Living plans and design decisions
* ExecPlans are living documents. As you make key design decisions, update the plan to record both the decision and the thinking behind it. Record all decisions in the `Decision Log` section.
* ExecPlans must contain and maintain a `Progress` section, a `Surprises & Discoveries` section, a `Decision Log`, and an `Outcomes & Retrospective` section. These are not optional.
* When you discover optimizer behavior, performance tradeoffs, unexpected bugs, or inverse/unapply semantics that shaped your approach, capture those observations in the `Surprises & Discoveries` section with short evidence snippets (test output is ideal).
* If you change course mid-implementation, document why in the `Decision Log` and reflect the implications in `Progress`. Plans are guides for the next contributor as much as checklists for you.
* At completion of a major task or the full plan, write an `Outcomes & Retrospective` entry summarizing what was achieved, what remains, and lessons learned.
## Prototyping milestones and parallel implementations
It is acceptable, and often encouraged, to include explicit prototyping milestones when they de-risk a larger change. Examples: adding a low-level operator to a dependency to validate feasibility, or exploring two composition orders while measuring optimizer effects. Keep prototypes additive and testable. Clearly label the scope as “prototyping”; describe how to run and observe results; and state the criteria for promoting or discarding the prototype.
Prefer additive code changes followed by subtractions that keep tests passing. Parallel implementations (e.g., keeping an adapter alongside an older path during migration) are fine when they reduce risk or enable tests to continue passing during a large migration. Describe how to validate both paths and how to retire one safely with tests. When working with multiple new libraries or feature areas, consider creating spikes that evaluate the feasibility of these features _independently_ of one another, proving that the external library performs as expected and implements the features we need in isolation.
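As a sketch of what this looks like in practice (the binary and milestone names below are hypothetical), a prototyping milestone in an ExecPlan might read:

    ## Milestone 1 (prototyping): validate the streaming library in isolation

    Build a throwaway binary at tools/stream-probe that connects with the
    candidate library and prints one decoded frame. Run it from the
    repository root with:

        cargo run -p stream-probe

    Promotion criteria: if a frame decodes end-to-end, fold the approach
    into Milestone 2; otherwise discard the prototype and record why in
    the Decision Log.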
## Skeleton of a Good ExecPlan
```md
# <Short, action-oriented description>
This ExecPlan is a living document. The sections `Progress`, `Surprises & Discoveries`, `Decision Log`, and `Outcomes & Retrospective` must be kept up to date as work proceeds.
If a PLANS.md file is checked into the repo, reference the repository-relative path to that file here and note that this document must be maintained in accordance with PLANS.md.
## Purpose / Big Picture
Explain in a few sentences what someone gains after this change and how they can see it working. State the user-visible behavior you will enable.
## Progress
Use a list with checkboxes to summarize granular steps. Every stopping point must be documented here, even if it requires splitting a partially completed task into two (“done” vs. “remaining”). This section must always reflect the actual current state of the work.
- [x] (2025-10-01 13:00Z) Example completed step.
- [ ] Example incomplete step.
- [ ] Example partially completed step (completed: X; remaining: Y).
Use timestamps to measure rates of progress.
## Surprises & Discoveries
Document unexpected behaviors, bugs, optimizations, or insights discovered during implementation. Provide concise evidence.
- Observation: …
Evidence: …
## Decision Log
Record every decision made while working on the plan in the format:
- Decision: …
Rationale: …
Date/Author: …
## Outcomes & Retrospective
Summarize outcomes, gaps, and lessons learned at major milestones or at completion. Compare the result against the original purpose.
## Context and Orientation
Describe the current state relevant to this task as if the reader knows nothing. Name the key files and modules by full path. Define any non-obvious term you will use. Do not refer to prior plans.
## Plan of Work
Describe, in prose, the sequence of edits and additions. For each edit, name the file and location (function, module) and what to insert or change. Keep it concrete and minimal.
## Concrete Steps
State the exact commands to run and where to run them (working directory). When a command generates output, show a short expected transcript so the reader can compare. This section must be updated as work proceeds.
## Validation and Acceptance
Describe how to start or exercise the system and what to observe. Phrase acceptance as behavior, with specific inputs and outputs. If tests are involved, say "run <project's test command> and expect <N> passed; the new test <name> fails before the change and passes after".
## Idempotence and Recovery
If steps can be repeated safely, say so. If a step is risky, provide a safe retry or rollback path. Keep the environment clean after completion.
## Artifacts and Notes
Include the most important transcripts, diffs, or snippets as indented examples. Keep them concise and focused on what proves success.
## Interfaces and Dependencies
Be prescriptive. Name the libraries, modules, and services to use and why. Specify the types, traits/interfaces, and function signatures that must exist at the end of the milestone. Prefer stable names and paths such as `crate::module::function` or `package.submodule.Interface`. For example:

In crates/foo/planner.rs, define:

    pub trait Planner {
        fn plan(&self, observed: &Observed) -> Vec<Action>;
    }
```
If you follow the guidance above, a single, stateless agent -- or a human novice -- can read your ExecPlan from top to bottom and produce a working, observable result. That is the bar: SELF-CONTAINED, SELF-SUFFICIENT, NOVICE-GUIDING, OUTCOME-FOCUSED.
When you revise a plan, you must ensure your changes are comprehensively reflected across all sections, including the living document sections, and you must write a note at the bottom of the plan describing the change and the reason why. ExecPlans must describe not just the what but the why for almost everything.


@@ -1,136 +0,0 @@
bazel_dep(name = "platforms", version = "1.0.0")
bazel_dep(name = "toolchains_llvm_bootstrapped", version = "0.3.1")
archive_override(
module_name = "toolchains_llvm_bootstrapped",
integrity = "sha256-4/2h4tYSUSptxFVI9G50yJxWGOwHSeTeOGBlaLQBV8g=",
strip_prefix = "toolchains_llvm_bootstrapped-d20baf67e04d8e2887e3779022890d1dc5e6b948",
urls = ["https://github.com/cerisier/toolchains_llvm_bootstrapped/archive/d20baf67e04d8e2887e3779022890d1dc5e6b948.tar.gz"],
)
osx = use_extension("@toolchains_llvm_bootstrapped//toolchain/extension:osx.bzl", "osx")
osx.framework(name = "ApplicationServices")
osx.framework(name = "AppKit")
osx.framework(name = "ColorSync")
osx.framework(name = "CoreFoundation")
osx.framework(name = "CoreGraphics")
osx.framework(name = "CoreServices")
osx.framework(name = "CoreText")
osx.framework(name = "CFNetwork")
osx.framework(name = "Foundation")
osx.framework(name = "ImageIO")
osx.framework(name = "Kernel")
osx.framework(name = "OSLog")
osx.framework(name = "Security")
osx.framework(name = "SystemConfiguration")
register_toolchains(
"@toolchains_llvm_bootstrapped//toolchain:all",
)
# Needed to disable xcode...
bazel_dep(name = "apple_support", version = "2.1.0")
bazel_dep(name = "rules_cc", version = "0.2.16")
bazel_dep(name = "rules_platform", version = "0.1.0")
bazel_dep(name = "rules_rust", version = "0.68.1")
single_version_override(
module_name = "rules_rust",
patch_strip = 1,
patches = [
"//patches:rules_rust.patch",
"//patches:rules_rust_windows_gnu.patch",
"//patches:rules_rust_musl.patch",
],
)
RUST_TRIPLES = [
"aarch64-unknown-linux-musl",
"aarch64-apple-darwin",
"aarch64-pc-windows-gnullvm",
"x86_64-unknown-linux-musl",
"x86_64-apple-darwin",
"x86_64-pc-windows-gnullvm",
]
rust = use_extension("@rules_rust//rust:extensions.bzl", "rust")
rust.toolchain(
edition = "2024",
extra_target_triples = RUST_TRIPLES,
versions = ["1.93.0"],
)
use_repo(rust, "rust_toolchains")
register_toolchains("@rust_toolchains//:all")
bazel_dep(name = "rules_rs", version = "0.0.23")
crate = use_extension("@rules_rs//rs:extensions.bzl", "crate")
crate.from_cargo(
cargo_lock = "//codex-rs:Cargo.lock",
cargo_toml = "//codex-rs:Cargo.toml",
platform_triples = RUST_TRIPLES,
)
crate.annotation(
crate = "nucleo-matcher",
strip_prefix = "matcher",
version = "0.3.1",
)
bazel_dep(name = "openssl", version = "3.5.4.bcr.0")
crate.annotation(
build_script_data = [
"@openssl//:gen_dir",
],
build_script_env = {
"OPENSSL_DIR": "$(execpath @openssl//:gen_dir)",
"OPENSSL_NO_VENDOR": "1",
"OPENSSL_STATIC": "1",
},
crate = "openssl-sys",
data = ["@openssl//:gen_dir"],
)
inject_repo(crate, "openssl")
crate.annotation(
crate = "runfiles",
workspace_cargo_toml = "rust/runfiles/Cargo.toml",
)
# Fix readme inclusions
crate.annotation(
crate = "windows-link",
patch_args = ["-p1"],
patches = [
"//patches:windows-link.patch",
],
)
WINDOWS_IMPORT_LIB = """
load("@rules_cc//cc:defs.bzl", "cc_import")
cc_import(
name = "windows_import_lib",
static_library = glob(["lib/*.a"])[0],
)
"""
crate.annotation(
additive_build_file_content = WINDOWS_IMPORT_LIB,
crate = "windows_x86_64_gnullvm",
gen_build_script = "off",
deps = [":windows_import_lib"],
)
crate.annotation(
additive_build_file_content = WINDOWS_IMPORT_LIB,
crate = "windows_aarch64_gnullvm",
gen_build_script = "off",
deps = [":windows_import_lib"],
)
use_repo(crate, "crates")
rbe_platform_repository = use_repo_rule("//:rbe.bzl", "rbe_platform_repository")
rbe_platform_repository(
name = "rbe_platform",
)

MODULE.bazel.lock generated

File diff suppressed because one or more lines are too long

PNPM.md Normal file

@@ -0,0 +1,70 @@
# Migration to pnpm
This project has been migrated from npm to pnpm to improve dependency management and developer experience.
## Why pnpm?
- **Faster installation**: pnpm is significantly faster than npm and yarn
- **Disk space savings**: pnpm uses a content-addressable store to avoid duplication
- **Phantom dependency prevention**: pnpm creates a strict node_modules structure
- **Native workspaces support**: simplified monorepo management
## How to use pnpm
### Installation
```bash
# Global installation of pnpm
npm install -g pnpm@10.8.1
# Or with corepack (available with Node.js 22+)
corepack enable
corepack prepare pnpm@10.8.1 --activate
```
### Common commands
| npm command | pnpm equivalent |
| --------------- | ---------------- |
| `npm install` | `pnpm install` |
| `npm run build` | `pnpm run build` |
| `npm test` | `pnpm test` |
| `npm run lint` | `pnpm run lint` |
### Workspace-specific commands
| Action | Command |
| ------------------------------------------ | ---------------------------------------- |
| Run a command in a specific package | `pnpm --filter @openai/codex run build` |
| Install a dependency in a specific package | `pnpm --filter @openai/codex add lodash` |
| Run a command in all packages | `pnpm -r run test` |
## Monorepo structure
```
codex/
├── pnpm-workspace.yaml # Workspace configuration
├── .npmrc # pnpm configuration
├── package.json # Root dependencies and scripts
├── codex-cli/ # Main package
│ └── package.json # codex-cli specific dependencies
└── docs/ # Documentation (future package)
```
## Configuration files
- **pnpm-workspace.yaml**: Defines the packages included in the monorepo
- **.npmrc**: Configures pnpm behavior
- **Root package.json**: Contains shared scripts and dependencies
## CI/CD
CI/CD workflows have been updated to use pnpm instead of npm. Make sure your CI environments use pnpm 10.8.1 or higher.
## Known issues
If you encounter issues with pnpm, try the following solutions:
1. Remove the `node_modules` folder and `pnpm-lock.yaml` file, then run `pnpm install`
2. Make sure you're using pnpm 10.8.1 or higher
3. Verify that Node.js 22 or higher is installed
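For step 1 above, a clean reset from the repository root looks like this (the `rm -rf` is destructive, so double-check the paths first):

```bash
# Remove installed modules and the lockfile, then reinstall from scratch.
rm -rf node_modules pnpm-lock.yaml
pnpm install
```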


@@ -1,11 +1,13 @@
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
<p align="center">
<img src="https://github.com/openai/codex/blob/main/.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
</p>
</br>
If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE.</a>
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>
</br>If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a>
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p>
<p align="center">
<img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
</p>
---
@@ -13,19 +15,25 @@ If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="http
### Installing and running Codex CLI
Install globally with your preferred package manager:
Install globally with your preferred package manager. If you use npm:
```shell
# Install using npm
npm install -g @openai/codex
```
Alternatively, if you use Homebrew:
```shell
# Install using Homebrew
brew install --cask codex
```
Then simply run `codex` to get started.
Then simply run `codex` to get started:
```shell
codex
```
If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).
<details>
<summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
@@ -45,15 +53,60 @@ Each archive contains a single entry with the platform baked into the name (e.g.
### Using Codex with your ChatGPT plan
<p align="center">
<img src="./.github/codex-cli-login.png" alt="Codex CLI login" width="80%" />
</p>
Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).
You can also use Codex with an API key, but this requires [additional setup](https://developers.openai.com/codex/auth#sign-in-with-an-api-key).
You can also use Codex with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please comment on [this issue](https://github.com/openai/codex/issues/1243).
## Docs
### Model Context Protocol (MCP)
- [**Codex Documentation**](https://developers.openai.com/codex)
Codex can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers).
### Configuration
Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
### Execpolicy
See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that govern what commands Codex can execute.
### Docs & FAQ
- [**Getting started**](./docs/getting-started.md)
- [CLI usage](./docs/getting-started.md#cli-usage)
- [Slash Commands](./docs/slash_commands.md)
- [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
- [Example prompts](./docs/getting-started.md#example-prompts)
- [Custom prompts](./docs/prompts.md)
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [**Configuration**](./docs/config.md)
- [Example config](./docs/example-config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)
- [**Execpolicy quickstart**](./docs/execpolicy.md)
- [**Authentication**](./docs/authentication.md)
- [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
- [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
- **Automating Codex**
- [GitHub Action](https://github.com/openai/codex-action)
- [TypeScript SDK](./sdk/typescript/README.md)
- [Non-interactive mode (`codex exec`)](./docs/exec.md)
- [**Advanced**](./docs/advanced.md)
- [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
- [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
- [**Zero data retention (ZDR)**](./docs/zdr.md)
- [**Contributing**](./docs/contributing.md)
- [**Installing & building**](./docs/install.md)
- [**Install & build**](./docs/install.md)
- [System Requirements](./docs/install.md#system-requirements)
- [DotSlash](./docs/install.md#dotslash)
- [Build from source](./docs/install.md#build-from-source)
- [**FAQ**](./docs/faq.md)
- [**Open source fund**](./docs/open-source-fund.md)
---
## License
This repository is licensed under the [Apache-2.0 License](LICENSE).


@@ -1,17 +0,0 @@
# Example announcement tips for Codex TUI.
# Each [[announcements]] entry is evaluated in order; the last matching one is shown.
# Dates are UTC, formatted as YYYY-MM-DD. The from_date is inclusive and the to_date is exclusive.
# version_regex matches against the CLI version (env!("CARGO_PKG_VERSION")); omit to apply to all versions.
# target_app specify which app should display the announcement (cli, vsce, ...).
[[announcements]]
content = "Welcome to Codex! Check out the new onboarding flow."
from_date = "2024-10-01"
to_date = "2024-10-15"
target_app = "cli"
# Test announcement only for local build version until 2026-01-10 excluded (past)
[[announcements]]
content = "This is a test announcement"
version_regex = "^0\\.0\\.0$"
to_date = "2026-05-10"

codex-cli/bin/codex.js Executable file → Normal file

@@ -95,6 +95,7 @@ function detectPackageManager() {
return "bun";
}
if (
__dirname.includes(".bun/install/global") ||
__dirname.includes(".bun\\install\\global")

codex-cli/package-lock.json generated Normal file

@@ -0,0 +1,18 @@
{
"name": "@openai/codex",
"version": "0.0.0-dev",
"lockfileVersion": 3,
"packages": {
"": {
"name": "@openai/codex",
"version": "0.0.0-dev",
"license": "Apache-2.0",
"bin": {
"codex": "bin/codex.js"
},
"engines": {
"node": ">=16"
}
}
}
}


@@ -17,6 +17,5 @@
"type": "git",
"url": "git+https://github.com/openai/codex.git",
"directory": "codex-cli"
},
"packageManager": "pnpm@10.28.2+sha512.41872f037ad22f7348e3b1debbaf7e867cfd448f2726d9cf74c08f19507c31d2c8e7a11525b983febc2df640b5438dee6023ebb1f84ed43cc2d654d2bc326264"
}
}


@@ -2,7 +2,6 @@
"""Install Codex native binaries (Rust CLI plus ripgrep helpers)."""
import argparse
from contextlib import contextmanager
import json
import os
import shutil
@@ -13,7 +12,6 @@ import zipfile
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import sys
from typing import Iterable, Sequence
from urllib.parse import urlparse
from urllib.request import urlopen
@@ -79,45 +77,6 @@ RG_TARGET_PLATFORM_PAIRS: list[tuple[str, str]] = [
RG_TARGET_TO_PLATFORM = {target: platform for target, platform in RG_TARGET_PLATFORM_PAIRS}
DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS]
# urllib.request.urlopen() defaults to no timeout (can hang indefinitely), which is painful in CI.
DOWNLOAD_TIMEOUT_SECS = 60
def _gha_enabled() -> bool:
# GitHub Actions supports "workflow commands" (e.g. ::group:: / ::error::) that make logs
# much easier to scan: groups collapse noisy sections and error annotations surface the
# failure in the UI without changing the actual exception/traceback output.
return os.environ.get("GITHUB_ACTIONS") == "true"
def _gha_escape(value: str) -> str:
# Workflow commands require percent/newline escaping.
return value.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A")
def _gha_error(*, title: str, message: str) -> None:
# Emit a GitHub Actions error annotation. This does not replace stdout/stderr logs; it just
# adds a prominent summary line to the job UI so the root cause is easier to spot.
if not _gha_enabled():
return
print(
f"::error title={_gha_escape(title)}::{_gha_escape(message)}",
flush=True,
)
@contextmanager
def _gha_group(title: str):
# Wrap a block in a collapsible log group on GitHub Actions. Outside of GHA this is a no-op
# so local output remains unchanged.
if _gha_enabled():
print(f"::group::{_gha_escape(title)}", flush=True)
try:
yield
finally:
if _gha_enabled():
print("::endgroup::", flush=True)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Install native Codex binaries.")
@@ -172,20 +131,18 @@ def main() -> int:
workflow_id = workflow_url.rstrip("/").split("/")[-1]
print(f"Downloading native artifacts from workflow {workflow_id}...")
with _gha_group(f"Download native artifacts from workflow {workflow_id}"):
with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
artifacts_dir = Path(artifacts_dir_str)
_download_artifacts(workflow_id, artifacts_dir)
install_binary_components(
artifacts_dir,
vendor_dir,
[BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
)
with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
artifacts_dir = Path(artifacts_dir_str)
_download_artifacts(workflow_id, artifacts_dir)
install_binary_components(
artifacts_dir,
vendor_dir,
[BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
)
if "rg" in components:
with _gha_group("Fetch ripgrep binaries"):
print("Fetching ripgrep binaries...")
fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)
print("Fetching ripgrep binaries...")
fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)
print(f"Installed native dependencies into {vendor_dir}")
return 0
@@ -246,14 +203,7 @@ def fetch_rg(
for future in as_completed(future_map):
target = future_map[future]
try:
results[target] = future.result()
except Exception as exc:
_gha_error(
title="ripgrep install failed",
message=f"target={target} error={exc!r}",
)
raise RuntimeError(f"Failed to install ripgrep for target {target}.") from exc
results[target] = future.result()
print(f" installed ripgrep for {target}")
return [results[target] for target in targets]
@@ -351,8 +301,6 @@ def _fetch_single_rg(
url = providers[0]["url"]
archive_format = platform_info.get("format", "zst")
archive_member = platform_info.get("path")
digest = platform_info.get("digest")
expected_size = platform_info.get("size")
dest_dir = vendor_dir / target / "path"
dest_dir.mkdir(parents=True, exist_ok=True)
@@ -365,32 +313,10 @@ def _fetch_single_rg(
tmp_dir = Path(tmp_dir_str)
archive_filename = os.path.basename(urlparse(url).path)
download_path = tmp_dir / archive_filename
print(
f" downloading ripgrep for {target} ({platform_key}) from {url}",
flush=True,
)
try:
_download_file(url, download_path)
except Exception as exc:
_gha_error(
title="ripgrep download failed",
message=f"target={target} platform={platform_key} url={url} error={exc!r}",
)
raise RuntimeError(
"Failed to download ripgrep "
f"(target={target}, platform={platform_key}, format={archive_format}, "
f"expected_size={expected_size!r}, digest={digest!r}, url={url}, dest={download_path})."
) from exc
_download_file(url, download_path)
dest.unlink(missing_ok=True)
try:
extract_archive(download_path, archive_format, archive_member, dest)
except Exception as exc:
raise RuntimeError(
"Failed to extract ripgrep "
f"(target={target}, platform={platform_key}, format={archive_format}, "
f"member={archive_member!r}, url={url}, archive={download_path})."
) from exc
extract_archive(download_path, archive_format, archive_member, dest)
if not is_windows:
dest.chmod(0o755)
@@ -400,9 +326,7 @@ def _fetch_single_rg(
def _download_file(url: str, dest: Path) -> None:
dest.parent.mkdir(parents=True, exist_ok=True)
dest.unlink(missing_ok=True)
with urlopen(url, timeout=DOWNLOAD_TIMEOUT_SECS) as response, open(dest, "wb") as out:
with urlopen(url) as response, open(dest, "wb") as out:
shutil.copyfileobj(response, out)


@@ -1 +0,0 @@

codex-rs/Cargo.lock generated

File diff suppressed because it is too large


@@ -6,12 +6,10 @@ members = [
"app-server",
"app-server-protocol",
"app-server-test-client",
"debug-client",
"apply-patch",
"arg0",
"feedback",
"codex-backend-openapi-models",
"cloud-requirements",
"cloud-tasks",
"cloud-tasks-client",
"cli",
@@ -28,7 +26,6 @@ members = [
"login",
"mcp-server",
"mcp-types",
"network-proxy",
"ollama",
"process-hardening",
"protocol",
@@ -37,20 +34,17 @@ members = [
"stdio-to-uds",
"otel",
"tui",
"tui2",
"utils/absolute-path",
"utils/cargo-bin",
"utils/git",
"utils/cache",
"utils/image",
"utils/json-to-toml",
"utils/home-dir",
"utils/pty",
"utils/readiness",
"utils/string",
"codex-client",
"codex-api",
"state",
"codex-experimental-api-macros",
]
resolver = "2"
@@ -74,15 +68,12 @@ codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-cloud-requirements = { path = "cloud-requirements" }
codex-chatgpt = { path = "chatgpt" }
codex-cli = { path = "cli"}
codex-client = { path = "codex-client" }
codex-common = { path = "common" }
codex-core = { path = "core" }
codex-exec = { path = "exec" }
codex-execpolicy = { path = "execpolicy" }
codex-experimental-api-macros = { path = "codex-experimental-api-macros" }
codex-feedback = { path = "feedback" }
codex-file-search = { path = "file-search" }
codex-git = { path = "utils/git" }
@@ -97,15 +88,13 @@ codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-state = { path = "state" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-tui = { path = "tui" }
codex-tui2 = { path = "tui2" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-cargo-bin = { path = "utils/cargo-bin" }
codex-utils-image = { path = "utils/image" }
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
codex-utils-home-dir = { path = "utils/home-dir" }
codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-string = { path = "utils/string" }
@@ -129,13 +118,12 @@ axum = { version = "0.8", default-features = false }
base64 = "0.22.1"
bytes = "1.10.1"
chardetng = "0.1.17"
chrono = "0.4.43"
chrono = "0.4.42"
clap = "4"
clap_complete = "4"
color-eyre = "0.6.3"
crossterm = "0.28.1"
crossbeam-channel = "0.5.15"
ctor = "0.6.3"
ctor = "0.5.0"
derive_more = "2"
diffy = "0.4.2"
dirs = "6"
@@ -144,40 +132,38 @@ dunce = "1.0.4"
encoding_rs = "0.8.35"
env-flags = "0.1.1"
env_logger = "0.11.5"
escargot = "0.5"
eventsource-stream = "0.2.3"
futures = { version = "0.3", default-features = false }
globset = "0.4"
http = "1.3.1"
icu_decimal = "2.1"
icu_locale_core = "2.1"
icu_provider = { version = "2.1", features = ["sync"] }
ignore = "0.4.23"
indoc = "2.0"
image = { version = "^0.25.9", default-features = false }
include_dir = "0.7.4"
indexmap = "2.12.0"
insta = "1.46.0"
inventory = "0.3.19"
insta = "1.44.3"
itertools = "0.14.0"
keyring = { version = "3.6", default-features = false }
landlock = "0.4.4"
landlock = "0.4.1"
lazy_static = "1"
libc = "0.2.177"
log = "0.4"
lru = "0.16.3"
lru = "0.16.2"
maplit = "1.0.2"
mime_guess = "2.0.5"
multimap = "0.10.0"
notify = "8.2.0"
nucleo = { git = "https://github.com/helix-editor/nucleo.git", rev = "4253de9faabb4e5c6d81d946a5e35a90f87347ee" }
nucleo-matcher = "0.3.1"
once_cell = "1.20.2"
openssl-sys = "*"
opentelemetry = "0.31.0"
opentelemetry-appender-tracing = "0.31.0"
opentelemetry-otlp = "0.31.0"
opentelemetry-semantic-conventions = "0.31.0"
opentelemetry_sdk = "0.31.0"
tracing-opentelemetry = "0.32.0"
opentelemetry = "0.30.0"
opentelemetry-appender-tracing = "0.30.0"
opentelemetry-otlp = "0.30.0"
opentelemetry-semantic-conventions = "0.30.0"
opentelemetry_sdk = "0.30.0"
tracing-opentelemetry = "0.31.0"
os_info = "3.12.0"
owo-colors = "4.2.0"
path-absolutize = "3.1.1"
@@ -190,51 +176,46 @@ rand = "0.9"
ratatui = "0.29.0"
ratatui-macros = "0.6.0"
regex = "1.12.2"
regex-lite = "0.1.8"
regex-lite = "0.1.7"
reqwest = "0.12"
rmcp = { version = "0.12.0", default-features = false }
runfiles = { git = "https://github.com/dzbarsky/rules_rust", rev = "b56cbaa8465e74127f1ea216f813cd377295ad81" }
rmcp = { version = "0.10.0", default-features = false }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.46.0"
serde = "1"
serde_json = "1"
serde_path_to_error = "0.1.20"
serde_with = "3.16"
serde_yaml = "0.9"
serial_test = "3.2.0"
sha1 = "0.10.6"
sha2 = "0.10"
semver = "1.0"
shlex = "1.3.0"
similar = "2.7.0"
socket2 = "0.6.1"
sqlx = { version = "0.8.6", default-features = false, features = ["chrono", "json", "macros", "migrate", "runtime-tokio-rustls", "sqlite", "time", "uuid"] }
starlark = "0.13.0"
strum = "0.27.2"
strum_macros = "0.27.2"
supports-color = "3.0.2"
sys-locale = "0.3.2"
tempfile = "3.23.0"
test-log = "0.2.19"
test-log = "0.2.18"
textwrap = "0.16.2"
thiserror = "2.0.17"
time = "0.3"
tiny_http = "0.12"
tokio = "1"
tokio-stream = "0.1.18"
tokio-stream = "0.1.17"
tokio-test = "0.4"
tokio-tungstenite = { version = "0.28.0", features = ["proxy", "rustls-tls-native-roots"] }
tokio-util = "0.7.18"
tokio-util = "0.7.16"
toml = "0.9.5"
toml_edit = "0.24.0"
tracing = "0.1.44"
toml_edit = "0.23.5"
tonic = "0.13.1"
tracing = "0.1.43"
tracing-appender = "0.2.3"
tracing-subscriber = "0.3.22"
tracing-subscriber = "0.3.20"
tracing-test = "0.2.5"
tree-sitter = "0.25.10"
tree-sitter-bash = "0.25"
zstd = "0.13"
tree-sitter-highlight = "0.25.10"
ts-rs = "11"
uds_windows = "1.1.0"
@@ -246,7 +227,7 @@ uuid = "1"
vt100 = "0.16.2"
walkdir = "2.5.0"
webbrowser = "1.0"
which = "8"
which = "6"
wildmatch = "2.6.1"
wiremock = "0.6"
@@ -314,10 +295,6 @@ opt-level = 0
# ratatui = { path = "../../ratatui" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
tokio-tungstenite = { git = "https://github.com/JakkuSakura/tokio-tungstenite", rev = "2ae536b0de793f3ddf31fc2f22d445bf1ef2023d" }
# Uncomment to debug local changes.
# rmcp = { path = "../../rust-sdk/crates/rmcp" }
[patch."ssh://git@github.com/JakkuSakura/tungstenite-rs.git"]
tungstenite = { git = "https://github.com/JakkuSakura/tungstenite-rs", rev = "f514de8644821113e5d18a027d6d28a5c8cc0a6e" }


@@ -15,8 +15,8 @@ You can also install via Homebrew (`brew install --cask codex`) or download a pl
## Documentation quickstart
- First run with Codex? Start with [`docs/getting-started.md`](../docs/getting-started.md) (links to the walkthrough for prompts, keyboard shortcuts, and session management).
- Want deeper control? See [`docs/config.md`](../docs/config.md) and [`docs/install.md`](../docs/install.md).
- First run with Codex? Follow the walkthrough in [`docs/getting-started.md`](../docs/getting-started.md) for prompts, keyboard shortcuts, and session management.
- Already shipping with Codex and want deeper control? Jump to [`docs/advanced.md`](../docs/advanced.md) and the configuration reference at [`docs/config.md`](../docs/config.md).
## What's new in the Rust CLI
@@ -30,7 +30,7 @@ Codex supports a rich set of configuration options. Note that the Rust CLI uses
#### MCP client
Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#connecting-to-mcp-servers) for details.
Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details.
#### MCP server (experimental)


@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "ansi-escape",
crate_name = "codex_ansi_escape",
)


@@ -1,7 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "app-server-protocol",
crate_name = "codex_app_server_protocol",
test_data_extra = glob(["schema/**"], allow_empty = True),
)


@@ -15,7 +15,6 @@ workspace = true
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-protocol = { workspace = true }
codex-experimental-api-macros = { workspace = true }
codex-utils-absolute-path = { workspace = true }
mcp-types = { workspace = true }
schemars = { workspace = true }
@@ -24,12 +23,8 @@ serde_json = { workspace = true }
strum_macros = { workspace = true }
thiserror = { workspace = true }
ts-rs = { workspace = true }
inventory = { workspace = true }
uuid = { workspace = true, features = ["serde", "v7"] }
[dev-dependencies]
anyhow = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
pretty_assertions = { workspace = true }
similar = { workspace = true }
tempfile = { workspace = true }


@@ -1,114 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"FileChange": {
"oneOf": [
{
"properties": {
"content": {
"type": "string"
},
"type": {
"enum": [
"add"
],
"title": "AddFileChangeType",
"type": "string"
}
},
"required": [
"content",
"type"
],
"title": "AddFileChange",
"type": "object"
},
{
"properties": {
"content": {
"type": "string"
},
"type": {
"enum": [
"delete"
],
"title": "DeleteFileChangeType",
"type": "string"
}
},
"required": [
"content",
"type"
],
"title": "DeleteFileChange",
"type": "object"
},
{
"properties": {
"move_path": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"update"
],
"title": "UpdateFileChangeType",
"type": "string"
},
"unified_diff": {
"type": "string"
}
},
"required": [
"type",
"unified_diff"
],
"title": "UpdateFileChange",
"type": "object"
}
]
},
"ThreadId": {
"type": "string"
}
},
"properties": {
"callId": {
"description": "Use to correlate this with [codex_core::protocol::PatchApplyBeginEvent] and [codex_core::protocol::PatchApplyEndEvent].",
"type": "string"
},
"conversationId": {
"$ref": "#/definitions/ThreadId"
},
"fileChanges": {
"additionalProperties": {
"$ref": "#/definitions/FileChange"
},
"type": "object"
},
"grantRoot": {
"description": "When set, the agent is asking the user to allow writes under this root for the remainder of the session (unclear if this is honored today).",
"type": [
"string",
"null"
]
},
"reason": {
"description": "Optional explanatory reason (e.g. request for extra write access).",
"type": [
"string",
"null"
]
}
},
"required": [
"callId",
"conversationId",
"fileChanges"
],
"title": "ApplyPatchApprovalParams",
"type": "object"
}


@@ -1,73 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ReviewDecision": {
"description": "User's decision in response to an ExecApprovalRequest.",
"oneOf": [
{
"description": "User has approved this command and the agent should execute it.",
"enum": [
"approved"
],
"type": "string"
},
{
"additionalProperties": false,
"description": "User has approved this command and wants to apply the proposed execpolicy amendment so future matching commands are permitted.",
"properties": {
"approved_execpolicy_amendment": {
"properties": {
"proposed_execpolicy_amendment": {
"items": {
"type": "string"
},
"type": "array"
}
},
"required": [
"proposed_execpolicy_amendment"
],
"type": "object"
}
},
"required": [
"approved_execpolicy_amendment"
],
"title": "ApprovedExecpolicyAmendmentReviewDecision",
"type": "object"
},
{
"description": "User has approved this command and wants to automatically approve any future identical instances (`command` and `cwd` match exactly) for the remainder of the session.",
"enum": [
"approved_for_session"
],
"type": "string"
},
{
"description": "User has denied this command and the agent should not execute it, but it should continue the session and try something else.",
"enum": [
"denied"
],
"type": "string"
},
{
"description": "User has denied this command and the agent should not do anything until the user's next command.",
"enum": [
"abort"
],
"type": "string"
}
]
}
},
"properties": {
"decision": {
"$ref": "#/definitions/ReviewDecision"
}
},
"required": [
"decision"
],
"title": "ApplyPatchApprovalResponse",
"type": "object"
}


@@ -1,33 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ChatgptAuthTokensRefreshReason": {
"oneOf": [
{
"description": "Codex attempted a backend request and received `401 Unauthorized`.",
"enum": [
"unauthorized"
],
"type": "string"
}
]
}
},
"properties": {
"previousAccountId": {
"description": "Workspace/account identifier that Codex was previously using.\n\nClients that manage multiple accounts/workspaces can use this as a hint to refresh the token for the correct workspace.\n\nThis may be `null` when the prior ID token did not include a workspace identifier (`chatgpt_account_id`) or when the token could not be parsed.",
"type": [
"string",
"null"
]
},
"reason": {
"$ref": "#/definitions/ChatgptAuthTokensRefreshReason"
}
},
"required": [
"reason"
],
"title": "ChatgptAuthTokensRefreshParams",
"type": "object"
}


@@ -1,17 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"accessToken": {
"type": "string"
},
"idToken": {
"type": "string"
}
},
"required": [
"accessToken",
"idToken"
],
"title": "ChatgptAuthTokensRefreshResponse",
"type": "object"
}


@@ -1,22 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"oneOf": [
{
"properties": {
"method": {
"enum": [
"initialized"
],
"title": "InitializedNotificationMethod",
"type": "string"
}
},
"required": [
"method"
],
"title": "InitializedNotification",
"type": "object"
}
],
"title": "ClientNotification"
}

File diff suppressed because it is too large


@@ -1,174 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"CommandAction": {
"oneOf": [
{
"properties": {
"command": {
"type": "string"
},
"name": {
"type": "string"
},
"path": {
"type": "string"
},
"type": {
"enum": [
"read"
],
"title": "ReadCommandActionType",
"type": "string"
}
},
"required": [
"command",
"name",
"path",
"type"
],
"title": "ReadCommandAction",
"type": "object"
},
{
"properties": {
"command": {
"type": "string"
},
"path": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"listFiles"
],
"title": "ListFilesCommandActionType",
"type": "string"
}
},
"required": [
"command",
"type"
],
"title": "ListFilesCommandAction",
"type": "object"
},
{
"properties": {
"command": {
"type": "string"
},
"path": {
"type": [
"string",
"null"
]
},
"query": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"search"
],
"title": "SearchCommandActionType",
"type": "string"
}
},
"required": [
"command",
"type"
],
"title": "SearchCommandAction",
"type": "object"
},
{
"properties": {
"command": {
"type": "string"
},
"type": {
"enum": [
"unknown"
],
"title": "UnknownCommandActionType",
"type": "string"
}
},
"required": [
"command",
"type"
],
"title": "UnknownCommandAction",
"type": "object"
}
]
}
},
"properties": {
"command": {
"description": "The command to be executed.",
"type": [
"string",
"null"
]
},
"commandActions": {
"description": "Best-effort parsed command actions for friendly display.",
"items": {
"$ref": "#/definitions/CommandAction"
},
"type": [
"array",
"null"
]
},
"cwd": {
"description": "The command's working directory.",
"type": [
"string",
"null"
]
},
"itemId": {
"type": "string"
},
"proposedExecpolicyAmendment": {
"description": "Optional proposed execpolicy amendment to allow similar commands without prompting.",
"items": {
"type": "string"
},
"type": [
"array",
"null"
]
},
"reason": {
"description": "Optional explanatory reason (e.g. request for network access).",
"type": [
"string",
"null"
]
},
"threadId": {
"type": "string"
},
"turnId": {
"type": "string"
}
},
"required": [
"itemId",
"threadId",
"turnId"
],
"title": "CommandExecutionRequestApprovalParams",
"type": "object"
}


@@ -1,72 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"CommandExecutionApprovalDecision": {
"oneOf": [
{
"description": "User approved the command.",
"enum": [
"accept"
],
"type": "string"
},
{
"description": "User approved the command and future identical commands should run without prompting.",
"enum": [
"acceptForSession"
],
"type": "string"
},
{
"additionalProperties": false,
"description": "User approved the command, and wants to apply the proposed execpolicy amendment so future matching commands can run without prompting.",
"properties": {
"acceptWithExecpolicyAmendment": {
"properties": {
"execpolicy_amendment": {
"items": {
"type": "string"
},
"type": "array"
}
},
"required": [
"execpolicy_amendment"
],
"type": "object"
}
},
"required": [
"acceptWithExecpolicyAmendment"
],
"title": "AcceptWithExecpolicyAmendmentCommandExecutionApprovalDecision",
"type": "object"
},
{
"description": "User denied the command. The agent will continue the turn.",
"enum": [
"decline"
],
"type": "string"
},
{
"description": "User denied the command. The turn will also be immediately interrupted.",
"enum": [
"cancel"
],
"type": "string"
}
]
}
},
"properties": {
"decision": {
"$ref": "#/definitions/CommandExecutionApprovalDecision"
}
},
"required": [
"decision"
],
"title": "CommandExecutionRequestApprovalResponse",
"type": "object"
}


@@ -1,27 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"arguments": true,
"callId": {
"type": "string"
},
"threadId": {
"type": "string"
},
"tool": {
"type": "string"
},
"turnId": {
"type": "string"
}
},
"required": [
"arguments",
"callId",
"threadId",
"tool",
"turnId"
],
"title": "DynamicToolCallParams",
"type": "object"
}


@@ -1,17 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"output": {
"type": "string"
},
"success": {
"type": "boolean"
}
},
"required": [
"output",
"success"
],
"title": "DynamicToolCallResponse",
"type": "object"
}

File diff suppressed because it is too large


@@ -1,158 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ParsedCommand": {
"oneOf": [
{
"properties": {
"cmd": {
"type": "string"
},
"name": {
"type": "string"
},
"path": {
"description": "(Best effort) Path to the file being read by the command. When possible, this is an absolute path, though when relative, it should be resolved against the `cwd`` that will be used to run the command to derive the absolute path.",
"type": "string"
},
"type": {
"enum": [
"read"
],
"title": "ReadParsedCommandType",
"type": "string"
}
},
"required": [
"cmd",
"name",
"path",
"type"
],
"title": "ReadParsedCommand",
"type": "object"
},
{
"properties": {
"cmd": {
"type": "string"
},
"path": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"list_files"
],
"title": "ListFilesParsedCommandType",
"type": "string"
}
},
"required": [
"cmd",
"type"
],
"title": "ListFilesParsedCommand",
"type": "object"
},
{
"properties": {
"cmd": {
"type": "string"
},
"path": {
"type": [
"string",
"null"
]
},
"query": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"search"
],
"title": "SearchParsedCommandType",
"type": "string"
}
},
"required": [
"cmd",
"type"
],
"title": "SearchParsedCommand",
"type": "object"
},
{
"properties": {
"cmd": {
"type": "string"
},
"type": {
"enum": [
"unknown"
],
"title": "UnknownParsedCommandType",
"type": "string"
}
},
"required": [
"cmd",
"type"
],
"title": "UnknownParsedCommand",
"type": "object"
}
]
},
"ThreadId": {
"type": "string"
}
},
"properties": {
"callId": {
"description": "Use to correlate this with [codex_core::protocol::ExecCommandBeginEvent] and [codex_core::protocol::ExecCommandEndEvent].",
"type": "string"
},
"command": {
"items": {
"type": "string"
},
"type": "array"
},
"conversationId": {
"$ref": "#/definitions/ThreadId"
},
"cwd": {
"type": "string"
},
"parsedCmd": {
"items": {
"$ref": "#/definitions/ParsedCommand"
},
"type": "array"
},
"reason": {
"type": [
"string",
"null"
]
}
},
"required": [
"callId",
"command",
"conversationId",
"cwd",
"parsedCmd"
],
"title": "ExecCommandApprovalParams",
"type": "object"
}


@@ -1,73 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ReviewDecision": {
"description": "User's decision in response to an ExecApprovalRequest.",
"oneOf": [
{
"description": "User has approved this command and the agent should execute it.",
"enum": [
"approved"
],
"type": "string"
},
{
"additionalProperties": false,
"description": "User has approved this command and wants to apply the proposed execpolicy amendment so future matching commands are permitted.",
"properties": {
"approved_execpolicy_amendment": {
"properties": {
"proposed_execpolicy_amendment": {
"items": {
"type": "string"
},
"type": "array"
}
},
"required": [
"proposed_execpolicy_amendment"
],
"type": "object"
}
},
"required": [
"approved_execpolicy_amendment"
],
"title": "ApprovedExecpolicyAmendmentReviewDecision",
"type": "object"
},
{
"description": "User has approved this command and wants to automatically approve any future identical instances (`command` and `cwd` match exactly) for the remainder of the session.",
"enum": [
"approved_for_session"
],
"type": "string"
},
{
"description": "User has denied this command and the agent should not execute it, but it should continue the session and try something else.",
"enum": [
"denied"
],
"type": "string"
},
{
"description": "User has denied this command and the agent should not do anything until the user's next command.",
"enum": [
"abort"
],
"type": "string"
}
]
}
},
"properties": {
"decision": {
"$ref": "#/definitions/ReviewDecision"
}
},
"required": [
"decision"
],
"title": "ExecCommandApprovalResponse",
"type": "object"
}


@@ -1,35 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"grantRoot": {
"description": "[UNSTABLE] When set, the agent is asking the user to allow writes under this root for the remainder of the session (unclear if this is honored today).",
"type": [
"string",
"null"
]
},
"itemId": {
"type": "string"
},
"reason": {
"description": "Optional explanatory reason (e.g. request for extra write access).",
"type": [
"string",
"null"
]
},
"threadId": {
"type": "string"
},
"turnId": {
"type": "string"
}
},
"required": [
"itemId",
"threadId",
"turnId"
],
"title": "FileChangeRequestApprovalParams",
"type": "object"
}


@@ -1,47 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"FileChangeApprovalDecision": {
"oneOf": [
{
"description": "User approved the file changes.",
"enum": [
"accept"
],
"type": "string"
},
{
"description": "User approved the file changes and future changes to the same files should run without prompting.",
"enum": [
"acceptForSession"
],
"type": "string"
},
{
"description": "User denied the file changes. The agent will continue the turn.",
"enum": [
"decline"
],
"type": "string"
},
{
"description": "User denied the file changes. The turn will also be immediately interrupted.",
"enum": [
"cancel"
],
"type": "string"
}
]
}
},
"properties": {
"decision": {
"$ref": "#/definitions/FileChangeApprovalDecision"
}
},
"required": [
"decision"
],
"title": "FileChangeRequestApprovalResponse",
"type": "object"
}


@@ -1,26 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"cancellationToken": {
"type": [
"string",
"null"
]
},
"query": {
"type": "string"
},
"roots": {
"items": {
"type": "string"
},
"type": "array"
}
},
"required": [
"query",
"roots"
],
"title": "FuzzyFileSearchParams",
"type": "object"
}


@@ -1,55 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"FuzzyFileSearchResult": {
"description": "Superset of [`codex_file_search::FileMatch`]",
"properties": {
"file_name": {
"type": "string"
},
"indices": {
"items": {
"format": "uint32",
"minimum": 0.0,
"type": "integer"
},
"type": [
"array",
"null"
]
},
"path": {
"type": "string"
},
"root": {
"type": "string"
},
"score": {
"format": "uint32",
"minimum": 0.0,
"type": "integer"
}
},
"required": [
"file_name",
"path",
"root",
"score"
],
"type": "object"
}
},
"properties": {
"files": {
"items": {
"$ref": "#/definitions/FuzzyFileSearchResult"
},
"type": "array"
}
},
"required": [
"files"
],
"title": "FuzzyFileSearchResponse",
"type": "object"
}


@@ -1,48 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"JSONRPCErrorError": {
"properties": {
"code": {
"format": "int64",
"type": "integer"
},
"data": true,
"message": {
"type": "string"
}
},
"required": [
"code",
"message"
],
"type": "object"
},
"RequestId": {
"anyOf": [
{
"type": "string"
},
{
"format": "int64",
"type": "integer"
}
]
}
},
"description": "A response to a request that indicates an error occurred.",
"properties": {
"error": {
"$ref": "#/definitions/JSONRPCErrorError"
},
"id": {
"$ref": "#/definitions/RequestId"
}
},
"required": [
"error",
"id"
],
"title": "JSONRPCError",
"type": "object"
}


@@ -1,19 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"code": {
"format": "int64",
"type": "integer"
},
"data": true,
"message": {
"type": "string"
}
},
"required": [
"code",
"message"
],
"title": "JSONRPCErrorError",
"type": "object"
}


@@ -1,109 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"anyOf": [
{
"$ref": "#/definitions/JSONRPCRequest"
},
{
"$ref": "#/definitions/JSONRPCNotification"
},
{
"$ref": "#/definitions/JSONRPCResponse"
},
{
"$ref": "#/definitions/JSONRPCError"
}
],
"definitions": {
"JSONRPCError": {
"description": "A response to a request that indicates an error occurred.",
"properties": {
"error": {
"$ref": "#/definitions/JSONRPCErrorError"
},
"id": {
"$ref": "#/definitions/RequestId"
}
},
"required": [
"error",
"id"
],
"type": "object"
},
"JSONRPCErrorError": {
"properties": {
"code": {
"format": "int64",
"type": "integer"
},
"data": true,
"message": {
"type": "string"
}
},
"required": [
"code",
"message"
],
"type": "object"
},
"JSONRPCNotification": {
"description": "A notification which does not expect a response.",
"properties": {
"method": {
"type": "string"
},
"params": true
},
"required": [
"method"
],
"type": "object"
},
"JSONRPCRequest": {
"description": "A request that expects a response.",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"type": "string"
},
"params": true
},
"required": [
"id",
"method"
],
"type": "object"
},
"JSONRPCResponse": {
"description": "A successful (non-error) response to a request.",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"result": true
},
"required": [
"id",
"result"
],
"type": "object"
},
"RequestId": {
"anyOf": [
{
"type": "string"
},
{
"format": "int64",
"type": "integer"
}
]
}
},
"description": "Refers to any valid JSON-RPC object that can be decoded off the wire, or encoded to be sent.",
"title": "JSONRPCMessage"
}


@@ -1,15 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "A notification which does not expect a response.",
"properties": {
"method": {
"type": "string"
},
"params": true
},
"required": [
"method"
],
"title": "JSONRPCNotification",
"type": "object"
}


@@ -1,32 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"RequestId": {
"anyOf": [
{
"type": "string"
},
{
"format": "int64",
"type": "integer"
}
]
}
},
"description": "A request that expects a response.",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"type": "string"
},
"params": true
},
"required": [
"id",
"method"
],
"title": "JSONRPCRequest",
"type": "object"
}


@@ -1,29 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"RequestId": {
"anyOf": [
{
"type": "string"
},
{
"format": "int64",
"type": "integer"
}
]
}
},
"description": "A successful (non-error) response to a request.",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"result": true
},
"required": [
"id",
"result"
],
"title": "JSONRPCResponse",
"type": "object"
}
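Since "result": true places no constraint on the payload, even an empty object is a valid result. A minimal success response paired with the request above (id value illustrative):

{
  "id": "req-7",
  "result": {}
}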

View File

@@ -1,13 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"anyOf": [
{
"type": "string"
},
{
"format": "int64",
"type": "integer"
}
],
"title": "RequestId"
}

View File

@@ -1,792 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ApplyPatchApprovalParams": {
"properties": {
"callId": {
"description": "Use to correlate this with [codex_core::protocol::PatchApplyBeginEvent] and [codex_core::protocol::PatchApplyEndEvent].",
"type": "string"
},
"conversationId": {
"$ref": "#/definitions/ThreadId"
},
"fileChanges": {
"additionalProperties": {
"$ref": "#/definitions/FileChange"
},
"type": "object"
},
"grantRoot": {
"description": "When set, the agent is asking the user to allow writes under this root for the remainder of the session (unclear if this is honored today).",
"type": [
"string",
"null"
]
},
"reason": {
"description": "Optional explanatory reason (e.g. request for extra write access).",
"type": [
"string",
"null"
]
}
},
"required": [
"callId",
"conversationId",
"fileChanges"
],
"type": "object"
},
"ChatgptAuthTokensRefreshParams": {
"properties": {
"previousAccountId": {
"description": "Workspace/account identifier that Codex was previously using.\n\nClients that manage multiple accounts/workspaces can use this as a hint to refresh the token for the correct workspace.\n\nThis may be `null` when the prior ID token did not include a workspace identifier (`chatgpt_account_id`) or when the token could not be parsed.",
"type": [
"string",
"null"
]
},
"reason": {
"$ref": "#/definitions/ChatgptAuthTokensRefreshReason"
}
},
"required": [
"reason"
],
"type": "object"
},
"ChatgptAuthTokensRefreshReason": {
"oneOf": [
{
"description": "Codex attempted a backend request and received `401 Unauthorized`.",
"enum": [
"unauthorized"
],
"type": "string"
}
]
},
"CommandAction": {
"oneOf": [
{
"properties": {
"command": {
"type": "string"
},
"name": {
"type": "string"
},
"path": {
"type": "string"
},
"type": {
"enum": [
"read"
],
"title": "ReadCommandActionType",
"type": "string"
}
},
"required": [
"command",
"name",
"path",
"type"
],
"title": "ReadCommandAction",
"type": "object"
},
{
"properties": {
"command": {
"type": "string"
},
"path": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"listFiles"
],
"title": "ListFilesCommandActionType",
"type": "string"
}
},
"required": [
"command",
"type"
],
"title": "ListFilesCommandAction",
"type": "object"
},
{
"properties": {
"command": {
"type": "string"
},
"path": {
"type": [
"string",
"null"
]
},
"query": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"search"
],
"title": "SearchCommandActionType",
"type": "string"
}
},
"required": [
"command",
"type"
],
"title": "SearchCommandAction",
"type": "object"
},
{
"properties": {
"command": {
"type": "string"
},
"type": {
"enum": [
"unknown"
],
"title": "UnknownCommandActionType",
"type": "string"
}
},
"required": [
"command",
"type"
],
"title": "UnknownCommandAction",
"type": "object"
}
]
},
"CommandExecutionRequestApprovalParams": {
"properties": {
"command": {
"description": "The command to be executed.",
"type": [
"string",
"null"
]
},
"commandActions": {
"description": "Best-effort parsed command actions for friendly display.",
"items": {
"$ref": "#/definitions/CommandAction"
},
"type": [
"array",
"null"
]
},
"cwd": {
"description": "The command's working directory.",
"type": [
"string",
"null"
]
},
"itemId": {
"type": "string"
},
"proposedExecpolicyAmendment": {
"description": "Optional proposed execpolicy amendment to allow similar commands without prompting.",
"items": {
"type": "string"
},
"type": [
"array",
"null"
]
},
"reason": {
"description": "Optional explanatory reason (e.g. request for network access).",
"type": [
"string",
"null"
]
},
"threadId": {
"type": "string"
},
"turnId": {
"type": "string"
}
},
"required": [
"itemId",
"threadId",
"turnId"
],
"type": "object"
},
"DynamicToolCallParams": {
"properties": {
"arguments": true,
"callId": {
"type": "string"
},
"threadId": {
"type": "string"
},
"tool": {
"type": "string"
},
"turnId": {
"type": "string"
}
},
"required": [
"arguments",
"callId",
"threadId",
"tool",
"turnId"
],
"type": "object"
},
"ExecCommandApprovalParams": {
"properties": {
"callId": {
"description": "Use to correlate this with [codex_core::protocol::ExecCommandBeginEvent] and [codex_core::protocol::ExecCommandEndEvent].",
"type": "string"
},
"command": {
"items": {
"type": "string"
},
"type": "array"
},
"conversationId": {
"$ref": "#/definitions/ThreadId"
},
"cwd": {
"type": "string"
},
"parsedCmd": {
"items": {
"$ref": "#/definitions/ParsedCommand"
},
"type": "array"
},
"reason": {
"type": [
"string",
"null"
]
}
},
"required": [
"callId",
"command",
"conversationId",
"cwd",
"parsedCmd"
],
"type": "object"
},
"FileChange": {
"oneOf": [
{
"properties": {
"content": {
"type": "string"
},
"type": {
"enum": [
"add"
],
"title": "AddFileChangeType",
"type": "string"
}
},
"required": [
"content",
"type"
],
"title": "AddFileChange",
"type": "object"
},
{
"properties": {
"content": {
"type": "string"
},
"type": {
"enum": [
"delete"
],
"title": "DeleteFileChangeType",
"type": "string"
}
},
"required": [
"content",
"type"
],
"title": "DeleteFileChange",
"type": "object"
},
{
"properties": {
"move_path": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"update"
],
"title": "UpdateFileChangeType",
"type": "string"
},
"unified_diff": {
"type": "string"
}
},
"required": [
"type",
"unified_diff"
],
"title": "UpdateFileChange",
"type": "object"
}
]
},
"FileChangeRequestApprovalParams": {
"properties": {
"grantRoot": {
"description": "[UNSTABLE] When set, the agent is asking the user to allow writes under this root for the remainder of the session (unclear if this is honored today).",
"type": [
"string",
"null"
]
},
"itemId": {
"type": "string"
},
"reason": {
"description": "Optional explanatory reason (e.g. request for extra write access).",
"type": [
"string",
"null"
]
},
"threadId": {
"type": "string"
},
"turnId": {
"type": "string"
}
},
"required": [
"itemId",
"threadId",
"turnId"
],
"type": "object"
},
"ParsedCommand": {
"oneOf": [
{
"properties": {
"cmd": {
"type": "string"
},
"name": {
"type": "string"
},
"path": {
"description": "(Best effort) Path to the file being read by the command. When possible, this is an absolute path, though when relative, it should be resolved against the `cwd`` that will be used to run the command to derive the absolute path.",
"type": "string"
},
"type": {
"enum": [
"read"
],
"title": "ReadParsedCommandType",
"type": "string"
}
},
"required": [
"cmd",
"name",
"path",
"type"
],
"title": "ReadParsedCommand",
"type": "object"
},
{
"properties": {
"cmd": {
"type": "string"
},
"path": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"list_files"
],
"title": "ListFilesParsedCommandType",
"type": "string"
}
},
"required": [
"cmd",
"type"
],
"title": "ListFilesParsedCommand",
"type": "object"
},
{
"properties": {
"cmd": {
"type": "string"
},
"path": {
"type": [
"string",
"null"
]
},
"query": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"search"
],
"title": "SearchParsedCommandType",
"type": "string"
}
},
"required": [
"cmd",
"type"
],
"title": "SearchParsedCommand",
"type": "object"
},
{
"properties": {
"cmd": {
"type": "string"
},
"type": {
"enum": [
"unknown"
],
"title": "UnknownParsedCommandType",
"type": "string"
}
},
"required": [
"cmd",
"type"
],
"title": "UnknownParsedCommand",
"type": "object"
}
]
},
"RequestId": {
"anyOf": [
{
"type": "string"
},
{
"format": "int64",
"type": "integer"
}
]
},
"ThreadId": {
"type": "string"
},
"ToolRequestUserInputOption": {
"description": "EXPERIMENTAL. Defines a single selectable option for request_user_input.",
"properties": {
"description": {
"type": "string"
},
"label": {
"type": "string"
}
},
"required": [
"description",
"label"
],
"type": "object"
},
"ToolRequestUserInputParams": {
"description": "EXPERIMENTAL. Params sent with a request_user_input event.",
"properties": {
"itemId": {
"type": "string"
},
"questions": {
"items": {
"$ref": "#/definitions/ToolRequestUserInputQuestion"
},
"type": "array"
},
"threadId": {
"type": "string"
},
"turnId": {
"type": "string"
}
},
"required": [
"itemId",
"questions",
"threadId",
"turnId"
],
"type": "object"
},
"ToolRequestUserInputQuestion": {
"description": "EXPERIMENTAL. Represents one request_user_input question and its required options.",
"properties": {
"header": {
"type": "string"
},
"id": {
"type": "string"
},
"isOther": {
"default": false,
"type": "boolean"
},
"isSecret": {
"default": false,
"type": "boolean"
},
"options": {
"items": {
"$ref": "#/definitions/ToolRequestUserInputOption"
},
"type": [
"array",
"null"
]
},
"question": {
"type": "string"
}
},
"required": [
"header",
"id",
"question"
],
"type": "object"
}
},
"description": "Request initiated from the server and sent to the client.",
"oneOf": [
{
"description": "NEW APIs Sent when approval is requested for a specific command execution. This request is used for Turns started via turn/start.",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"enum": [
"item/commandExecution/requestApproval"
],
"title": "Item/commandExecution/requestApprovalRequestMethod",
"type": "string"
},
"params": {
"$ref": "#/definitions/CommandExecutionRequestApprovalParams"
}
},
"required": [
"id",
"method",
"params"
],
"title": "Item/commandExecution/requestApprovalRequest",
"type": "object"
},
{
"description": "Sent when approval is requested for a specific file change. This request is used for Turns started via turn/start.",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"enum": [
"item/fileChange/requestApproval"
],
"title": "Item/fileChange/requestApprovalRequestMethod",
"type": "string"
},
"params": {
"$ref": "#/definitions/FileChangeRequestApprovalParams"
}
},
"required": [
"id",
"method",
"params"
],
"title": "Item/fileChange/requestApprovalRequest",
"type": "object"
},
{
"description": "EXPERIMENTAL - Request input from the user for a tool call.",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"enum": [
"item/tool/requestUserInput"
],
"title": "Item/tool/requestUserInputRequestMethod",
"type": "string"
},
"params": {
"$ref": "#/definitions/ToolRequestUserInputParams"
}
},
"required": [
"id",
"method",
"params"
],
"title": "Item/tool/requestUserInputRequest",
"type": "object"
},
{
"description": "Execute a dynamic tool call on the client.",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"enum": [
"item/tool/call"
],
"title": "Item/tool/callRequestMethod",
"type": "string"
},
"params": {
"$ref": "#/definitions/DynamicToolCallParams"
}
},
"required": [
"id",
"method",
"params"
],
"title": "Item/tool/callRequest",
"type": "object"
},
{
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"enum": [
"account/chatgptAuthTokens/refresh"
],
"title": "Account/chatgptAuthTokens/refreshRequestMethod",
"type": "string"
},
"params": {
"$ref": "#/definitions/ChatgptAuthTokensRefreshParams"
}
},
"required": [
"id",
"method",
"params"
],
"title": "Account/chatgptAuthTokens/refreshRequest",
"type": "object"
},
{
"description": "DEPRECATED APIs below Request to approve a patch. This request is used for Turns started via the legacy APIs (i.e. SendUserTurn, SendUserMessage).",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"enum": [
"applyPatchApproval"
],
"title": "ApplyPatchApprovalRequestMethod",
"type": "string"
},
"params": {
"$ref": "#/definitions/ApplyPatchApprovalParams"
}
},
"required": [
"id",
"method",
"params"
],
"title": "ApplyPatchApprovalRequest",
"type": "object"
},
{
"description": "Request to exec a command. This request is used for Turns started via the legacy APIs (i.e. SendUserTurn, SendUserMessage).",
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"enum": [
"execCommandApproval"
],
"title": "ExecCommandApprovalRequestMethod",
"type": "string"
},
"params": {
"$ref": "#/definitions/ExecCommandApprovalParams"
}
},
"required": [
"id",
"method",
"params"
],
"title": "ExecCommandApprovalRequest",
"type": "object"
}
],
"title": "ServerRequest"
}
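As a concrete sketch, an item/commandExecution/requestApproval request conforming to this schema might look like the following (ids, paths, and the command are illustrative):

{
  "id": 3,
  "method": "item/commandExecution/requestApproval",
  "params": {
    "itemId": "item_1",
    "threadId": "thr_123",
    "turnId": "turn_1",
    "command": "cat Cargo.toml",
    "cwd": "/home/user/project",
    "commandActions": [
      { "type": "read", "command": "cat Cargo.toml", "name": "Cargo.toml", "path": "/home/user/project/Cargo.toml" }
    ]
  }
}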

View File

@@ -1,84 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ToolRequestUserInputOption": {
"description": "EXPERIMENTAL. Defines a single selectable option for request_user_input.",
"properties": {
"description": {
"type": "string"
},
"label": {
"type": "string"
}
},
"required": [
"description",
"label"
],
"type": "object"
},
"ToolRequestUserInputQuestion": {
"description": "EXPERIMENTAL. Represents one request_user_input question and its required options.",
"properties": {
"header": {
"type": "string"
},
"id": {
"type": "string"
},
"isOther": {
"default": false,
"type": "boolean"
},
"isSecret": {
"default": false,
"type": "boolean"
},
"options": {
"items": {
"$ref": "#/definitions/ToolRequestUserInputOption"
},
"type": [
"array",
"null"
]
},
"question": {
"type": "string"
}
},
"required": [
"header",
"id",
"question"
],
"type": "object"
}
},
"description": "EXPERIMENTAL. Params sent with a request_user_input event.",
"properties": {
"itemId": {
"type": "string"
},
"questions": {
"items": {
"$ref": "#/definitions/ToolRequestUserInputQuestion"
},
"type": "array"
},
"threadId": {
"type": "string"
},
"turnId": {
"type": "string"
}
},
"required": [
"itemId",
"questions",
"threadId",
"turnId"
],
"title": "ToolRequestUserInputParams",
"type": "object"
}
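A sketch of a request_user_input payload with a single multiple-choice question (all ids and strings illustrative; isOther and isSecret default to false and are omitted):

{
  "itemId": "item_9",
  "threadId": "thr_123",
  "turnId": "turn_4",
  "questions": [
    {
      "id": "q1",
      "header": "Deployment target",
      "question": "Which environment should this deploy to?",
      "options": [
        { "label": "staging", "description": "Deploy to the staging cluster" },
        { "label": "production", "description": "Deploy to the production cluster" }
      ]
    }
  ]
}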

View File

@@ -1,34 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ToolRequestUserInputAnswer": {
"description": "EXPERIMENTAL. Captures a user's answer to a request_user_input question.",
"properties": {
"answers": {
"items": {
"type": "string"
},
"type": "array"
}
},
"required": [
"answers"
],
"type": "object"
}
},
"description": "EXPERIMENTAL. Response payload mapping question ids to answers.",
"properties": {
"answers": {
"additionalProperties": {
"$ref": "#/definitions/ToolRequestUserInputAnswer"
},
"type": "object"
}
},
"required": [
"answers"
],
"title": "ToolRequestUserInputResponse",
"type": "object"
}
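The matching response maps each question id to an answer list; for the single question above it could be (values illustrative):

{
  "answers": {
    "q1": { "answers": [ "staging" ] }
  }
}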

View File

@@ -1,22 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ThreadId": {
"type": "string"
}
},
"properties": {
"conversationId": {
"$ref": "#/definitions/ThreadId"
},
"experimentalRawEvents": {
"default": false,
"type": "boolean"
}
},
"required": [
"conversationId"
],
"title": "AddConversationListenerParams",
"type": "object"
}

View File

@@ -1,13 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"subscriptionId": {
"type": "string"
}
},
"required": [
"subscriptionId"
],
"title": "AddConversationSubscriptionResponse",
"type": "object"
}

View File

@@ -1,22 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ThreadId": {
"type": "string"
}
},
"properties": {
"conversationId": {
"$ref": "#/definitions/ThreadId"
},
"rolloutPath": {
"type": "string"
}
},
"required": [
"conversationId",
"rolloutPath"
],
"title": "ArchiveConversationParams",
"type": "object"
}

View File

@@ -1,5 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "ArchiveConversationResponse",
"type": "object"
}

View File

@@ -1,46 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"AuthMode": {
"description": "Authentication mode for OpenAI-backed providers.",
"oneOf": [
{
"description": "OpenAI API key provided by the caller and stored by Codex.",
"enum": [
"apikey"
],
"type": "string"
},
{
"description": "ChatGPT OAuth managed by Codex (tokens persisted and refreshed by Codex).",
"enum": [
"chatgpt"
],
"type": "string"
},
{
"description": "[UNSTABLE] FOR OPENAI INTERNAL USE ONLY - DO NOT USE.\n\nChatGPT auth tokens are supplied by an external host app and are only stored in memory. Token refresh must be handled by the external host app.",
"enum": [
"chatgptAuthTokens"
],
"type": "string"
}
]
}
},
"description": "Deprecated notification. Use AccountUpdatedNotification instead.",
"properties": {
"authMethod": {
"anyOf": [
{
"$ref": "#/definitions/AuthMode"
},
{
"type": "null"
}
]
}
},
"title": "AuthStatusChangeNotification",
"type": "object"
}

View File

@@ -1,13 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"loginId": {
"type": "string"
}
},
"required": [
"loginId"
],
"title": "CancelLoginChatGptParams",
"type": "object"
}

View File

@@ -1,5 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "CancelLoginChatGptResponse",
"type": "object"
}

View File

@@ -1,158 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"AbsolutePathBuf": {
"description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.",
"type": "string"
},
"NetworkAccess": {
"description": "Represents whether outbound network access is available to the agent.",
"enum": [
"restricted",
"enabled"
],
"type": "string"
},
"SandboxPolicy": {
"description": "Determines execution restrictions for model shell commands.",
"oneOf": [
{
"description": "No restrictions whatsoever. Use with caution.",
"properties": {
"type": {
"enum": [
"danger-full-access"
],
"title": "DangerFullAccessSandboxPolicyType",
"type": "string"
}
},
"required": [
"type"
],
"title": "DangerFullAccessSandboxPolicy",
"type": "object"
},
{
"description": "Read-only access to the entire file-system.",
"properties": {
"type": {
"enum": [
"read-only"
],
"title": "ReadOnlySandboxPolicyType",
"type": "string"
}
},
"required": [
"type"
],
"title": "ReadOnlySandboxPolicy",
"type": "object"
},
{
"description": "Indicates the process is already in an external sandbox. Allows full disk access while honoring the provided network setting.",
"properties": {
"network_access": {
"allOf": [
{
"$ref": "#/definitions/NetworkAccess"
}
],
"default": "restricted",
"description": "Whether the external sandbox permits outbound network traffic."
},
"type": {
"enum": [
"external-sandbox"
],
"title": "ExternalSandboxSandboxPolicyType",
"type": "string"
}
},
"required": [
"type"
],
"title": "ExternalSandboxSandboxPolicy",
"type": "object"
},
{
"description": "Same as `ReadOnly` but additionally grants write access to the current working directory (\"workspace\").",
"properties": {
"exclude_slash_tmp": {
"default": false,
"description": "When set to `true`, will NOT include the `/tmp` among the default writable roots on UNIX. Defaults to `false`.",
"type": "boolean"
},
"exclude_tmpdir_env_var": {
"default": false,
"description": "When set to `true`, will NOT include the per-user `TMPDIR` environment variable among the default writable roots. Defaults to `false`.",
"type": "boolean"
},
"network_access": {
"default": false,
"description": "When set to `true`, outbound network access is allowed. `false` by default.",
"type": "boolean"
},
"type": {
"enum": [
"workspace-write"
],
"title": "WorkspaceWriteSandboxPolicyType",
"type": "string"
},
"writable_roots": {
"description": "Additional folders (beyond cwd and possibly TMPDIR) that should be writable from within the sandbox.",
"items": {
"$ref": "#/definitions/AbsolutePathBuf"
},
"type": "array"
}
},
"required": [
"type"
],
"title": "WorkspaceWriteSandboxPolicy",
"type": "object"
}
]
}
},
"properties": {
"command": {
"items": {
"type": "string"
},
"type": "array"
},
"cwd": {
"type": [
"string",
"null"
]
},
"sandboxPolicy": {
"anyOf": [
{
"$ref": "#/definitions/SandboxPolicy"
},
{
"type": "null"
}
]
},
"timeoutMs": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"command"
],
"title": "ExecOneOffCommandParams",
"type": "object"
}
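A sketch of a one-off exec under a workspace-write sandbox (command, paths, and timeout are illustrative; note that the SandboxPolicy fields are snake_case while the top-level params are camelCase, exactly as the schema specifies):

{
  "command": [ "rg", "--files" ],
  "cwd": "/home/user/project",
  "timeoutMs": 10000,
  "sandboxPolicy": {
    "type": "workspace-write",
    "network_access": false,
    "writable_roots": [ "/home/user/project/build" ]
  }
}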

View File

@@ -1,22 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"exitCode": {
"format": "int32",
"type": "integer"
},
"stderr": {
"type": "string"
},
"stdout": {
"type": "string"
}
},
"required": [
"exitCode",
"stderr",
"stdout"
],
"title": "ExecOneOffCommandResponse",
"type": "object"
}
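A successful run of the command above might return (output illustrative):

{
  "exitCode": 0,
  "stdout": "Cargo.toml\nsrc/main.rs\n",
  "stderr": ""
}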

View File

@@ -1,159 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"AskForApproval": {
"description": "Determines the conditions under which the user is consulted to approve running the command proposed by Codex.",
"oneOf": [
{
"description": "Under this policy, only \"known safe\" commands—as determined by `is_safe_command()`—that **only read files** are autoapproved. Everything else will ask the user to approve.",
"enum": [
"untrusted"
],
"type": "string"
},
{
"description": "*All* commands are autoapproved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
"enum": [
"on-failure"
],
"type": "string"
},
{
"description": "The model decides when to ask the user for approval.",
"enum": [
"on-request"
],
"type": "string"
},
{
"description": "Never ask the user to approve commands. Failures are immediately returned to the model, and never escalated to the user for approval.",
"enum": [
"never"
],
"type": "string"
}
]
},
"NewConversationParams": {
"properties": {
"approvalPolicy": {
"anyOf": [
{
"$ref": "#/definitions/AskForApproval"
},
{
"type": "null"
}
]
},
"baseInstructions": {
"type": [
"string",
"null"
]
},
"compactPrompt": {
"type": [
"string",
"null"
]
},
"config": {
"additionalProperties": true,
"type": [
"object",
"null"
]
},
"cwd": {
"type": [
"string",
"null"
]
},
"developerInstructions": {
"type": [
"string",
"null"
]
},
"includeApplyPatchTool": {
"type": [
"boolean",
"null"
]
},
"model": {
"type": [
"string",
"null"
]
},
"modelProvider": {
"type": [
"string",
"null"
]
},
"profile": {
"type": [
"string",
"null"
]
},
"sandbox": {
"anyOf": [
{
"$ref": "#/definitions/SandboxMode"
},
{
"type": "null"
}
]
}
},
"type": "object"
},
"SandboxMode": {
"enum": [
"read-only",
"workspace-write",
"danger-full-access"
],
"type": "string"
},
"ThreadId": {
"type": "string"
}
},
"properties": {
"conversationId": {
"anyOf": [
{
"$ref": "#/definitions/ThreadId"
},
{
"type": "null"
}
]
},
"overrides": {
"anyOf": [
{
"$ref": "#/definitions/NewConversationParams"
},
{
"type": "null"
}
]
},
"path": {
"type": [
"string",
"null"
]
}
},
"title": "ForkConversationParams",
"type": "object"
}
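All fields here are optional, so a fork can target either a live conversation or a rollout file on disk. A sketch that forks by id and overrides a few settings (the id and model name are illustrative; approvalPolicy and sandbox draw from the AskForApproval and SandboxMode enums above):

{
  "conversationId": "thr_123",
  "overrides": {
    "model": "gpt-5",
    "approvalPolicy": "on-request",
    "sandbox": "workspace-write"
  }
}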

View File

@@ -1,19 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"includeToken": {
"type": [
"boolean",
"null"
]
},
"refreshToken": {
"type": [
"boolean",
"null"
]
}
},
"title": "GetAuthStatusParams",
"type": "object"
}

View File

@@ -1,57 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"AuthMode": {
"description": "Authentication mode for OpenAI-backed providers.",
"oneOf": [
{
"description": "OpenAI API key provided by the caller and stored by Codex.",
"enum": [
"apikey"
],
"type": "string"
},
{
"description": "ChatGPT OAuth managed by Codex (tokens persisted and refreshed by Codex).",
"enum": [
"chatgpt"
],
"type": "string"
},
{
"description": "[UNSTABLE] FOR OPENAI INTERNAL USE ONLY - DO NOT USE.\n\nChatGPT auth tokens are supplied by an external host app and are only stored in memory. Token refresh must be handled by the external host app.",
"enum": [
"chatgptAuthTokens"
],
"type": "string"
}
]
}
},
"properties": {
"authMethod": {
"anyOf": [
{
"$ref": "#/definitions/AuthMode"
},
{
"type": "null"
}
]
},
"authToken": {
"type": [
"string",
"null"
]
},
"requiresOpenaiAuth": {
"type": [
"boolean",
"null"
]
}
},
"title": "GetAuthStatusResponse",
"type": "object"
}
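A typical status for a ChatGPT-authenticated user might look like this (values illustrative; authToken is presumably only populated when the caller sets includeToken in GetAuthStatusParams):

{
  "authMethod": "chatgpt",
  "requiresOpenaiAuth": true
}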

View File

@@ -1,35 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"anyOf": [
{
"properties": {
"rolloutPath": {
"type": "string"
}
},
"required": [
"rolloutPath"
],
"title": "RolloutPathv1::GetConversationSummaryParams",
"type": "object"
},
{
"properties": {
"conversationId": {
"$ref": "#/definitions/ThreadId"
}
},
"required": [
"conversationId"
],
"title": "ConversationIdv1::GetConversationSummaryParams",
"type": "object"
}
],
"definitions": {
"ThreadId": {
"type": "string"
}
},
"title": "GetConversationSummaryParams"
}
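The anyOf admits one of two shapes, so a caller supplies either a rollout path or a conversation id (both values illustrative):

{ "rolloutPath": "/home/user/.codex/sessions/rollout-example.jsonl" }
{ "conversationId": "thr_123" }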

View File

@@ -1,175 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ConversationGitInfo": {
"properties": {
"branch": {
"type": [
"string",
"null"
]
},
"origin_url": {
"type": [
"string",
"null"
]
},
"sha": {
"type": [
"string",
"null"
]
}
},
"type": "object"
},
"ConversationSummary": {
"properties": {
"cliVersion": {
"type": "string"
},
"conversationId": {
"$ref": "#/definitions/ThreadId"
},
"cwd": {
"type": "string"
},
"gitInfo": {
"anyOf": [
{
"$ref": "#/definitions/ConversationGitInfo"
},
{
"type": "null"
}
]
},
"modelProvider": {
"type": "string"
},
"path": {
"type": "string"
},
"preview": {
"type": "string"
},
"source": {
"$ref": "#/definitions/SessionSource"
},
"timestamp": {
"type": [
"string",
"null"
]
},
"updatedAt": {
"type": [
"string",
"null"
]
}
},
"required": [
"cliVersion",
"conversationId",
"cwd",
"modelProvider",
"path",
"preview",
"source"
],
"type": "object"
},
"SessionSource": {
"oneOf": [
{
"enum": [
"cli",
"vscode",
"exec",
"mcp",
"unknown"
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"subagent": {
"$ref": "#/definitions/SubAgentSource"
}
},
"required": [
"subagent"
],
"title": "SubagentSessionSource",
"type": "object"
}
]
},
"SubAgentSource": {
"oneOf": [
{
"enum": [
"review",
"compact"
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"thread_spawn": {
"properties": {
"depth": {
"format": "int32",
"type": "integer"
},
"parent_thread_id": {
"$ref": "#/definitions/ThreadId"
}
},
"required": [
"depth",
"parent_thread_id"
],
"type": "object"
}
},
"required": [
"thread_spawn"
],
"title": "ThreadSpawnSubAgentSource",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
"other": {
"type": "string"
}
},
"required": [
"other"
],
"title": "OtherSubAgentSource",
"type": "object"
}
]
},
"ThreadId": {
"type": "string"
}
},
"properties": {
"summary": {
"$ref": "#/definitions/ConversationSummary"
}
},
"required": [
"summary"
],
"title": "GetConversationSummaryResponse",
"type": "object"
}
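A sketch of a summary for a CLI session (every value is illustrative; "cli" is one of the SessionSource enum variants):

{
  "summary": {
    "conversationId": "thr_123",
    "cliVersion": "0.1.0",
    "cwd": "/home/user/project",
    "modelProvider": "openai",
    "path": "/home/user/.codex/sessions/rollout-example.jsonl",
    "preview": "Refactor the parser module",
    "source": "cli",
    "gitInfo": { "branch": "main", "sha": "abc1234", "origin_url": "https://github.com/example/project.git" },
    "updatedAt": "2025-12-17T23:17:53Z"
  }
}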

View File

@@ -1,13 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"userAgent": {
"type": "string"
}
},
"required": [
"userAgent"
],
"title": "GetUserAgentResponse",
"type": "object"
}

View File

@@ -1,330 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"AbsolutePathBuf": {
"description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.",
"type": "string"
},
"AskForApproval": {
"description": "Determines the conditions under which the user is consulted to approve running the command proposed by Codex.",
"oneOf": [
{
"description": "Under this policy, only \"known safe\" commands—as determined by `is_safe_command()`—that **only read files** are autoapproved. Everything else will ask the user to approve.",
"enum": [
"untrusted"
],
"type": "string"
},
{
"description": "*All* commands are autoapproved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
"enum": [
"on-failure"
],
"type": "string"
},
{
"description": "The model decides when to ask the user for approval.",
"enum": [
"on-request"
],
"type": "string"
},
{
"description": "Never ask the user to approve commands. Failures are immediately returned to the model, and never escalated to the user for approval.",
"enum": [
"never"
],
"type": "string"
}
]
},
"ForcedLoginMethod": {
"enum": [
"chatgpt",
"api"
],
"type": "string"
},
"Profile": {
"properties": {
"approvalPolicy": {
"anyOf": [
{
"$ref": "#/definitions/AskForApproval"
},
{
"type": "null"
}
]
},
"chatgptBaseUrl": {
"type": [
"string",
"null"
]
},
"model": {
"type": [
"string",
"null"
]
},
"modelProvider": {
"type": [
"string",
"null"
]
},
"modelReasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
]
},
"modelReasoningSummary": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningSummary"
},
{
"type": "null"
}
]
},
"modelVerbosity": {
"anyOf": [
{
"$ref": "#/definitions/Verbosity"
},
{
"type": "null"
}
]
}
},
"type": "object"
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"ReasoningSummary": {
"description": "A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries",
"oneOf": [
{
"enum": [
"auto",
"concise",
"detailed"
],
"type": "string"
},
{
"description": "Option to disable reasoning summaries.",
"enum": [
"none"
],
"type": "string"
}
]
},
"SandboxMode": {
"enum": [
"read-only",
"workspace-write",
"danger-full-access"
],
"type": "string"
},
"SandboxSettings": {
"properties": {
"excludeSlashTmp": {
"type": [
"boolean",
"null"
]
},
"excludeTmpdirEnvVar": {
"type": [
"boolean",
"null"
]
},
"networkAccess": {
"type": [
"boolean",
"null"
]
},
"writableRoots": {
"default": [],
"items": {
"$ref": "#/definitions/AbsolutePathBuf"
},
"type": "array"
}
},
"type": "object"
},
"Tools": {
"properties": {
"viewImage": {
"type": [
"boolean",
"null"
]
},
"webSearch": {
"type": [
"boolean",
"null"
]
}
},
"type": "object"
},
"UserSavedConfig": {
"properties": {
"approvalPolicy": {
"anyOf": [
{
"$ref": "#/definitions/AskForApproval"
},
{
"type": "null"
}
]
},
"forcedChatgptWorkspaceId": {
"type": [
"string",
"null"
]
},
"forcedLoginMethod": {
"anyOf": [
{
"$ref": "#/definitions/ForcedLoginMethod"
},
{
"type": "null"
}
]
},
"model": {
"type": [
"string",
"null"
]
},
"modelReasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
]
},
"modelReasoningSummary": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningSummary"
},
{
"type": "null"
}
]
},
"modelVerbosity": {
"anyOf": [
{
"$ref": "#/definitions/Verbosity"
},
{
"type": "null"
}
]
},
"profile": {
"type": [
"string",
"null"
]
},
"profiles": {
"additionalProperties": {
"$ref": "#/definitions/Profile"
},
"type": "object"
},
"sandboxMode": {
"anyOf": [
{
"$ref": "#/definitions/SandboxMode"
},
{
"type": "null"
}
]
},
"sandboxSettings": {
"anyOf": [
{
"$ref": "#/definitions/SandboxSettings"
},
{
"type": "null"
}
]
},
"tools": {
"anyOf": [
{
"$ref": "#/definitions/Tools"
},
{
"type": "null"
}
]
}
},
"required": [
"profiles"
],
"type": "object"
},
"Verbosity": {
"description": "Controls output length/detail on GPT-5 models via the Responses API. Serialized with lowercase values to match the OpenAI API.",
"enum": [
"low",
"medium",
"high"
],
"type": "string"
}
},
"properties": {
"config": {
"$ref": "#/definitions/UserSavedConfig"
}
},
"required": [
"config"
],
"title": "GetUserSavedConfigResponse",
"type": "object"
}
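A sketch of a saved config with one named profile (all values illustrative; only profiles is required, and the enum values come from the definitions above):

{
  "config": {
    "model": "gpt-5",
    "modelReasoningEffort": "medium",
    "approvalPolicy": "on-request",
    "sandboxMode": "workspace-write",
    "sandboxSettings": { "networkAccess": false, "writableRoots": [] },
    "tools": { "webSearch": true, "viewImage": true },
    "profile": "default",
    "profiles": {
      "default": { "model": "gpt-5", "modelVerbosity": "medium" }
    }
  }
}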

View File

@@ -1,13 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"cwd": {
"type": "string"
}
},
"required": [
"cwd"
],
"title": "GitDiffToRemoteParams",
"type": "object"
}

View File

@@ -1,22 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"GitSha": {
"type": "string"
}
},
"properties": {
"diff": {
"type": "string"
},
"sha": {
"$ref": "#/definitions/GitSha"
}
},
"required": [
"diff",
"sha"
],
"title": "GitDiffToRemoteResponse",
"type": "object"
}

View File

@@ -1,57 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ClientInfo": {
"properties": {
"name": {
"type": "string"
},
"title": {
"type": [
"string",
"null"
]
},
"version": {
"type": "string"
}
},
"required": [
"name",
"version"
],
"type": "object"
},
"InitializeCapabilities": {
"description": "Client-declared capabilities negotiated during initialize.",
"properties": {
"experimentalApi": {
"default": false,
"description": "Opt into receiving experimental API methods and fields.",
"type": "boolean"
}
},
"type": "object"
}
},
"properties": {
"capabilities": {
"anyOf": [
{
"$ref": "#/definitions/InitializeCapabilities"
},
{
"type": "null"
}
]
},
"clientInfo": {
"$ref": "#/definitions/ClientInfo"
}
},
"required": [
"clientInfo"
],
"title": "InitializeParams",
"type": "object"
}
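A sketch of an initialize call from a hypothetical IDE client (names and versions illustrative; capabilities may be omitted entirely, in which case experimentalApi defaults to false):

{
  "clientInfo": { "name": "example-ide", "title": "Example IDE", "version": "1.2.3" },
  "capabilities": { "experimentalApi": false }
}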

View File

@@ -1,13 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"userAgent": {
"type": "string"
}
},
"required": [
"userAgent"
],
"title": "InitializeResponse",
"type": "object"
}

View File

@@ -1,18 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ThreadId": {
"type": "string"
}
},
"properties": {
"conversationId": {
"$ref": "#/definitions/ThreadId"
}
},
"required": [
"conversationId"
],
"title": "InterruptConversationParams",
"type": "object"
}

View File

@@ -1,23 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"TurnAbortReason": {
"enum": [
"interrupted",
"replaced",
"review_ended"
],
"type": "string"
}
},
"properties": {
"abortReason": {
"$ref": "#/definitions/TurnAbortReason"
}
},
"required": [
"abortReason"
],
"title": "InterruptConversationResponse",
"type": "object"
}
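Putting the two preceding schemas together, an interrupt exchange might look like this (the conversation id is illustrative; "interrupted" is one of the TurnAbortReason variants):

params:   { "conversationId": "thr_123" }
response: { "abortReason": "interrupted" }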

Some files were not shown because too many files have changed in this diff.