Compare commits


1 Commit

Author: Ahmed Ibrahim
SHA1: 8aa5e7770c
Message: fmt + clippy: codex-core deterministic shell tool tests, conflict cleanup
Date: 2025-09-10 23:42:57 -07:00
1320 changed files with 52751 additions and 165593 deletions

View File

@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser
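
The effect of the tightened skip list can be checked locally; a minimal sketch, assuming codespell is installed via pip and run from the repository root (it picks up .codespellrc automatically, per the config-file docs referenced above):

# codespell reads .codespellrc from the working directory by default,
# so the updated skip/ignore settings apply without extra flags.
pip install codespell
codespell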

View File

@@ -20,14 +20,6 @@ body:
attributes:
label: What version of Codex is running?
description: Copy the output of `codex --version`
validations:
required: true
- type: input
id: plan
attributes:
label: What subscription do you have?
validations:
required: true
- type: input
id: model
attributes:
@@ -40,18 +32,11 @@ body:
description: |
For macOS and Linux: copy the output of `uname -mprs`
For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
- type: textarea
id: actual
attributes:
label: What issue are you seeing?
description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
validations:
required: true
- type: textarea
id: steps
attributes:
label: What steps can reproduce the bug?
description: Explain the bug and provide a code snippet that can reproduce it. Please include session id, token limit usage, context window usage if applicable.
description: Explain the bug and provide a code snippet that can reproduce it.
validations:
required: true
- type: textarea
@@ -59,6 +44,11 @@ body:
attributes:
label: What is the expected behavior?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: actual
attributes:
label: What do you see instead?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: notes
attributes:

View File

@@ -2,6 +2,7 @@ name: 🎁 Feature Request
description: Propose a new feature for Codex
labels:
- enhancement
- needs triage
body:
- type: markdown
attributes:
@@ -18,6 +19,11 @@ body:
label: What feature would you like to see?
validations:
required: true
- type: textarea
id: author
attributes:
label: Are you interested in implementing this feature?
description: Please wait for acknowledgement before implementing or opening a PR.
- type: textarea
id: notes
attributes:

View File

@@ -14,21 +14,11 @@ body:
id: version
attributes:
label: What version of the VS Code extension are you using?
validations:
required: true
- type: input
id: plan
attributes:
label: What subscription do you have?
validations:
required: true
- type: input
id: ide
attributes:
label: Which IDE are you using?
description: Like `VS Code`, `Cursor`, `Windsurf`, etc.
validations:
required: true
- type: input
id: platform
attributes:
@@ -36,18 +26,11 @@ body:
description: |
For macOS and Linux: copy the output of `uname -mprs`
For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
- type: textarea
id: actual
attributes:
label: What issue are you seeing?
description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
validations:
required: true
- type: textarea
id: steps
attributes:
label: What steps can reproduce the bug?
description: Explain the bug and provide a code snippet that can reproduce it. Please include session id, token limit usage, context window usage if applicable.
description: Explain the bug and provide a code snippet that can reproduce it.
validations:
required: true
- type: textarea
@@ -55,6 +38,11 @@ body:
attributes:
label: What is the expected behavior?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: actual
attributes:
label: What do you see instead?
description: If possible, please provide text instead of a screenshot.
- type: textarea
id: notes
attributes:

View File

@@ -1,3 +1,3 @@
model = "gpt-5.1"
model = "gpt-5"
# Consider setting [mcp_servers] here!

View File

@@ -27,34 +27,6 @@
"path": "codex.exe"
}
}
},
"codex-responses-api-proxy": {
"platforms": {
"macos-aarch64": {
"regex": "^codex-responses-api-proxy-aarch64-apple-darwin\\.zst$",
"path": "codex-responses-api-proxy"
},
"macos-x86_64": {
"regex": "^codex-responses-api-proxy-x86_64-apple-darwin\\.zst$",
"path": "codex-responses-api-proxy"
},
"linux-x86_64": {
"regex": "^codex-responses-api-proxy-x86_64-unknown-linux-musl\\.zst$",
"path": "codex-responses-api-proxy"
},
"linux-aarch64": {
"regex": "^codex-responses-api-proxy-aarch64-unknown-linux-musl\\.zst$",
"path": "codex-responses-api-proxy"
},
"windows-x86_64": {
"regex": "^codex-responses-api-proxy-x86_64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-responses-api-proxy.exe"
},
"windows-aarch64": {
"regex": "^codex-responses-api-proxy-aarch64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex-responses-api-proxy.exe"
}
}
}
}
}
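
Each regex above is matched against the filenames of release assets; a quick way to sanity-check a pattern locally (the asset name is a hypothetical example, and grep -E is assumed available):

# Prints the asset name only if the dotslash-config.json pattern matches it.
echo 'codex-responses-api-proxy-aarch64-apple-darwin.zst' \
  | grep -E '^codex-responses-api-proxy-aarch64-apple-darwin\.zst$'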

View File

@@ -1,18 +0,0 @@
You are an assistant that triages new GitHub issues by identifying potential duplicates.
You will receive the following JSON files located in the current working directory:
- `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
- `codex-existing-issues.json`: JSON array of recent issues (each element includes number, title, body, createdAt).
Instructions:
- Load both files as JSON and review their contents carefully. The codex-existing-issues.json file is large; ensure you explore all of it.
- Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
- Only consider an issue a potential duplicate if there is a clear overlap in symptoms, feature requests, reproduction steps, or error messages.
- Prioritize newer issues when similarity is comparable.
- Ignore pull requests and issues whose similarity is tenuous.
- When unsure, prefer returning fewer matches.
Output requirements:
- Respond with a JSON array of issue numbers (integers), ordered from most likely duplicate to least.
- Include at most five numbers.
- If you find no plausible duplicates, respond with `[]`.
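
A conforming response is a bare JSON array of at most five issue numbers; a sketch of checking a sample answer's shape with jq (the numbers are hypothetical):

# Succeeds only for an array of at most five numbers.
echo '[1234, 987]' | jq -e 'type == "array" and length <= 5 and all(.[]; type == "number")'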

View File

@@ -1,26 +0,0 @@
You are an assistant that reviews GitHub issues for the repository.
Your job is to choose the most appropriate existing labels for the issue described later in this prompt.
Follow these rules:
- Only pick labels out of the list below.
- Prefer a small set of precise labels over many broad ones.
- If none of the labels fit, respond with an empty JSON array: []
- Output must be a JSON array of label names (strings) with no additional commentary.
Labels to apply:
1. bug — Reproducible defects in Codex products (CLI, VS Code extension, web, auth).
2. enhancement — Feature requests or usability improvements that ask for new capabilities, better ergonomics, or quality-of-life tweaks.
3. extension — VS Code (or other IDE) extension-specific issues.
4. windows-os — Bugs or friction specific to Windows environments (PowerShell behavior, path handling, copy/paste, OS-specific auth or tooling failures).
5. mcp — Topics involving Model Context Protocol servers/clients.
6. codex-web — Issues targeting the Codex web UI/Cloud experience.
7. azure — Problems or requests tied to Azure OpenAI deployments.
8. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
9. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.
Issue information is available in environment variables:
ISSUE_NUMBER
ISSUE_TITLE
ISSUE_BODY
REPO_FULL_NAME
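
The labeler's contract is the same shape with strings instead of numbers; a sample check (label values hypothetical):

# Succeeds only for an array of strings, as the prompt requires.
echo '["bug", "windows-os"]' | jq -e 'type == "array" and all(.[]; type == "string")'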

View File

@@ -4,5 +4,3 @@ Before opening this Pull Request, please read the dedicated "Contributing" markd
https://github.com/openai/codex/blob/main/docs/contributing.md
If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.
Include a link to a bug report or enhancement request.

View File

@@ -1,26 +0,0 @@
name: cargo-deny
on:
pull_request:
push:
branches:
- main
jobs:
cargo-deny:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./codex-rs
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Run cargo-deny
uses: EmbarkStudios/cargo-deny-action@v1
with:
rust-version: stable
manifest-path: ./codex-rs/Cargo.toml
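
The removed workflow's check can still be reproduced by hand; a minimal sketch, assuming a stable Rust toolchain and that cargo-deny's default `check` covers advisories, bans, licenses, and sources:

# Install cargo-deny and run the same checks the CI job performed.
cargo install cargo-deny --locked
cd codex-rs && cargo deny check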

View File

@@ -1,7 +1,7 @@
name: ci
on:
pull_request: {}
pull_request: { branches: [main] }
push: { branches: [main] }
jobs:
@@ -12,7 +12,7 @@ jobs:
NODE_OPTIONS: --max-old-space-size=4096
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Setup pnpm
uses: pnpm/action-setup@v4
@@ -27,29 +27,12 @@ jobs:
- name: Install dependencies
run: pnpm install --frozen-lockfile
# stage_npm_packages.py requires DotSlash when staging releases.
- uses: facebook/install-dotslash@v2
# Run all tasks using workspace filters
- name: Stage npm package
id: stage_npm_package
- name: Ensure staging a release works.
env:
GH_TOKEN: ${{ github.token }}
run: |
set -euo pipefail
CODEX_VERSION=0.40.0
OUTPUT_DIR="${RUNNER_TEMP}"
python3 ./scripts/stage_npm_packages.py \
--release-version "$CODEX_VERSION" \
--package codex \
--output-dir "$OUTPUT_DIR"
PACK_OUTPUT="${OUTPUT_DIR}/codex-npm-${CODEX_VERSION}.tgz"
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
- name: Upload staged npm package artifact
uses: actions/upload-artifact@v5
with:
name: codex-npm-staging
path: ${{ steps.stage_npm_package.outputs.pack_output }}
run: ./codex-cli/scripts/stage_release.sh
- name: Ensure root README.md contains only ASCII and certain Unicode code points
run: ./scripts/asciicheck.py README.md
@@ -60,6 +43,3 @@ jobs:
run: ./scripts/asciicheck.py codex-cli/README.md
- name: Check codex-cli/README ToC
run: python3 scripts/readme_toc.py codex-cli/README.md
- name: Prettier (run `pnpm run format:fix` to fix)
run: pnpm run format

View File

@@ -13,37 +13,17 @@ permissions:
jobs:
cla:
# Only run the CLA assistant for the canonical openai repo so forks are not blocked
# and contributors who signed previously do not receive duplicate CLA notifications.
if: ${{ github.repository_owner == 'openai' }}
runs-on: ubuntu-latest
steps:
- uses: contributor-assistant/github-action@v2.6.1
# Run on close only if the PR was merged. This will lock the PR to preserve
# the CLA agreement. We don't want to lock PRs that have been closed without
# merging because the contributor may want to respond with additional comments.
# This action has a "lock-pullrequest-aftermerge" option that can be set to false,
# but that would unconditionally skip locking even in cases where the PR was merged.
if: |
(
github.event_name == 'pull_request_target' &&
(
github.event.action == 'opened' ||
github.event.action == 'synchronize' ||
(github.event.action == 'closed' && github.event.pull_request.merged == true)
)
) ||
(
github.event_name == 'issue_comment' &&
(
github.event.comment.body == 'recheck' ||
github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA'
)
)
github.event_name == 'pull_request_target' ||
github.event.comment.body == 'recheck' ||
github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
path-to-document: https://github.com/openai/codex/blob/main/docs/CLA.md
path-to-signatures: signatures/cla.json
branch: cla-signatures
allowlist: codex,dependabot,dependabot[bot],github-actions[bot]
allowlist: dependabot[bot]

View File

@@ -1,105 +0,0 @@
name: Close stale contributor PRs
on:
workflow_dispatch:
schedule:
- cron: "0 6 * * *"
permissions:
contents: read
issues: write
pull-requests: write
jobs:
close-stale-contributor-prs:
runs-on: ubuntu-latest
steps:
- name: Close inactive PRs from contributors
uses: actions/github-script@v8
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const DAYS_INACTIVE = 14;
const cutoff = new Date(Date.now() - DAYS_INACTIVE * 24 * 60 * 60 * 1000);
const { owner, repo } = context.repo;
const dryRun = false;
const stalePrs = [];
core.info(`Dry run mode: ${dryRun}`);
const prs = await github.paginate(github.rest.pulls.list, {
owner,
repo,
state: "open",
per_page: 100,
sort: "updated",
direction: "asc",
});
for (const pr of prs) {
const lastUpdated = new Date(pr.updated_at);
if (lastUpdated > cutoff) {
core.info(`PR ${pr.number} is fresh`);
continue;
}
if (!pr.user || pr.user.type !== "User") {
core.info(`PR ${pr.number} wasn't created by a user`);
continue;
}
let permission;
try {
const permissionResponse = await github.rest.repos.getCollaboratorPermissionLevel({
owner,
repo,
username: pr.user.login,
});
permission = permissionResponse.data.permission;
} catch (error) {
if (error.status === 404) {
core.info(`Author ${pr.user.login} is not a collaborator; skipping #${pr.number}`);
continue;
}
throw error;
}
const hasContributorAccess = ["admin", "maintain", "write"].includes(permission);
if (!hasContributorAccess) {
core.info(`Author ${pr.user.login} has ${permission} access; skipping #${pr.number}`);
continue;
}
stalePrs.push(pr);
}
if (!stalePrs.length) {
core.info("No stale contributor pull requests found.");
return;
}
for (const pr of stalePrs) {
const issue_number = pr.number;
const closeComment = `Closing this pull request because it has had no updates for more than ${DAYS_INACTIVE} days. If you plan to continue working on it, feel free to reopen or open a new PR.`;
if (dryRun) {
core.info(`[dry-run] Would close contributor PR #${issue_number} from ${pr.user.login}`);
continue;
}
await github.rest.issues.createComment({
owner,
repo,
issue_number,
body: closeComment,
});
await github.rest.pulls.update({
owner,
repo,
pull_number: issue_number,
state: "closed",
});
core.info(`Closed contributor PR #${issue_number} from ${pr.user.login}`);
}

View File

@@ -18,10 +18,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Annotate locations with typos
uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
- name: Codespell
uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 # v2.2
uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2
with:
ignore_words_file: .codespellignore

View File

@@ -1,139 +0,0 @@
name: Issue Deduplicator
on:
issues:
types:
- opened
- labeled
jobs:
gather-duplicates:
name: Identify potential duplicates
if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate') }}
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
codex_output: ${{ steps.codex.outputs.final-message }}
steps:
- uses: actions/checkout@v6
- name: Prepare Codex inputs
env:
GH_TOKEN: ${{ github.token }}
run: |
set -eo pipefail
CURRENT_ISSUE_FILE=codex-current-issue.json
EXISTING_ISSUES_FILE=codex-existing-issues.json
gh issue list --repo "${{ github.repository }}" \
--json number,title,body,createdAt \
--limit 1000 \
--state all \
--search "sort:created-desc" \
| jq '.' \
> "$EXISTING_ISSUES_FILE"
gh issue view "${{ github.event.issue.number }}" \
--repo "${{ github.repository }}" \
--json number,title,body \
| jq '.' \
> "$CURRENT_ISSUE_FILE"
- id: codex
uses: openai/codex-action@main
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
prompt: |
You are an assistant that triages new GitHub issues by identifying potential duplicates.
You will receive the following JSON files located in the current working directory:
- `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
- `codex-existing-issues.json`: JSON array of recent issues (each element includes number, title, body, createdAt).
Instructions:
- Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
- Focus on the underlying intent and context of each issue—such as reported symptoms, feature requests, reproduction steps, or error messages—rather than relying solely on string similarity or synthetic metrics.
- After your analysis, validate your results in 1-2 lines explaining your decision to return the selected matches.
- When unsure, prefer returning fewer matches.
- Include at most five numbers.
output-schema: |
{
"type": "object",
"properties": {
"issues": {
"type": "array",
"items": {
"type": "string"
}
},
"reason": { "type": "string" }
},
"required": ["issues", "reason"],
"additionalProperties": false
}
comment-on-issue:
name: Comment with potential duplicates
needs: gather-duplicates
if: ${{ needs.gather-duplicates.result != 'skipped' }}
runs-on: ubuntu-latest
permissions:
contents: read
issues: write
steps:
- name: Comment on issue
uses: actions/github-script@v8
env:
CODEX_OUTPUT: ${{ needs.gather-duplicates.outputs.codex_output }}
with:
github-token: ${{ github.token }}
script: |
const raw = process.env.CODEX_OUTPUT ?? '';
let parsed;
try {
parsed = JSON.parse(raw);
} catch (error) {
core.info(`Codex output was not valid JSON. Raw output: ${raw}`);
core.info(`Parse error: ${error.message}`);
return;
}
const issues = Array.isArray(parsed?.issues) ? parsed.issues : [];
const currentIssueNumber = String(context.payload.issue.number);
console.log(`Current issue number: ${currentIssueNumber}`);
console.log(issues);
const filteredIssues = issues.filter((value) => String(value) !== currentIssueNumber);
if (filteredIssues.length === 0) {
core.info('Codex reported no potential duplicates.');
return;
}
const lines = [
'Potential duplicates detected. Please review them and close your issue if it is a duplicate.',
'',
...filteredIssues.map((value) => `- #${String(value)}`),
'',
'*Powered by [Codex Action](https://github.com/openai/codex-action)*'];
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.payload.issue.number,
body: lines.join("\n"),
});
- name: Remove codex-deduplicate label
if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate' }}
env:
GH_TOKEN: ${{ github.token }}
GH_REPO: ${{ github.repository }}
run: |
gh issue edit "${{ github.event.issue.number }}" --remove-label codex-deduplicate || true
echo "Attempted to remove label: codex-deduplicate"

View File

@@ -1,130 +0,0 @@
name: Issue Labeler
on:
issues:
types:
- opened
- labeled
jobs:
gather-labels:
name: Generate label suggestions
if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label') }}
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
codex_output: ${{ steps.codex.outputs.final-message }}
steps:
- uses: actions/checkout@v6
- id: codex
uses: openai/codex-action@main
with:
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
allow-users: "*"
prompt: |
You are an assistant that reviews GitHub issues for the repository.
Your job is to choose the most appropriate labels for the issue described later in this prompt.
Follow these rules:
- Add one (and only one) of the following three labels to distinguish the type of issue. Default to "bug" if unsure.
1. bug — Reproducible defects in Codex products (CLI, VS Code extension, web, auth).
2. enhancement — Feature requests or usability improvements that ask for new capabilities, better ergonomics, or quality-of-life tweaks.
3. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
- If applicable, add one of the following labels to specify which sub-product or product surface the issue relates to.
1. CLI — the Codex command line interface.
2. extension — VS Code (or other IDE) extension-specific issues.
3. codex-web — Issues targeting the Codex web UI/Cloud experience.
4. github-action — Issues with the Codex GitHub action.
5. iOS — Issues with the Codex iOS app.
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
1. windows-os — Bugs or friction specific to Windows environments (apply whenever PowerShell is mentioned; also path handling, copy/paste, OS-specific auth or tooling failures).
2. mcp — Topics involving Model Context Protocol servers/clients.
3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
4. azure — Problems or requests tied to Azure OpenAI deployments.
5. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.
6. code-review — Issues related to the code review feature or functionality.
7. auth - Problems related to authentication, login, or access tokens.
8. codex-exec - Problems related to the "codex exec" command or functionality.
9. context-management - Problems related to compaction, context windows, or available context reporting.
10. custom-model - Problems that involve using custom model providers, local models, or OSS models.
11. rate-limits - Problems related to token limits, rate limits, or token usage reporting.
12. sandbox - Issues related to local sandbox environments or tool call approvals to override sandbox restrictions.
13. tool-calls - Problems related to specific tool call invocations including unexpected errors, failures, or hangs.
14. TUI - Problems with the terminal user interface (TUI) including keyboard shortcuts, copy & pasting, menus, or screen update issues.
Issue number: ${{ github.event.issue.number }}
Issue title:
${{ github.event.issue.title }}
Issue body:
${{ github.event.issue.body }}
Repository full name:
${{ github.repository }}
output-schema: |
{
"type": "object",
"properties": {
"labels": {
"type": "array",
"items": {
"type": "string"
}
}
},
"required": ["labels"],
"additionalProperties": false
}
apply-labels:
name: Apply labels from Codex output
needs: gather-labels
if: ${{ needs.gather-labels.result != 'skipped' }}
runs-on: ubuntu-latest
permissions:
contents: read
issues: write
env:
GH_TOKEN: ${{ github.token }}
GH_REPO: ${{ github.repository }}
ISSUE_NUMBER: ${{ github.event.issue.number }}
CODEX_OUTPUT: ${{ needs.gather-labels.outputs.codex_output }}
steps:
- name: Apply labels
run: |
json=${CODEX_OUTPUT//$'\r'/}
if [ -z "$json" ]; then
echo "Codex produced no output. Skipping label application."
exit 0
fi
if ! printf '%s' "$json" | jq -e 'type == "object" and (.labels | type == "array")' >/dev/null 2>&1; then
echo "Codex output did not include a labels array. Raw output: $json"
exit 0
fi
labels=$(printf '%s' "$json" | jq -r '.labels[] | tostring')
if [ -z "$labels" ]; then
echo "Codex returned an empty array. Nothing to do."
exit 0
fi
cmd=(gh issue edit "$ISSUE_NUMBER")
while IFS= read -r label; do
cmd+=(--add-label "$label")
done <<< "$labels"
"${cmd[@]}" || true
- name: Remove codex-label trigger
if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'codex-label' }}
run: |
gh issue edit "$ISSUE_NUMBER" --remove-label codex-label || true
echo "Attempted to remove label: codex-label"

View File

@@ -9,7 +9,7 @@ on:
# CI builds in debug (dev) for faster signal.
jobs:
# --- Detect what changed to detect which tests to run (always runs) -------------------------------------
# --- Detect what changed (always runs) -------------------------------------
changed:
name: Detect changed areas
runs-on: ubuntu-24.04
@@ -17,7 +17,7 @@ jobs:
codex: ${{ steps.detect.outputs.codex }}
workflows: ${{ steps.detect.outputs.workflows }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Detect changed paths (no external action)
@@ -56,14 +56,12 @@ jobs:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.90
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
with:
components: rustfmt
- name: cargo fmt
run: cargo fmt -- --config imports_granularity=Item --check
- name: Verify codegen for mcp-types
run: ./mcp-types/check_lib_rs.py
cargo_shear:
name: cargo shear
@@ -74,9 +72,9 @@ jobs:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.90
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: cargo-shear
version: 1.5.1
@@ -84,8 +82,8 @@ jobs:
run: cargo shear
# --- CI to validate on different os/targets --------------------------------
lint_build:
name: Lint/Build — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
lint_build_test:
name: ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
@@ -94,11 +92,6 @@ jobs:
defaults:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects (non-Windows).
USE_SCCACHE: ${{ startsWith(matrix.runner, 'windows') && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
strategy:
fail-fast: false
@@ -147,116 +140,26 @@ jobs:
profile: release
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.90
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
with:
targets: ${{ matrix.target }}
components: clippy
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
shell: bash
run: |
set -euo pipefail
echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
# Explicit cache restore: split cargo home vs target, so we can
# avoid caching the large target dir on the gnu-dev job.
- name: Restore cargo home cache
id: cache_cargo_home_restore
uses: actions/cache/restore@v4
- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
# Install and restore sccache cache
- name: Install sccache
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
- name: Enable sccache wrapper
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV"
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
uses: actions/cache/restore@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Prepare APT cache directories (musl)
shell: bash
run: |
set -euo pipefail
sudo mkdir -p /var/cache/apt/archives /var/lib/apt/lists
sudo chown -R "$USER:$USER" /var/cache/apt /var/lib/apt/lists
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Restore APT cache (musl)
id: cache_apt_restore
uses: actions/cache/restore@v4
with:
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
DEBIAN_FRONTEND: noninteractive
shell: bash
run: |
set -euo pipefail
sudo apt-get -y update -o Acquire::Retries=3
sudo apt-get -y install --no-install-recommends musl-tools pkg-config
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-chef
version: 0.1.71
- name: Pre-warm dependency cache (cargo-chef)
if: ${{ matrix.profile == 'release' }}
shell: bash
run: |
set -euo pipefail
RECIPE="${RUNNER_TEMP}/chef-recipe.json"
cargo chef prepare --recipe-path "$RECIPE"
cargo chef cook --recipe-path "$RECIPE" --target ${{ matrix.target }} --release --all-features
sudo apt install -y musl-tools pkg-config && sudo rm -rf /var/lib/apt/lists/*
- name: cargo clippy
id: clippy
@@ -275,222 +178,34 @@ jobs:
find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 \
| xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo check --profile ${{ matrix.profile }}'
# Save caches explicitly; make non-fatal so cache packaging
# never fails the overall job. Only save when key wasn't hit.
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
uses: actions/cache/save@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
- name: sccache stats
if: always() && env.USE_SCCACHE == 'true'
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always() && env.USE_SCCACHE == 'true'
shell: bash
run: |
{
echo "### sccache stats — ${{ matrix.target }} (${{ matrix.profile }})";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
- name: Save APT cache (musl)
if: always() && !cancelled() && (matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl') && steps.cache_apt_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v4
with:
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
# Fail the job if any of the previous steps failed.
- name: verify all steps passed
if: |
steps.clippy.outcome == 'failure' ||
steps.cargo_check_all_crates.outcome == 'failure'
run: |
echo "One or more checks failed (clippy or cargo_check_all_crates). See logs for details."
exit 1
tests:
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects (non-Windows).
USE_SCCACHE: ${{ startsWith(matrix.runner, 'windows') && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
strategy:
fail-fast: false
matrix:
include:
- runner: macos-14
target: aarch64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: dev
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: dev
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
shell: bash
run: |
set -euo pipefail
echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
- name: Restore cargo home cache
id: cache_cargo_home_restore
uses: actions/cache/restore@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- name: Install sccache
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
- name: Enable sccache wrapper
if: ${{ env.USE_SCCACHE == 'true' }}
shell: bash
run: echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV"
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
uses: actions/cache/restore@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
- uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: nextest
version: 0.9.103
- name: tests
id: test
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test
# Tests take too long for release builds to run them on every PR.
if: ${{ matrix.profile != 'release' }}
continue-on-error: true
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }}
env:
RUST_BACKTRACE: 1
NEXTEST_STATUS_LEVEL: leak
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
uses: actions/cache/save@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
- name: sccache stats
if: always() && env.USE_SCCACHE == 'true'
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always() && env.USE_SCCACHE == 'true'
shell: bash
# Fail the job if any of the previous steps failed.
- name: verify all steps passed
if: |
steps.clippy.outcome == 'failure' ||
steps.cargo_check_all_crates.outcome == 'failure' ||
steps.test.outcome == 'failure'
run: |
{
echo "### sccache stats — ${{ matrix.target }} (tests)";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
- name: verify tests passed
if: steps.test.outcome == 'failure'
run: |
echo "Tests failed. See logs for details."
echo "One or more checks failed (clippy, cargo_check_all_crates, or test). See logs for details."
exit 1
# --- Gatherer job that you mark as the ONLY required status -----------------
results:
name: CI results (required)
needs: [changed, general, cargo_shear, lint_build, tests]
needs: [changed, general, cargo_shear, lint_build_test]
if: always()
runs-on: ubuntu-24.04
steps:
@@ -499,8 +214,7 @@ jobs:
run: |
echo "general: ${{ needs.general.result }}"
echo "shear : ${{ needs.cargo_shear.result }}"
echo "lint : ${{ needs.lint_build.result }}"
echo "tests : ${{ needs.tests.result }}"
echo "matrix : ${{ needs.lint_build_test.result }}"
# If nothing relevant changed (PR touching only root README, etc.),
# declare success regardless of other jobs.
@@ -512,10 +226,4 @@ jobs:
# Otherwise require the jobs to have succeeded
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
[[ '${{ needs.lint_build.result }}' == 'success' ]] || { echo 'lint_build failed'; exit 1; }
[[ '${{ needs.tests.result }}' == 'success' ]] || { echo 'tests failed'; exit 1; }
- name: sccache summary note
if: always()
run: |
echo "Per-job sccache stats are attached to each matrix job's Step Summary."
[[ '${{ needs.lint_build_test.result }}' == 'success' ]] || { echo 'matrix failed'; exit 1; }
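
The matrix job can be approximated locally; a hedged sketch (the exact clippy flags are an assumption, and CI pins the toolchain via dtolnay/rust-toolchain rather than rustup defaults):

cd codex-rs
cargo fmt -- --config imports_granularity=Item --check
cargo clippy --all-features --all-targets -- -D warnings
cargo install cargo-nextest --locked
cargo nextest run --all-features --no-fail-fast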

View File

@@ -19,7 +19,7 @@ jobs:
tag-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Validate tag matches Cargo.toml version
shell: bash
@@ -47,7 +47,7 @@ jobs:
build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
name: ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
defaults:
@@ -58,9 +58,9 @@ jobs:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
- runner: macos-15-xlarge
- runner: macos-14
target: x86_64-apple-darwin
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
@@ -76,8 +76,8 @@ jobs:
target: aarch64-pc-windows-msvc
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.90
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
with:
targets: ${{ matrix.target }}
@@ -94,180 +94,10 @@ jobs:
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
run: |
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
sudo apt install -y musl-tools pkg-config
- name: Cargo build
run: cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
name: Configure Apple code signing
shell: bash
env:
KEYCHAIN_PASSWORD: actions
APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE_P12 }}
APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
run: |
set -euo pipefail
if [[ -z "${APPLE_CERTIFICATE:-}" ]]; then
echo "APPLE_CERTIFICATE is required for macOS signing"
exit 1
fi
if [[ -z "${APPLE_CERTIFICATE_PASSWORD:-}" ]]; then
echo "APPLE_CERTIFICATE_PASSWORD is required for macOS signing"
exit 1
fi
cert_path="${RUNNER_TEMP}/apple_signing_certificate.p12"
echo "$APPLE_CERTIFICATE" | base64 -d > "$cert_path"
keychain_path="${RUNNER_TEMP}/codex-signing.keychain-db"
security create-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
security set-keychain-settings -lut 21600 "$keychain_path"
security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
keychain_args=()
cleanup_keychain() {
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "${keychain_args[@]}" || true
security default-keychain -s "${keychain_args[0]}" || true
else
security list-keychains -s || true
fi
if [[ -f "$keychain_path" ]]; then
security delete-keychain "$keychain_path" || true
fi
}
while IFS= read -r keychain; do
[[ -n "$keychain" ]] && keychain_args+=("$keychain")
done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "$keychain_path" "${keychain_args[@]}"
else
security list-keychains -s "$keychain_path"
fi
security default-keychain -s "$keychain_path"
security import "$cert_path" -k "$keychain_path" -P "$APPLE_CERTIFICATE_PASSWORD" -T /usr/bin/codesign -T /usr/bin/security
security set-key-partition-list -S apple-tool:,apple: -s -k "$KEYCHAIN_PASSWORD" "$keychain_path" > /dev/null
codesign_hashes=()
while IFS= read -r hash; do
[[ -n "$hash" ]] && codesign_hashes+=("$hash")
done < <(security find-identity -v -p codesigning "$keychain_path" \
| sed -n 's/.*\([0-9A-F]\{40\}\).*/\1/p' \
| sort -u)
if ((${#codesign_hashes[@]} == 0)); then
echo "No signing identities found in $keychain_path"
cleanup_keychain
rm -f "$cert_path"
exit 1
fi
if ((${#codesign_hashes[@]} > 1)); then
echo "Multiple signing identities found in $keychain_path:"
printf ' %s\n' "${codesign_hashes[@]}"
cleanup_keychain
rm -f "$cert_path"
exit 1
fi
APPLE_CODESIGN_IDENTITY="${codesign_hashes[0]}"
rm -f "$cert_path"
echo "APPLE_CODESIGN_IDENTITY=$APPLE_CODESIGN_IDENTITY" >> "$GITHUB_ENV"
echo "APPLE_CODESIGN_KEYCHAIN=$keychain_path" >> "$GITHUB_ENV"
echo "::add-mask::$APPLE_CODESIGN_IDENTITY"
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
name: Sign macOS binaries
shell: bash
run: |
set -euo pipefail
if [[ -z "${APPLE_CODESIGN_IDENTITY:-}" ]]; then
echo "APPLE_CODESIGN_IDENTITY is required for macOS signing"
exit 1
fi
keychain_args=()
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" && -f "${APPLE_CODESIGN_KEYCHAIN}" ]]; then
keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
fi
for binary in codex codex-responses-api-proxy; do
path="target/${{ matrix.target }}/release/${binary}"
codesign --force --options runtime --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
done
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
name: Notarize macOS binaries
shell: bash
env:
APPLE_NOTARIZATION_KEY_P8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
APPLE_NOTARIZATION_KEY_ID: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
APPLE_NOTARIZATION_ISSUER_ID: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
run: |
set -euo pipefail
for var in APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
if [[ -z "${!var:-}" ]]; then
echo "$var is required for notarization"
exit 1
fi
done
notary_key_path="${RUNNER_TEMP}/notarytool.key.p8"
echo "$APPLE_NOTARIZATION_KEY_P8" | base64 -d > "$notary_key_path"
cleanup_notary() {
rm -f "$notary_key_path"
}
trap cleanup_notary EXIT
notarize_binary() {
local binary="$1"
local source_path="target/${{ matrix.target }}/release/${binary}"
local archive_path="${RUNNER_TEMP}/${binary}.zip"
if [[ ! -f "$source_path" ]]; then
echo "Binary $source_path not found"
exit 1
fi
rm -f "$archive_path"
ditto -c -k --keepParent "$source_path" "$archive_path"
submission_json=$(xcrun notarytool submit "$archive_path" \
--key "$notary_key_path" \
--key-id "$APPLE_NOTARIZATION_KEY_ID" \
--issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
--output-format json \
--wait)
status=$(printf '%s\n' "$submission_json" | jq -r '.status // "Unknown"')
submission_id=$(printf '%s\n' "$submission_json" | jq -r '.id // ""')
if [[ -z "$submission_id" ]]; then
echo "Failed to retrieve submission ID for $binary"
exit 1
fi
echo "::notice title=Notarization::$binary submission ${submission_id} completed with status ${status}"
if [[ "$status" != "Accepted" ]]; then
echo "Notarization failed for ${binary} (submission ${submission_id}, status ${status})"
exit 1
fi
}
notarize_binary "codex"
notarize_binary "codex-responses-api-proxy"
run: cargo build --target ${{ matrix.target }} --release --bin codex
- name: Stage artifacts
shell: bash
@@ -277,10 +107,8 @@ jobs:
if [[ "${{ matrix.runner }}" == windows* ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
else
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
fi
- if: ${{ matrix.runner == 'windows-11-arm' }}
@@ -295,15 +123,6 @@ jobs:
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
# We want to ship the raw Windows executables in the GitHub Release
# in addition to the compressed archives. Keep the originals for
# Windows targets; remove them elsewhere to limit the number of
# artifacts that end up in the GitHub Release.
keep_originals=false
if [[ "${{ matrix.runner }}" == windows* ]]; then
keep_originals=true
fi
# For compatibility with environments that lack the `zstd` tool we
# additionally create a `.tar.gz` for all platforms and `.zip` for
# Windows alongside every single binary that we publish. The end result is:
@@ -333,37 +152,10 @@ jobs:
# Also create .zst (existing behaviour) *and* remove the original
# uncompressed binary to keep the directory small.
zstd_args=(-T0 -19)
if [[ "${keep_originals}" == false ]]; then
zstd_args+=(--rm)
fi
zstd "${zstd_args[@]}" "$dest/$base"
zstd -T0 -19 --rm "$dest/$base"
done
- name: Remove signing keychain
if: ${{ always() && matrix.runner == 'macos-15-xlarge' }}
shell: bash
env:
APPLE_CODESIGN_KEYCHAIN: ${{ env.APPLE_CODESIGN_KEYCHAIN }}
run: |
set -euo pipefail
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" ]]; then
keychain_args=()
while IFS= read -r keychain; do
[[ "$keychain" == "$APPLE_CODESIGN_KEYCHAIN" ]] && continue
[[ -n "$keychain" ]] && keychain_args+=("$keychain")
done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "${keychain_args[@]}"
security default-keychain -s "${keychain_args[0]}"
fi
if [[ -f "$APPLE_CODESIGN_KEYCHAIN" ]]; then
security delete-keychain "$APPLE_CODESIGN_KEYCHAIN"
fi
fi
- uses: actions/upload-artifact@v5
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target }}
# Upload the per-binary .zst files as well as the new .tar.gz
@@ -371,33 +163,14 @@ jobs:
path: |
codex-rs/dist/${{ matrix.target }}/*
shell-tool-mcp:
name: shell-tool-mcp
needs: tag-check
uses: ./.github/workflows/shell-tool-mcp.yml
with:
release-tag: ${{ github.ref_name }}
publish: true
secrets: inherit
release:
needs:
- build
- shell-tool-mcp
needs: build
name: release
runs-on: ubuntu-latest
permissions:
contents: write
actions: read
outputs:
version: ${{ steps.release_name.outputs.name }}
tag: ${{ github.ref_name }}
should_publish_npm: ${{ steps.npm_publish_settings.outputs.should_publish }}
npm_tag: ${{ steps.npm_publish_settings.outputs.npm_tag }}
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
- uses: actions/download-artifact@v4
with:
@@ -406,14 +179,6 @@ jobs:
- name: List
run: ls -R dist/
# This is a temporary fix: we should modify shell-tool-mcp.yml so these
# files do not end up in dist/ in the first place.
- name: Delete entries from dist/ that should not go in the release
run: |
rm -rf dist/shell-tool-mcp*
ls -R dist/
- name: Define release name
id: release_name
run: |
@@ -422,49 +187,21 @@ jobs:
version="${GITHUB_REF_NAME#rust-v}"
echo "name=${version}" >> $GITHUB_OUTPUT
- name: Determine npm publish settings
id: npm_publish_settings
env:
VERSION: ${{ steps.release_name.outputs.name }}
run: |
set -euo pipefail
version="${VERSION}"
if [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "should_publish=true" >> "$GITHUB_OUTPUT"
echo "npm_tag=" >> "$GITHUB_OUTPUT"
elif [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
echo "should_publish=true" >> "$GITHUB_OUTPUT"
echo "npm_tag=alpha" >> "$GITHUB_OUTPUT"
else
echo "should_publish=false" >> "$GITHUB_OUTPUT"
echo "npm_tag=" >> "$GITHUB_OUTPUT"
fi
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js for npm packaging
uses: actions/setup-node@v5
with:
node-version: 22
- name: Install dependencies
run: pnpm install --frozen-lockfile
# stage_npm_packages.py requires DotSlash when staging releases.
- uses: facebook/install-dotslash@v2
- name: Stage npm packages
- name: Stage npm package
env:
GH_TOKEN: ${{ github.token }}
run: |
./scripts/stage_npm_packages.py \
set -euo pipefail
TMP_DIR="${RUNNER_TEMP}/npm-stage"
python3 codex-cli/scripts/stage_rust_release.py \
--release-version "${{ steps.release_name.outputs.name }}" \
--package codex \
--package codex-responses-api-proxy \
--package codex-sdk
--tmp "${TMP_DIR}"
mkdir -p dist/npm
# Produce an npm-ready tarball using `npm pack` and store it in dist/npm.
# We then rename it to a stable name used by our publishing script.
(cd "$TMP_DIR" && npm pack --pack-destination "${GITHUB_WORKSPACE}/dist/npm")
mv "${GITHUB_WORKSPACE}"/dist/npm/*.tgz \
"${GITHUB_WORKSPACE}/dist/npm/codex-npm-${{ steps.release_name.outputs.name }}.tgz"
- name: Create GitHub Release
uses: softprops/action-gh-release@v2
@@ -482,90 +219,3 @@ jobs:
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json
# Publish to npm using OIDC authentication.
# July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
# npm docs: https://docs.npmjs.com/trusted-publishers
publish-npm:
# Publish to npm for stable releases and alpha pre-releases with numeric suffixes.
if: ${{ needs.release.outputs.should_publish_npm == 'true' }}
name: publish-npm
needs: release
runs-on: ubuntu-latest
permissions:
id-token: write # Required for OIDC
contents: read
steps:
- name: Setup Node.js
uses: actions/setup-node@v5
with:
node-version: 22
registry-url: "https://registry.npmjs.org"
scope: "@openai"
# Trusted publishing requires npm CLI version 11.5.1 or later.
- name: Update npm
run: npm install -g npm@latest
- name: Download npm tarballs from release
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euo pipefail
version="${{ needs.release.outputs.version }}"
tag="${{ needs.release.outputs.tag }}"
mkdir -p dist/npm
gh release download "$tag" \
--repo "${GITHUB_REPOSITORY}" \
--pattern "codex-npm-${version}.tgz" \
--dir dist/npm
gh release download "$tag" \
--repo "${GITHUB_REPOSITORY}" \
--pattern "codex-responses-api-proxy-npm-${version}.tgz" \
--dir dist/npm
gh release download "$tag" \
--repo "${GITHUB_REPOSITORY}" \
--pattern "codex-sdk-npm-${version}.tgz" \
--dir dist/npm
# No NODE_AUTH_TOKEN needed because we use OIDC.
- name: Publish to npm
env:
VERSION: ${{ needs.release.outputs.version }}
NPM_TAG: ${{ needs.release.outputs.npm_tag }}
run: |
set -euo pipefail
tag_args=()
if [[ -n "${NPM_TAG}" ]]; then
tag_args+=(--tag "${NPM_TAG}")
fi
tarballs=(
"codex-npm-${VERSION}.tgz"
"codex-responses-api-proxy-npm-${VERSION}.tgz"
"codex-sdk-npm-${VERSION}.tgz"
)
for tarball in "${tarballs[@]}"; do
npm publish "${GITHUB_WORKSPACE}/dist/npm/${tarball}" "${tag_args[@]}"
done
update-branch:
name: Update latest-alpha-cli branch
permissions:
contents: write
needs: release
runs-on: ubuntu-latest
steps:
- name: Update latest-alpha-cli branch
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euo pipefail
gh api \
repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
-X PATCH \
-f sha="${GITHUB_SHA}" \
-F force=true
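
On the consuming side, the per-binary .zst assets published above can be fetched and unpacked with the gh CLI and zstd; a hedged sketch (the version tag is a hypothetical example):

# Download and decompress a single released binary.
gh release download rust-v0.40.0 --repo openai/codex --pattern 'codex-x86_64-unknown-linux-musl.zst'
zstd -d codex-x86_64-unknown-linux-musl.zst -o codex
chmod +x codex && ./codex --version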

View File

@@ -1,43 +0,0 @@
name: sdk
on:
push:
branches: [main]
pull_request: {}
jobs:
sdks:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v5
with:
node-version: 22
cache: pnpm
- uses: dtolnay/rust-toolchain@1.90
- name: build codex
run: cargo build --bin codex
working-directory: codex-rs
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Build SDK packages
run: pnpm -r --filter ./sdk/typescript run build
- name: Lint SDK packages
run: pnpm -r --filter ./sdk/typescript run lint
- name: Test SDK packages
run: pnpm -r --filter ./sdk/typescript run test

View File

@@ -1,48 +0,0 @@
name: shell-tool-mcp CI
on:
push:
paths:
- "shell-tool-mcp/**"
- ".github/workflows/shell-tool-mcp-ci.yml"
- "pnpm-lock.yaml"
- "pnpm-workspace.yaml"
pull_request:
paths:
- "shell-tool-mcp/**"
- ".github/workflows/shell-tool-mcp-ci.yml"
- "pnpm-lock.yaml"
- "pnpm-workspace.yaml"
env:
NODE_VERSION: 22
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v5
with:
node-version: ${{ env.NODE_VERSION }}
cache: "pnpm"
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Format check
run: pnpm --filter @openai/codex-shell-tool-mcp run format
- name: Run tests
run: pnpm --filter @openai/codex-shell-tool-mcp test
- name: Build
run: pnpm --filter @openai/codex-shell-tool-mcp run build

View File

@@ -1,405 +0,0 @@
name: shell-tool-mcp
on:
workflow_call:
inputs:
release-version:
description: Version to publish (x.y.z or x.y.z-alpha.N). Defaults to GITHUB_REF_NAME when it starts with rust-v.
required: false
type: string
release-tag:
description: Tag name to use when downloading release artifacts (defaults to rust-v<version>).
required: false
type: string
publish:
description: Whether to publish to npm when the version is releasable.
required: false
default: true
type: boolean
env:
NODE_VERSION: 22
jobs:
metadata:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.compute.outputs.version }}
release_tag: ${{ steps.compute.outputs.release_tag }}
should_publish: ${{ steps.compute.outputs.should_publish }}
npm_tag: ${{ steps.compute.outputs.npm_tag }}
steps:
- name: Compute version and tags
id: compute
run: |
set -euo pipefail
version="${{ inputs.release-version }}"
release_tag="${{ inputs.release-tag }}"
if [[ -z "$version" ]]; then
if [[ -n "$release_tag" && "$release_tag" =~ ^rust-v.+ ]]; then
version="${release_tag#rust-v}"
elif [[ "${GITHUB_REF_NAME:-}" =~ ^rust-v.+ ]]; then
version="${GITHUB_REF_NAME#rust-v}"
release_tag="${GITHUB_REF_NAME}"
else
echo "release-version is required when GITHUB_REF_NAME is not a rust-v tag."
exit 1
fi
fi
if [[ -z "$release_tag" ]]; then
release_tag="rust-v${version}"
fi
npm_tag=""
should_publish="false"
if [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
should_publish="true"
elif [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
should_publish="true"
npm_tag="alpha"
fi
echo "version=${version}" >> "$GITHUB_OUTPUT"
echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
echo "npm_tag=${npm_tag}" >> "$GITHUB_OUTPUT"
echo "should_publish=${should_publish}" >> "$GITHUB_OUTPUT"
rust-binaries:
name: Build Rust - ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
defaults:
run:
working-directory: codex-rs
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
- runner: macos-15-xlarge
target: x86_64-apple-darwin
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
install_musl: true
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
install_musl: true
steps:
- name: Checkout repository
uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
- if: ${{ matrix.install_musl }}
name: Install musl build dependencies
run: |
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
- name: Build exec server binaries
run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper
- name: Stage exec server binaries
run: |
dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}"
mkdir -p "$dest"
cp "target/${{ matrix.target }}/release/codex-exec-mcp-server" "$dest/"
cp "target/${{ matrix.target }}/release/codex-execve-wrapper" "$dest/"
- uses: actions/upload-artifact@v5
with:
name: shell-tool-mcp-rust-${{ matrix.target }}
path: artifacts/**
if-no-files-found: error
bash-linux:
name: Build Bash (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
container:
image: ${{ matrix.image }}
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: ubuntu-24.04
image: ubuntu:24.04
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: ubuntu-22.04
image: ubuntu:22.04
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: debian-12
image: debian:12
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: debian-11
image: debian:11
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
variant: centos-9
image: quay.io/centos/centos:stream9
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-24.04
image: arm64v8/ubuntu:24.04
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-22.04
image: arm64v8/ubuntu:22.04
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: ubuntu-20.04
image: arm64v8/ubuntu:20.04
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: debian-12
image: arm64v8/debian:12
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: debian-11
image: arm64v8/debian:11
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
variant: centos-9
image: quay.io/centos/centos:stream9
steps:
- name: Install build prerequisites
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get install -y git build-essential bison autoconf gettext
elif command -v dnf >/dev/null 2>&1; then
dnf install -y git gcc gcc-c++ make bison autoconf gettext
elif command -v yum >/dev/null 2>&1; then
yum install -y git gcc gcc-c++ make bison autoconf gettext
else
echo "Unsupported package manager in container"
exit 1
fi
- name: Checkout repository
uses: actions/checkout@v6
- name: Build patched Bash
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
./configure --without-bash-malloc
cores="$(command -v nproc >/dev/null 2>&1 && nproc || getconf _NPROCESSORS_ONLN)"
make -j"${cores}"
dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
mkdir -p "$dest"
cp bash "$dest/bash"
- uses: actions/upload-artifact@v5
with:
name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
path: artifacts/**
if-no-files-found: error
bash-darwin:
name: Build Bash (macOS) - ${{ matrix.variant }} - ${{ matrix.target }}
needs: metadata
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
variant: macos-15
- runner: macos-14
target: aarch64-apple-darwin
variant: macos-14
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Build patched Bash
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
./configure --without-bash-malloc
cores="$(getconf _NPROCESSORS_ONLN)"
make -j"${cores}"
dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
mkdir -p "$dest"
cp bash "$dest/bash"
- uses: actions/upload-artifact@v5
with:
name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
path: artifacts/**
if-no-files-found: error
package:
name: Package npm module
needs:
- metadata
- rust-binaries
- bash-linux
- bash-darwin
runs-on: ubuntu-latest
env:
PACKAGE_VERSION: ${{ needs.metadata.outputs.version }}
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.8.1
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v5
with:
node-version: ${{ env.NODE_VERSION }}
- name: Install JavaScript dependencies
run: pnpm install --frozen-lockfile
- name: Build (shell-tool-mcp)
run: pnpm --filter @openai/codex-shell-tool-mcp run build
- name: Download build artifacts
uses: actions/download-artifact@v4
with:
path: artifacts
- name: Assemble staging directory
id: staging
shell: bash
run: |
set -euo pipefail
staging="${STAGING_DIR}"
mkdir -p "$staging" "$staging/vendor"
cp shell-tool-mcp/README.md "$staging/"
cp shell-tool-mcp/package.json "$staging/"
cp -R shell-tool-mcp/bin "$staging/"
found_vendor="false"
shopt -s nullglob
for vendor_dir in artifacts/*/vendor; do
rsync -av "$vendor_dir/" "$staging/vendor/"
found_vendor="true"
done
if [[ "$found_vendor" == "false" ]]; then
echo "No vendor payloads were downloaded."
exit 1
fi
node - <<'NODE'
import fs from "node:fs";
import path from "node:path";
const stagingDir = process.env.STAGING_DIR;
const version = process.env.PACKAGE_VERSION;
const pkgPath = path.join(stagingDir, "package.json");
const pkg = JSON.parse(fs.readFileSync(pkgPath, "utf8"));
pkg.version = version;
fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + "\n");
NODE
echo "dir=$staging" >> "$GITHUB_OUTPUT"
env:
STAGING_DIR: ${{ runner.temp }}/shell-tool-mcp
- name: Ensure binaries are executable
run: |
set -euo pipefail
staging="${{ steps.staging.outputs.dir }}"
chmod +x \
"$staging"/vendor/*/codex-exec-mcp-server \
"$staging"/vendor/*/codex-execve-wrapper \
"$staging"/vendor/*/bash/*/bash
- name: Create npm tarball
shell: bash
run: |
set -euo pipefail
mkdir -p dist/npm
staging="${{ steps.staging.outputs.dir }}"
pack_info=$(cd "$staging" && npm pack --ignore-scripts --json --pack-destination "${GITHUB_WORKSPACE}/dist/npm")
filename=$(PACK_INFO="$pack_info" node -e 'const data = JSON.parse(process.env.PACK_INFO); console.log(data[0].filename);')
mv "dist/npm/${filename}" "dist/npm/codex-shell-tool-mcp-npm-${PACKAGE_VERSION}.tgz"
- uses: actions/upload-artifact@v5
with:
name: codex-shell-tool-mcp-npm
path: dist/npm/codex-shell-tool-mcp-npm-${{ env.PACKAGE_VERSION }}.tgz
if-no-files-found: error
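`npm pack --json` emits a JSON array describing the tarball it wrote; the "Create npm tarball" step above pulls `filename` out of the first entry and renames the file to a deterministic, version-stamped name. A hedged Python equivalent (staging path and version are assumptions, not values from the workflow):
```python
import json
import subprocess
from pathlib import Path

# Sketch of the "Create npm tarball" step: pack the staged directory,
# read the generated filename from npm's JSON output, then rename the
# tarball deterministically. Paths and version are illustrative.
staging = Path("/tmp/shell-tool-mcp")
dest = Path("dist/npm")
dest.mkdir(parents=True, exist_ok=True)

out = subprocess.check_output(
    ["npm", "pack", "--ignore-scripts", "--json",
     "--pack-destination", str(dest.resolve())],
    cwd=staging,
    text=True,
)
filename = json.loads(out)[0]["filename"]
version = "0.6.0"  # hypothetical release version
(dest / filename).rename(dest / f"codex-shell-tool-mcp-npm-{version}.tgz")
```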
publish:
name: Publish npm package
needs:
- metadata
- package
if: ${{ inputs.publish && needs.metadata.outputs.should_publish == 'true' }}
runs-on: ubuntu-latest
permissions:
id-token: write
contents: read
steps:
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.8.1
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v5
with:
node-version: ${{ env.NODE_VERSION }}
registry-url: https://registry.npmjs.org
scope: "@openai"
- name: Update npm
run: npm install -g npm@latest
- name: Download npm tarball
uses: actions/download-artifact@v4
with:
name: codex-shell-tool-mcp-npm
path: dist/npm
- name: Publish to npm
env:
NPM_TAG: ${{ needs.metadata.outputs.npm_tag }}
VERSION: ${{ needs.metadata.outputs.version }}
shell: bash
run: |
set -euo pipefail
tag_args=()
if [[ -n "${NPM_TAG}" ]]; then
tag_args+=(--tag "${NPM_TAG}")
fi
npm publish "dist/npm/codex-shell-tool-mcp-npm-${VERSION}.tgz" "${tag_args[@]}"

4
.gitignore vendored
View File

@@ -30,7 +30,6 @@ result
# cli tools
CLAUDE.md
.claude/
AGENTS.override.md
# caches
.cache/
@@ -64,9 +63,6 @@ apply_patch/
# coverage
coverage/
# personal files
personal/
# os
.DS_Store
Thumbs.db

View File

@@ -3,7 +3,6 @@
"rust-analyzer.check.command": "clippy",
"rust-analyzer.check.extraArgs": ["--all-features", "--tests"],
"rust-analyzer.rustfmt.extraArgs": ["--config", "imports_granularity=Item"],
"rust-analyzer.cargo.targetDir": "${workspaceFolder}/codex-rs/target/rust-analyzer",
"[rust]": {
"editor.defaultFormatter": "rust-lang.rust-analyzer",
"editor.formatOnSave": true,

View File

@@ -4,22 +4,14 @@ In the codex-rs folder where the rust code lives:
- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`
- When using format!, inline variables directly into the {} placeholders whenever possible.
- Install any commands the repo relies on (for example `just`, `rg`, or `cargo-insta`) if they aren't already available before running instructions here.
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
- Do not use unsigned integers even if the number cannot be negative.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace-wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
## TUI style conventions
@@ -35,7 +27,6 @@ See `codex-rs/tui/styles.md`.
- Desired: vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()]
### TUI Styling (ratatui)
- Prefer Stylize helpers: use "text".dim(), .bold(), .cyan(), .italic(), .underlined() instead of manual Style where possible.
- Prefer simple conversions: use "text".into() for spans and vec![…].into() for lines; when inference is ambiguous (e.g., Paragraph::new/Cell::from), use Line::from(spans) or Span::from(text).
- Computed styles: if the Style is computed at runtime, using `Span::styled` is OK (`Span::from(text).set_style(style)` is also acceptable).
@@ -47,7 +38,6 @@ See `codex-rs/tui/styles.md`.
- Compactness: prefer the form that stays on one line after rustfmt; if only one of Line::from(vec![…]) or vec![…].into() avoids wrapping, choose that. If both wrap, pick the one with fewer wrapped lines.
### Text wrapping
- Always use textwrap::wrap to wrap plain strings.
- If you have a ratatui Line and you want to wrap it, use the helpers in tui/src/wrapping.rs, e.g. word_wrap_lines / word_wrap_line.
- If you need to indent wrapped lines, use the initial_indent / subsequent_indent options from RtOptions if you can, rather than writing custom logic.
@@ -69,36 +59,8 @@ This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to va
- `cargo insta accept -p codex-tui`
If you don't have the tool:
- `cargo install cargo-insta`
### Test assertions
- Tests should use pretty_assertions::assert_eq for clearer diffs. Import this at the top of the test module if it isn't already.
### Integration tests (core)
- Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests.
- All `mount_sse*` helpers return a `ResponseMock`; hold onto it so you can assert against outbound `/responses` POST bodies.
- Use `ResponseMock::single_request()` when a test should only issue one POST, or `ResponseMock::requests()` to inspect every captured `ResponsesRequest`.
- `ResponsesRequest` exposes helpers (`body_json`, `input`, `function_call_output`, `custom_tool_call_output`, `call_output`, `header`, `path`, `query_param`) so assertions can target structured payloads instead of manual JSON digging.
- Build SSE payloads with the provided `ev_*` constructors and the `sse(...)` helper.
- Prefer `wait_for_event` over `wait_for_event_with_timeout`.
- Prefer `mount_sse_once` over `mount_sse_once_match` or `mount_sse_sequence`.
- Typical pattern:
```rust
let mock = responses::mount_sse_once(&server, responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
responses::ev_completed("resp-1"),
])).await;
codex.submit(Op::UserTurn { ... }).await?;
// Assert request body if needed.
let request = mock.single_request();
// assert using request.function_call_output(call_id) or request.json_body() or other helpers.
```

View File

@@ -1 +1 @@
The changelog can be found on the [releases page](https://github.com/openai/codex/releases).
The changelog can be found on the [releases page](https://github.com/openai/codex/releases)

View File

@@ -1,9 +1,8 @@
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>
<h1 align="center">OpenAI Codex CLI</h1>
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
</br>
</br>If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a>
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p>
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p>
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, see <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>
<p align="center">
<img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
@@ -24,7 +23,7 @@ npm install -g @openai/codex
Alternatively, if you use Homebrew:
```shell
brew install --cask codex
brew install codex
```
Then simply run `codex` to get started:
@@ -33,8 +32,6 @@ Then simply run `codex` to get started:
codex
```
If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).
<details>
<summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
@@ -63,37 +60,29 @@ You can also use Codex with an API key, but this requires [additional setup](./d
### Model Context Protocol (MCP)
Codex can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers).
Codex CLI supports [MCP servers](./docs/advanced.md#model-context-protocol-mcp). Enable by adding an `mcp_servers` section to your `~/.codex/config.toml`.
### Configuration
Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
### Execpolicy
See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that govern what commands Codex can execute.
---
### Docs & FAQ
- [**Getting started**](./docs/getting-started.md)
- [CLI usage](./docs/getting-started.md#cli-usage)
- [Slash Commands](./docs/slash_commands.md)
- [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
- [Example prompts](./docs/getting-started.md#example-prompts)
- [Custom prompts](./docs/prompts.md)
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [**Configuration**](./docs/config.md)
- [Example config](./docs/example-config.md)
- [Configuration](./docs/config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)
- [**Execpolicy quickstart**](./docs/execpolicy.md)
- [**Authentication**](./docs/authentication.md)
- [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
- [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
- **Automating Codex**
- [GitHub Action](https://github.com/openai/codex-action)
- [TypeScript SDK](./sdk/typescript/README.md)
- [Non-interactive mode (`codex exec`)](./docs/exec.md)
- [**Advanced**](./docs/advanced.md)
- [Non-interactive / CI mode](./docs/advanced.md#non-interactive--ci-mode)
- [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
- [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
- [**Zero data retention (ZDR)**](./docs/zdr.md)
@@ -110,3 +99,4 @@ See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that gover
## License
This repository is licensed under the [Apache-2.0 License](LICENSE).

View File

@@ -4,7 +4,7 @@
header = """
# Changelog
You can install any of these versions: `npm install -g @openai/codex@<version>`
You can install any of these versions: `npm install -g codex@version`
"""
body = """

View File

@@ -1 +1,7 @@
/vendor/
# Added by ./scripts/install_native_deps.sh
/bin/codex-aarch64-apple-darwin
/bin/codex-aarch64-unknown-linux-musl
/bin/codex-linux-sandbox-arm64
/bin/codex-linux-sandbox-x64
/bin/codex-x86_64-apple-darwin
/bin/codex-x86_64-unknown-linux-musl

View File

@@ -208,7 +208,7 @@ The hardening mechanism Codex uses depends on your OS:
| Requirement | Details |
| --------------------------- | --------------------------------------------------------------- |
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
| Node.js | **16 or newer** (Node 20 LTS recommended) |
| Node.js | **22 or newer** (LTS recommended) |
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
| RAM | 4-GB minimum (8-GB recommended) |
@@ -513,7 +513,7 @@ Codex runs model-generated commands in a sandbox. If a proposed command or file
<details>
<summary>Does it work on Windows?</summary>
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex is regularly tested on macOS and Linux with Node 20+, and also supports Node 16.
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex has been tested on macOS and Linux with Node 22.
</details>

70
codex-cli/bin/codex.js Normal file → Executable file
View File

@@ -1,8 +1,6 @@
#!/usr/bin/env node
// Unified entry point for the Codex CLI.
import { spawn } from "node:child_process";
import { existsSync } from "fs";
import path from "path";
import { fileURLToPath } from "url";
@@ -42,10 +40,10 @@ switch (platform) {
case "win32":
switch (arch) {
case "x64":
targetTriple = "x86_64-pc-windows-msvc";
targetTriple = "x86_64-pc-windows-msvc.exe";
break;
case "arm64":
targetTriple = "aarch64-pc-windows-msvc";
targetTriple = "aarch64-pc-windows-msvc.exe";
break;
default:
break;
@@ -59,16 +57,31 @@ if (!targetTriple) {
throw new Error(`Unsupported platform: ${platform} (${arch})`);
}
const vendorRoot = path.join(__dirname, "..", "vendor");
const archRoot = path.join(vendorRoot, targetTriple);
const codexBinaryName = process.platform === "win32" ? "codex.exe" : "codex";
const binaryPath = path.join(archRoot, "codex", codexBinaryName);
const binaryPath = path.join(__dirname, "..", "bin", `codex-${targetTriple}`);
// Use an asynchronous spawn instead of spawnSync so that Node is able to
// respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is
// executing. This allows us to forward those signals to the child process
// and guarantees that when either the child terminates or the parent
// receives a fatal signal, both processes exit in a predictable manner.
const { spawn } = await import("child_process");
async function tryImport(moduleName) {
try {
// eslint-disable-next-line node/no-unsupported-features/es-syntax
return await import(moduleName);
} catch (err) {
return null;
}
}
async function resolveRgDir() {
const ripgrep = await tryImport("@vscode/ripgrep");
if (!ripgrep?.rgPath) {
return null;
}
return path.dirname(ripgrep.rgPath);
}
function getUpdatedPath(newDirs) {
const pathSep = process.platform === "win32" ? ";" : ":";
@@ -80,49 +93,16 @@ function getUpdatedPath(newDirs) {
return updatedPath;
}
/**
* Use heuristics to detect the package manager that was used to install Codex
* in order to give the user a hint about how to update it.
*/
function detectPackageManager() {
const userAgent = process.env.npm_config_user_agent || "";
if (/\bbun\//.test(userAgent)) {
return "bun";
}
const execPath = process.env.npm_execpath || "";
if (execPath.includes("bun")) {
return "bun";
}
if (
process.env.BUN_INSTALL ||
process.env.BUN_INSTALL_GLOBAL_DIR ||
process.env.BUN_INSTALL_BIN_DIR
) {
return "bun";
}
return userAgent ? "npm" : null;
}
const additionalDirs = [];
const pathDir = path.join(archRoot, "path");
if (existsSync(pathDir)) {
additionalDirs.push(pathDir);
const rgDir = await resolveRgDir();
if (rgDir) {
additionalDirs.push(rgDir);
}
const updatedPath = getUpdatedPath(additionalDirs);
const env = { ...process.env, PATH: updatedPath };
const packageManagerEnvVar =
detectPackageManager() === "bun"
? "CODEX_MANAGED_BY_BUN"
: "CODEX_MANAGED_BY_NPM";
env[packageManagerEnvVar] = "1";
const child = spawn(binaryPath, process.argv.slice(2), {
stdio: "inherit",
env,
env: { ...process.env, PATH: updatedPath, CODEX_MANAGED_BY_NPM: "1" },
});
child.on("error", (err) => {

View File

@@ -1,79 +0,0 @@
#!/usr/bin/env dotslash
{
"name": "rg",
"platforms": {
"macos-aarch64": {
"size": 1787248,
"hash": "blake3",
"digest": "8d9942032585ea8ee805937634238d9aee7b210069f4703c88fbe568e26fb78a",
"format": "tar.gz",
"path": "ripgrep-14.1.1-aarch64-apple-darwin/rg",
"providers": [
{
"url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-aarch64-apple-darwin.tar.gz"
}
]
},
"linux-aarch64": {
"size": 2047405,
"hash": "blake3",
"digest": "0b670b8fa0a3df2762af2fc82cc4932f684ca4c02dbd1260d4f3133fd4b2a515",
"format": "tar.gz",
"path": "ripgrep-14.1.1-aarch64-unknown-linux-gnu/rg",
"providers": [
{
"url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-aarch64-unknown-linux-gnu.tar.gz"
}
]
},
"macos-x86_64": {
"size": 2082672,
"hash": "blake3",
"digest": "e9b862fc8da3127f92791f0ff6a799504154ca9d36c98bf3e60a81c6b1f7289e",
"format": "tar.gz",
"path": "ripgrep-14.1.1-x86_64-apple-darwin/rg",
"providers": [
{
"url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-x86_64-apple-darwin.tar.gz"
}
]
},
"linux-x86_64": {
"size": 2566310,
"hash": "blake3",
"digest": "f73cca4e54d78c31f832c7f6e2c0b4db8b04fa3eaa747915727d570893dbee76",
"format": "tar.gz",
"path": "ripgrep-14.1.1-x86_64-unknown-linux-musl/rg",
"providers": [
{
"url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-x86_64-unknown-linux-musl.tar.gz"
}
]
},
"windows-x86_64": {
"size": 2058893,
"hash": "blake3",
"digest": "a8ce1a6fed4f8093ee997e57f33254e94b2cd18e26358b09db599c89882eadbd",
"format": "zip",
"path": "ripgrep-14.1.1-x86_64-pc-windows-msvc/rg.exe",
"providers": [
{
"url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-x86_64-pc-windows-msvc.zip"
}
]
},
"windows-aarch64": {
"size": 1667740,
"hash": "blake3",
"digest": "47b971a8c4fca1d23a4e7c19bd4d88465ebc395598458133139406d3bf85f3fa",
"format": "zip",
"path": "rg.exe",
"providers": [
{
"url": "https://github.com/microsoft/ripgrep-prebuilt/releases/download/v13.0.0-13/ripgrep-v13.0.0-13-aarch64-pc-windows-msvc.zip"
}
]
}
}
}

View File

@@ -2,16 +2,117 @@
"name": "@openai/codex",
"version": "0.0.0-dev",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@openai/codex",
"version": "0.0.0-dev",
"license": "Apache-2.0",
"dependencies": {
"@vscode/ripgrep": "^1.15.14"
},
"bin": {
"codex": "bin/codex.js"
},
"engines": {
"node": ">=16"
"node": ">=20"
}
},
"node_modules/@vscode/ripgrep": {
"version": "1.15.14",
"resolved": "https://registry.npmjs.org/@vscode/ripgrep/-/ripgrep-1.15.14.tgz",
"integrity": "sha512-/G1UJPYlm+trBWQ6cMO3sv6b8D1+G16WaJH1/DSqw32JOVlzgZbLkDxRyzIpTpv30AcYGMkCf5tUqGlW6HbDWw==",
"hasInstallScript": true,
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^7.0.2",
"proxy-from-env": "^1.1.0",
"yauzl": "^2.9.2"
}
},
"node_modules/agent-base": {
"version": "7.1.4",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
"integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==",
"license": "MIT",
"engines": {
"node": ">= 14"
}
},
"node_modules/buffer-crc32": {
"version": "0.2.13",
"resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz",
"integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==",
"license": "MIT",
"engines": {
"node": "*"
}
},
"node_modules/debug": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz",
"integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==",
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/fd-slicer": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz",
"integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==",
"license": "MIT",
"dependencies": {
"pend": "~1.2.0"
}
},
"node_modules/https-proxy-agent": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
"integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
"license": "MIT",
"dependencies": {
"agent-base": "^7.1.2",
"debug": "4"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/pend": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz",
"integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==",
"license": "MIT"
},
"node_modules/proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
"license": "MIT"
},
"node_modules/yauzl": {
"version": "2.10.0",
"resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz",
"integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==",
"license": "MIT",
"dependencies": {
"buffer-crc32": "~0.2.3",
"fd-slicer": "~1.1.0"
}
}
}

View File

@@ -7,15 +7,20 @@
},
"type": "module",
"engines": {
"node": ">=16"
"node": ">=20"
},
"files": [
"bin",
"vendor"
"dist"
],
"repository": {
"type": "git",
"url": "git+https://github.com/openai/codex.git",
"directory": "codex-cli"
"url": "git+https://github.com/openai/codex.git"
},
"dependencies": {
"@vscode/ripgrep": "^1.15.14"
},
"devDependencies": {
"prettier": "^3.3.3"
}
}

View File

@@ -1,19 +1,9 @@
# npm releases
Use the staging helper in the repo root to generate npm tarballs for a release. For
example, to stage the CLI, responses proxy, and SDK packages for version `0.6.0`:
Run the following:
To build the 0.2.x or later version of the npm module, which runs the Rust version of the CLI, run:
```bash
./scripts/stage_npm_packages.py \
--release-version 0.6.0 \
--package codex \
--package codex-responses-api-proxy \
--package codex-sdk
./codex-cli/scripts/stage_rust_release.py --release-version 0.6.0
```
This downloads the native artifacts once, hydrates `vendor/` for each package, and writes
tarballs to `dist/npm/`.
If you need to invoke `build_npm_package.py` directly, run
`codex-cli/scripts/install_native_deps.py` first and pass `--vendor-src` pointing to the
directory that contains the populated `vendor/` tree.

View File

@@ -1,308 +0,0 @@
#!/usr/bin/env python3
"""Stage and optionally package the @openai/codex npm module."""
import argparse
import json
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
SCRIPT_DIR = Path(__file__).resolve().parent
CODEX_CLI_ROOT = SCRIPT_DIR.parent
REPO_ROOT = CODEX_CLI_ROOT.parent
RESPONSES_API_PROXY_NPM_ROOT = REPO_ROOT / "codex-rs" / "responses-api-proxy" / "npm"
CODEX_SDK_ROOT = REPO_ROOT / "sdk" / "typescript"
PACKAGE_NATIVE_COMPONENTS: dict[str, list[str]] = {
"codex": ["codex", "rg"],
"codex-responses-api-proxy": ["codex-responses-api-proxy"],
"codex-sdk": ["codex"],
}
COMPONENT_DEST_DIR: dict[str, str] = {
"codex": "codex",
"codex-responses-api-proxy": "codex-responses-api-proxy",
"rg": "path",
}
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Build or stage the Codex CLI npm package.")
parser.add_argument(
"--package",
choices=("codex", "codex-responses-api-proxy", "codex-sdk"),
default="codex",
help="Which npm package to stage (default: codex).",
)
parser.add_argument(
"--version",
help="Version number to write to package.json inside the staged package.",
)
parser.add_argument(
"--release-version",
help=(
"Version to stage for npm release."
),
)
parser.add_argument(
"--staging-dir",
type=Path,
help=(
"Directory to stage the package contents. Defaults to a new temporary directory "
"if omitted. The directory must be empty when provided."
),
)
parser.add_argument(
"--tmp",
dest="staging_dir",
type=Path,
help=argparse.SUPPRESS,
)
parser.add_argument(
"--pack-output",
type=Path,
help="Path where the generated npm tarball should be written.",
)
parser.add_argument(
"--vendor-src",
type=Path,
help="Directory containing pre-installed native binaries to bundle (vendor root).",
)
return parser.parse_args()
def main() -> int:
args = parse_args()
package = args.package
version = args.version
release_version = args.release_version
if release_version:
if version and version != release_version:
raise RuntimeError("--version and --release-version must match when both are provided.")
version = release_version
if not version:
raise RuntimeError("Must specify --version or --release-version.")
staging_dir, created_temp = prepare_staging_dir(args.staging_dir)
try:
stage_sources(staging_dir, version, package)
vendor_src = args.vendor_src.resolve() if args.vendor_src else None
native_components = PACKAGE_NATIVE_COMPONENTS.get(package, [])
if native_components:
if vendor_src is None:
components_str = ", ".join(native_components)
raise RuntimeError(
"Native components "
f"({components_str}) required for package '{package}'. Provide --vendor-src "
"pointing to a directory containing pre-installed binaries."
)
copy_native_binaries(vendor_src, staging_dir, native_components)
if release_version:
staging_dir_str = str(staging_dir)
if package == "codex":
print(
f"Staged version {version} for release in {staging_dir_str}\n\n"
"Verify the CLI:\n"
f" node {staging_dir_str}/bin/codex.js --version\n"
f" node {staging_dir_str}/bin/codex.js --help\n\n"
)
elif package == "codex-responses-api-proxy":
print(
f"Staged version {version} for release in {staging_dir_str}\n\n"
"Verify the responses API proxy:\n"
f" node {staging_dir_str}/bin/codex-responses-api-proxy.js --help\n\n"
)
else:
print(
f"Staged version {version} for release in {staging_dir_str}\n\n"
"Verify the SDK contents:\n"
f" ls {staging_dir_str}/dist\n"
f" ls {staging_dir_str}/vendor\n"
" node -e \"import('./dist/index.js').then(() => console.log('ok'))\"\n\n"
)
else:
print(f"Staged package in {staging_dir}")
if args.pack_output is not None:
output_path = run_npm_pack(staging_dir, args.pack_output)
print(f"npm pack output written to {output_path}")
finally:
if created_temp:
# Preserve the staging directory for further inspection.
pass
return 0
def prepare_staging_dir(staging_dir: Path | None) -> tuple[Path, bool]:
if staging_dir is not None:
staging_dir = staging_dir.resolve()
staging_dir.mkdir(parents=True, exist_ok=True)
if any(staging_dir.iterdir()):
raise RuntimeError(f"Staging directory {staging_dir} is not empty.")
return staging_dir, False
temp_dir = Path(tempfile.mkdtemp(prefix="codex-npm-stage-"))
return temp_dir, True
def stage_sources(staging_dir: Path, version: str, package: str) -> None:
if package == "codex":
bin_dir = staging_dir / "bin"
bin_dir.mkdir(parents=True, exist_ok=True)
shutil.copy2(CODEX_CLI_ROOT / "bin" / "codex.js", bin_dir / "codex.js")
rg_manifest = CODEX_CLI_ROOT / "bin" / "rg"
if rg_manifest.exists():
shutil.copy2(rg_manifest, bin_dir / "rg")
readme_src = REPO_ROOT / "README.md"
if readme_src.exists():
shutil.copy2(readme_src, staging_dir / "README.md")
package_json_path = CODEX_CLI_ROOT / "package.json"
elif package == "codex-responses-api-proxy":
bin_dir = staging_dir / "bin"
bin_dir.mkdir(parents=True, exist_ok=True)
launcher_src = RESPONSES_API_PROXY_NPM_ROOT / "bin" / "codex-responses-api-proxy.js"
shutil.copy2(launcher_src, bin_dir / "codex-responses-api-proxy.js")
readme_src = RESPONSES_API_PROXY_NPM_ROOT / "README.md"
if readme_src.exists():
shutil.copy2(readme_src, staging_dir / "README.md")
package_json_path = RESPONSES_API_PROXY_NPM_ROOT / "package.json"
elif package == "codex-sdk":
package_json_path = CODEX_SDK_ROOT / "package.json"
stage_codex_sdk_sources(staging_dir)
else:
raise RuntimeError(f"Unknown package '{package}'.")
with open(package_json_path, "r", encoding="utf-8") as fh:
package_json = json.load(fh)
package_json["version"] = version
if package == "codex-sdk":
scripts = package_json.get("scripts")
if isinstance(scripts, dict):
scripts.pop("prepare", None)
files = package_json.get("files")
if isinstance(files, list):
if "vendor" not in files:
files.append("vendor")
else:
package_json["files"] = ["dist", "vendor"]
with open(staging_dir / "package.json", "w", encoding="utf-8") as out:
json.dump(package_json, out, indent=2)
out.write("\n")
def run_command(cmd: list[str], cwd: Path | None = None) -> None:
print("+", " ".join(cmd))
subprocess.run(cmd, cwd=cwd, check=True)
def stage_codex_sdk_sources(staging_dir: Path) -> None:
package_root = CODEX_SDK_ROOT
run_command(["pnpm", "install", "--frozen-lockfile"], cwd=package_root)
run_command(["pnpm", "run", "build"], cwd=package_root)
dist_src = package_root / "dist"
if not dist_src.exists():
raise RuntimeError("codex-sdk build did not produce a dist directory.")
shutil.copytree(dist_src, staging_dir / "dist")
readme_src = package_root / "README.md"
if readme_src.exists():
shutil.copy2(readme_src, staging_dir / "README.md")
license_src = REPO_ROOT / "LICENSE"
if license_src.exists():
shutil.copy2(license_src, staging_dir / "LICENSE")
def copy_native_binaries(vendor_src: Path, staging_dir: Path, components: list[str]) -> None:
vendor_src = vendor_src.resolve()
if not vendor_src.exists():
raise RuntimeError(f"Vendor source directory not found: {vendor_src}")
components_set = {component for component in components if component in COMPONENT_DEST_DIR}
if not components_set:
return
vendor_dest = staging_dir / "vendor"
if vendor_dest.exists():
shutil.rmtree(vendor_dest)
vendor_dest.mkdir(parents=True, exist_ok=True)
for target_dir in vendor_src.iterdir():
if not target_dir.is_dir():
continue
dest_target_dir = vendor_dest / target_dir.name
dest_target_dir.mkdir(parents=True, exist_ok=True)
for component in components_set:
dest_dir_name = COMPONENT_DEST_DIR.get(component)
if dest_dir_name is None:
continue
src_component_dir = target_dir / dest_dir_name
if not src_component_dir.exists():
raise RuntimeError(
f"Missing native component '{component}' in vendor source: {src_component_dir}"
)
dest_component_dir = dest_target_dir / dest_dir_name
if dest_component_dir.exists():
shutil.rmtree(dest_component_dir)
shutil.copytree(src_component_dir, dest_component_dir)
def run_npm_pack(staging_dir: Path, output_path: Path) -> Path:
output_path = output_path.resolve()
output_path.parent.mkdir(parents=True, exist_ok=True)
with tempfile.TemporaryDirectory(prefix="codex-npm-pack-") as pack_dir_str:
pack_dir = Path(pack_dir_str)
stdout = subprocess.check_output(
["npm", "pack", "--json", "--pack-destination", str(pack_dir)],
cwd=staging_dir,
text=True,
)
try:
pack_output = json.loads(stdout)
except json.JSONDecodeError as exc:
raise RuntimeError("Failed to parse npm pack output.") from exc
if not pack_output:
raise RuntimeError("npm pack did not produce an output tarball.")
tarball_name = pack_output[0].get("filename") or pack_output[0].get("name")
if not tarball_name:
raise RuntimeError("Unable to determine npm pack output filename.")
tarball_path = pack_dir / tarball_name
if not tarball_path.exists():
raise RuntimeError(f"Expected npm pack output not found: {tarball_path}")
shutil.move(str(tarball_path), output_path)
return output_path
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,383 +0,0 @@
#!/usr/bin/env python3
"""Install Codex native binaries (Rust CLI plus ripgrep helpers)."""
import argparse
import json
import os
import shutil
import subprocess
import tarfile
import tempfile
import zipfile
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Iterable, Sequence
from urllib.parse import urlparse
from urllib.request import urlopen
SCRIPT_DIR = Path(__file__).resolve().parent
CODEX_CLI_ROOT = SCRIPT_DIR.parent
DEFAULT_WORKFLOW_URL = "https://github.com/openai/codex/actions/runs/17952349351" # rust-v0.40.0
VENDOR_DIR_NAME = "vendor"
RG_MANIFEST = CODEX_CLI_ROOT / "bin" / "rg"
BINARY_TARGETS = (
"x86_64-unknown-linux-musl",
"aarch64-unknown-linux-musl",
"x86_64-apple-darwin",
"aarch64-apple-darwin",
"x86_64-pc-windows-msvc",
"aarch64-pc-windows-msvc",
)
@dataclass(frozen=True)
class BinaryComponent:
artifact_prefix: str # matches the artifact filename prefix (e.g. codex-<target>.zst)
dest_dir: str # directory under vendor/<target>/ where the binary is installed
binary_basename: str # executable name inside dest_dir (before optional .exe)
BINARY_COMPONENTS = {
"codex": BinaryComponent(
artifact_prefix="codex",
dest_dir="codex",
binary_basename="codex",
),
"codex-responses-api-proxy": BinaryComponent(
artifact_prefix="codex-responses-api-proxy",
dest_dir="codex-responses-api-proxy",
binary_basename="codex-responses-api-proxy",
),
}
RG_TARGET_PLATFORM_PAIRS: list[tuple[str, str]] = [
("x86_64-unknown-linux-musl", "linux-x86_64"),
("aarch64-unknown-linux-musl", "linux-aarch64"),
("x86_64-apple-darwin", "macos-x86_64"),
("aarch64-apple-darwin", "macos-aarch64"),
("x86_64-pc-windows-msvc", "windows-x86_64"),
("aarch64-pc-windows-msvc", "windows-aarch64"),
]
RG_TARGET_TO_PLATFORM = {target: platform for target, platform in RG_TARGET_PLATFORM_PAIRS}
DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS]
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Install native Codex binaries.")
parser.add_argument(
"--workflow-url",
help=(
"GitHub Actions workflow URL that produced the artifacts. Defaults to a "
"known good run when omitted."
),
)
parser.add_argument(
"--component",
dest="components",
action="append",
choices=tuple(list(BINARY_COMPONENTS) + ["rg"]),
help=(
"Limit installation to the specified components."
" May be repeated. Defaults to 'codex' and 'rg'."
),
)
parser.add_argument(
"root",
nargs="?",
type=Path,
help=(
"Directory containing package.json for the staged package. If omitted, the "
"repository checkout is used."
),
)
return parser.parse_args()
def main() -> int:
args = parse_args()
codex_cli_root = (args.root or CODEX_CLI_ROOT).resolve()
vendor_dir = codex_cli_root / VENDOR_DIR_NAME
vendor_dir.mkdir(parents=True, exist_ok=True)
components = args.components or ["codex", "rg"]
workflow_url = (args.workflow_url or DEFAULT_WORKFLOW_URL).strip()
if not workflow_url:
workflow_url = DEFAULT_WORKFLOW_URL
workflow_id = workflow_url.rstrip("/").split("/")[-1]
print(f"Downloading native artifacts from workflow {workflow_id}...")
with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
artifacts_dir = Path(artifacts_dir_str)
_download_artifacts(workflow_id, artifacts_dir)
install_binary_components(
artifacts_dir,
vendor_dir,
BINARY_TARGETS,
[name for name in components if name in BINARY_COMPONENTS],
)
if "rg" in components:
print("Fetching ripgrep binaries...")
fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)
print(f"Installed native dependencies into {vendor_dir}")
return 0
def fetch_rg(
vendor_dir: Path,
targets: Sequence[str] | None = None,
*,
manifest_path: Path,
) -> list[Path]:
"""Download ripgrep binaries described by the DotSlash manifest."""
if targets is None:
targets = DEFAULT_RG_TARGETS
if not manifest_path.exists():
raise FileNotFoundError(f"DotSlash manifest not found: {manifest_path}")
manifest = _load_manifest(manifest_path)
platforms = manifest.get("platforms", {})
vendor_dir.mkdir(parents=True, exist_ok=True)
targets = list(targets)
if not targets:
return []
task_configs: list[tuple[str, str, dict]] = []
for target in targets:
platform_key = RG_TARGET_TO_PLATFORM.get(target)
if platform_key is None:
raise ValueError(f"Unsupported ripgrep target '{target}'.")
platform_info = platforms.get(platform_key)
if platform_info is None:
raise RuntimeError(f"Platform '{platform_key}' not found in manifest {manifest_path}.")
task_configs.append((target, platform_key, platform_info))
results: dict[str, Path] = {}
max_workers = min(len(task_configs), max(1, (os.cpu_count() or 1)))
print("Installing ripgrep binaries for targets: " + ", ".join(targets))
with ThreadPoolExecutor(max_workers=max_workers) as executor:
future_map = {
executor.submit(
_fetch_single_rg,
vendor_dir,
target,
platform_key,
platform_info,
manifest_path,
): target
for target, platform_key, platform_info in task_configs
}
for future in as_completed(future_map):
target = future_map[future]
results[target] = future.result()
print(f" installed ripgrep for {target}")
return [results[target] for target in targets]
def _download_artifacts(workflow_id: str, dest_dir: Path) -> None:
cmd = [
"gh",
"run",
"download",
"--dir",
str(dest_dir),
"--repo",
"openai/codex",
workflow_id,
]
subprocess.check_call(cmd)
def install_binary_components(
artifacts_dir: Path,
vendor_dir: Path,
targets: Iterable[str],
component_names: Sequence[str],
) -> None:
selected_components = [BINARY_COMPONENTS[name] for name in component_names if name in BINARY_COMPONENTS]
if not selected_components:
return
targets = list(targets)
if not targets:
return
for component in selected_components:
print(
f"Installing {component.binary_basename} binaries for targets: "
+ ", ".join(targets)
)
max_workers = min(len(targets), max(1, (os.cpu_count() or 1)))
with ThreadPoolExecutor(max_workers=max_workers) as executor:
futures = {
executor.submit(
_install_single_binary,
artifacts_dir,
vendor_dir,
target,
component,
): target
for target in targets
}
for future in as_completed(futures):
installed_path = future.result()
print(f" installed {installed_path}")
def _install_single_binary(
artifacts_dir: Path,
vendor_dir: Path,
target: str,
component: BinaryComponent,
) -> Path:
artifact_subdir = artifacts_dir / target
archive_name = _archive_name_for_target(component.artifact_prefix, target)
archive_path = artifact_subdir / archive_name
if not archive_path.exists():
raise FileNotFoundError(f"Expected artifact not found: {archive_path}")
dest_dir = vendor_dir / target / component.dest_dir
dest_dir.mkdir(parents=True, exist_ok=True)
binary_name = (
f"{component.binary_basename}.exe" if "windows" in target else component.binary_basename
)
dest = dest_dir / binary_name
dest.unlink(missing_ok=True)
extract_archive(archive_path, "zst", None, dest)
if "windows" not in target:
dest.chmod(0o755)
return dest
def _archive_name_for_target(artifact_prefix: str, target: str) -> str:
if "windows" in target:
return f"{artifact_prefix}-{target}.exe.zst"
return f"{artifact_prefix}-{target}.zst"
def _fetch_single_rg(
vendor_dir: Path,
target: str,
platform_key: str,
platform_info: dict,
manifest_path: Path,
) -> Path:
providers = platform_info.get("providers", [])
if not providers:
raise RuntimeError(f"No providers listed for platform '{platform_key}' in {manifest_path}.")
url = providers[0]["url"]
archive_format = platform_info.get("format", "zst")
archive_member = platform_info.get("path")
dest_dir = vendor_dir / target / "path"
dest_dir.mkdir(parents=True, exist_ok=True)
is_windows = platform_key.startswith("win")
binary_name = "rg.exe" if is_windows else "rg"
dest = dest_dir / binary_name
with tempfile.TemporaryDirectory() as tmp_dir_str:
tmp_dir = Path(tmp_dir_str)
archive_filename = os.path.basename(urlparse(url).path)
download_path = tmp_dir / archive_filename
_download_file(url, download_path)
dest.unlink(missing_ok=True)
extract_archive(download_path, archive_format, archive_member, dest)
if not is_windows:
dest.chmod(0o755)
return dest
def _download_file(url: str, dest: Path) -> None:
dest.parent.mkdir(parents=True, exist_ok=True)
with urlopen(url) as response, open(dest, "wb") as out:
shutil.copyfileobj(response, out)
def extract_archive(
archive_path: Path,
archive_format: str,
archive_member: str | None,
dest: Path,
) -> None:
dest.parent.mkdir(parents=True, exist_ok=True)
if archive_format == "zst":
output_path = archive_path.parent / dest.name
subprocess.check_call(
["zstd", "-f", "-d", str(archive_path), "-o", str(output_path)]
)
shutil.move(str(output_path), dest)
return
if archive_format == "tar.gz":
if not archive_member:
raise RuntimeError("Missing 'path' for tar.gz archive in DotSlash manifest.")
with tarfile.open(archive_path, "r:gz") as tar:
try:
member = tar.getmember(archive_member)
except KeyError as exc:
raise RuntimeError(
f"Entry '{archive_member}' not found in archive {archive_path}."
) from exc
tar.extract(member, path=archive_path.parent, filter="data")
extracted = archive_path.parent / archive_member
shutil.move(str(extracted), dest)
return
if archive_format == "zip":
if not archive_member:
raise RuntimeError("Missing 'path' for zip archive in DotSlash manifest.")
with zipfile.ZipFile(archive_path) as archive:
try:
with archive.open(archive_member) as src, open(dest, "wb") as out:
shutil.copyfileobj(src, out)
except KeyError as exc:
raise RuntimeError(
f"Entry '{archive_member}' not found in archive {archive_path}."
) from exc
return
raise RuntimeError(f"Unsupported archive format '{archive_format}'.")
def _load_manifest(manifest_path: Path) -> dict:
cmd = ["dotslash", "--", "parse", str(manifest_path)]
stdout = subprocess.check_output(cmd, text=True)
try:
manifest = json.loads(stdout)
except json.JSONDecodeError as exc:
raise RuntimeError(f"Invalid DotSlash manifest output from {manifest_path}.") from exc
if not isinstance(manifest, dict):
raise RuntimeError(
f"Unexpected DotSlash manifest structure for {manifest_path}: {type(manifest)!r}"
)
return manifest
if __name__ == "__main__":
import sys
sys.exit(main())

View File

@@ -0,0 +1,94 @@
#!/usr/bin/env bash
# Install native runtime dependencies for codex-cli.
#
# Usage
# install_native_deps.sh [--workflow-url URL] [CODEX_CLI_ROOT]
#
# The optional CODEX_CLI_ROOT is the path that contains package.json. Omitting
# it installs the binaries into the repository's own bin/ folder to support
# local development.
set -euo pipefail
# ------------------
# Parse arguments
# ------------------
CODEX_CLI_ROOT=""
# Until we start publishing stable GitHub releases, we have to grab the binaries
# from the GitHub Action that created them. Update the URL below to point to the
# appropriate workflow run:
WORKFLOW_URL="https://github.com/openai/codex/actions/runs/17417194663" # rust-v0.28.0
while [[ $# -gt 0 ]]; do
case "$1" in
--workflow-url)
shift || { echo "--workflow-url requires an argument"; exit 1; }
if [ -n "$1" ]; then
WORKFLOW_URL="$1"
fi
;;
*)
if [[ -z "$CODEX_CLI_ROOT" ]]; then
CODEX_CLI_ROOT="$1"
else
echo "Unexpected argument: $1" >&2
exit 1
fi
;;
esac
shift
done
# ----------------------------------------------------------------------------
# Determine where the binaries should be installed.
# ----------------------------------------------------------------------------
if [ -n "$CODEX_CLI_ROOT" ]; then
# The caller supplied a release root directory.
BIN_DIR="$CODEX_CLI_ROOT/bin"
else
# No argument; fall back to the repo's own bin directory.
# Resolve the path of this script, then walk up to the repo root.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CODEX_CLI_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
BIN_DIR="$CODEX_CLI_ROOT/bin"
fi
# Make sure the destination directory exists.
mkdir -p "$BIN_DIR"
# ----------------------------------------------------------------------------
# Download and decompress the artifacts from the GitHub Actions workflow.
# ----------------------------------------------------------------------------
WORKFLOW_ID="${WORKFLOW_URL##*/}"
ARTIFACTS_DIR="$(mktemp -d)"
trap 'rm -rf "$ARTIFACTS_DIR"' EXIT
# NB: The GitHub CLI `gh` must be installed and authenticated.
gh run download --dir "$ARTIFACTS_DIR" --repo openai/codex "$WORKFLOW_ID"
# x64 Linux
zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/codex-x86_64-unknown-linux-musl.zst" \
-o "$BIN_DIR/codex-x86_64-unknown-linux-musl"
# ARM64 Linux
zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/codex-aarch64-unknown-linux-musl.zst" \
-o "$BIN_DIR/codex-aarch64-unknown-linux-musl"
# x64 macOS
zstd -d "$ARTIFACTS_DIR/x86_64-apple-darwin/codex-x86_64-apple-darwin.zst" \
-o "$BIN_DIR/codex-x86_64-apple-darwin"
# ARM64 macOS
zstd -d "$ARTIFACTS_DIR/aarch64-apple-darwin/codex-aarch64-apple-darwin.zst" \
-o "$BIN_DIR/codex-aarch64-apple-darwin"
# x64 Windows
zstd -d "$ARTIFACTS_DIR/x86_64-pc-windows-msvc/codex-x86_64-pc-windows-msvc.exe.zst" \
-o "$BIN_DIR/codex-x86_64-pc-windows-msvc.exe"
# ARM64 Windows
zstd -d "$ARTIFACTS_DIR/aarch64-pc-windows-msvc/codex-aarch64-pc-windows-msvc.exe.zst" \
-o "$BIN_DIR/codex-aarch64-pc-windows-msvc.exe"
echo "Installed native dependencies into $BIN_DIR"

View File

@@ -0,0 +1,120 @@
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# stage_release.sh
# -----------------------------------------------------------------------------
# Stages an npm release for @openai/codex.
#
# Usage:
#
# --tmp <dir> : Use <dir> instead of a freshly created temp directory.
# -h|--help : Print usage.
#
# -----------------------------------------------------------------------------
set -euo pipefail
# Helper - usage / flag parsing
usage() {
cat <<EOF
Usage: $(basename "$0") [--tmp DIR] [--version VERSION]
Options
--tmp DIR Use DIR to stage the release (defaults to a fresh mktemp dir)
--version Specify the version to release (defaults to a timestamp-based version)
-h, --help Show this help
Positional arguments are no longer accepted; use --tmp to choose the staging
directory.
EOF
exit "${1:-0}"
}
TMPDIR=""
# Default to a timestamp-based version (keep same scheme as before)
VERSION="$(printf '0.1.%d' "$(date +%y%m%d%H%M)")"
WORKFLOW_URL=""
# Manual flag parser - Bash getopts does not handle GNU long options well.
while [[ $# -gt 0 ]]; do
case "$1" in
--tmp)
shift || { echo "--tmp requires an argument"; usage 1; }
TMPDIR="$1"
;;
--tmp=*)
TMPDIR="${1#*=}"
;;
--version)
shift || { echo "--version requires an argument"; usage 1; }
VERSION="$1"
;;
--workflow-url)
shift || { echo "--workflow-url requires an argument"; exit 1; }
WORKFLOW_URL="$1"
;;
-h|--help)
usage 0
;;
--*)
echo "Unknown option: $1" >&2
usage 1
;;
*)
echo "Unexpected extra argument: $1" >&2
usage 1
;;
esac
shift
done
# If the caller did not specify a directory, create a fresh temporary one.
if [[ -z "$TMPDIR" ]]; then
TMPDIR="$(mktemp -d)"
fi
# Ensure the directory exists, then resolve to an absolute path.
mkdir -p "$TMPDIR"
TMPDIR="$(cd "$TMPDIR" && pwd)"
# Main build logic
echo "Staging release in $TMPDIR"
# The script lives in codex-cli/scripts/ - change into codex-cli root so that
# relative paths keep working.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CODEX_CLI_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
pushd "$CODEX_CLI_ROOT" >/dev/null
# 1. Build the JS artifacts ---------------------------------------------------
# Paths inside the staged package
mkdir -p "$TMPDIR/bin"
cp -r bin/codex.js "$TMPDIR/bin/codex.js"
cp ../README.md "$TMPDIR" || true # README is one level up - ignore if missing
# Modify package.json - bump the version so the staged copy is published
# under the requested release number.
jq --arg version "$VERSION" \
'.version = $version' \
package.json > "$TMPDIR/package.json"
# 2. Native runtime deps (sandbox plus optional Rust binaries)
./scripts/install_native_deps.sh --workflow-url "$WORKFLOW_URL" "$TMPDIR"
popd >/dev/null
echo "Staged version $VERSION for release in $TMPDIR"
echo "Verify the CLI:"
echo " node ${TMPDIR}/bin/codex.js --version"
echo " node ${TMPDIR}/bin/codex.js --help"
# Print final hint for convenience
echo "Next: cd \"$TMPDIR\" && npm publish"

View File

@@ -0,0 +1,70 @@
#!/usr/bin/env python3
import json
import subprocess
import sys
import argparse
from pathlib import Path
def main() -> int:
parser = argparse.ArgumentParser(
description="""Stage a release for the npm module.
Run this after the GitHub Release has been created and use
`--release-version` to specify the version to release.
Optionally pass `--tmp` to control the temporary staging directory that will be
forwarded to stage_release.sh.
"""
)
parser.add_argument(
"--release-version", required=True, help="Version to release, e.g., 0.3.0"
)
parser.add_argument(
"--tmp",
help="Optional path to stage the npm package; forwarded to stage_release.sh",
)
args = parser.parse_args()
version = args.release_version
gh_run = subprocess.run(
[
"gh",
"run",
"list",
"--branch",
f"rust-v{version}",
"--json",
"workflowName,url,headSha",
"--jq",
'first(.[] | select(.workflowName == "rust-release"))',
],
stdout=subprocess.PIPE,
check=True,
)
gh_run.check_returncode()
workflow = json.loads(gh_run.stdout)
sha = workflow["headSha"]
print(f"should `git checkout {sha}`")
current_dir = Path(__file__).parent.resolve()
cmd = [
str(current_dir / "stage_release.sh"),
"--version",
version,
"--workflow-url",
workflow["url"],
]
if args.tmp:
cmd.extend(["--tmp", args.tmp])
stage_release = subprocess.run(cmd)
stage_release.check_returncode()
return 0
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,6 +0,0 @@
[advisories]
ignore = [
"RUSTSEC-2024-0388", # derivative 2.2.0 via starlark; upstream crate is unmaintained
"RUSTSEC-2025-0057", # fxhash 0.2.1 via starlark_map; upstream crate is unmaintained
"RUSTSEC-2024-0436", # paste 1.0.15 via starlark/ratatui; upstream crate is unmaintained
]

View File

@@ -1,5 +0,0 @@
[target.'cfg(all(windows, target_env = "msvc"))']
rustflags = ["-C", "link-arg=/STACK:8388608"]
[target.'cfg(all(windows, target_env = "gnu"))']
rustflags = ["-C", "link-arg=-Wl,--stack,8388608"]

View File

@@ -1,13 +0,0 @@
[profile.default]
# Do not increase, fix your test instead
slow-timeout = { period = "15s", terminate-after = 2 }
[[profile.default.overrides]]
# Do not add new tests here
filter = 'test(rmcp_client) | test(humanlike_typing_1000_chars_appears_live_no_placeholder)'
slow-timeout = { period = "1m", terminate-after = 4 }
[[profile.default.overrides]]
filter = 'test(approval_matrix_covers_all_modes)'
slow-timeout = { period = "30s", terminate-after = 2 }

View File

@@ -1,26 +0,0 @@
name: Cargo audit
on:
pull_request:
push:
branches:
- main
permissions:
contents: read
jobs:
audit:
runs-on: ubuntu-latest
defaults:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install cargo-audit
uses: taiki-e/install-action@v2
with:
tool: cargo-audit
- name: Run cargo audit
run: cargo audit --deny warnings
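The same check can be reproduced locally before pushing; a sketch mirroring the workflow steps above:
```shell
# Mirror the CI job: install cargo-audit, then audit from codex-rs.
cargo install cargo-audit
cd codex-rs
cargo audit --deny warnings
```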

1
codex-rs/.gitignore vendored
View File

@@ -1,5 +1,4 @@
/target/
/target-*/
# Recommended value of CARGO_TARGET_DIR when using Docker as explained in .devcontainer/README.md.
/target-amd64/

2885
codex-rs/Cargo.lock generated

File diff suppressed because it is too large.

View File

@@ -1,48 +1,23 @@
[workspace]
members = [
"backend-client",
"ansi-escape",
"async-utils",
"app-server",
"app-server-protocol",
"app-server-test-client",
"apply-patch",
"arg0",
"feedback",
"codex-backend-openapi-models",
"cloud-tasks",
"cloud-tasks-client",
"cli",
"common",
"core",
"exec",
"exec-server",
"execpolicy",
"execpolicy-legacy",
"keyring-store",
"file-search",
"linux-sandbox",
"lmstudio",
"login",
"mcp-client",
"mcp-server",
"mcp-types",
"ollama",
"process-hardening",
"protocol",
"rmcp-client",
"responses-api-proxy",
"stdio-to-uds",
"otel",
"protocol-ts",
"tui",
"utils/git",
"utils/cache",
"utils/image",
"utils/json-to-toml",
"utils/pty",
"utils/readiness",
"utils/string",
"codex-client",
"codex-api",
]
resolver = "2"
@@ -53,223 +28,15 @@ version = "0.0.0"
# crates created with `cargo new -w ...` automatically inherit the 2024
# edition.
edition = "2024"
license = "Apache-2.0"
[workspace.dependencies]
# Internal
app_test_support = { path = "app-server/tests/common" }
codex-ansi-escape = { path = "ansi-escape" }
codex-api = { path = "codex-api" }
codex-app-server = { path = "app-server" }
codex-app-server-protocol = { path = "app-server-protocol" }
codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-chatgpt = { path = "chatgpt" }
codex-client = { path = "codex-client" }
codex-common = { path = "common" }
codex-core = { path = "core" }
codex-exec = { path = "exec" }
codex-execpolicy = { path = "execpolicy" }
codex-feedback = { path = "feedback" }
codex-file-search = { path = "file-search" }
codex-git = { path = "utils/git" }
codex-keyring-store = { path = "keyring-store" }
codex-linux-sandbox = { path = "linux-sandbox" }
codex-lmstudio = { path = "lmstudio" }
codex-login = { path = "login" }
codex-mcp-server = { path = "mcp-server" }
codex-ollama = { path = "ollama" }
codex-otel = { path = "otel" }
codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-tui = { path = "tui" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-image = { path = "utils/image" }
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-string = { path = "utils/string" }
codex-windows-sandbox = { path = "windows-sandbox-rs" }
core_test_support = { path = "core/tests/common" }
mcp-types = { path = "mcp-types" }
mcp_test_support = { path = "mcp-server/tests/common" }
# External
allocative = "0.3.3"
ansi-to-tui = "7.0.0"
anyhow = "1"
arboard = { version = "3", features = ["wayland-data-control"] }
askama = "0.14"
assert_cmd = "2"
assert_matches = "1.5.0"
async-channel = "2.3.1"
async-stream = "0.3.6"
async-trait = "0.1.89"
axum = { version = "0.8", default-features = false }
base64 = "0.22.1"
bytes = "1.10.1"
chardetng = "0.1.17"
chrono = "0.4.42"
clap = "4"
clap_complete = "4"
color-eyre = "0.6.3"
crossterm = "0.28.1"
ctor = "0.5.0"
derive_more = "2"
diffy = "0.4.2"
dirs = "6"
dotenvy = "0.15.7"
dunce = "1.0.4"
encoding_rs = "0.8.35"
env-flags = "0.1.1"
env_logger = "0.11.5"
escargot = "0.5"
eventsource-stream = "0.2.3"
futures = { version = "0.3", default-features = false }
http = "1.3.1"
icu_decimal = "2.1"
icu_locale_core = "2.1"
icu_provider = { version = "2.1", features = ["sync"] }
ignore = "0.4.23"
image = { version = "^0.25.9", default-features = false }
indexmap = "2.12.0"
insta = "1.43.2"
itertools = "0.14.0"
keyring = { version = "3.6", default-features = false }
landlock = "0.4.1"
lazy_static = "1"
libc = "0.2.177"
log = "0.4"
lru = "0.12.5"
maplit = "1.0.2"
mime_guess = "2.0.5"
multimap = "0.10.0"
notify = "8.2.0"
nucleo-matcher = "0.3.1"
once_cell = "1.20.2"
openssl-sys = "*"
opentelemetry = "0.30.0"
opentelemetry-appender-tracing = "0.30.0"
opentelemetry-otlp = "0.30.0"
opentelemetry-semantic-conventions = "0.30.0"
opentelemetry_sdk = "0.30.0"
os_info = "3.12.0"
owo-colors = "4.2.0"
path-absolutize = "3.1.1"
pathdiff = "0.2"
portable-pty = "0.9.0"
predicates = "3"
pretty_assertions = "1.4.1"
pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
ratatui-macros = "0.6.0"
regex = "1.12.2"
regex-lite = "0.1.7"
reqwest = "0.12"
rmcp = { version = "0.10.0", default-features = false }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.34.0"
serde = "1"
serde_json = "1"
serde_yaml = "0.9"
serde_with = "3.16"
serial_test = "3.2.0"
sha1 = "0.10.6"
sha2 = "0.10"
shlex = "1.3.0"
similar = "2.7.0"
socket2 = "0.6.0"
starlark = "0.13.0"
strum = "0.27.2"
strum_macros = "0.27.2"
supports-color = "3.0.2"
sys-locale = "0.3.2"
tempfile = "3.23.0"
test-log = "0.2.18"
textwrap = "0.16.2"
thiserror = "2.0.17"
time = "0.3"
tiny_http = "0.12"
tokio = "1"
tokio-stream = "0.1.17"
tokio-test = "0.4"
tokio-util = "0.7.16"
toml = "0.9.5"
toml_edit = "0.23.5"
tonic = "0.13.1"
tracing = "0.1.43"
tracing-appender = "0.2.3"
tracing-subscriber = "0.3.20"
tracing-test = "0.2.5"
tree-sitter = "0.25.10"
tree-sitter-bash = "0.25"
tree-sitter-highlight = "0.25.10"
ts-rs = "11"
uds_windows = "1.1.0"
unicode-segmentation = "1.12.0"
unicode-width = "0.2"
url = "2"
urlencoding = "2.1"
uuid = "1"
vt100 = "0.16.2"
walkdir = "2.5.0"
webbrowser = "1.0"
which = "6"
wildmatch = "2.5.0"
wiremock = "0.6"
zeroize = "1.8.2"
[workspace.lints]
rust = {}
[workspace.lints.clippy]
expect_used = "deny"
identity_op = "deny"
manual_clamp = "deny"
manual_filter = "deny"
manual_find = "deny"
manual_flatten = "deny"
manual_map = "deny"
manual_memcpy = "deny"
manual_non_exhaustive = "deny"
manual_ok_or = "deny"
manual_range_contains = "deny"
manual_retain = "deny"
manual_strip = "deny"
manual_try_fold = "deny"
manual_unwrap_or = "deny"
needless_borrow = "deny"
needless_borrowed_reference = "deny"
needless_collect = "deny"
needless_late_init = "deny"
needless_option_as_deref = "deny"
needless_question_mark = "deny"
needless_update = "deny"
redundant_clone = "deny"
redundant_closure = "deny"
redundant_closure_for_method_calls = "deny"
redundant_static_lifetimes = "deny"
trivially_copy_pass_by_ref = "deny"
uninlined_format_args = "deny"
unnecessary_filter_map = "deny"
unnecessary_lazy_evaluations = "deny"
unnecessary_sort_by = "deny"
unnecessary_to_owned = "deny"
unwrap_used = "deny"
# cargo-shear cannot see the platform-specific openssl-sys usage, so we
# silence the false positive here instead of deleting a real dependency.
[workspace.metadata.cargo-shear]
ignored = ["icu_provider", "openssl-sys", "codex-utils-readiness"]
[profile.release]
lto = "fat"
# Because we bundle some of these executables with the TypeScript CLI, we
@@ -279,16 +46,6 @@ strip = "symbols"
# See https://github.com/openai/codex/issues/1411 for details.
codegen-units = 1
[profile.ci-test]
debug = 1 # Reduce debug symbol size
inherits = "test"
opt-level = 0
[patch.crates-io]
# Uncomment to debug local changes.
# ratatui = { path = "../../ratatui" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
# Uncomment to debug local changes.
# rmcp = { path = "../../rust-sdk/crates/rmcp" }
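As an aside, the `ci-test` profile declared above can be selected explicitly when running tests; custom Cargo profiles are passed via `--profile`:
```shell
# Run tests with the lighter-weight CI profile (debug = 1, opt-level = 0).
cargo test --profile ci-test
```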

View File

@@ -4,23 +4,18 @@ We provide Codex CLI as a standalone, native executable to ensure a zero-dependency
## Installing Codex
Today, the easiest way to install Codex is via `npm`:
Today, the easiest way to install Codex is via `npm`, though we plan to publish Codex to other package managers soon.
```shell
npm i -g @openai/codex
npm i -g @openai/codex@native
codex
```
You can also install via Homebrew (`brew install --cask codex`) or download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases).
## Documentation quickstart
- First run with Codex? Follow the walkthrough in [`docs/getting-started.md`](../docs/getting-started.md) for prompts, keyboard shortcuts, and session management.
- Already shipping with Codex and want deeper control? Jump to [`docs/advanced.md`](../docs/advanced.md) and the configuration reference at [`docs/config.md`](../docs/config.md).
You can also download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases).
## What's new in the Rust CLI
The Rust implementation is now the maintained Codex CLI and serves as the default experience. It includes a number of features that the legacy TypeScript CLI never supported.
While we are [working to close the gap between the TypeScript and Rust implementations of Codex CLI](https://github.com/openai/codex/issues/1262), note that the Rust CLI has a number of features that the TypeScript CLI does not!
### Config
@@ -28,22 +23,14 @@ Codex supports a rich set of configuration options. Note that the Rust CLI uses
### Model Context Protocol Support
#### MCP client
Codex CLI functions as an MCP client that can connect to MCP servers on startup. See the [`mcp_servers`](../docs/config.md#mcp_servers) section in the configuration documentation for details.
Codex functions as an MCP client, which allows both the CLI and the IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details.
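For illustration, a minimal `mcp_servers` entry in `~/.codex/config.toml` might look like this; the server name `docs` and the launched package are hypothetical, so check the configuration documentation for the authoritative schema:
```shell
# Hypothetical example: register one MCP server launcher in config.toml.
cat >> ~/.codex/config.toml <<'EOF'
[mcp_servers.docs]
command = "npx"
args = ["-y", "@modelcontextprotocol/server-everything"]
EOF
```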
#### MCP server (experimental)
Codex can be launched as an MCP _server_ by running `codex mcp-server`. This allows _other_ MCP clients to drive Codex as a tool from their own agents.
Use the [`@modelcontextprotocol/inspector`](https://github.com/modelcontextprotocol/inspector) to try it out:
It is still experimental, but you can also launch Codex as an MCP _server_ by running `codex mcp`. Use the [`@modelcontextprotocol/inspector`](https://github.com/modelcontextprotocol/inspector) to try it out:
```shell
npx @modelcontextprotocol/inspector codex mcp-server
npx @modelcontextprotocol/inspector codex mcp
```
Use `codex mcp` to add/list/get/remove MCP server launchers defined in `config.toml`, and `codex mcp-server` to run the MCP server directly.
### Notifications
You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS.
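For example, pointing `notify` at a small wrapper script makes Codex invoke it after each turn; the path below is a placeholder, and the exact JSON payload the program receives is described in the notify documentation:
```shell
# Placeholder path: Codex runs this program when a turn completes,
# passing details about the turn as a JSON argument (see docs/config.md#notify).
cat >> ~/.codex/config.toml <<'EOF'
notify = ["python3", "/path/to/notify.py"]
EOF
```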
@@ -52,22 +39,39 @@ You can enable notifications by configuring a script that is run whenever the ag
To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the prompt via `stdin`) and Codex will work on your task until it decides that it is done and exits. Output is printed to the terminal directly. You can set the `RUST_LOG` environment variable to see more about what's going on.
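For example, both of the following kick off a non-interactive run (prompts are illustrative):
```shell
# Prompt as an argument.
codex exec "update the CHANGELOG for the next release"
# Prompt via stdin, with verbose logging enabled.
echo "explain this codebase to me" | RUST_LOG=info codex exec
```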
### Use `@` for file search
Typing `@` triggers a fuzzy-filename search over the workspace root. Use up/down to select among the results and Tab or Enter to replace the `@` with the selected path. You can use Esc to cancel the search.
### EscEsc to edit a previous message
When the chat composer is empty, press Esc to prime “backtrack” mode. Press Esc again to open a transcript preview highlighting the last user message; press Esc repeatedly to step to older user messages. Press Enter to confirm and Codex will fork the conversation from that point, trim the visible transcript accordingly, and prefill the composer with the selected user message so you can edit and resubmit it.
In the transcript preview, the footer shows an `Esc edit prev` hint while editing is active.
### `--cd`/`-C` flag
Sometimes it is not convenient to `cd` to the directory you want Codex to use as the "working root" before running Codex. Fortunately, `codex` supports a `--cd` option so you can specify whatever folder you want. You can confirm that Codex is honoring `--cd` by double-checking the **workdir** it reports in the TUI at the start of a new session.
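For example (paths and prompt illustrative):
```shell
# Use another directory as the working root without leaving the current one.
codex --cd ~/projects/my-app
codex -C ~/projects/my-app "fix the failing unit tests"
```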
### Shell completions
Generate shell completion scripts via:
```shell
codex completion bash
codex completion zsh
codex completion fish
```
### Experimenting with the Codex Sandbox
To see what happens when a command runs under the sandbox provided by Codex, use the following subcommands in Codex CLI:
```
# macOS
codex sandbox macos [--full-auto] [--log-denials] [COMMAND]...
codex debug seatbelt [--full-auto] [COMMAND]...
# Linux
codex sandbox linux [--full-auto] [COMMAND]...
# Windows
codex sandbox windows [--full-auto] [COMMAND]...
# Legacy aliases
codex debug seatbelt [--full-auto] [--log-denials] [COMMAND]...
codex debug landlock [--full-auto] [COMMAND]...
```

View File

@@ -1,17 +1,16 @@
[package]
edition = "2024"
name = "codex-ansi-escape"
version.workspace = true
edition.workspace = true
license.workspace = true
version = { workspace = true }
[lib]
name = "codex_ansi_escape"
path = "src/lib.rs"
[dependencies]
ansi-to-tui = { workspace = true }
ratatui = { workspace = true, features = [
ansi-to-tui = "7.0.0"
ratatui = { version = "0.29.0", features = [
"unstable-rendered-line-info",
"unstable-widget-ref",
] }
tracing = { workspace = true, features = ["log"] }
tracing = { version = "0.1.41", features = ["log"] }

View File

@@ -3,30 +3,11 @@ use ansi_to_tui::IntoText;
use ratatui::text::Line;
use ratatui::text::Text;
// Expand tabs in a best-effort way for transcript rendering.
// Tabs can interact poorly with left-gutter prefixes in our TUI and CLI
// transcript views (e.g., `nl` separates line numbers from content with a tab).
// Replacing tabs with spaces avoids odd visual artifacts without changing
// semantics for our use cases.
fn expand_tabs(s: &str) -> std::borrow::Cow<'_, str> {
if s.contains('\t') {
// Keep it simple: replace each tab with 4 spaces.
// We do not try to align to tab stops since most usages (like `nl`)
// look acceptable with a fixed substitution and this avoids stateful math
// across spans.
std::borrow::Cow::Owned(s.replace('\t', " "))
} else {
std::borrow::Cow::Borrowed(s)
}
}
/// This function should be used when the contents of `s` are expected to match
/// a single line. If multiple lines are found, a warning is logged and only the
/// first line is returned.
pub fn ansi_escape_line(s: &str) -> Line<'static> {
// Normalize tabs to spaces to avoid odd gutter collisions in transcript mode.
let s = expand_tabs(s);
let text = ansi_escape(&s);
let text = ansi_escape(s);
match text.lines.as_slice() {
[] => "".into(),
[only] => only.clone(),

View File

@@ -1,29 +0,0 @@
[package]
name = "codex-app-server-protocol"
version.workspace = true
edition.workspace = true
license.workspace = true
[lib]
name = "codex_app_server_protocol"
path = "src/lib.rs"
[lints]
workspace = true
[dependencies]
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-protocol = { workspace = true }
mcp-types = { workspace = true }
schemars = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
strum_macros = { workspace = true }
thiserror = { workspace = true }
ts-rs = { workspace = true }
uuid = { workspace = true, features = ["serde", "v7"] }
[dev-dependencies]
anyhow = { workspace = true }
pretty_assertions = { workspace = true }

View File

@@ -1,943 +0,0 @@
use crate::ClientNotification;
use crate::ClientRequest;
use crate::ServerNotification;
use crate::ServerRequest;
use crate::export_client_notification_schemas;
use crate::export_client_param_schemas;
use crate::export_client_response_schemas;
use crate::export_client_responses;
use crate::export_server_notification_schemas;
use crate::export_server_param_schemas;
use crate::export_server_response_schemas;
use crate::export_server_responses;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use codex_protocol::protocol::EventMsg;
use schemars::JsonSchema;
use schemars::schema_for;
use serde::Serialize;
use serde_json::Map;
use serde_json::Value;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsStr;
use std::fs;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use ts_rs::TS;
const HEADER: &str = "// GENERATED CODE! DO NOT MODIFY BY HAND!\n\n";
#[derive(Clone)]
pub struct GeneratedSchema {
namespace: Option<String>,
logical_name: String,
value: Value,
in_v1_dir: bool,
}
impl GeneratedSchema {
fn namespace(&self) -> Option<&str> {
self.namespace.as_deref()
}
fn logical_name(&self) -> &str {
&self.logical_name
}
fn value(&self) -> &Value {
&self.value
}
}
type JsonSchemaEmitter = fn(&Path) -> Result<GeneratedSchema>;
pub fn generate_types(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
generate_ts(out_dir, prettier)?;
generate_json(out_dir)?;
Ok(())
}
#[derive(Clone, Copy, Debug)]
pub struct GenerateTsOptions {
pub generate_indices: bool,
pub ensure_headers: bool,
pub run_prettier: bool,
}
impl Default for GenerateTsOptions {
fn default() -> Self {
Self {
generate_indices: true,
ensure_headers: true,
run_prettier: true,
}
}
}
pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
generate_ts_with_options(out_dir, prettier, GenerateTsOptions::default())
}
pub fn generate_ts_with_options(
out_dir: &Path,
prettier: Option<&Path>,
options: GenerateTsOptions,
) -> Result<()> {
let v2_out_dir = out_dir.join("v2");
ensure_dir(out_dir)?;
ensure_dir(&v2_out_dir)?;
ClientRequest::export_all_to(out_dir)?;
export_client_responses(out_dir)?;
ClientNotification::export_all_to(out_dir)?;
ServerRequest::export_all_to(out_dir)?;
export_server_responses(out_dir)?;
ServerNotification::export_all_to(out_dir)?;
if options.generate_indices {
generate_index_ts(out_dir)?;
generate_index_ts(&v2_out_dir)?;
}
// Ensure our header is present on all TS files (root + subdirs like v2/).
let mut ts_files = Vec::new();
let should_collect_ts_files =
options.ensure_headers || (options.run_prettier && prettier.is_some());
if should_collect_ts_files {
ts_files = ts_files_in_recursive(out_dir)?;
}
if options.ensure_headers {
for file in &ts_files {
prepend_header_if_missing(file)?;
}
}
// Optionally run Prettier on all generated TS files.
if options.run_prettier
&& let Some(prettier_bin) = prettier
&& !ts_files.is_empty()
{
let status = Command::new(prettier_bin)
.arg("--write")
.arg("--log-level")
.arg("warn")
.args(ts_files.iter().map(|p| p.as_os_str()))
.status()
.with_context(|| format!("Failed to invoke Prettier at {}", prettier_bin.display()))?;
if !status.success() {
return Err(anyhow!("Prettier failed with status {status}"));
}
}
Ok(())
}
pub fn generate_json(out_dir: &Path) -> Result<()> {
ensure_dir(out_dir)?;
let envelope_emitters: &[JsonSchemaEmitter] = &[
|d| write_json_schema_with_return::<crate::RequestId>(d, "RequestId"),
|d| write_json_schema_with_return::<crate::JSONRPCMessage>(d, "JSONRPCMessage"),
|d| write_json_schema_with_return::<crate::JSONRPCRequest>(d, "JSONRPCRequest"),
|d| write_json_schema_with_return::<crate::JSONRPCNotification>(d, "JSONRPCNotification"),
|d| write_json_schema_with_return::<crate::JSONRPCResponse>(d, "JSONRPCResponse"),
|d| write_json_schema_with_return::<crate::JSONRPCError>(d, "JSONRPCError"),
|d| write_json_schema_with_return::<crate::JSONRPCErrorError>(d, "JSONRPCErrorError"),
|d| write_json_schema_with_return::<crate::ClientRequest>(d, "ClientRequest"),
|d| write_json_schema_with_return::<crate::ServerRequest>(d, "ServerRequest"),
|d| write_json_schema_with_return::<crate::ClientNotification>(d, "ClientNotification"),
|d| write_json_schema_with_return::<crate::ServerNotification>(d, "ServerNotification"),
|d| write_json_schema_with_return::<EventMsg>(d, "EventMsg"),
];
let mut schemas: Vec<GeneratedSchema> = Vec::new();
for emit in envelope_emitters {
schemas.push(emit(out_dir)?);
}
schemas.extend(export_client_param_schemas(out_dir)?);
schemas.extend(export_client_response_schemas(out_dir)?);
schemas.extend(export_server_param_schemas(out_dir)?);
schemas.extend(export_server_response_schemas(out_dir)?);
schemas.extend(export_client_notification_schemas(out_dir)?);
schemas.extend(export_server_notification_schemas(out_dir)?);
let bundle = build_schema_bundle(schemas)?;
write_pretty_json(
out_dir.join("codex_app_server_protocol.schemas.json"),
&bundle,
)?;
Ok(())
}
fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
const SPECIAL_DEFINITIONS: &[&str] = &[
"ClientNotification",
"ClientRequest",
"EventMsg",
"ServerNotification",
"ServerRequest",
];
const IGNORED_DEFINITIONS: &[&str] = &["Option<()>"];
let namespaced_types = collect_namespaced_types(&schemas);
let mut definitions = Map::new();
for schema in schemas {
let GeneratedSchema {
namespace,
logical_name,
mut value,
in_v1_dir,
} = schema;
if IGNORED_DEFINITIONS.contains(&logical_name.as_str()) {
continue;
}
if let Some(ref ns) = namespace {
rewrite_refs_to_namespace(&mut value, ns);
}
let mut forced_namespace_refs: Vec<(String, String)> = Vec::new();
if let Value::Object(ref mut obj) = value
&& let Some(defs) = obj.remove("definitions")
&& let Value::Object(defs_obj) = defs
{
for (def_name, mut def_schema) in defs_obj {
if IGNORED_DEFINITIONS.contains(&def_name.as_str()) {
continue;
}
if SPECIAL_DEFINITIONS.contains(&def_name.as_str()) {
continue;
}
annotate_schema(&mut def_schema, Some(def_name.as_str()));
let target_namespace = match namespace {
Some(ref ns) => Some(ns.clone()),
None => namespace_for_definition(&def_name, &namespaced_types)
.cloned()
.filter(|_| !in_v1_dir),
};
if let Some(ref ns) = target_namespace {
if namespace.as_deref() == Some(ns.as_str()) {
rewrite_refs_to_namespace(&mut def_schema, ns);
insert_into_namespace(&mut definitions, ns, def_name.clone(), def_schema)?;
} else if !forced_namespace_refs
.iter()
.any(|(name, existing_ns)| name == &def_name && existing_ns == ns)
{
forced_namespace_refs.push((def_name.clone(), ns.clone()));
}
} else {
definitions.insert(def_name, def_schema);
}
}
}
for (name, ns) in forced_namespace_refs {
rewrite_named_ref_to_namespace(&mut value, &ns, &name);
}
if let Some(ref ns) = namespace {
insert_into_namespace(&mut definitions, ns, logical_name.clone(), value)?;
} else {
definitions.insert(logical_name, value);
}
}
let mut root = Map::new();
root.insert(
"$schema".to_string(),
Value::String("http://json-schema.org/draft-07/schema#".into()),
);
root.insert(
"title".to_string(),
Value::String("CodexAppServerProtocol".into()),
);
root.insert("type".to_string(), Value::String("object".into()));
root.insert("definitions".to_string(), Value::Object(definitions));
Ok(Value::Object(root))
}
fn insert_into_namespace(
definitions: &mut Map<String, Value>,
namespace: &str,
name: String,
schema: Value,
) -> Result<()> {
let entry = definitions
.entry(namespace.to_string())
.or_insert_with(|| Value::Object(Map::new()));
match entry {
Value::Object(map) => {
map.insert(name, schema);
Ok(())
}
_ => Err(anyhow!("expected namespace {namespace} to be an object")),
}
}
fn write_json_schema_with_return<T>(out_dir: &Path, name: &str) -> Result<GeneratedSchema>
where
T: JsonSchema,
{
let file_stem = name.trim();
let schema = schema_for!(T);
let mut schema_value = serde_json::to_value(schema)?;
annotate_schema(&mut schema_value, Some(file_stem));
// If the name looks like a namespaced path (e.g., "v2::Type"), mirror
// the TypeScript layout and write to out_dir/v2/Type.json. Otherwise
// write alongside the legacy files.
let (raw_namespace, logical_name) = split_namespace(file_stem);
let out_path = if let Some(ns) = raw_namespace {
let dir = out_dir.join(ns);
ensure_dir(&dir)?;
dir.join(format!("{logical_name}.json"))
} else {
out_dir.join(format!("{file_stem}.json"))
};
write_pretty_json(out_path, &schema_value)
.with_context(|| format!("Failed to write JSON schema for {file_stem}"))?;
let namespace = match raw_namespace {
Some("v1") | None => None,
Some(ns) => Some(ns.to_string()),
};
Ok(GeneratedSchema {
in_v1_dir: raw_namespace == Some("v1"),
namespace,
logical_name: logical_name.to_string(),
value: schema_value,
})
}
pub(crate) fn write_json_schema<T>(out_dir: &Path, name: &str) -> Result<GeneratedSchema>
where
T: JsonSchema,
{
write_json_schema_with_return::<T>(out_dir, name)
}
fn write_pretty_json(path: PathBuf, value: &impl Serialize) -> Result<()> {
let json = serde_json::to_vec_pretty(value)
.with_context(|| format!("Failed to serialize JSON schema to {}", path.display()))?;
fs::write(&path, json).with_context(|| format!("Failed to write {}", path.display()))?;
Ok(())
}
/// Split a fully-qualified type name like "v2::Type" into its namespace and logical name.
fn split_namespace(name: &str) -> (Option<&str>, &str) {
name.split_once("::")
.map_or((None, name), |(ns, rest)| (Some(ns), rest))
}
/// Recursively rewrite $ref values that point at "#/definitions/..." so that
/// they point to a namespaced location under the bundle.
fn rewrite_refs_to_namespace(value: &mut Value, ns: &str) {
match value {
Value::Object(obj) => {
if let Some(Value::String(r)) = obj.get_mut("$ref")
&& let Some(suffix) = r.strip_prefix("#/definitions/")
{
let prefix = format!("{ns}/");
if !suffix.starts_with(&prefix) {
*r = format!("#/definitions/{ns}/{suffix}");
}
}
for v in obj.values_mut() {
rewrite_refs_to_namespace(v, ns);
}
}
Value::Array(items) => {
for v in items.iter_mut() {
rewrite_refs_to_namespace(v, ns);
}
}
_ => {}
}
}
fn collect_namespaced_types(schemas: &[GeneratedSchema]) -> HashMap<String, String> {
let mut types = HashMap::new();
for schema in schemas {
if let Some(ns) = schema.namespace() {
types
.entry(schema.logical_name().to_string())
.or_insert_with(|| ns.to_string());
if let Some(Value::Object(defs)) = schema.value().get("definitions") {
for key in defs.keys() {
types.entry(key.clone()).or_insert_with(|| ns.to_string());
}
}
if let Some(Value::Object(defs)) = schema.value().get("$defs") {
for key in defs.keys() {
types.entry(key.clone()).or_insert_with(|| ns.to_string());
}
}
}
}
types
}
fn namespace_for_definition<'a>(
name: &str,
types: &'a HashMap<String, String>,
) -> Option<&'a String> {
if let Some(ns) = types.get(name) {
return Some(ns);
}
let trimmed = name.trim_end_matches(|c: char| c.is_ascii_digit());
if trimmed != name {
return types.get(trimmed);
}
None
}
fn variant_definition_name(base: &str, variant: &Value) -> Option<String> {
if let Some(props) = variant.get("properties").and_then(Value::as_object) {
if let Some(method_literal) = literal_from_property(props, "method") {
let pascal = to_pascal_case(method_literal);
return Some(match base {
"ClientRequest" | "ServerRequest" => format!("{pascal}Request"),
"ClientNotification" | "ServerNotification" => format!("{pascal}Notification"),
_ => format!("{pascal}{base}"),
});
}
if let Some(type_literal) = literal_from_property(props, "type") {
let pascal = to_pascal_case(type_literal);
return Some(match base {
"EventMsg" => format!("{pascal}EventMsg"),
_ => format!("{pascal}{base}"),
});
}
if props.len() == 1
&& let Some(key) = props.keys().next()
{
let pascal = to_pascal_case(key);
return Some(format!("{pascal}{base}"));
}
}
if let Some(required) = variant.get("required").and_then(Value::as_array)
&& required.len() == 1
&& let Some(key) = required[0].as_str()
{
let pascal = to_pascal_case(key);
return Some(format!("{pascal}{base}"));
}
None
}
fn literal_from_property<'a>(props: &'a Map<String, Value>, key: &str) -> Option<&'a str> {
props.get(key).and_then(string_literal)
}
fn string_literal(value: &Value) -> Option<&str> {
value.get("const").and_then(Value::as_str).or_else(|| {
value
.get("enum")
.and_then(Value::as_array)
.and_then(|arr| arr.first())
.and_then(Value::as_str)
})
}
fn annotate_schema(value: &mut Value, base: Option<&str>) {
match value {
Value::Object(map) => annotate_object(map, base),
Value::Array(items) => {
for item in items {
annotate_schema(item, base);
}
}
_ => {}
}
}
fn annotate_object(map: &mut Map<String, Value>, base: Option<&str>) {
let owner = map.get("title").and_then(Value::as_str).map(str::to_owned);
if let Some(owner) = owner.as_deref()
&& let Some(Value::Object(props)) = map.get_mut("properties")
{
set_discriminator_titles(props, owner);
}
if let Some(Value::Array(variants)) = map.get_mut("oneOf") {
annotate_variant_list(variants, base);
}
if let Some(Value::Array(variants)) = map.get_mut("anyOf") {
annotate_variant_list(variants, base);
}
if let Some(Value::Object(defs)) = map.get_mut("definitions") {
for (name, schema) in defs.iter_mut() {
annotate_schema(schema, Some(name.as_str()));
}
}
if let Some(Value::Object(defs)) = map.get_mut("$defs") {
for (name, schema) in defs.iter_mut() {
annotate_schema(schema, Some(name.as_str()));
}
}
if let Some(Value::Object(props)) = map.get_mut("properties") {
for value in props.values_mut() {
annotate_schema(value, base);
}
}
if let Some(items) = map.get_mut("items") {
annotate_schema(items, base);
}
if let Some(additional) = map.get_mut("additionalProperties") {
annotate_schema(additional, base);
}
for (key, child) in map.iter_mut() {
match key.as_str() {
"oneOf"
| "anyOf"
| "definitions"
| "$defs"
| "properties"
| "items"
| "additionalProperties" => {}
_ => annotate_schema(child, base),
}
}
}
fn annotate_variant_list(variants: &mut [Value], base: Option<&str>) {
let mut seen = HashSet::new();
for variant in variants.iter() {
if let Some(name) = variant_title(variant) {
seen.insert(name.to_owned());
}
}
for variant in variants.iter_mut() {
let mut variant_name = variant_title(variant).map(str::to_owned);
if variant_name.is_none()
&& let Some(base_name) = base
&& let Some(name) = variant_definition_name(base_name, variant)
{
let mut candidate = name.clone();
let mut index = 2;
while seen.contains(&candidate) {
candidate = format!("{name}{index}");
index += 1;
}
if let Some(obj) = variant.as_object_mut() {
obj.insert("title".into(), Value::String(candidate.clone()));
}
seen.insert(candidate.clone());
variant_name = Some(candidate);
}
if let Some(name) = variant_name.as_deref()
&& let Some(obj) = variant.as_object_mut()
&& let Some(Value::Object(props)) = obj.get_mut("properties")
{
set_discriminator_titles(props, name);
}
annotate_schema(variant, base);
}
}
const DISCRIMINATOR_KEYS: &[&str] = &["type", "method", "mode", "status", "role", "reason"];
fn set_discriminator_titles(props: &mut Map<String, Value>, owner: &str) {
for key in DISCRIMINATOR_KEYS {
if let Some(prop_schema) = props.get_mut(*key)
&& string_literal(prop_schema).is_some()
&& let Value::Object(prop_obj) = prop_schema
{
if prop_obj.contains_key("title") {
continue;
}
let suffix = to_pascal_case(key);
prop_obj.insert("title".into(), Value::String(format!("{owner}{suffix}")));
}
}
}
fn variant_title(value: &Value) -> Option<&str> {
value
.as_object()
.and_then(|obj| obj.get("title"))
.and_then(Value::as_str)
}
fn to_pascal_case(input: &str) -> String {
let mut result = String::new();
let mut capitalize_next = true;
for c in input.chars() {
if c == '_' || c == '-' {
capitalize_next = true;
continue;
}
if capitalize_next {
result.extend(c.to_uppercase());
capitalize_next = false;
} else {
result.push(c);
}
}
result
}
fn ensure_dir(dir: &Path) -> Result<()> {
fs::create_dir_all(dir)
.with_context(|| format!("Failed to create output directory {}", dir.display()))
}
fn rewrite_named_ref_to_namespace(value: &mut Value, ns: &str, name: &str) {
let direct = format!("#/definitions/{name}");
let prefixed = format!("{direct}/");
let replacement = format!("#/definitions/{ns}/{name}");
let replacement_prefixed = format!("{replacement}/");
match value {
Value::Object(obj) => {
if let Some(Value::String(reference)) = obj.get_mut("$ref") {
if reference == &direct {
*reference = replacement;
} else if let Some(rest) = reference.strip_prefix(&prefixed) {
*reference = format!("{replacement_prefixed}{rest}");
}
}
for child in obj.values_mut() {
rewrite_named_ref_to_namespace(child, ns, name);
}
}
Value::Array(items) => {
for child in items {
rewrite_named_ref_to_namespace(child, ns, name);
}
}
_ => {}
}
}
fn prepend_header_if_missing(path: &Path) -> Result<()> {
let mut content = String::new();
{
let mut f = fs::File::open(path)
.with_context(|| format!("Failed to open {} for reading", path.display()))?;
f.read_to_string(&mut content)
.with_context(|| format!("Failed to read {}", path.display()))?;
}
if content.starts_with(HEADER) {
return Ok(());
}
let mut f = fs::File::create(path)
.with_context(|| format!("Failed to open {} for writing", path.display()))?;
f.write_all(HEADER.as_bytes())
.with_context(|| format!("Failed to write header to {}", path.display()))?;
f.write_all(content.as_bytes())
.with_context(|| format!("Failed to write content to {}", path.display()))?;
Ok(())
}
fn ts_files_in(dir: &Path) -> Result<Vec<PathBuf>> {
let mut files = Vec::new();
for entry in
fs::read_dir(dir).with_context(|| format!("Failed to read dir {}", dir.display()))?
{
let entry = entry?;
let path = entry.path();
if path.is_file() && path.extension() == Some(OsStr::new("ts")) {
files.push(path);
}
}
files.sort();
Ok(files)
}
fn ts_files_in_recursive(dir: &Path) -> Result<Vec<PathBuf>> {
let mut files = Vec::new();
let mut stack = vec![dir.to_path_buf()];
while let Some(d) = stack.pop() {
for entry in
fs::read_dir(&d).with_context(|| format!("Failed to read dir {}", d.display()))?
{
let entry = entry?;
let path = entry.path();
if path.is_dir() {
stack.push(path);
} else if path.is_file() && path.extension() == Some(OsStr::new("ts")) {
files.push(path);
}
}
}
files.sort();
Ok(files)
}
/// Generate an index.ts file that re-exports all generated types.
/// This allows consumers to import all types from a single file.
fn generate_index_ts(out_dir: &Path) -> Result<PathBuf> {
let mut entries: Vec<String> = Vec::new();
let mut stems: Vec<String> = ts_files_in(out_dir)?
.into_iter()
.filter_map(|p| {
let stem = p.file_stem()?.to_string_lossy().into_owned();
if stem == "index" { None } else { Some(stem) }
})
.collect();
stems.sort();
stems.dedup();
for name in stems {
entries.push(format!("export type {{ {name} }} from \"./{name}\";\n"));
}
// If this is the root out_dir and a ./v2 folder exists with TS files,
// expose it as a namespace to avoid symbol collisions at the root.
let v2_dir = out_dir.join("v2");
let has_v2_ts = ts_files_in(&v2_dir).map(|v| !v.is_empty()).unwrap_or(false);
if has_v2_ts {
entries.push("export * as v2 from \"./v2\";\n".to_string());
}
let mut content =
String::with_capacity(HEADER.len() + entries.iter().map(String::len).sum::<usize>());
content.push_str(HEADER);
for line in &entries {
content.push_str(line);
}
let index_path = out_dir.join("index.ts");
let mut f = fs::File::create(&index_path)
.with_context(|| format!("Failed to create {}", index_path.display()))?;
f.write_all(content.as_bytes())
.with_context(|| format!("Failed to write {}", index_path.display()))?;
Ok(index_path)
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use std::collections::BTreeSet;
use std::fs;
use std::path::PathBuf;
use uuid::Uuid;
#[test]
fn generated_ts_has_no_optional_nullable_fields() -> Result<()> {
// Assert that there are no types of the form "?: T | null" in the generated TS files.
let output_dir = std::env::temp_dir().join(format!("codex_ts_types_{}", Uuid::now_v7()));
fs::create_dir(&output_dir)?;
struct TempDirGuard(PathBuf);
impl Drop for TempDirGuard {
fn drop(&mut self) {
let _ = fs::remove_dir_all(&self.0);
}
}
let _guard = TempDirGuard(output_dir.clone());
// Avoid doing more work than necessary to keep the test from timing out.
let options = GenerateTsOptions {
generate_indices: false,
ensure_headers: false,
run_prettier: false,
};
generate_ts_with_options(&output_dir, None, options)?;
let mut undefined_offenders = Vec::new();
let mut optional_nullable_offenders = BTreeSet::new();
let mut stack = vec![output_dir];
while let Some(dir) = stack.pop() {
for entry in fs::read_dir(&dir)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
stack.push(path);
continue;
}
if matches!(path.extension().and_then(|ext| ext.to_str()), Some("ts")) {
let contents = fs::read_to_string(&path)?;
if contents.contains("| undefined") {
undefined_offenders.push(path.clone());
}
const SKIP_PREFIXES: &[&str] = &[
"const ",
"let ",
"var ",
"export const ",
"export let ",
"export var ",
];
let mut search_start = 0;
while let Some(idx) = contents[search_start..].find("| null") {
let abs_idx = search_start + idx;
// Find the property-colon for this field by scanning forward
// from the start of the segment and ignoring nested braces,
// brackets, and parens. This avoids colons inside nested
// type literals like `{ [k in string]?: string }`.
let line_start_idx =
contents[..abs_idx].rfind('\n').map(|i| i + 1).unwrap_or(0);
let mut segment_start_idx = line_start_idx;
if let Some(rel_idx) = contents[line_start_idx..abs_idx].rfind(',') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
if let Some(rel_idx) = contents[line_start_idx..abs_idx].rfind('{') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
if let Some(rel_idx) = contents[line_start_idx..abs_idx].rfind('}') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
// Scan forward for the colon that separates the field name from its type.
let mut level_brace = 0_i32;
let mut level_brack = 0_i32;
let mut level_paren = 0_i32;
let mut in_single = false;
let mut in_double = false;
let mut escape = false;
let mut prop_colon_idx = None;
for (i, ch) in contents[segment_start_idx..abs_idx].char_indices() {
let idx_abs = segment_start_idx + i;
if escape {
escape = false;
continue;
}
match ch {
'\\' => {
// Only treat as escape when inside a string.
if in_single || in_double {
escape = true;
}
}
'\'' => {
if !in_double {
in_single = !in_single;
}
}
'"' => {
if !in_single {
in_double = !in_double;
}
}
'{' if !in_single && !in_double => level_brace += 1,
'}' if !in_single && !in_double => level_brace -= 1,
'[' if !in_single && !in_double => level_brack += 1,
']' if !in_single && !in_double => level_brack -= 1,
'(' if !in_single && !in_double => level_paren += 1,
')' if !in_single && !in_double => level_paren -= 1,
':' if !in_single
&& !in_double
&& level_brace == 0
&& level_brack == 0
&& level_paren == 0 =>
{
prop_colon_idx = Some(idx_abs);
break;
}
_ => {}
}
}
let Some(colon_idx) = prop_colon_idx else {
search_start = abs_idx + 5;
continue;
};
let mut field_prefix = contents[segment_start_idx..colon_idx].trim();
if field_prefix.is_empty() {
search_start = abs_idx + 5;
continue;
}
if let Some(comment_idx) = field_prefix.rfind("*/") {
field_prefix = field_prefix[comment_idx + 2..].trim_start();
}
if field_prefix.is_empty() {
search_start = abs_idx + 5;
continue;
}
if SKIP_PREFIXES
.iter()
.any(|prefix| field_prefix.starts_with(prefix))
{
search_start = abs_idx + 5;
continue;
}
if field_prefix.contains('(') {
search_start = abs_idx + 5;
continue;
}
// If the last non-whitespace before ':' is '?', then this is an
// optional field with a nullable type (i.e., "?: T | null"),
// which we explicitly disallow.
if field_prefix.chars().rev().find(|c| !c.is_whitespace()) == Some('?') {
let line_number =
contents[..abs_idx].chars().filter(|c| *c == '\n').count() + 1;
let offending_line_end = contents[line_start_idx..]
.find('\n')
.map(|i| line_start_idx + i)
.unwrap_or(contents.len());
let offending_snippet =
contents[line_start_idx..offending_line_end].trim();
optional_nullable_offenders.insert(format!(
"{}:{}: {offending_snippet}",
path.display(),
line_number
));
}
search_start = abs_idx + 5;
}
}
}
}
assert!(
undefined_offenders.is_empty(),
"Generated TypeScript still includes unions with `undefined` in {undefined_offenders:?}"
);
// If this assertion fails, it means a field was generated as
// "?: T | null" — i.e., both optional (undefined) and nullable (null).
// We only want either "?: T" or ": T | null".
assert!(
optional_nullable_offenders.is_empty(),
"Generated TypeScript has optional fields with nullable types (disallowed '?: T | null'), add #[ts(optional)] to fix:\n{optional_nullable_offenders:?}"
);
Ok(())
}
}

View File

@@ -1,71 +0,0 @@
//! We do not do true JSON-RPC 2.0, as we neither send nor expect the
//! "jsonrpc": "2.0" field.
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use ts_rs::TS;
pub const JSONRPC_VERSION: &str = "2.0";
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, Hash, Eq, JsonSchema, TS)]
#[serde(untagged)]
pub enum RequestId {
String(String),
#[ts(type = "number")]
Integer(i64),
}
pub type Result = serde_json::Value;
/// Refers to any valid JSON-RPC object that can be decoded off the wire, or encoded to be sent.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
#[serde(untagged)]
pub enum JSONRPCMessage {
Request(JSONRPCRequest),
Notification(JSONRPCNotification),
Response(JSONRPCResponse),
Error(JSONRPCError),
}
/// A request that expects a response.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCRequest {
pub id: RequestId,
pub method: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub params: Option<serde_json::Value>,
}
/// A notification which does not expect a response.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCNotification {
pub method: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub params: Option<serde_json::Value>,
}
/// A successful (non-error) response to a request.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCResponse {
pub id: RequestId,
pub result: Result,
}
/// A response to a request that indicates an error occurred.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCError {
pub error: JSONRPCErrorError,
pub id: RequestId,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCErrorError {
pub code: i64,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub data: Option<serde_json::Value>,
pub message: String,
}

View File

@@ -1,12 +0,0 @@
mod export;
mod jsonrpc_lite;
mod protocol;
pub use export::generate_json;
pub use export::generate_ts;
pub use export::generate_types;
pub use jsonrpc_lite::*;
pub use protocol::common::*;
pub use protocol::thread_history::*;
pub use protocol::v1::*;
pub use protocol::v2::*;

View File

@@ -1,821 +0,0 @@
use std::path::Path;
use crate::JSONRPCNotification;
use crate::JSONRPCRequest;
use crate::RequestId;
use crate::export::GeneratedSchema;
use crate::export::write_json_schema;
use crate::protocol::v1;
use crate::protocol::v2;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display;
use ts_rs::TS;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema, TS)]
#[ts(type = "string")]
pub struct GitSha(pub String);
impl GitSha {
pub fn new(sha: &str) -> Self {
Self(sha.to_string())
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
pub enum AuthMode {
ApiKey,
ChatGPT,
}
/// Generates an `enum ClientRequest` where each variant is a request that the
/// client can send to the server. Each variant has associated `params` and
/// `response` types. Also generates an `export_client_responses()` function to
/// export all response types to TypeScript.
macro_rules! client_request_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident $(=> $wire:literal)? {
params: $(#[$params_meta:meta])* $params:ty,
response: $response:ty,
}
),* $(,)?
) => {
/// Request from the client to the server.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "method", rename_all = "camelCase")]
pub enum ClientRequest {
$(
$(#[$variant_meta])*
$(#[serde(rename = $wire)] #[ts(rename = $wire)])?
$variant {
#[serde(rename = "id")]
request_id: RequestId,
$(#[$params_meta])*
params: $params,
},
)*
}
pub fn export_client_responses(
out_dir: &::std::path::Path,
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
$(
<$response as ::ts_rs::TS>::export_all_to(out_dir)?;
)*
Ok(())
}
#[allow(clippy::vec_init_then_push)]
pub fn export_client_response_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(
schemas.push(write_json_schema::<$response>(out_dir, stringify!($response))?);
)*
Ok(schemas)
}
#[allow(clippy::vec_init_then_push)]
pub fn export_client_param_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(
schemas.push(write_json_schema::<$params>(out_dir, stringify!($params))?);
)*
Ok(schemas)
}
};
}
client_request_definitions! {
Initialize {
params: v1::InitializeParams,
response: v1::InitializeResponse,
},
/// NEW APIs
// Thread lifecycle
ThreadStart => "thread/start" {
params: v2::ThreadStartParams,
response: v2::ThreadStartResponse,
},
ThreadResume => "thread/resume" {
params: v2::ThreadResumeParams,
response: v2::ThreadResumeResponse,
},
ThreadArchive => "thread/archive" {
params: v2::ThreadArchiveParams,
response: v2::ThreadArchiveResponse,
},
ThreadList => "thread/list" {
params: v2::ThreadListParams,
response: v2::ThreadListResponse,
},
ThreadCompact => "thread/compact" {
params: v2::ThreadCompactParams,
response: v2::ThreadCompactResponse,
},
TurnStart => "turn/start" {
params: v2::TurnStartParams,
response: v2::TurnStartResponse,
},
TurnInterrupt => "turn/interrupt" {
params: v2::TurnInterruptParams,
response: v2::TurnInterruptResponse,
},
ReviewStart => "review/start" {
params: v2::ReviewStartParams,
response: v2::ReviewStartResponse,
},
ModelList => "model/list" {
params: v2::ModelListParams,
response: v2::ModelListResponse,
},
LoginAccount => "account/login/start" {
params: v2::LoginAccountParams,
response: v2::LoginAccountResponse,
},
CancelLoginAccount => "account/login/cancel" {
params: v2::CancelLoginAccountParams,
response: v2::CancelLoginAccountResponse,
},
LogoutAccount => "account/logout" {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::LogoutAccountResponse,
},
GetAccountRateLimits => "account/rateLimits/read" {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::GetAccountRateLimitsResponse,
},
FeedbackUpload => "feedback/upload" {
params: v2::FeedbackUploadParams,
response: v2::FeedbackUploadResponse,
},
/// Execute a command (argv vector) under the server's sandbox.
OneOffCommandExec => "command/exec" {
params: v2::CommandExecParams,
response: v2::CommandExecResponse,
},
ConfigRead => "config/read" {
params: v2::ConfigReadParams,
response: v2::ConfigReadResponse,
},
ConfigValueWrite => "config/value/write" {
params: v2::ConfigValueWriteParams,
response: v2::ConfigWriteResponse,
},
ConfigBatchWrite => "config/batchWrite" {
params: v2::ConfigBatchWriteParams,
response: v2::ConfigWriteResponse,
},
GetAccount => "account/read" {
params: v2::GetAccountParams,
response: v2::GetAccountResponse,
},
/// DEPRECATED APIs below
NewConversation {
params: v1::NewConversationParams,
response: v1::NewConversationResponse,
},
GetConversationSummary {
params: v1::GetConversationSummaryParams,
response: v1::GetConversationSummaryResponse,
},
/// List recorded Codex conversations (rollouts) with optional pagination and search.
ListConversations {
params: v1::ListConversationsParams,
response: v1::ListConversationsResponse,
},
/// Resume a recorded Codex conversation from a rollout file.
ResumeConversation {
params: v1::ResumeConversationParams,
response: v1::ResumeConversationResponse,
},
ArchiveConversation {
params: v1::ArchiveConversationParams,
response: v1::ArchiveConversationResponse,
},
SendUserMessage {
params: v1::SendUserMessageParams,
response: v1::SendUserMessageResponse,
},
SendUserTurn {
params: v1::SendUserTurnParams,
response: v1::SendUserTurnResponse,
},
InterruptConversation {
params: v1::InterruptConversationParams,
response: v1::InterruptConversationResponse,
},
AddConversationListener {
params: v1::AddConversationListenerParams,
response: v1::AddConversationSubscriptionResponse,
},
RemoveConversationListener {
params: v1::RemoveConversationListenerParams,
response: v1::RemoveConversationSubscriptionResponse,
},
GitDiffToRemote {
params: v1::GitDiffToRemoteParams,
response: v1::GitDiffToRemoteResponse,
},
LoginApiKey {
params: v1::LoginApiKeyParams,
response: v1::LoginApiKeyResponse,
},
LoginChatGpt {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::LoginChatGptResponse,
},
// DEPRECATED in favor of CancelLoginAccount
CancelLoginChatGpt {
params: v1::CancelLoginChatGptParams,
response: v1::CancelLoginChatGptResponse,
},
LogoutChatGpt {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::LogoutChatGptResponse,
},
/// DEPRECATED in favor of GetAccount
GetAuthStatus {
params: v1::GetAuthStatusParams,
response: v1::GetAuthStatusResponse,
},
GetUserSavedConfig {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::GetUserSavedConfigResponse,
},
SetDefaultModel {
params: v1::SetDefaultModelParams,
response: v1::SetDefaultModelResponse,
},
GetUserAgent {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::GetUserAgentResponse,
},
UserInfo {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::UserInfoResponse,
},
FuzzyFileSearch {
params: FuzzyFileSearchParams,
response: FuzzyFileSearchResponse,
},
/// Execute a command (argv vector) under the server's sandbox.
ExecOneOffCommand {
params: v1::ExecOneOffCommandParams,
response: v1::ExecOneOffCommandResponse,
},
}
/// Generates an `enum ServerRequest` where each variant is a request that the
/// server can send to the client along with the corresponding params and
/// response types. It also generates helper types used by the app/server
/// infrastructure (payload enum, request constructor, and export helpers).
macro_rules! server_request_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident $(=> $wire:literal)? {
params: $params:ty,
response: $response:ty,
}
),* $(,)?
) => {
/// Request initiated from the server and sent to the client.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "method", rename_all = "camelCase")]
pub enum ServerRequest {
$(
$(#[$variant_meta])*
$(#[serde(rename = $wire)] #[ts(rename = $wire)])?
$variant {
#[serde(rename = "id")]
request_id: RequestId,
params: $params,
},
)*
}
#[derive(Debug, Clone, PartialEq, JsonSchema)]
pub enum ServerRequestPayload {
$( $variant($params), )*
}
impl ServerRequestPayload {
pub fn request_with_id(self, request_id: RequestId) -> ServerRequest {
match self {
$(Self::$variant(params) => ServerRequest::$variant { request_id, params },)*
}
}
}
pub fn export_server_responses(
out_dir: &::std::path::Path,
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
$(
<$response as ::ts_rs::TS>::export_all_to(out_dir)?;
)*
Ok(())
}
#[allow(clippy::vec_init_then_push)]
pub fn export_server_response_schemas(
out_dir: &Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(
schemas.push(crate::export::write_json_schema::<$response>(
out_dir,
concat!(stringify!($variant), "Response"),
)?);
)*
Ok(schemas)
}
#[allow(clippy::vec_init_then_push)]
pub fn export_server_param_schemas(
out_dir: &Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(
schemas.push(crate::export::write_json_schema::<$params>(
out_dir,
concat!(stringify!($variant), "Params"),
)?);
)*
Ok(schemas)
}
};
}
/// Generates `ServerNotification` enum and helpers, including a JSON Schema
/// exporter for each notification.
macro_rules! server_notification_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident $(=> $wire:literal)? ( $payload:ty )
),* $(,)?
) => {
/// Notification sent from the server to the client.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ServerNotification {
$(
$(#[$variant_meta])*
$(#[serde(rename = $wire)] #[ts(rename = $wire)] #[strum(serialize = $wire)])?
$variant($payload),
)*
}
impl ServerNotification {
pub fn to_params(self) -> Result<serde_json::Value, serde_json::Error> {
match self {
$(Self::$variant(params) => serde_json::to_value(params),)*
}
}
}
impl TryFrom<JSONRPCNotification> for ServerNotification {
type Error = serde_json::Error;
fn try_from(value: JSONRPCNotification) -> Result<Self, serde_json::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
#[allow(clippy::vec_init_then_push)]
pub fn export_server_notification_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(schemas.push(crate::export::write_json_schema::<$payload>(out_dir, stringify!($payload))?);)*
Ok(schemas)
}
};
}
/// Notifications sent from the client to the server.
macro_rules! client_notification_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident $( ( $payload:ty ) )?
),* $(,)?
) => {
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ClientNotification {
$(
$(#[$variant_meta])*
$variant $( ( $payload ) )?,
)*
}
pub fn export_client_notification_schemas(
_out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$( $(schemas.push(crate::export::write_json_schema::<$payload>(_out_dir, stringify!($payload))?);)? )*
Ok(schemas)
}
};
}
impl TryFrom<JSONRPCRequest> for ServerRequest {
type Error = serde_json::Error;
fn try_from(value: JSONRPCRequest) -> Result<Self, Self::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
server_request_definitions! {
/// NEW APIs
/// Sent when approval is requested for a specific command execution.
/// This request is used for Turns started via turn/start.
CommandExecutionRequestApproval => "item/commandExecution/requestApproval" {
params: v2::CommandExecutionRequestApprovalParams,
response: v2::CommandExecutionRequestApprovalResponse,
},
/// Sent when approval is requested for a specific file change.
/// This request is used for Turns started via turn/start.
FileChangeRequestApproval => "item/fileChange/requestApproval" {
params: v2::FileChangeRequestApprovalParams,
response: v2::FileChangeRequestApprovalResponse,
},
/// DEPRECATED APIs below
/// Request to approve a patch.
/// This request is used for Turns started via the legacy APIs (i.e. SendUserTurn, SendUserMessage).
ApplyPatchApproval {
params: v1::ApplyPatchApprovalParams,
response: v1::ApplyPatchApprovalResponse,
},
/// Request to exec a command.
/// This request is used for Turns started via the legacy APIs (i.e. SendUserTurn, SendUserMessage).
ExecCommandApproval {
params: v1::ExecCommandApprovalParams,
response: v1::ExecCommandApprovalResponse,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
pub struct FuzzyFileSearchParams {
pub query: String,
pub roots: Vec<String>,
// if provided, will cancel any previous request that used the same value
pub cancellation_token: Option<String>,
}
/// Superset of [`codex_file_search::FileMatch`]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct FuzzyFileSearchResult {
pub root: String,
pub path: String,
pub file_name: String,
pub score: u32,
pub indices: Option<Vec<u32>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct FuzzyFileSearchResponse {
pub files: Vec<FuzzyFileSearchResult>,
}
server_notification_definitions! {
/// NEW NOTIFICATIONS
Error => "error" (v2::ErrorNotification),
ThreadStarted => "thread/started" (v2::ThreadStartedNotification),
ThreadTokenUsageUpdated => "thread/tokenUsage/updated" (v2::ThreadTokenUsageUpdatedNotification),
TurnStarted => "turn/started" (v2::TurnStartedNotification),
TurnCompleted => "turn/completed" (v2::TurnCompletedNotification),
TurnDiffUpdated => "turn/diff/updated" (v2::TurnDiffUpdatedNotification),
TurnPlanUpdated => "turn/plan/updated" (v2::TurnPlanUpdatedNotification),
ItemStarted => "item/started" (v2::ItemStartedNotification),
ItemCompleted => "item/completed" (v2::ItemCompletedNotification),
AgentMessageDelta => "item/agentMessage/delta" (v2::AgentMessageDeltaNotification),
CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
FileChangeOutputDelta => "item/fileChange/outputDelta" (v2::FileChangeOutputDeltaNotification),
McpToolCallProgress => "item/mcpToolCall/progress" (v2::McpToolCallProgressNotification),
AccountUpdated => "account/updated" (v2::AccountUpdatedNotification),
AccountRateLimitsUpdated => "account/rateLimits/updated" (v2::AccountRateLimitsUpdatedNotification),
ReasoningSummaryTextDelta => "item/reasoning/summaryTextDelta" (v2::ReasoningSummaryTextDeltaNotification),
ReasoningSummaryPartAdded => "item/reasoning/summaryPartAdded" (v2::ReasoningSummaryPartAddedNotification),
ReasoningTextDelta => "item/reasoning/textDelta" (v2::ReasoningTextDeltaNotification),
ContextCompacted => "thread/compacted" (v2::ContextCompactedNotification),
/// Notifies the user of world-writable directories on Windows, which cannot be protected by the sandbox.
WindowsWorldWritableWarning => "windows/worldWritableWarning" (v2::WindowsWorldWritableWarningNotification),
#[serde(rename = "account/login/completed")]
#[ts(rename = "account/login/completed")]
#[strum(serialize = "account/login/completed")]
AccountLoginCompleted(v2::AccountLoginCompletedNotification),
/// DEPRECATED NOTIFICATIONS below
AuthStatusChange(v1::AuthStatusChangeNotification),
/// Deprecated: use `account/login/completed` instead.
LoginChatGptComplete(v1::LoginChatGptCompleteNotification),
SessionConfigured(v1::SessionConfiguredNotification),
}
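// Illustrative wire shape (not generated output): with `tag = "method"` and
// `content = "params"`, a renamed variant such as `TurnStarted` serializes as
//   {"method":"turn/started","params":{ /* TurnStartedNotification fields */ }}
// while a variant without an explicit rename (e.g. `AuthStatusChange`) falls
// back to the camelCase variant name: {"method":"authStatusChange","params":{...}}.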
client_notification_definitions! {
Initialized,
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::AskForApproval;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::PathBuf;
#[test]
fn serialize_new_conversation() -> Result<()> {
let request = ClientRequest::NewConversation {
request_id: RequestId::Integer(42),
params: v1::NewConversationParams {
model: Some("gpt-5.1-codex-max".to_string()),
model_provider: None,
profile: None,
cwd: None,
approval_policy: Some(AskForApproval::OnRequest),
sandbox: None,
config: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
include_apply_patch_tool: None,
},
};
assert_eq!(
json!({
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5.1-codex-max",
"modelProvider": null,
"profile": null,
"cwd": null,
"approvalPolicy": "on-request",
"sandbox": null,
"config": null,
"baseInstructions": null,
"includeApplyPatchTool": null
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn conversation_id_serializes_as_plain_string() -> Result<()> {
let id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
assert_eq!(
json!("67e55044-10b1-426f-9247-bb680e5fe0c8"),
serde_json::to_value(id)?
);
Ok(())
}
#[test]
fn conversation_id_deserializes_from_plain_string() -> Result<()> {
let id: ConversationId =
serde_json::from_value(json!("67e55044-10b1-426f-9247-bb680e5fe0c8"))?;
assert_eq!(
ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?,
id,
);
Ok(())
}
#[test]
fn serialize_client_notification() -> Result<()> {
let notification = ClientNotification::Initialized;
// Note there is no "params" field for this notification.
assert_eq!(
json!({
"method": "initialized",
}),
serde_json::to_value(&notification)?,
);
Ok(())
}
#[test]
fn serialize_server_request() -> Result<()> {
let conversation_id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
let params = v1::ExecCommandApprovalParams {
conversation_id,
call_id: "call-42".to_string(),
command: vec!["echo".to_string(), "hello".to_string()],
cwd: PathBuf::from("/tmp"),
reason: Some("because tests".to_string()),
risk: None,
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "echo hello".to_string(),
}],
};
let request = ServerRequest::ExecCommandApproval {
request_id: RequestId::Integer(7),
params: params.clone(),
};
assert_eq!(
json!({
"method": "execCommandApproval",
"id": 7,
"params": {
"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"callId": "call-42",
"command": ["echo", "hello"],
"cwd": "/tmp",
"reason": "because tests",
"risk": null,
"parsedCmd": [
{
"type": "unknown",
"cmd": "echo hello"
}
]
}
}),
serde_json::to_value(&request)?,
);
let payload = ServerRequestPayload::ExecCommandApproval(params);
assert_eq!(payload.request_with_id(RequestId::Integer(7)), request);
Ok(())
}
#[test]
fn serialize_get_account_rate_limits() -> Result<()> {
let request = ClientRequest::GetAccountRateLimits {
request_id: RequestId::Integer(1),
params: None,
};
assert_eq!(
json!({
"method": "account/rateLimits/read",
"id": 1,
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_login_api_key() -> Result<()> {
let request = ClientRequest::LoginAccount {
request_id: RequestId::Integer(2),
params: v2::LoginAccountParams::ApiKey {
api_key: "secret".to_string(),
},
};
assert_eq!(
json!({
"method": "account/login/start",
"id": 2,
"params": {
"type": "apiKey",
"apiKey": "secret"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_login_chatgpt() -> Result<()> {
let request = ClientRequest::LoginAccount {
request_id: RequestId::Integer(3),
params: v2::LoginAccountParams::Chatgpt,
};
assert_eq!(
json!({
"method": "account/login/start",
"id": 3,
"params": {
"type": "chatgpt"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_logout() -> Result<()> {
let request = ClientRequest::LogoutAccount {
request_id: RequestId::Integer(4),
params: None,
};
assert_eq!(
json!({
"method": "account/logout",
"id": 4,
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_get_account() -> Result<()> {
let request = ClientRequest::GetAccount {
request_id: RequestId::Integer(5),
params: v2::GetAccountParams {
refresh_token: false,
},
};
assert_eq!(
json!({
"method": "account/read",
"id": 5,
"params": {
"refreshToken": false
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn account_serializes_fields_in_camel_case() -> Result<()> {
let api_key = v2::Account::ApiKey {};
assert_eq!(
json!({
"type": "apiKey",
}),
serde_json::to_value(&api_key)?,
);
let chatgpt = v2::Account::Chatgpt {
email: "user@example.com".to_string(),
plan_type: PlanType::Plus,
};
assert_eq!(
json!({
"type": "chatgpt",
"email": "user@example.com",
"planType": "plus",
}),
serde_json::to_value(&chatgpt)?,
);
Ok(())
}
#[test]
fn serialize_list_models() -> Result<()> {
let request = ClientRequest::ModelList {
request_id: RequestId::Integer(6),
params: v2::ModelListParams::default(),
};
assert_eq!(
json!({
"method": "model/list",
"id": 6,
"params": {
"limit": null,
"cursor": null
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
}

View File

@@ -1,15 +0,0 @@
use crate::protocol::v1;
use crate::protocol::v2;
impl From<v1::ExecOneOffCommandParams> for v2::CommandExecParams {
fn from(value: v1::ExecOneOffCommandParams) -> Self {
Self {
command: value.command,
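// u64 timeouts that overflow i64 fall back to 60_000 ms.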
timeout_ms: value
.timeout_ms
.map(|timeout| i64::try_from(timeout).unwrap_or(60_000)),
cwd: value.cwd,
sandbox_policy: value.sandbox_policy.map(std::convert::Into::into),
}
}
}

View File

@@ -1,8 +0,0 @@
// Module declarations for the app-server protocol namespace.
// Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`.
pub mod common;
mod mappers;
pub mod thread_history;
pub mod v1;
pub mod v2;

View File

@@ -1,413 +0,0 @@
use crate::protocol::v2::ThreadItem;
use crate::protocol::v2::Turn;
use crate::protocol::v2::TurnError;
use crate::protocol::v2::TurnStatus;
use crate::protocol::v2::UserInput;
use codex_protocol::protocol::AgentReasoningEvent;
use codex_protocol::protocol::AgentReasoningRawContentEvent;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::UserMessageEvent;
/// Convert persisted [`EventMsg`] entries into a sequence of [`Turn`] values.
///
/// The purpose of this is to convert the EventMsgs persisted in a rollout file
/// into a sequence of Turns and ThreadItems, which allows the client to render
/// the historical messages when resuming a thread.
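///
/// Illustrative sketch (the unit tests below are the authoritative examples):
///
/// ```ignore
/// let events = vec![EventMsg::UserMessage(UserMessageEvent {
///     message: "hi".into(),
///     images: None,
/// })];
/// let turns = build_turns_from_event_msgs(&events);
/// assert_eq!(turns[0].id, "turn-1");
/// ```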
pub fn build_turns_from_event_msgs(events: &[EventMsg]) -> Vec<Turn> {
let mut builder = ThreadHistoryBuilder::new();
for event in events {
builder.handle_event(event);
}
builder.finish()
}
struct ThreadHistoryBuilder {
turns: Vec<Turn>,
current_turn: Option<PendingTurn>,
next_turn_index: i64,
next_item_index: i64,
}
impl ThreadHistoryBuilder {
fn new() -> Self {
Self {
turns: Vec::new(),
current_turn: None,
next_turn_index: 1,
next_item_index: 1,
}
}
fn finish(mut self) -> Vec<Turn> {
self.finish_current_turn();
self.turns
}
/// This function should handle all EventMsg variants that can be persisted in a rollout file.
/// See `should_persist_event_msg` in `codex-rs/core/rollout/policy.rs`.
fn handle_event(&mut self, event: &EventMsg) {
match event {
EventMsg::UserMessage(payload) => self.handle_user_message(payload),
EventMsg::AgentMessage(payload) => self.handle_agent_message(payload.message.clone()),
EventMsg::AgentReasoning(payload) => self.handle_agent_reasoning(payload),
EventMsg::AgentReasoningRawContent(payload) => {
self.handle_agent_reasoning_raw_content(payload)
}
EventMsg::TokenCount(_) => {}
EventMsg::EnteredReviewMode(_) => {}
EventMsg::ExitedReviewMode(_) => {}
EventMsg::UndoCompleted(_) => {}
EventMsg::TurnAborted(payload) => self.handle_turn_aborted(payload),
_ => {}
}
}
fn handle_user_message(&mut self, payload: &UserMessageEvent) {
self.finish_current_turn();
let mut turn = self.new_turn();
let id = self.next_item_id();
let content = self.build_user_inputs(payload);
turn.items.push(ThreadItem::UserMessage { id, content });
self.current_turn = Some(turn);
}
fn handle_agent_message(&mut self, text: String) {
if text.is_empty() {
return;
}
let id = self.next_item_id();
self.ensure_turn()
.items
.push(ThreadItem::AgentMessage { id, text });
}
fn handle_agent_reasoning(&mut self, payload: &AgentReasoningEvent) {
if payload.text.is_empty() {
return;
}
// If the last item is a reasoning item, add the new text to the summary.
if let Some(ThreadItem::Reasoning { summary, .. }) = self.ensure_turn().items.last_mut() {
summary.push(payload.text.clone());
return;
}
// Otherwise, create a new reasoning item.
let id = self.next_item_id();
self.ensure_turn().items.push(ThreadItem::Reasoning {
id,
summary: vec![payload.text.clone()],
content: Vec::new(),
});
}
fn handle_agent_reasoning_raw_content(&mut self, payload: &AgentReasoningRawContentEvent) {
if payload.text.is_empty() {
return;
}
// If the last item is a reasoning item, add the new text to the content.
if let Some(ThreadItem::Reasoning { content, .. }) = self.ensure_turn().items.last_mut() {
content.push(payload.text.clone());
return;
}
// Otherwise, create a new reasoning item.
let id = self.next_item_id();
self.ensure_turn().items.push(ThreadItem::Reasoning {
id,
summary: Vec::new(),
content: vec![payload.text.clone()],
});
}
fn handle_turn_aborted(&mut self, _payload: &TurnAbortedEvent) {
let Some(turn) = self.current_turn.as_mut() else {
return;
};
turn.status = TurnStatus::Interrupted;
}
fn finish_current_turn(&mut self) {
if let Some(turn) = self.current_turn.take() {
if turn.items.is_empty() {
return;
}
self.turns.push(turn.into());
}
}
fn new_turn(&mut self) -> PendingTurn {
PendingTurn {
id: self.next_turn_id(),
items: Vec::new(),
error: None,
status: TurnStatus::Completed,
}
}
fn ensure_turn(&mut self) -> &mut PendingTurn {
if self.current_turn.is_none() {
let turn = self.new_turn();
self.current_turn = Some(turn);
}
self.current_turn
.as_mut()
.expect("current turn must exist after initialization")
}
fn next_turn_id(&mut self) -> String {
let id = format!("turn-{}", self.next_turn_index);
self.next_turn_index += 1;
id
}
fn next_item_id(&mut self) -> String {
let id = format!("item-{}", self.next_item_index);
self.next_item_index += 1;
id
}
fn build_user_inputs(&self, payload: &UserMessageEvent) -> Vec<UserInput> {
let mut content = Vec::new();
if !payload.message.trim().is_empty() {
content.push(UserInput::Text {
text: payload.message.clone(),
});
}
if let Some(images) = &payload.images {
for image in images {
content.push(UserInput::Image { url: image.clone() });
}
}
content
}
}
struct PendingTurn {
id: String,
items: Vec<ThreadItem>,
error: Option<TurnError>,
status: TurnStatus,
}
impl From<PendingTurn> for Turn {
fn from(value: PendingTurn) -> Self {
Self {
id: value.id,
items: value.items,
error: value.error,
status: value.status,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use codex_protocol::protocol::AgentMessageEvent;
use codex_protocol::protocol::AgentReasoningEvent;
use codex_protocol::protocol::AgentReasoningRawContentEvent;
use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::UserMessageEvent;
use pretty_assertions::assert_eq;
#[test]
fn builds_multiple_turns_with_reasoning_items() {
let events = vec![
EventMsg::UserMessage(UserMessageEvent {
message: "First turn".into(),
images: Some(vec!["https://example.com/one.png".into()]),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "Hi there".into(),
}),
EventMsg::AgentReasoning(AgentReasoningEvent {
text: "thinking".into(),
}),
EventMsg::AgentReasoningRawContent(AgentReasoningRawContentEvent {
text: "full reasoning".into(),
}),
EventMsg::UserMessage(UserMessageEvent {
message: "Second turn".into(),
images: None,
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "Reply two".into(),
}),
];
let turns = build_turns_from_event_msgs(&events);
assert_eq!(turns.len(), 2);
let first = &turns[0];
assert_eq!(first.id, "turn-1");
assert_eq!(first.status, TurnStatus::Completed);
assert_eq!(first.items.len(), 3);
assert_eq!(
first.items[0],
ThreadItem::UserMessage {
id: "item-1".into(),
content: vec![
UserInput::Text {
text: "First turn".into(),
},
UserInput::Image {
url: "https://example.com/one.png".into(),
}
],
}
);
assert_eq!(
first.items[1],
ThreadItem::AgentMessage {
id: "item-2".into(),
text: "Hi there".into(),
}
);
assert_eq!(
first.items[2],
ThreadItem::Reasoning {
id: "item-3".into(),
summary: vec!["thinking".into()],
content: vec!["full reasoning".into()],
}
);
let second = &turns[1];
assert_eq!(second.id, "turn-2");
assert_eq!(second.items.len(), 2);
assert_eq!(
second.items[0],
ThreadItem::UserMessage {
id: "item-4".into(),
content: vec![UserInput::Text {
text: "Second turn".into()
}],
}
);
assert_eq!(
second.items[1],
ThreadItem::AgentMessage {
id: "item-5".into(),
text: "Reply two".into(),
}
);
}
#[test]
fn splits_reasoning_when_interleaved() {
let events = vec![
EventMsg::UserMessage(UserMessageEvent {
message: "Turn start".into(),
images: None,
}),
EventMsg::AgentReasoning(AgentReasoningEvent {
text: "first summary".into(),
}),
EventMsg::AgentReasoningRawContent(AgentReasoningRawContentEvent {
text: "first content".into(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "interlude".into(),
}),
EventMsg::AgentReasoning(AgentReasoningEvent {
text: "second summary".into(),
}),
];
let turns = build_turns_from_event_msgs(&events);
assert_eq!(turns.len(), 1);
let turn = &turns[0];
assert_eq!(turn.items.len(), 4);
assert_eq!(
turn.items[1],
ThreadItem::Reasoning {
id: "item-2".into(),
summary: vec!["first summary".into()],
content: vec!["first content".into()],
}
);
assert_eq!(
turn.items[3],
ThreadItem::Reasoning {
id: "item-4".into(),
summary: vec!["second summary".into()],
content: Vec::new(),
}
);
}
#[test]
fn marks_turn_as_interrupted_when_aborted() {
let events = vec![
EventMsg::UserMessage(UserMessageEvent {
message: "Please do the thing".into(),
images: None,
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "Working...".into(),
}),
EventMsg::TurnAborted(TurnAbortedEvent {
reason: TurnAbortReason::Replaced,
}),
EventMsg::UserMessage(UserMessageEvent {
message: "Let's try again".into(),
images: None,
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "Second attempt complete.".into(),
}),
];
let turns = build_turns_from_event_msgs(&events);
assert_eq!(turns.len(), 2);
let first_turn = &turns[0];
assert_eq!(first_turn.status, TurnStatus::Interrupted);
assert_eq!(first_turn.items.len(), 2);
assert_eq!(
first_turn.items[0],
ThreadItem::UserMessage {
id: "item-1".into(),
content: vec![UserInput::Text {
text: "Please do the thing".into()
}],
}
);
assert_eq!(
first_turn.items[1],
ThreadItem::AgentMessage {
id: "item-2".into(),
text: "Working...".into(),
}
);
let second_turn = &turns[1];
assert_eq!(second_turn.status, TurnStatus::Completed);
assert_eq!(second_turn.items.len(), 2);
assert_eq!(
second_turn.items[0],
ThreadItem::UserMessage {
id: "item-3".into(),
content: vec![UserInput::Text {
text: "Let's try again".into()
}],
}
);
assert_eq!(
second_turn.items[1],
ThreadItem::AgentMessage {
id: "item-4".into(),
text: "Second attempt complete.".into(),
}
);
}
}

View File

@@ -1,462 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::models::ResponseItem;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::FileChange;
use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::SandboxCommandAssessment;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::TurnAbortReason;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use ts_rs::TS;
use uuid::Uuid;
// Reuse shared types defined in `common.rs`.
use crate::protocol::common::AuthMode;
use crate::protocol::common::GitSha;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitializeParams {
pub client_info: ClientInfo,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ClientInfo {
pub name: String,
pub title: Option<String>,
pub version: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitializeResponse {
pub user_agent: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationParams {
pub model: Option<String>,
pub model_provider: Option<String>,
pub profile: Option<String>,
pub cwd: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox: Option<SandboxMode>,
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub developer_instructions: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub compact_prompt: Option<String>,
pub include_apply_patch_tool: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
pub reasoning_effort: Option<ReasoningEffort>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
pub initial_messages: Option<Vec<EventMsg>>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(untagged)]
pub enum GetConversationSummaryParams {
RolloutPath {
#[serde(rename = "rolloutPath")]
rollout_path: PathBuf,
},
ConversationId {
#[serde(rename = "conversationId")]
conversation_id: ConversationId,
},
}
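// Untagged: accepts either {"rolloutPath": "..."} or {"conversationId": "..."}.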
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetConversationSummaryResponse {
pub summary: ConversationSummary,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListConversationsParams {
pub page_size: Option<usize>,
pub cursor: Option<String>,
pub model_providers: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ConversationSummary {
pub conversation_id: ConversationId,
pub path: PathBuf,
pub preview: String,
pub timestamp: Option<String>,
pub model_provider: String,
pub cwd: PathBuf,
pub cli_version: String,
pub source: SessionSource,
pub git_info: Option<ConversationGitInfo>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub struct ConversationGitInfo {
pub sha: Option<String>,
pub branch: Option<String>,
pub origin_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListConversationsResponse {
pub items: Vec<ConversationSummary>,
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationParams {
pub path: Option<PathBuf>,
pub conversation_id: Option<ConversationId>,
pub history: Option<Vec<ResponseItem>>,
pub overrides: Option<NewConversationParams>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationSubscriptionResponse {
#[schemars(with = "String")]
pub subscription_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationParams {
pub conversation_id: ConversationId,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationSubscriptionResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyParams {
pub api_key: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginChatGptResponse {
#[schemars(with = "String")]
pub login_id: Uuid,
pub auth_url: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GitDiffToRemoteResponse {
pub sha: GitSha,
pub diff: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ApplyPatchApprovalParams {
pub conversation_id: ConversationId,
/// Use to correlate this with [codex_core::protocol::PatchApplyBeginEvent]
/// and [codex_core::protocol::PatchApplyEndEvent].
pub call_id: String,
pub file_changes: HashMap<PathBuf, FileChange>,
/// Optional explanatory reason (e.g. request for extra write access).
pub reason: Option<String>,
/// When set, the agent is asking the user to allow writes under this root
/// for the remainder of the session (unclear if this is honored today).
pub grant_root: Option<PathBuf>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ApplyPatchApprovalResponse {
pub decision: ReviewDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecCommandApprovalParams {
pub conversation_id: ConversationId,
/// Use to correlate this with [codex_core::protocol::ExecCommandBeginEvent]
/// and [codex_core::protocol::ExecCommandEndEvent].
pub call_id: String,
pub command: Vec<String>,
pub cwd: PathBuf,
pub reason: Option<String>,
pub risk: Option<SandboxCommandAssessment>,
pub parsed_cmd: Vec<ParsedCommand>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct ExecCommandApprovalResponse {
pub decision: ReviewDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct CancelLoginChatGptParams {
#[schemars(with = "String")]
pub login_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GitDiffToRemoteParams {
pub cwd: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct CancelLoginChatGptResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutChatGptParams {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutChatGptResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAuthStatusParams {
pub include_token: Option<bool>,
pub refresh_token: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecOneOffCommandParams {
pub command: Vec<String>,
pub timeout_ms: Option<u64>,
pub cwd: Option<PathBuf>,
pub sandbox_policy: Option<SandboxPolicy>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecOneOffCommandResponse {
pub exit_code: i32,
pub stdout: String,
pub stderr: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAuthStatusResponse {
pub auth_method: Option<AuthMode>,
pub auth_token: Option<String>,
pub requires_openai_auth: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserAgentResponse {
pub user_agent: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserInfoResponse {
pub alleged_user_email: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserSavedConfigResponse {
pub config: UserSavedConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelParams {
pub model: Option<String>,
pub reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelResponse {}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserSavedConfig {
pub approval_policy: Option<AskForApproval>,
pub sandbox_mode: Option<SandboxMode>,
pub sandbox_settings: Option<SandboxSettings>,
pub forced_chatgpt_workspace_id: Option<String>,
pub forced_login_method: Option<ForcedLoginMethod>,
pub model: Option<String>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub tools: Option<Tools>,
pub profile: Option<String>,
pub profiles: HashMap<String, Profile>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Profile {
pub model: Option<String>,
pub model_provider: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub chatgpt_base_url: Option<String>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Tools {
pub web_search: Option<bool>,
pub view_image: Option<bool>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SandboxSettings {
#[serde(default)]
pub writable_roots: Vec<PathBuf>,
pub network_access: Option<bool>,
pub exclude_tmpdir_env_var: Option<bool>,
pub exclude_slash_tmp: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageParams {
pub conversation_id: ConversationId,
pub items: Vec<InputItem>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnParams {
pub conversation_id: ConversationId,
pub items: Vec<InputItem>,
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
pub model: String,
pub effort: Option<ReasoningEffort>,
pub summary: ReasoningSummary,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationParams {
pub conversation_id: ConversationId,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationResponse {
pub abort_reason: TurnAbortReason,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationListenerParams {
pub conversation_id: ConversationId,
#[serde(default)]
pub experimental_raw_events: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationListenerParams {
#[schemars(with = "String")]
pub subscription_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type", content = "data")]
pub enum InputItem {
Text { text: String },
Image { image_url: String },
LocalImage { path: PathBuf },
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
/// Deprecated in favor of AccountLoginCompletedNotification.
pub struct LoginChatGptCompleteNotification {
#[schemars(with = "String")]
pub login_id: Uuid,
pub success: bool,
pub error: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SessionConfiguredNotification {
pub session_id: ConversationId,
pub model: String,
pub reasoning_effort: Option<ReasoningEffort>,
pub history_log_id: u64,
#[ts(type = "number")]
pub history_entry_count: usize,
pub initial_messages: Option<Vec<EventMsg>>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
/// Deprecated notification. Use AccountUpdatedNotification instead.
pub struct AuthStatusChangeNotification {
pub auth_method: Option<AuthMode>,
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
[package]
name = "codex-app-server-test-client"
version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive", "env"] }
codex-app-server-protocol = { workspace = true }
codex-protocol = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
uuid = { workspace = true, features = ["v4"] }

View File

@@ -1,2 +0,0 @@
# App Server Test Client
Exercises simple `codex app-server` flows end-to-end, logging JSON-RPC messages sent between client and server to stdout.
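A typical invocation (illustrative; `--codex-bin` defaults to the `codex` binary on `PATH` and can also be set via the `CODEX_BIN` env var):

```
cargo run -p codex-app-server-test-client -- send-message "hello"
```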

View File

@@ -1,862 +0,0 @@
use std::collections::VecDeque;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Write;
use std::process::Child;
use std::process::ChildStdin;
use std::process::ChildStdout;
use std::process::Command;
use std::process::Stdio;
use std::thread;
use std::time::Duration;
use anyhow::Context;
use anyhow::Result;
use anyhow::bail;
use clap::Parser;
use clap::Subcommand;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::CommandExecutionRequestAcceptSettings;
use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::FileChangeRequestApprovalParams;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InitializeResponse;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxPolicy;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_protocol::ConversationId;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json::Value;
use uuid::Uuid;
/// Minimal launcher that initializes the Codex app-server and logs the handshake.
#[derive(Parser)]
#[command(author = "Codex", version, about = "Bootstrap Codex app-server", long_about = None)]
struct Cli {
/// Path to the `codex` CLI binary.
#[arg(long, env = "CODEX_BIN", default_value = "codex")]
codex_bin: String,
#[command(subcommand)]
command: CliCommand,
}
#[derive(Subcommand)]
enum CliCommand {
/// Send a user message through the Codex app-server.
SendMessage {
/// User message to send to Codex.
#[arg()]
user_message: String,
},
/// Send a user message through the app-server V2 thread/turn APIs.
SendMessageV2 {
/// User message to send to Codex.
#[arg()]
user_message: String,
},
/// Start a V2 turn that elicits an ExecCommand approval.
#[command(name = "trigger-cmd-approval")]
TriggerCmdApproval {
/// Optional prompt; defaults to a simple python command.
#[arg()]
user_message: Option<String>,
},
/// Start a V2 turn that elicits an ApplyPatch approval.
#[command(name = "trigger-patch-approval")]
TriggerPatchApproval {
/// Optional prompt; defaults to creating a file via apply_patch.
#[arg()]
user_message: Option<String>,
},
/// Start a V2 turn that should not elicit an ExecCommand approval.
#[command(name = "no-trigger-cmd-approval")]
NoTriggerCmdApproval,
/// Send two sequential V2 turns in the same thread to test follow-up behavior.
SendFollowUpV2 {
/// Initial user message for the first turn.
#[arg()]
first_message: String,
/// Follow-up user message for the second turn.
#[arg()]
follow_up_message: String,
},
/// Trigger the ChatGPT login flow and wait for completion.
TestLogin,
/// Fetch the current account rate limits from the Codex app-server.
GetAccountRateLimits,
}
fn main() -> Result<()> {
let Cli { codex_bin, command } = Cli::parse();
match command {
CliCommand::SendMessage { user_message } => send_message(codex_bin, user_message),
CliCommand::SendMessageV2 { user_message } => send_message_v2(codex_bin, user_message),
CliCommand::TriggerCmdApproval { user_message } => {
trigger_cmd_approval(codex_bin, user_message)
}
CliCommand::TriggerPatchApproval { user_message } => {
trigger_patch_approval(codex_bin, user_message)
}
CliCommand::NoTriggerCmdApproval => no_trigger_cmd_approval(codex_bin),
CliCommand::SendFollowUpV2 {
first_message,
follow_up_message,
} => send_follow_up_v2(codex_bin, first_message, follow_up_message),
CliCommand::TestLogin => test_login(codex_bin),
CliCommand::GetAccountRateLimits => get_account_rate_limits(codex_bin),
}
}
fn send_message(codex_bin: String, user_message: String) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
let conversation = client.new_conversation()?;
println!("< newConversation response: {conversation:?}");
let subscription = client.add_conversation_listener(&conversation.conversation_id)?;
println!("< addConversationListener response: {subscription:?}");
let send_response = client.send_user_message(&conversation.conversation_id, &user_message)?;
println!("< sendUserMessage response: {send_response:?}");
client.stream_conversation(&conversation.conversation_id)?;
client.remove_conversation_listener(subscription.subscription_id)?;
Ok(())
}
fn send_message_v2(codex_bin: String, user_message: String) -> Result<()> {
send_message_v2_with_policies(codex_bin, user_message, None, None)
}
fn trigger_cmd_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
let default_prompt =
"Run `touch /tmp/should-trigger-approval` so I can confirm the file exists.";
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
send_message_v2_with_policies(
codex_bin,
message,
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::ReadOnly),
)
}
fn trigger_patch_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
let default_prompt =
"Create a file named APPROVAL_DEMO.txt containing a short hello message using apply_patch.";
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
send_message_v2_with_policies(
codex_bin,
message,
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::ReadOnly),
)
}
fn no_trigger_cmd_approval(codex_bin: String) -> Result<()> {
let prompt = "Run `touch should_not_trigger_approval.txt`";
send_message_v2_with_policies(codex_bin, prompt.to_string(), None, None)
}
fn send_message_v2_with_policies(
codex_bin: String,
user_message: String,
approval_policy: Option<AskForApproval>,
sandbox_policy: Option<SandboxPolicy>,
) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
let thread_response = client.thread_start(ThreadStartParams::default())?;
println!("< thread/start response: {thread_response:?}");
let mut turn_params = TurnStartParams {
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text { text: user_message }],
..Default::default()
};
turn_params.approval_policy = approval_policy;
turn_params.sandbox_policy = sandbox_policy;
let turn_response = client.turn_start(turn_params)?;
println!("< turn/start response: {turn_response:?}");
client.stream_turn(&thread_response.thread.id, &turn_response.turn.id)?;
Ok(())
}
fn send_follow_up_v2(
codex_bin: String,
first_message: String,
follow_up_message: String,
) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
let thread_response = client.thread_start(ThreadStartParams::default())?;
println!("< thread/start response: {thread_response:?}");
let first_turn_params = TurnStartParams {
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: first_message,
}],
..Default::default()
};
let first_turn_response = client.turn_start(first_turn_params)?;
println!("< turn/start response (initial): {first_turn_response:?}");
client.stream_turn(&thread_response.thread.id, &first_turn_response.turn.id)?;
let follow_up_params = TurnStartParams {
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: follow_up_message,
}],
..Default::default()
};
let follow_up_response = client.turn_start(follow_up_params)?;
println!("< turn/start response (follow-up): {follow_up_response:?}");
client.stream_turn(&thread_response.thread.id, &follow_up_response.turn.id)?;
Ok(())
}
fn test_login(codex_bin: String) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
let login_response = client.login_chat_gpt()?;
println!("< loginChatGpt response: {login_response:?}");
println!(
"Open the following URL in your browser to continue:\n{}",
login_response.auth_url
);
let completion = client.wait_for_login_completion(&login_response.login_id)?;
println!("< loginChatGptComplete notification: {completion:?}");
if completion.success {
println!("Login succeeded.");
Ok(())
} else {
bail!(
"login failed: {}",
completion
.error
.as_deref()
.unwrap_or("unknown error from loginChatGptComplete")
);
}
}
fn get_account_rate_limits(codex_bin: String) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
let response = client.get_account_rate_limits()?;
println!("< account/rateLimits/read response: {response:?}");
Ok(())
}
struct CodexClient {
child: Child,
stdin: Option<ChildStdin>,
stdout: BufReader<ChildStdout>,
pending_notifications: VecDeque<JSONRPCNotification>,
}
impl CodexClient {
fn spawn(codex_bin: String) -> Result<Self> {
let mut codex_app_server = Command::new(&codex_bin)
.arg("app-server")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::inherit())
.spawn()
.with_context(|| format!("failed to start `{codex_bin}` app-server"))?;
let stdin = codex_app_server
.stdin
.take()
.context("codex app-server stdin unavailable")?;
let stdout = codex_app_server
.stdout
.take()
.context("codex app-server stdout unavailable")?;
Ok(Self {
child: codex_app_server,
stdin: Some(stdin),
stdout: BufReader::new(stdout),
pending_notifications: VecDeque::new(),
})
}
fn initialize(&mut self) -> Result<InitializeResponse> {
let request_id = self.request_id();
let request = ClientRequest::Initialize {
request_id: request_id.clone(),
params: InitializeParams {
client_info: ClientInfo {
name: "codex-toy-app-server".to_string(),
title: Some("Codex Toy App Server".to_string()),
version: env!("CARGO_PKG_VERSION").to_string(),
},
},
};
self.send_request(request, request_id, "initialize")
}
fn new_conversation(&mut self) -> Result<NewConversationResponse> {
let request_id = self.request_id();
let request = ClientRequest::NewConversation {
request_id: request_id.clone(),
params: NewConversationParams::default(),
};
self.send_request(request, request_id, "newConversation")
}
fn add_conversation_listener(
&mut self,
conversation_id: &ConversationId,
) -> Result<AddConversationSubscriptionResponse> {
let request_id = self.request_id();
let request = ClientRequest::AddConversationListener {
request_id: request_id.clone(),
params: AddConversationListenerParams {
conversation_id: *conversation_id,
experimental_raw_events: false,
},
};
self.send_request(request, request_id, "addConversationListener")
}
fn remove_conversation_listener(&mut self, subscription_id: Uuid) -> Result<()> {
let request_id = self.request_id();
let request = ClientRequest::RemoveConversationListener {
request_id: request_id.clone(),
params: codex_app_server_protocol::RemoveConversationListenerParams { subscription_id },
};
self.send_request::<codex_app_server_protocol::RemoveConversationSubscriptionResponse>(
request,
request_id,
"removeConversationListener",
)?;
Ok(())
}
fn send_user_message(
&mut self,
conversation_id: &ConversationId,
message: &str,
) -> Result<SendUserMessageResponse> {
let request_id = self.request_id();
let request = ClientRequest::SendUserMessage {
request_id: request_id.clone(),
params: SendUserMessageParams {
conversation_id: *conversation_id,
items: vec![InputItem::Text {
text: message.to_string(),
}],
},
};
self.send_request(request, request_id, "sendUserMessage")
}
fn thread_start(&mut self, params: ThreadStartParams) -> Result<ThreadStartResponse> {
let request_id = self.request_id();
let request = ClientRequest::ThreadStart {
request_id: request_id.clone(),
params,
};
self.send_request(request, request_id, "thread/start")
}
fn turn_start(&mut self, params: TurnStartParams) -> Result<TurnStartResponse> {
let request_id = self.request_id();
let request = ClientRequest::TurnStart {
request_id: request_id.clone(),
params,
};
self.send_request(request, request_id, "turn/start")
}
fn login_chat_gpt(&mut self) -> Result<LoginChatGptResponse> {
let request_id = self.request_id();
let request = ClientRequest::LoginChatGpt {
request_id: request_id.clone(),
params: None,
};
self.send_request(request, request_id, "loginChatGpt")
}
fn get_account_rate_limits(&mut self) -> Result<GetAccountRateLimitsResponse> {
let request_id = self.request_id();
let request = ClientRequest::GetAccountRateLimits {
request_id: request_id.clone(),
params: None,
};
self.send_request(request, request_id, "account/rateLimits/read")
}
fn stream_conversation(&mut self, conversation_id: &ConversationId) -> Result<()> {
loop {
let notification = self.next_notification()?;
if !notification.method.starts_with("codex/event/") {
continue;
}
if let Some(event) = self.extract_event(notification, conversation_id)? {
match &event.msg {
EventMsg::AgentMessage(event) => {
println!("{}", event.message);
}
EventMsg::AgentMessageDelta(event) => {
print!("{}", event.delta);
std::io::stdout().flush().ok();
}
EventMsg::TaskComplete(event) => {
println!("\n[task complete: {event:?}]");
break;
}
EventMsg::TurnAborted(event) => {
println!("\n[turn aborted: {:?}]", event.reason);
break;
}
EventMsg::Error(event) => {
println!("[error] {event:?}");
}
_ => {
println!("[UNKNOWN EVENT] {:?}", event.msg);
}
}
}
}
Ok(())
}
fn wait_for_login_completion(
&mut self,
expected_login_id: &Uuid,
) -> Result<LoginChatGptCompleteNotification> {
loop {
let notification = self.next_notification()?;
if let Ok(server_notification) = ServerNotification::try_from(notification) {
match server_notification {
ServerNotification::LoginChatGptComplete(completion) => {
if &completion.login_id == expected_login_id {
return Ok(completion);
}
println!(
"[ignoring loginChatGptComplete for unexpected login_id: {}]",
completion.login_id
);
}
ServerNotification::AuthStatusChange(status) => {
println!("< authStatusChange notification: {status:?}");
}
ServerNotification::AccountRateLimitsUpdated(snapshot) => {
println!("< accountRateLimitsUpdated notification: {snapshot:?}");
}
ServerNotification::SessionConfigured(_) => {
// SessionConfigured notifications are unrelated to login; skip.
}
_ => {}
}
}
// Not a server notification (likely a conversation event); keep waiting.
}
}
fn stream_turn(&mut self, thread_id: &str, turn_id: &str) -> Result<()> {
loop {
let notification = self.next_notification()?;
let Ok(server_notification) = ServerNotification::try_from(notification) else {
continue;
};
match server_notification {
ServerNotification::ThreadStarted(payload) => {
if payload.thread.id == thread_id {
println!("< thread/started notification: {:?}", payload.thread);
}
}
ServerNotification::TurnStarted(payload) => {
if payload.turn.id == turn_id {
println!("< turn/started notification: {:?}", payload.turn.status);
}
}
ServerNotification::AgentMessageDelta(delta) => {
print!("{}", delta.delta);
std::io::stdout().flush().ok();
}
ServerNotification::CommandExecutionOutputDelta(delta) => {
print!("{}", delta.delta);
std::io::stdout().flush().ok();
}
ServerNotification::ItemStarted(payload) => {
println!("\n< item started: {:?}", payload.item);
}
ServerNotification::ItemCompleted(payload) => {
println!("< item completed: {:?}", payload.item);
}
ServerNotification::TurnCompleted(payload) => {
if payload.turn.id == turn_id {
println!("\n< turn/completed notification: {:?}", payload.turn.status);
if payload.turn.status == TurnStatus::Failed
&& let Some(error) = payload.turn.error
{
println!("[turn error] {}", error.message);
}
break;
}
}
ServerNotification::McpToolCallProgress(payload) => {
println!("< MCP tool progress: {}", payload.message);
}
_ => {
println!("[UNKNOWN SERVER NOTIFICATION] {server_notification:?}");
}
}
}
Ok(())
}
fn extract_event(
&self,
notification: JSONRPCNotification,
conversation_id: &ConversationId,
) -> Result<Option<Event>> {
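// `codex/event/*` notifications wrap an Event envelope: params carry a
// `conversationId` field plus the fields of the Event itself.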
let params = notification
.params
.context("event notification missing params")?;
let mut map = match params {
Value::Object(map) => map,
other => bail!("unexpected params shape: {other:?}"),
};
let conversation_value = map
.remove("conversationId")
.context("event missing conversationId")?;
let notification_conversation: ConversationId = serde_json::from_value(conversation_value)
.context("conversationId was not a valid UUID")?;
if &notification_conversation != conversation_id {
return Ok(None);
}
let event_value = Value::Object(map);
let event: Event =
serde_json::from_value(event_value).context("failed to decode event payload")?;
Ok(Some(event))
}
fn send_request<T>(
&mut self,
request: ClientRequest,
request_id: RequestId,
method: &str,
) -> Result<T>
where
T: DeserializeOwned,
{
self.write_request(&request)?;
self.wait_for_response(request_id, method)
}
fn write_request(&mut self, request: &ClientRequest) -> Result<()> {
let request_json = serde_json::to_string(request)?;
let request_pretty = serde_json::to_string_pretty(request)?;
print_multiline_with_prefix("> ", &request_pretty);
if let Some(stdin) = self.stdin.as_mut() {
writeln!(stdin, "{request_json}")?;
stdin
.flush()
.context("failed to flush request to codex app-server")?;
} else {
bail!("codex app-server stdin closed");
}
Ok(())
}
fn wait_for_response<T>(&mut self, request_id: RequestId, method: &str) -> Result<T>
where
T: DeserializeOwned,
{
loop {
let message = self.read_jsonrpc_message()?;
match message {
JSONRPCMessage::Response(JSONRPCResponse { id, result }) => {
if id == request_id {
return serde_json::from_value(result)
.with_context(|| format!("{method} response missing payload"));
}
}
JSONRPCMessage::Error(err) => {
if err.id == request_id {
bail!("{method} failed: {err:?}");
}
}
JSONRPCMessage::Notification(notification) => {
self.pending_notifications.push_back(notification);
}
JSONRPCMessage::Request(request) => {
self.handle_server_request(request)?;
}
}
}
}
fn next_notification(&mut self) -> Result<JSONRPCNotification> {
if let Some(notification) = self.pending_notifications.pop_front() {
return Ok(notification);
}
loop {
let message = self.read_jsonrpc_message()?;
match message {
JSONRPCMessage::Notification(notification) => return Ok(notification),
JSONRPCMessage::Response(_) | JSONRPCMessage::Error(_) => {
// No outstanding requests, so ignore stray responses/errors for now.
continue;
}
JSONRPCMessage::Request(request) => {
self.handle_server_request(request)?;
}
}
}
}
fn read_jsonrpc_message(&mut self) -> Result<JSONRPCMessage> {
loop {
let mut response_line = String::new();
let bytes = self
.stdout
.read_line(&mut response_line)
.context("failed to read from codex app-server")?;
if bytes == 0 {
bail!("codex app-server closed stdout");
}
let trimmed = response_line.trim();
if trimmed.is_empty() {
continue;
}
let parsed: Value =
serde_json::from_str(trimmed).context("response was not valid JSON-RPC")?;
let pretty = serde_json::to_string_pretty(&parsed)?;
print_multiline_with_prefix("< ", &pretty);
let message: JSONRPCMessage = serde_json::from_value(parsed)
.context("response was not a valid JSON-RPC message")?;
return Ok(message);
}
}
fn request_id(&self) -> RequestId {
RequestId::String(Uuid::new_v4().to_string())
}
fn handle_server_request(&mut self, request: JSONRPCRequest) -> Result<()> {
let server_request = ServerRequest::try_from(request)
.context("failed to deserialize ServerRequest from JSONRPCRequest")?;
match server_request {
ServerRequest::CommandExecutionRequestApproval { request_id, params } => {
self.handle_command_execution_request_approval(request_id, params)?;
}
ServerRequest::FileChangeRequestApproval { request_id, params } => {
self.approve_file_change_request(request_id, params)?;
}
other => {
bail!("received unsupported server request: {other:?}");
}
}
Ok(())
}
fn handle_command_execution_request_approval(
&mut self,
request_id: RequestId,
params: CommandExecutionRequestApprovalParams,
) -> Result<()> {
let CommandExecutionRequestApprovalParams {
thread_id,
turn_id,
item_id,
reason,
risk,
} = params;
println!(
"\n< commandExecution approval requested for thread {thread_id}, turn {turn_id}, item {item_id}"
);
if let Some(reason) = reason.as_deref() {
println!("< reason: {reason}");
}
if let Some(risk) = risk.as_ref() {
println!("< risk assessment: {risk:?}");
}
let response = CommandExecutionRequestApprovalResponse {
decision: ApprovalDecision::Accept,
accept_settings: Some(CommandExecutionRequestAcceptSettings { for_session: false }),
};
self.send_server_request_response(request_id, &response)?;
println!("< approved commandExecution request for item {item_id}");
Ok(())
}
fn approve_file_change_request(
&mut self,
request_id: RequestId,
params: FileChangeRequestApprovalParams,
) -> Result<()> {
let FileChangeRequestApprovalParams {
thread_id,
turn_id,
item_id,
reason,
grant_root,
} = params;
println!(
"\n< fileChange approval requested for thread {thread_id}, turn {turn_id}, item {item_id}"
);
if let Some(reason) = reason.as_deref() {
println!("< reason: {reason}");
}
if let Some(grant_root) = grant_root.as_deref() {
println!("< grant root: {}", grant_root.display());
}
let response = FileChangeRequestApprovalResponse {
decision: ApprovalDecision::Accept,
};
self.send_server_request_response(request_id, &response)?;
println!("< approved fileChange request for item {item_id}");
Ok(())
}
fn send_server_request_response<T>(&mut self, request_id: RequestId, response: &T) -> Result<()>
where
T: Serialize,
{
let message = JSONRPCMessage::Response(JSONRPCResponse {
id: request_id,
result: serde_json::to_value(response)?,
});
self.write_jsonrpc_message(message)
}
fn write_jsonrpc_message(&mut self, message: JSONRPCMessage) -> Result<()> {
let payload = serde_json::to_string(&message)?;
let pretty = serde_json::to_string_pretty(&message)?;
print_multiline_with_prefix("> ", &pretty);
if let Some(stdin) = self.stdin.as_mut() {
writeln!(stdin, "{payload}")?;
stdin
.flush()
.context("failed to flush response to codex app-server")?;
return Ok(());
}
bail!("codex app-server stdin closed")
}
}
fn print_multiline_with_prefix(prefix: &str, payload: &str) {
for line in payload.lines() {
println!("{prefix}{line}");
}
}
impl Drop for CodexClient {
fn drop(&mut self) {
let _ = self.stdin.take();
if let Ok(Some(status)) = self.child.try_wait() {
println!("[codex app-server exited: {status}]");
return;
}
thread::sleep(Duration::from_millis(100));
if let Ok(Some(status)) = self.child.try_wait() {
println!("[codex app-server exited: {status}]");
return;
}
let _ = self.child.kill();
let _ = self.child.wait();
}
}

View File

@@ -1,58 +0,0 @@
[package]
name = "codex-app-server"
version.workspace = true
edition.workspace = true
license.workspace = true
[[bin]]
name = "codex-app-server"
path = "src/main.rs"
[lib]
name = "codex_app_server"
path = "src/lib.rs"
[lints]
workspace = true
[dependencies]
anyhow = { workspace = true }
codex-arg0 = { workspace = true }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-backend-client = { workspace = true }
codex-file-search = { workspace = true }
codex-login = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-feedback = { workspace = true }
codex-utils-json-to-toml = { workspace = true }
chrono = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
tokio = { workspace = true, features = [
"io-std",
"macros",
"process",
"rt-multi-thread",
"signal",
] }
tracing = { workspace = true, features = ["log"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
opentelemetry-appender-tracing = { workspace = true }
uuid = { workspace = true, features = ["serde", "v7"] }
[dev-dependencies]
app_test_support = { workspace = true }
assert_cmd = { workspace = true }
base64 = { workspace = true }
core_test_support = { workspace = true }
mcp-types = { workspace = true }
os_info = { workspace = true }
pretty_assertions = { workspace = true }
serial_test = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }

View File

@@ -1,444 +0,0 @@
# codex-app-server
`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt).
## Table of Contents
- [Protocol](#protocol)
- [Message Schema](#message-schema)
- [Lifecycle Overview](#lifecycle-overview)
- [Initialization](#initialization)
- [Core primitives](#core-primitives)
- [Thread & turn endpoints](#thread--turn-endpoints)
- [Events (work-in-progress)](#events-work-in-progress)
- [Auth endpoints](#auth-endpoints)
## Protocol
Similar to [MCP](https://modelcontextprotocol.io/), `codex app-server` supports bidirectional communication, streaming JSONL over stdio. The protocol is JSON-RPC 2.0, though the `"jsonrpc":"2.0"` field is omitted.
## Message Schema
Currently, you can dump a TypeScript version of the schema using `codex app-server generate-ts`, or a JSON Schema bundle via `codex app-server generate-json-schema`. Each output is specific to the version of Codex you used to run the command, so the generated artifacts are guaranteed to match that version.
```
codex app-server generate-ts --out DIR
codex app-server generate-json-schema --out DIR
```
## Lifecycle Overview
- Initialize once: Immediately after launching the codex app-server process, send an `initialize` request with your client metadata, then emit an `initialized` notification. Any other request before this handshake gets rejected.
- Start (or resume) a thread: Call `thread/start` to open a fresh conversation. The response returns the thread object and you'll also get a `thread/started` notification. If you're continuing an existing conversation, call `thread/resume` with its ID instead.
- Begin a turn: To send user input, call `turn/start` with the target `threadId` and the user's input. Optional fields let you override model, cwd, sandbox policy, etc. This immediately returns the new turn object and triggers a `turn/started` notification.
- Stream events: After `turn/start`, keep reading JSON-RPC notifications on stdout. You'll see `item/started`, `item/completed`, deltas like `item/agentMessage/delta`, tool progress, etc. These represent streaming model output plus any side effects (commands, tool calls, reasoning notes).
- Finish the turn: When the model is done (or the turn is interrupted via `turn/interrupt`), the server sends `turn/completed` with the final turn state and token usage.
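Putting the thread and turn steps together (the `initialize` handshake is shown in the next section), a minimal exchange might look like the sketch below; ids, item types, and the exact set of streamed notifications are illustrative:
```json
{ "method": "thread/start", "id": 1, "params": {} }
{ "id": 1, "result": { "thread": { "id": "thr_123", … } } }
{ "method": "thread/started", "params": { "thread": { "id": "thr_123", … } } }
{ "method": "turn/start", "id": 2, "params": { "threadId": "thr_123", "input": [ { "type": "text", "text": "Run tests" } ] } }
{ "id": 2, "result": { "turn": { "id": "turn_456", "status": "inProgress", "items": [], "error": null } } }
{ "method": "item/started", "params": { "item": { "type": "agentMessage", "id": "item_1", "text": "" } } }
{ "method": "item/agentMessage/delta", "params": { "itemId": "item_1", "delta": "All tests pass." } }
{ "method": "item/completed", "params": { "item": { "type": "agentMessage", "id": "item_1", "text": "All tests pass." } } }
{ "method": "turn/completed", "params": { "turn": { "id": "turn_456", "status": "completed", "items": [], "error": null } } }
```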
## Initialization
Clients must send a single `initialize` request before invoking any other method, then acknowledge with an `initialized` notification. The server returns the user agent string it will present to upstream services; subsequent requests issued before initialization receive a `"Not initialized"` error, and repeated `initialize` calls receive an `"Already initialized"` error.
Example:
```json
{ "method": "initialize", "id": 0, "params": {
"clientInfo": { "name": "codex-vscode", "title": "Codex VS Code Extension", "version": "0.1.0" }
} }
{ "id": 0, "result": { "userAgent": "codex-app-server/0.1.0 codex-vscode/0.1.0" } }
{ "method": "initialized" }
```
## Core primitives
There are three top-level primitives:
- Thread - a conversation between the Codex agent and a user. Each thread contains multiple turns.
- Turn - one turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
- Item - represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations.
## Thread & turn endpoints
The JSON-RPC API exposes dedicated methods for managing Codex conversations. Threads store long-lived conversation metadata, and turns store the per-message exchange (input → Codex output, including streamed items). Use the thread APIs to create, list, or archive sessions, then drive the conversation with turn APIs and notifications.
### Quick reference
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
- `thread/archive` — move a thread's rollout file into the archived directory; returns `{}` on success.
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
- `review/start` — kick off Codex's automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
### 1) Start or resume a thread
Start a fresh thread when you need a new Codex conversation.
```json
{ "method": "thread/start", "id": 10, "params": {
// Optionally set config settings. If not specified, will use the user's
// current config settings.
"model": "gpt-5.1-codex",
"cwd": "/Users/me/project",
"approvalPolicy": "never",
"sandbox": "workspaceWrite",
} }
{ "id": 10, "result": {
"thread": {
"id": "thr_123",
"preview": "",
"modelProvider": "openai",
"createdAt": 1730910000
}
} }
{ "method": "thread/started", "params": { "thread": { } } }
```
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted:
```json
{ "method": "thread/resume", "id": 11, "params": { "threadId": "thr_123" } }
{ "id": 11, "result": { "thread": { "id": "thr_123", } } }
```
### 2) List threads (pagination & filters)
`thread/list` lets you render a history UI. Pass any combination of:
- `cursor` — opaque string from a prior response; omit for the first page.
- `limit` — server defaults to a reasonable page size if unset.
- `modelProviders` — restrict results to specific providers; unset, null, or an empty array will include all providers.
Example:
```json
{ "method": "thread/list", "id": 20, "params": {
"cursor": null,
"limit": 25,
} }
{ "id": 20, "result": {
"data": [
{ "id": "thr_a", "preview": "Create a TUI", "modelProvider": "openai", "createdAt": 1730831111 },
{ "id": "thr_b", "preview": "Fix tests", "modelProvider": "openai", "createdAt": 1730750000 }
],
"nextCursor": "opaque-token-or-null"
} }
```
When `nextCursor` is `null`, you've reached the final page.
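To fetch the next page, pass the returned token back verbatim (the cursor is opaque; values here are illustrative):
```json
{ "method": "thread/list", "id": 22, "params": { "cursor": "opaque-token-or-null", "limit": 25 } }
{ "id": 22, "result": { "data": [ … ], "nextCursor": null } }
```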
### 3) Archive a thread
Use `thread/archive` to move the persisted rollout (stored as a JSONL file on disk) into the archived sessions directory.
```json
{ "method": "thread/archive", "id": 21, "params": { "threadId": "thr_b" } }
{ "id": 21, "result": {} }
```
An archived thread will not appear in future calls to `thread/list`.
### 4) Start a turn (send user input)
Turns attach user input (text or images) to a thread and trigger Codex generation. The `input` field is a list of discriminated unions:
- `{"type":"text","text":"Explain this diff"}`
- `{"type":"image","url":"https://…png"}`
- `{"type":"localImage","path":"/tmp/screenshot.png"}`
You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread.
```json
{ "method": "turn/start", "id": 30, "params": {
"threadId": "thr_123",
"input": [ { "type": "text", "text": "Run tests" } ],
// Below are optional config overrides
"cwd": "/Users/me/project",
"approvalPolicy": "unlessTrusted",
"sandboxPolicy": {
"mode": "workspaceWrite",
"writableRoots": ["/Users/me/project"],
"networkAccess": true
},
"model": "gpt-5.1-codex",
"effort": "medium",
"summary": "concise"
} }
{ "id": 30, "result": { "turn": {
"id": "turn_456",
"status": "inProgress",
"items": [],
"error": null
} } }
```
### 5) Interrupt an active turn
You can cancel a running turn with `turn/interrupt`.
```json
{ "method": "turn/interrupt", "id": 31, "params": {
"threadId": "thr_123",
"turnId": "turn_456"
} }
{ "id": 31, "result": {} }
```
The server requests cancellation of any running subprocesses, then emits a `turn/completed` event with `status: "interrupted"`. Rely on that `turn/completed` notification to know when Codex-side cleanup is done.
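The terminal notification for an interrupted turn looks roughly like this (fields abbreviated):
```json
{ "method": "turn/completed", "params": { "turn": { "id": "turn_456", "status": "interrupted", "items": [], "error": null } } }
```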
### 6) Request a code review
Use `review/start` to run Codex's reviewer on the currently checked-out project. The request takes the thread id plus a `target` describing what should be reviewed:
- `{"type":"uncommittedChanges"}` — staged, unstaged, and untracked files.
- `{"type":"baseBranch","branch":"main"}` — diff against the provided branchs upstream (see prompt for the exact `git merge-base`/`git diff` instructions Codex will run).
- `{"type":"commit","sha":"abc1234","title":"Optional subject"}` — review a specific commit.
- `{"type":"custom","instructions":"Free-form reviewer instructions"}` — fallback prompt equivalent to the legacy manual review request.
The request also accepts `delivery` (`"inline"` or `"detached"`, default `"inline"`), which controls where the review runs:
- `"inline"`: run the review as a new turn on the existing thread. The responses `reviewThreadId` equals the original `threadId`, and no new `thread/started` notification is emitted.
- `"detached"`: fork a new review thread from the parent conversation and run the review there. The responses `reviewThreadId` is the id of this new review thread, and the server emits a `thread/started` notification for it before streaming review items.
Example request/response:
```json
{ "method": "review/start", "id": 40, "params": {
"threadId": "thr_123",
"delivery": "inline",
"target": { "type": "commit", "sha": "1234567deadbeef", "title": "Polish tui colors" }
} }
{ "id": 40, "result": {
"turn": {
"id": "turn_900",
"status": "inProgress",
"items": [
{ "type": "userMessage", "id": "turn_900", "content": [ { "type": "text", "text": "Review commit 1234567: Polish tui colors" } ] }
],
"error": null
},
"reviewThreadId": "thr_123"
} }
```
For a detached review, use `"delivery": "detached"`. The response is the same shape, but `reviewThreadId` will be the id of the new review thread (different from the original `threadId`). The server also emits a `thread/started` notification for that new thread before streaming the review turn.
Codex streams the usual `turn/started` notification followed by an `item/started`
with an `enteredReviewMode` item so clients can show progress:
```json
{ "method": "item/started", "params": { "item": {
"type": "enteredReviewMode",
"id": "turn_900",
"review": "current changes"
} } }
```
When the reviewer finishes, the server emits `item/started` and `item/completed`
containing an `exitedReviewMode` item with the final review text:
```json
{ "method": "item/completed", "params": { "item": {
"type": "exitedReviewMode",
"id": "turn_900",
"review": "Looks solid overall...\n\n- Prefer Stylize helpers — app.rs:10-20\n ..."
} } }
```
The `review` string is plain text that already bundles the overall explanation plus a bullet list for each structured finding (matching `ThreadItem::ExitedReviewMode` in the generated schema). Use this notification to render the reviewer output in your client.
### 7) One-off command execution
Run a standalone command (argv vector) in the server's sandbox without creating a thread or turn:
```json
{ "method": "command/exec", "id": 32, "params": {
"command": ["ls", "-la"],
"cwd": "/Users/me/project", // optional; defaults to server cwd
"sandboxPolicy": { "type": "workspaceWrite" }, // optional; defaults to user config
"timeoutMs": 10000 // optional; ms timeout; defaults to server timeout
} }
{ "id": 32, "result": { "exitCode": 0, "stdout": "...", "stderr": "" } }
```
Notes:
- Empty `command` arrays are rejected.
- `sandboxPolicy` accepts the same shape used by `turn/start` (e.g., `dangerFullAccess`, `readOnly`, `workspaceWrite` with flags).
- When omitted, `timeoutMs` falls back to the server default.
## Events (work-in-progress)
Event notifications are the server-initiated event stream for thread lifecycles, turn lifecycles, and the items within them. After you start or resume a thread, keep reading stdout for `thread/started`, `turn/*`, and `item/*` notifications.
### Turn events
The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` status). Token usage events stream separately via `thread/tokenUsage/updated`. Clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.
- `turn/started``{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
- `turn/completed``{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
- `turn/plan/updated``{ turnId, explanation?, plan }` whenever the agent shares or changes its plan; each `plan` entry is `{ step, status }` with `status` in `pending`, `inProgress`, or `completed`.
Today both notifications carry an empty `items` array even when item events were streamed; rely on `item/*` notifications for the canonical item list until this is fixed.
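For example, a failed turn ends with a payload along these lines (values illustrative; the exact casing of `codexErrorInfo` values is defined by the generated schema):
```json
{ "method": "turn/completed", "params": { "turn": { "id": "turn_456", "status": "failed", "items": [], "error": { "message": "usage limit reached", "codexErrorInfo": "UsageLimitExceeded" } } } }
```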
#### Thread items
`ThreadItem` is the tagged union carried in turn responses and `item/*` notifications. Currently we support events for the following items:
- `userMessage``{id, content}` where `content` is a list of user inputs (`text`, `image`, or `localImage`).
- `agentMessage``{id, text}` containing the accumulated agent reply.
- `reasoning``{id, summary, content}` where `summary` holds streamed reasoning summaries (applicable for most OpenAI models) and `content` holds raw reasoning blocks (applicable for e.g. open source models).
- `commandExecution``{id, command, cwd, status, commandActions, aggregatedOutput?, exitCode?, durationMs?}` for sandboxed commands; `status` is `inProgress`, `completed`, `failed`, or `declined`.
- `fileChange``{id, changes, status}` describing proposed edits; `changes` list `{path, kind, diff}` and `status` is `inProgress`, `completed`, `failed`, or `declined`.
- `mcpToolCall``{id, server, tool, status, arguments, result?, error?}` describing MCP calls; `status` is `inProgress`, `completed`, or `failed`.
- `webSearch``{id, query}` for a web search request issued by the agent.
- `compacted` — `{threadId, turnId}` emitted when Codex compacts the conversation history. This can happen automatically.
All items emit two shared lifecycle events:
- `item/started` — emits the full `item` when a new unit of work begins so the UI can render it immediately; the `item.id` in this payload matches the `itemId` used by deltas.
- `item/completed` — sends the final `item` once that work finishes (e.g., after a tool call or message completes); treat this as the authoritative state.
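For a simple item with no item-specific deltas (a `webSearch`, say), the shared lifecycle is just the pair (values illustrative):
```json
{ "method": "item/started", "params": { "item": { "type": "webSearch", "id": "item_2", "query": "ratatui color themes" } } }
{ "method": "item/completed", "params": { "item": { "type": "webSearch", "id": "item_2", "query": "ratatui color themes" } } }
```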
There are additional item-specific events:
#### agentMessage
- `item/agentMessage/delta` — appends streamed text for the agent message; concatenate `delta` values for the same `itemId` in order to reconstruct the full reply.
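For instance, two deltas followed by the terminal item reconstruct the full reply (ids illustrative):
```json
{ "method": "item/agentMessage/delta", "params": { "itemId": "item_1", "delta": "Hello, " } }
{ "method": "item/agentMessage/delta", "params": { "itemId": "item_1", "delta": "world." } }
{ "method": "item/completed", "params": { "item": { "type": "agentMessage", "id": "item_1", "text": "Hello, world." } } }
```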
#### reasoning
- `item/reasoning/summaryTextDelta` — streams readable reasoning summaries; `summaryIndex` increments when a new summary section opens.
- `item/reasoning/summaryPartAdded` — marks the boundary between reasoning summary sections for an `itemId`; subsequent `summaryTextDelta` entries share the same `summaryIndex`.
- `item/reasoning/textDelta` — streams raw reasoning text (only applicable to, e.g., open-source models); use `contentIndex` to group deltas that belong together before showing them in the UI.
#### commandExecution
- `item/commandExecution/outputDelta` — streams stdout/stderr for the command; append deltas in order to render live output alongside `aggregatedOutput` in the final item.
Final `commandExecution` items include parsed `commandActions`, `status`, `exitCode`, and `durationMs` so the UI can summarize what ran and whether it succeeded.
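A sketch of a final `commandExecution` item (fields abbreviated; the exact shapes of `command` and `commandActions` are defined by the generated schema):
```json
{ "method": "item/completed", "params": { "item": { "type": "commandExecution", "id": "item_3", "command": "ls -la", "cwd": "/Users/me/project", "status": "completed", "aggregatedOutput": "total 16\n…", "exitCode": 0, "durationMs": 42 } } }
```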
#### fileChange
`fileChange` items contain a `changes` list with `{path, kind, diff}` entries (`kind` is `add`, `delete`, or `update` with an optional `movePath`). The `status` tracks whether apply succeeded (`completed`), failed, or was `declined`.
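A sketch of a completed `fileChange` item (diff elided):
```json
{ "method": "item/completed", "params": { "item": { "type": "fileChange", "id": "item_4", "status": "completed", "changes": [ { "path": "src/app.rs", "kind": "update", "diff": "…" } ] } } }
```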
### Errors
The `error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). It carries the same `{ error: { message, codexErrorInfo? } }` payload as a turn with `status: "failed"` and may precede that terminal notification.
`codexErrorInfo` maps to the `CodexErrorInfo` enum. Common values:
- `ContextWindowExceeded`
- `UsageLimitExceeded`
- `HttpConnectionFailed { httpStatusCode? }`: upstream HTTP failures including 4xx/5xx
- `ResponseStreamConnectionFailed { httpStatusCode? }`: failure to connect to the response SSE stream
- `ResponseStreamDisconnected { httpStatusCode? }`: disconnect of the response SSE stream in the middle of a turn before completion
- `ResponseTooManyFailedAttempts { httpStatusCode? }`
- `BadRequest`
- `Unauthorized`
- `SandboxError`
- `InternalServerError`
- `Other`: all unclassified errors
When an upstream HTTP status is available (for example, from the Responses API or a provider), it is forwarded in `httpStatusCode` on the relevant `codexErrorInfo` variant.
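An illustrative mid-turn `error` notification (the exact envelope and enum encoding follow the generated schema):
```json
{ "method": "error", "params": { "error": { "message": "context window exceeded", "codexErrorInfo": "ContextWindowExceeded" } } }
```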
## Approvals
Certain actions (running shell commands, modifying files) may require explicit user approval depending on the user's config. When `turn/start` is used, the app-server drives an approval flow by sending a server-initiated JSON-RPC request to the client. The client must respond to tell Codex whether to proceed. UIs should present these requests inline with the active turn so users can review the proposed command or diff before choosing.
- Requests include `threadId` and `turnId`—use them to scope UI state to the active conversation.
- Respond with a single `{ "decision": "accept" | "decline" }` payload (plus optional `acceptSettings` on command executions). The server resumes or declines the work and ends the item with `item/completed`.
### Command execution approvals
Order of messages:
1. `item/started` — shows the pending `commandExecution` item with `command`, `cwd`, and other fields so you can render the proposed action.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `reason` or `risk`, plus `parsedCmd` for friendly display.
3. Client response — `{ "decision": "accept", "acceptSettings": { "forSession": false } }` or `{ "decision": "decline" }`.
4. `item/completed` — final `commandExecution` item with `status: "completed" | "failed" | "declined"` and execution output. Render this as the authoritative result.
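A condensed wire sketch of steps 2–3 (only documented fields shown; the full payload follows the generated schema):
```json
{ "method": "item/commandExecution/requestApproval", "id": 7, "params": { "threadId": "thr_123", "turnId": "turn_456", "itemId": "item_3", "reason": "command writes outside the workspace" } }
{ "id": 7, "result": { "decision": "accept", "acceptSettings": { "forSession": false } } }
```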
### File change approvals
Order of messages:
1. `item/started` — emits a `fileChange` item with `changes` (diff chunk summaries) and `status: "inProgress"`. Show the proposed edits and paths to the user.
2. `item/fileChange/requestApproval` (request) — includes `itemId`, `threadId`, `turnId`, and an optional `reason`.
3. Client response — `{ "decision": "accept" }` or `{ "decision": "decline" }`.
4. `item/completed` — returns the same `fileChange` item with `status` updated to `completed`, `failed`, or `declined` after the patch attempt. Rely on this to show success/failure and finalize the diff state in your UI.
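And the analogous exchange for a file change:
```json
{ "method": "item/fileChange/requestApproval", "id": 8, "params": { "threadId": "thr_123", "turnId": "turn_456", "itemId": "item_4" } }
{ "id": 8, "result": { "decision": "accept" } }
```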
UI guidance for IDEs: surface an approval dialog as soon as the request arrives. The turn will proceed after the server receives a response to the approval request. The terminal `item/completed` notification will be sent with the appropriate status.
## Auth endpoints
The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.
### Quick reference
- `account/read` — fetch current account info; optionally refresh tokens.
- `account/login/start` — begin login (`apiKey` or `chatgpt`).
- `account/login/completed` (notify) — emitted when a login attempt finishes (success or error).
- `account/login/cancel` — cancel a pending ChatGPT login by `loginId`.
- `account/logout` — sign out; triggers `account/updated`.
- `account/updated` (notify) — emitted whenever auth mode changes (`authMode`: `apikey`, `chatgpt`, or `null`).
- `account/rateLimits/read` — fetch ChatGPT rate limits; updates arrive via `account/rateLimits/updated` (notify).
### 1) Check auth state
Request:
```json
{ "method": "account/read", "id": 1, "params": { "refreshToken": false } }
```
Response examples:
```json
{ "id": 1, "result": { "account": null, "requiresOpenaiAuth": false } } // No OpenAI auth needed (e.g., OSS/local models)
{ "id": 1, "result": { "account": null, "requiresOpenaiAuth": true } } // OpenAI auth required (typical for OpenAI-hosted models)
{ "id": 1, "result": { "account": { "type": "apiKey" }, "requiresOpenaiAuth": true } }
{ "id": 1, "result": { "account": { "type": "chatgpt", "email": "user@example.com", "planType": "pro" }, "requiresOpenaiAuth": true } }
```
Field notes:
- `refreshToken` (bool): set `true` to force a token refresh.
- `requiresOpenaiAuth` reflects the active provider; when `false`, Codex can run without OpenAI credentials.
### 2) Log in with an API key
1. Send:
```json
{ "method": "account/login/start", "id": 2, "params": { "type": "apiKey", "apiKey": "sk-…" } }
```
2. Expect:
```json
{ "id": 2, "result": { "type": "apiKey" } }
```
3. Notifications:
```json
{ "method": "account/login/completed", "params": { "loginId": null, "success": true, "error": null } }
{ "method": "account/updated", "params": { "authMode": "apikey" } }
```
### 3) Log in with ChatGPT (browser flow)
1. Start:
```json
{ "method": "account/login/start", "id": 3, "params": { "type": "chatgpt" } }
{ "id": 3, "result": { "type": "chatgpt", "loginId": "<uuid>", "authUrl": "https://chatgpt.com/…&redirect_uri=http%3A%2F%2Flocalhost%3A<port>%2Fauth%2Fcallback" } }
```
2. Open `authUrl` in a browser; the app-server hosts the local callback.
3. Wait for notifications:
```json
{ "method": "account/login/completed", "params": { "loginId": "<uuid>", "success": true, "error": null } }
{ "method": "account/updated", "params": { "authMode": "chatgpt" } }
```
### 4) Cancel a ChatGPT login
```json
{ "method": "account/login/cancel", "id": 4, "params": { "loginId": "<uuid>" } }
{ "method": "account/login/completed", "params": { "loginId": "<uuid>", "success": false, "error": "…" } }
```
### 5) Logout
```json
{ "method": "account/logout", "id": 5 }
{ "id": 5, "result": {} }
{ "method": "account/updated", "params": { "authMode": null } }
```
### 6) Rate limits (ChatGPT)
```json
{ "method": "account/rateLimits/read", "id": 6 }
{ "id": 6, "result": { "rateLimits": { "primary": { "usedPercent": 25, "windowDurationMins": 15, "resetsAt": 1730947200 }, "secondary": null } } }
{ "method": "account/rateLimits/updated", "params": { "rateLimits": { … } } }
```
Field notes:
- `usedPercent` is current usage within the OpenAI quota window.
- `windowDurationMins` is the quota window length.
- `resetsAt` is a Unix timestamp (seconds) for the next reset.
### Dev notes
- `codex app-server generate-ts --out <dir>` emits v2 types under `v2/`.
- `codex app-server generate-json-schema --out <dir>` outputs `codex_app_server_protocol.schemas.json`.
- See [“Authentication and authorization” in the config docs](../../docs/config.md#authentication-and-authorization) for configuration knobs.

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,974 +0,0 @@
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use anyhow::anyhow;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigLayer;
use codex_app_server_protocol::ConfigLayerMetadata;
use codex_app_server_protocol::ConfigLayerName;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigReadResponse;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteErrorCode;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::MergeStrategy;
use codex_app_server_protocol::OverriddenMetadata;
use codex_app_server_protocol::WriteStatus;
use codex_core::config::ConfigToml;
use codex_core::config_loader::LoadedConfigLayers;
use codex_core::config_loader::LoaderOverrides;
use codex_core::config_loader::load_config_layers_with_overrides;
use codex_core::config_loader::merge_toml_values;
use serde_json::Value as JsonValue;
use serde_json::json;
use sha2::Digest;
use sha2::Sha256;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use tokio::task;
use toml::Value as TomlValue;
const SESSION_FLAGS_SOURCE: &str = "--config";
const MDM_SOURCE: &str = "com.openai.codex/config_toml_base64";
const CONFIG_FILE_NAME: &str = "config.toml";
#[derive(Clone)]
pub(crate) struct ConfigApi {
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
}
impl ConfigApi {
pub(crate) fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self {
Self {
codex_home,
cli_overrides,
loader_overrides: LoaderOverrides::default(),
}
}
#[cfg(test)]
fn with_overrides(
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
) -> Self {
Self {
codex_home,
cli_overrides,
loader_overrides,
}
}
pub(crate) async fn read(
&self,
params: ConfigReadParams,
) -> Result<ConfigReadResponse, JSONRPCErrorError> {
let layers = self
.load_layers_state()
.await
.map_err(|err| internal_error("failed to read configuration layers", err))?;
let effective = layers.effective_config();
validate_config(&effective).map_err(|err| internal_error("invalid configuration", err))?;
let response = ConfigReadResponse {
config: to_json_value(&effective),
origins: layers.origins(),
layers: params.include_layers.then(|| layers.layers_high_to_low()),
};
Ok(response)
}
pub(crate) async fn write_value(
&self,
params: ConfigValueWriteParams,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
let edits = vec![(params.key_path, params.value, params.merge_strategy)];
self.apply_edits(params.file_path, params.expected_version, edits)
.await
}
pub(crate) async fn batch_write(
&self,
params: ConfigBatchWriteParams,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
let edits = params
.edits
.into_iter()
.map(|edit| (edit.key_path, edit.value, edit.merge_strategy))
.collect();
self.apply_edits(params.file_path, params.expected_version, edits)
.await
}
async fn apply_edits(
&self,
file_path: String,
expected_version: Option<String>,
edits: Vec<(String, JsonValue, MergeStrategy)>,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
let allowed_path = self.codex_home.join(CONFIG_FILE_NAME);
if !paths_match(&allowed_path, &file_path) {
return Err(config_write_error(
ConfigWriteErrorCode::ConfigLayerReadonly,
"Only writes to the user config are allowed",
));
}
let layers = self
.load_layers_state()
.await
.map_err(|err| internal_error("failed to load configuration", err))?;
if let Some(expected) = expected_version.as_deref()
&& expected != layers.user.version
{
return Err(config_write_error(
ConfigWriteErrorCode::ConfigVersionConflict,
"Configuration was modified since last read. Fetch latest version and retry.",
));
}
let mut user_config = layers.user.config.clone();
let mut mutated = false;
let mut parsed_segments = Vec::new();
for (key_path, value, strategy) in edits.into_iter() {
let segments = parse_key_path(&key_path).map_err(|message| {
config_write_error(ConfigWriteErrorCode::ConfigValidationError, message)
})?;
let parsed_value = parse_value(value).map_err(|message| {
config_write_error(ConfigWriteErrorCode::ConfigValidationError, message)
})?;
let changed = apply_merge(&mut user_config, &segments, parsed_value.as_ref(), strategy)
.map_err(|err| match err {
MergeError::PathNotFound => config_write_error(
ConfigWriteErrorCode::ConfigPathNotFound,
"Path not found",
),
MergeError::Validation(message) => {
config_write_error(ConfigWriteErrorCode::ConfigValidationError, message)
}
})?;
mutated |= changed;
parsed_segments.push(segments);
}
validate_config(&user_config).map_err(|err| {
config_write_error(
ConfigWriteErrorCode::ConfigValidationError,
format!("Invalid configuration: {err}"),
)
})?;
let updated_layers = layers.with_user_config(user_config.clone());
let effective = updated_layers.effective_config();
validate_config(&effective).map_err(|err| {
config_write_error(
ConfigWriteErrorCode::ConfigValidationError,
format!("Invalid configuration: {err}"),
)
})?;
if mutated {
self.persist_user_config(&user_config)
.await
.map_err(|err| internal_error("failed to persist config.toml", err))?;
}
let overridden = first_overridden_edit(&updated_layers, &effective, &parsed_segments);
let status = overridden
.as_ref()
.map(|_| WriteStatus::OkOverridden)
.unwrap_or(WriteStatus::Ok);
Ok(ConfigWriteResponse {
status,
version: updated_layers.user.version.clone(),
overridden_metadata: overridden,
})
}
async fn load_layers_state(&self) -> std::io::Result<LayersState> {
let LoadedConfigLayers {
base,
managed_config,
managed_preferences,
} = load_config_layers_with_overrides(&self.codex_home, self.loader_overrides.clone())
.await?;
let user = LayerState::new(
ConfigLayerName::User,
self.codex_home.join(CONFIG_FILE_NAME),
base,
);
let session_flags = LayerState::new(
ConfigLayerName::SessionFlags,
PathBuf::from(SESSION_FLAGS_SOURCE),
{
let mut root = TomlValue::Table(toml::map::Map::new());
for (path, value) in self.cli_overrides.iter() {
apply_override(&mut root, path, value.clone());
}
root
},
);
let system = managed_config.map(|cfg| {
LayerState::new(
ConfigLayerName::System,
system_config_path(&self.codex_home),
cfg,
)
});
let mdm = managed_preferences
.map(|cfg| LayerState::new(ConfigLayerName::Mdm, PathBuf::from(MDM_SOURCE), cfg));
Ok(LayersState {
user,
session_flags,
system,
mdm,
})
}
async fn persist_user_config(&self, user_config: &TomlValue) -> anyhow::Result<()> {
let codex_home = self.codex_home.clone();
let serialized = toml::to_string_pretty(user_config)?;
task::spawn_blocking(move || -> anyhow::Result<()> {
std::fs::create_dir_all(&codex_home)?;
let target = codex_home.join(CONFIG_FILE_NAME);
let tmp = NamedTempFile::new_in(&codex_home)?;
std::fs::write(tmp.path(), serialized.as_bytes())?;
tmp.persist(&target)?;
Ok(())
})
.await
.map_err(|err| anyhow!("config persistence task panicked: {err}"))??;
Ok(())
}
}
fn parse_value(value: JsonValue) -> Result<Option<TomlValue>, String> {
if value.is_null() {
return Ok(None);
}
serde_json::from_value::<TomlValue>(value)
.map(Some)
.map_err(|err| format!("invalid value: {err}"))
}
fn parse_key_path(path: &str) -> Result<Vec<String>, String> {
if path.trim().is_empty() {
return Err("keyPath must not be empty".to_string());
}
Ok(path
.split('.')
.map(std::string::ToString::to_string)
.collect())
}
fn apply_override(target: &mut TomlValue, path: &str, value: TomlValue) {
use toml::value::Table;
let segments: Vec<&str> = path.split('.').collect();
let mut current = target;
for (idx, segment) in segments.iter().enumerate() {
let is_last = idx == segments.len() - 1;
if is_last {
match current {
TomlValue::Table(table) => {
table.insert(segment.to_string(), value);
}
_ => {
let mut table = Table::new();
table.insert(segment.to_string(), value);
*current = TomlValue::Table(table);
}
}
return;
}
match current {
TomlValue::Table(table) => {
current = table
.entry((*segment).to_string())
.or_insert_with(|| TomlValue::Table(Table::new()));
}
_ => {
*current = TomlValue::Table(Table::new());
if let TomlValue::Table(tbl) = current {
current = tbl
.entry((*segment).to_string())
.or_insert_with(|| TomlValue::Table(Table::new()));
}
}
}
}
}
#[derive(Debug)]
enum MergeError {
PathNotFound,
Validation(String),
}
fn apply_merge(
root: &mut TomlValue,
segments: &[String],
value: Option<&TomlValue>,
strategy: MergeStrategy,
) -> Result<bool, MergeError> {
let Some(value) = value else {
return clear_path(root, segments);
};
let Some((last, parents)) = segments.split_last() else {
return Err(MergeError::Validation(
"keyPath must not be empty".to_string(),
));
};
let mut current = root;
for segment in parents {
match current {
TomlValue::Table(table) => {
current = table
.entry(segment.clone())
.or_insert_with(|| TomlValue::Table(toml::map::Map::new()));
}
_ => {
*current = TomlValue::Table(toml::map::Map::new());
if let TomlValue::Table(table) = current {
current = table
.entry(segment.clone())
.or_insert_with(|| TomlValue::Table(toml::map::Map::new()));
}
}
}
}
let table = current.as_table_mut().ok_or_else(|| {
MergeError::Validation("cannot set value on non-table parent".to_string())
})?;
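// Upsert deep-merges a table value into an existing table; every other case falls through to a plain replace below.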
if matches!(strategy, MergeStrategy::Upsert)
&& let Some(existing) = table.get_mut(last)
&& matches!(existing, TomlValue::Table(_))
&& matches!(value, TomlValue::Table(_))
{
merge_toml_values(existing, value);
return Ok(true);
}
let changed = table
.get(last)
.map(|existing| Some(existing) != Some(value))
.unwrap_or(true);
table.insert(last.clone(), value.clone());
Ok(changed)
}
fn clear_path(root: &mut TomlValue, segments: &[String]) -> Result<bool, MergeError> {
let Some((last, parents)) = segments.split_last() else {
return Err(MergeError::Validation(
"keyPath must not be empty".to_string(),
));
};
let mut current = root;
for segment in parents {
match current {
TomlValue::Table(table) => {
current = table.get_mut(segment).ok_or(MergeError::PathNotFound)?;
}
_ => return Err(MergeError::PathNotFound),
}
}
let Some(parent) = current.as_table_mut() else {
return Err(MergeError::PathNotFound);
};
Ok(parent.remove(last).is_some())
}
#[derive(Clone)]
struct LayerState {
name: ConfigLayerName,
source: PathBuf,
config: TomlValue,
version: String,
}
impl LayerState {
fn new(name: ConfigLayerName, source: PathBuf, config: TomlValue) -> Self {
let version = version_for_toml(&config);
Self {
name,
source,
config,
version,
}
}
fn metadata(&self) -> ConfigLayerMetadata {
ConfigLayerMetadata {
name: self.name.clone(),
source: self.source.display().to_string(),
version: self.version.clone(),
}
}
fn as_layer(&self) -> ConfigLayer {
ConfigLayer {
name: self.name.clone(),
source: self.source.display().to_string(),
version: self.version.clone(),
config: to_json_value(&self.config),
}
}
}
#[derive(Clone)]
struct LayersState {
user: LayerState,
session_flags: LayerState,
system: Option<LayerState>,
mdm: Option<LayerState>,
}
impl LayersState {
fn with_user_config(self, user_config: TomlValue) -> Self {
Self {
user: LayerState::new(self.user.name, self.user.source, user_config),
session_flags: self.session_flags,
system: self.system,
mdm: self.mdm,
}
}
fn effective_config(&self) -> TomlValue {
let mut merged = self.user.config.clone();
merge_toml_values(&mut merged, &self.session_flags.config);
if let Some(system) = &self.system {
merge_toml_values(&mut merged, &system.config);
}
if let Some(mdm) = &self.mdm {
merge_toml_values(&mut merged, &mdm.config);
}
merged
}
fn origins(&self) -> HashMap<String, ConfigLayerMetadata> {
let mut origins = HashMap::new();
let mut path = Vec::new();
record_origins(
&self.user.config,
&self.user.metadata(),
&mut path,
&mut origins,
);
record_origins(
&self.session_flags.config,
&self.session_flags.metadata(),
&mut path,
&mut origins,
);
if let Some(system) = &self.system {
record_origins(&system.config, &system.metadata(), &mut path, &mut origins);
}
if let Some(mdm) = &self.mdm {
record_origins(&mdm.config, &mdm.metadata(), &mut path, &mut origins);
}
origins
}
fn layers_high_to_low(&self) -> Vec<ConfigLayer> {
let mut layers = Vec::new();
if let Some(mdm) = &self.mdm {
layers.push(mdm.as_layer());
}
if let Some(system) = &self.system {
layers.push(system.as_layer());
}
layers.push(self.session_flags.as_layer());
layers.push(self.user.as_layer());
layers
}
}
fn record_origins(
value: &TomlValue,
meta: &ConfigLayerMetadata,
path: &mut Vec<String>,
origins: &mut HashMap<String, ConfigLayerMetadata>,
) {
match value {
TomlValue::Table(table) => {
for (key, val) in table {
path.push(key.clone());
record_origins(val, meta, path, origins);
path.pop();
}
}
TomlValue::Array(items) => {
for (idx, item) in items.iter().enumerate() {
path.push(idx.to_string());
record_origins(item, meta, path, origins);
path.pop();
}
}
_ => {
if !path.is_empty() {
origins.insert(path.join("."), meta.clone());
}
}
}
}
fn to_json_value(value: &TomlValue) -> JsonValue {
serde_json::to_value(value).unwrap_or(JsonValue::Null)
}
fn validate_config(value: &TomlValue) -> Result<(), toml::de::Error> {
let _: ConfigToml = value.clone().try_into()?;
Ok(())
}
fn version_for_toml(value: &TomlValue) -> String {
let json = to_json_value(value);
let canonical = canonical_json(&json);
let serialized = serde_json::to_vec(&canonical).unwrap_or_default();
let mut hasher = Sha256::new();
hasher.update(serialized);
let hash = hasher.finalize();
let hex = hash
.iter()
.map(|byte| format!("{byte:02x}"))
.collect::<String>();
format!("sha256:{hex}")
}
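/// Recursively sort object keys so that semantically identical configs serialize to the same bytes and therefore hash to the same version string.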
fn canonical_json(value: &JsonValue) -> JsonValue {
match value {
JsonValue::Object(map) => {
let mut sorted = serde_json::Map::new();
let mut keys = map.keys().cloned().collect::<Vec<_>>();
keys.sort();
for key in keys {
if let Some(val) = map.get(&key) {
sorted.insert(key, canonical_json(val));
}
}
JsonValue::Object(sorted)
}
JsonValue::Array(items) => JsonValue::Array(items.iter().map(canonical_json).collect()),
other => other.clone(),
}
}
fn paths_match(expected: &Path, provided: &str) -> bool {
let provided_path = PathBuf::from(provided);
if let (Ok(expanded_expected), Ok(expanded_provided)) =
(expected.canonicalize(), provided_path.canonicalize())
{
return expanded_expected == expanded_provided;
}
expected == provided_path
}
fn value_at_path<'a>(root: &'a TomlValue, segments: &[String]) -> Option<&'a TomlValue> {
let mut current = root;
for segment in segments {
match current {
TomlValue::Table(table) => {
current = table.get(segment)?;
}
TomlValue::Array(items) => {
let idx: usize = segment.parse().ok()?;
current = items.get(idx)?;
}
_ => return None,
}
}
Some(current)
}
fn override_message(layer: &ConfigLayerName) -> String {
match layer {
ConfigLayerName::Mdm => "Overridden by managed policy (mdm)".to_string(),
ConfigLayerName::System => "Overridden by managed config (system)".to_string(),
ConfigLayerName::SessionFlags => "Overridden by session flags".to_string(),
ConfigLayerName::User => "Overridden by user config".to_string(),
}
}
fn compute_override_metadata(
layers: &LayersState,
effective: &TomlValue,
segments: &[String],
) -> Option<OverriddenMetadata> {
let user_value = value_at_path(&layers.user.config, segments);
let effective_value = value_at_path(effective, segments);
if user_value.is_some() && user_value == effective_value {
return None;
}
if user_value.is_none() && effective_value.is_none() {
return None;
}
let effective_layer = find_effective_layer(layers, segments);
let overriding_layer = effective_layer.unwrap_or_else(|| layers.user.metadata());
let message = override_message(&overriding_layer.name);
Some(OverriddenMetadata {
message,
overriding_layer,
effective_value: effective_value
.map(to_json_value)
.unwrap_or(JsonValue::Null),
})
}
fn first_overridden_edit(
layers: &LayersState,
effective: &TomlValue,
edits: &[Vec<String>],
) -> Option<OverriddenMetadata> {
for segments in edits {
if let Some(meta) = compute_override_metadata(layers, effective, segments) {
return Some(meta);
}
}
None
}
fn find_effective_layer(layers: &LayersState, segments: &[String]) -> Option<ConfigLayerMetadata> {
let check =
|state: &LayerState| value_at_path(&state.config, segments).map(|_| state.metadata());
if let Some(mdm) = &layers.mdm
&& let Some(meta) = check(mdm)
{
return Some(meta);
}
if let Some(system) = &layers.system
&& let Some(meta) = check(system)
{
return Some(meta);
}
if let Some(meta) = check(&layers.session_flags) {
return Some(meta);
}
check(&layers.user)
}
fn system_config_path(codex_home: &Path) -> PathBuf {
if let Ok(path) = std::env::var("CODEX_MANAGED_CONFIG_PATH") {
return PathBuf::from(path);
}
#[cfg(unix)]
{
let _ = codex_home;
PathBuf::from("/etc/codex/managed_config.toml")
}
#[cfg(not(unix))]
{
codex_home.join("managed_config.toml")
}
}
fn internal_error<E: std::fmt::Display>(context: &str, err: E) -> JSONRPCErrorError {
JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("{context}: {err}"),
data: None,
}
}
fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) -> JSONRPCErrorError {
JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: message.into(),
data: Some(json!({
"config_write_error_code": code,
})),
}
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
#[tokio::test]
async fn read_includes_origins_and_layers() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let response = api
.read(ConfigReadParams {
include_layers: true,
})
.await
.expect("response");
assert_eq!(
response.config.get("approval_policy"),
Some(&json!("never"))
);
assert_eq!(
response
.origins
.get("approval_policy")
.expect("origin")
.name,
ConfigLayerName::System
);
let layers = response.layers.expect("layers present");
assert_eq!(layers.first().unwrap().name, ConfigLayerName::System);
assert_eq!(layers.get(1).unwrap().name, ConfigLayerName::SessionFlags);
assert_eq!(layers.last().unwrap().name, ConfigLayerName::User);
}
#[tokio::test]
async fn write_value_reports_override() {
let tmp = tempdir().expect("tempdir");
std::fs::write(
tmp.path().join(CONFIG_FILE_NAME),
"approval_policy = \"on-request\"",
)
.unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let result = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("never"),
merge_strategy: MergeStrategy::Replace,
expected_version: None,
})
.await
.expect("result");
let read_after = api
.read(ConfigReadParams {
include_layers: true,
})
.await
.expect("read");
let config_object = read_after.config.as_object().expect("object");
assert_eq!(config_object.get("approval_policy"), Some(&json!("never")));
assert_eq!(
read_after
.origins
.get("approval_policy")
.expect("origin")
.name,
ConfigLayerName::System
);
assert_eq!(result.status, WriteStatus::Ok);
assert!(result.overridden_metadata.is_none());
}
#[tokio::test]
async fn version_conflict_rejected() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();
let api = ConfigApi::new(tmp.path().to_path_buf(), vec![]);
let error = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-5"),
merge_strategy: MergeStrategy::Replace,
expected_version: Some("sha256:bogus".to_string()),
})
.await
.expect_err("should fail");
assert_eq!(error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(
error
.data
.as_ref()
.and_then(|d| d.get("config_write_error_code"))
.and_then(serde_json::Value::as_str),
Some("configVersionConflict")
);
}
#[tokio::test]
async fn invalid_user_value_rejected_even_if_overridden_by_managed() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let error = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("bogus"),
merge_strategy: MergeStrategy::Replace,
expected_version: None,
})
.await
.expect_err("should fail validation");
assert_eq!(error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(
error
.data
.as_ref()
.and_then(|d| d.get("config_write_error_code"))
.and_then(serde_json::Value::as_str),
Some("configValidationError")
);
let contents =
std::fs::read_to_string(tmp.path().join(CONFIG_FILE_NAME)).expect("read config");
assert_eq!(contents.trim(), "model = \"user\"");
}
#[tokio::test]
async fn read_reports_managed_overrides_user_and_session_flags() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "model = \"system\"").unwrap();
let cli_overrides = vec![(
"model".to_string(),
TomlValue::String("session".to_string()),
)];
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
cli_overrides,
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let response = api
.read(ConfigReadParams {
include_layers: true,
})
.await
.expect("response");
assert_eq!(response.config.get("model"), Some(&json!("system")));
assert_eq!(
response.origins.get("model").expect("origin").name,
ConfigLayerName::System
);
let layers = response.layers.expect("layers");
assert_eq!(layers.first().unwrap().name, ConfigLayerName::System);
assert_eq!(layers.get(1).unwrap().name, ConfigLayerName::SessionFlags);
assert_eq!(layers.get(2).unwrap().name, ConfigLayerName::User);
}
#[tokio::test]
async fn write_value_reports_managed_override() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "").unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let result = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("on-request"),
merge_strategy: MergeStrategy::Replace,
expected_version: None,
})
.await
.expect("result");
assert_eq!(result.status, WriteStatus::OkOverridden);
let overridden = result.overridden_metadata.expect("overridden metadata");
assert_eq!(overridden.overriding_layer.name, ConfigLayerName::System);
assert_eq!(overridden.effective_value, json!("never"));
}
}

View File

@@ -1,2 +0,0 @@
pub(crate) const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
pub(crate) const INTERNAL_ERROR_CODE: i64 = -32603;

View File

@@ -1,97 +0,0 @@
use std::num::NonZero;
use std::num::NonZeroUsize;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use codex_app_server_protocol::FuzzyFileSearchResult;
use codex_file_search as file_search;
use tokio::task::JoinSet;
use tracing::warn;
const LIMIT_PER_ROOT: usize = 50;
const MAX_THREADS: usize = 12;
const COMPUTE_INDICES: bool = true;
pub(crate) async fn run_fuzzy_file_search(
query: String,
roots: Vec<String>,
cancellation_flag: Arc<AtomicBool>,
) -> Vec<FuzzyFileSearchResult> {
if roots.is_empty() {
return Vec::new();
}
#[expect(clippy::expect_used)]
let limit_per_root =
NonZero::new(LIMIT_PER_ROOT).expect("LIMIT_PER_ROOT should be a valid non-zero usize");
let cores = std::thread::available_parallelism()
.map(std::num::NonZero::get)
.unwrap_or(1);
let threads = cores.min(MAX_THREADS);
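// Split the available cores across roots so concurrent per-root searches don't oversubscribe the CPU.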
let threads_per_root = (threads / roots.len()).max(1);
let threads = NonZero::new(threads_per_root).unwrap_or(NonZeroUsize::MIN);
let mut files: Vec<FuzzyFileSearchResult> = Vec::new();
let mut join_set = JoinSet::new();
for root in roots {
let search_dir = PathBuf::from(&root);
let query = query.clone();
let cancel_flag = cancellation_flag.clone();
join_set.spawn_blocking(move || {
match file_search::run(
query.as_str(),
limit_per_root,
&search_dir,
Vec::new(),
threads,
cancel_flag,
COMPUTE_INDICES,
true,
) {
Ok(res) => Ok((root, res)),
Err(err) => Err((root, err)),
}
});
}
while let Some(res) = join_set.join_next().await {
match res {
Ok(Ok((root, res))) => {
for m in res.matches {
let path = m.path;
//TODO(shijie): Move file name generation to file_search lib.
let file_name = Path::new(&path)
.file_name()
.map(|name| name.to_string_lossy().into_owned())
.unwrap_or_else(|| path.clone());
let result = FuzzyFileSearchResult {
root: root.clone(),
path,
file_name,
score: m.score,
indices: m.indices,
};
files.push(result);
}
}
Ok(Err((root, err))) => {
warn!("fuzzy-file-search in dir '{root}' failed: {err}");
}
Err(err) => {
warn!("fuzzy-file-search join_next failed: {err}");
}
}
}
files.sort_by(file_search::cmp_by_score_desc_then_path_asc::<
FuzzyFileSearchResult,
_,
_,
>(|f| f.score, |f| f.path.as_str()));
files
}

View File

@@ -1,178 +0,0 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::path::PathBuf;
use crate::message_processor::MessageProcessor;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::JSONRPCMessage;
use codex_feedback::CodexFeedback;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::io::{self};
use tokio::sync::mpsc;
use toml::Value as TomlValue;
use tracing::Level;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
mod bespoke_event_handling;
mod codex_message_processor;
mod config_api;
mod error_code;
mod fuzzy_file_search;
mod message_processor;
mod models;
mod outgoing_message;
/// Size of the bounded channels used to communicate between tasks. The value
/// is a balance between throughput and memory usage; 128 messages should be
/// plenty for an interactive CLI.
const CHANNEL_CAPACITY: usize = 128;
pub async fn run_main(
codex_linux_sandbox_exe: Option<PathBuf>,
cli_config_overrides: CliConfigOverrides,
) -> IoResult<()> {
// Set up channels.
let (incoming_tx, mut incoming_rx) = mpsc::channel::<JSONRPCMessage>(CHANNEL_CAPACITY);
let (outgoing_tx, mut outgoing_rx) = mpsc::channel::<OutgoingMessage>(CHANNEL_CAPACITY);
// Task: read from stdin, push to `incoming_tx`.
let stdin_reader_handle = tokio::spawn({
async move {
let stdin = io::stdin();
let reader = BufReader::new(stdin);
let mut lines = reader.lines();
while let Some(line) = lines.next_line().await.unwrap_or_default() {
match serde_json::from_str::<JSONRPCMessage>(&line) {
Ok(msg) => {
if incoming_tx.send(msg).await.is_err() {
// Receiver gone; nothing left to do.
break;
}
}
Err(e) => error!("Failed to deserialize JSONRPCMessage: {e}"),
}
}
debug!("stdin reader finished (EOF)");
}
});
// Parse CLI overrides once and derive the base Config eagerly so later
// components do not need to work with raw TOML values.
let cli_kv_overrides = cli_config_overrides.parse_overrides().map_err(|e| {
std::io::Error::new(
ErrorKind::InvalidInput,
format!("error parsing -c overrides: {e}"),
)
})?;
let config =
Config::load_with_cli_overrides(cli_kv_overrides.clone(), ConfigOverrides::default())
.await
.map_err(|e| {
std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
})?;
let feedback = CodexFeedback::new();
let otel =
codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION")).map_err(|e| {
std::io::Error::new(
ErrorKind::InvalidData,
format!("error loading otel config: {e}"),
)
})?;
// Install a simple subscriber so `tracing` output is visible. Users can
// control the log level with `RUST_LOG`.
let stderr_fmt = tracing_subscriber::fmt::layer()
.with_writer(std::io::stderr)
.with_filter(EnvFilter::from_default_env());
let feedback_layer = tracing_subscriber::fmt::layer()
.with_writer(feedback.make_writer())
.with_ansi(false)
.with_target(false)
.with_filter(Targets::new().with_default(Level::TRACE));
let _ = tracing_subscriber::registry()
.with(stderr_fmt)
.with(feedback_layer)
.with(otel.as_ref().map(|provider| {
OpenTelemetryTracingBridge::new(&provider.logger).with_filter(
tracing_subscriber::filter::filter_fn(codex_core::otel_init::codex_export_filter),
)
}))
.try_init();
// Task: process incoming messages.
let processor_handle = tokio::spawn({
let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
let cli_overrides: Vec<(String, TomlValue)> = cli_kv_overrides.clone();
let mut processor = MessageProcessor::new(
outgoing_message_sender,
codex_linux_sandbox_exe,
std::sync::Arc::new(config),
cli_overrides,
feedback.clone(),
);
async move {
while let Some(msg) = incoming_rx.recv().await {
match msg {
JSONRPCMessage::Request(r) => processor.process_request(r).await,
JSONRPCMessage::Response(r) => processor.process_response(r).await,
JSONRPCMessage::Notification(n) => processor.process_notification(n).await,
JSONRPCMessage::Error(e) => processor.process_error(e),
}
}
info!("processor task exited (channel closed)");
}
});
// Task: write outgoing messages to stdout.
let stdout_writer_handle = tokio::spawn(async move {
let mut stdout = io::stdout();
while let Some(outgoing_message) = outgoing_rx.recv().await {
let Ok(value) = serde_json::to_value(outgoing_message) else {
error!("Failed to convert OutgoingMessage to JSON value");
continue;
};
match serde_json::to_string(&value) {
Ok(mut json) => {
json.push('\n');
if let Err(e) = stdout.write_all(json.as_bytes()).await {
error!("Failed to write to stdout: {e}");
break;
}
}
Err(e) => error!("Failed to serialize JSONRPCMessage: {e}"),
}
}
info!("stdout writer exited (channel closed)");
});
// Wait for all tasks to finish. The typical exit path is the stdin reader
// hitting EOF which, once it drops `incoming_tx`, propagates shutdown to
// the processor and then to the stdout task.
let _ = tokio::join!(stdin_reader_handle, processor_handle, stdout_writer_handle);
Ok(())
}
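// Shutdown sketch (illustrative, self-contained): dropping the last `Sender`
// closes an mpsc channel, so each `while let Some(..) = rx.recv().await` loop
// above exits on its own once the stage upstream of it goes away.
#[cfg(test)]
mod shutdown_propagation_sketch {
    use tokio::sync::mpsc;

    #[tokio::test]
    async fn recv_returns_none_once_senders_drop() {
        let (tx, mut rx) = mpsc::channel::<u8>(8);
        drop(tx);
        // With no senders left, the channel reports closure instead of blocking.
        assert_eq!(rx.recv().await, None);
    }
}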

View File

@@ -1,10 +0,0 @@
use codex_app_server::run_main;
use codex_arg0::arg0_dispatch_or_else;
use codex_common::CliConfigOverrides;
fn main() -> anyhow::Result<()> {
arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
run_main(codex_linux_sandbox_exe, CliConfigOverrides::default()).await?;
Ok(())
})
}

View File

@@ -1,209 +0,0 @@
use std::path::PathBuf;
use std::sync::Arc;
use crate::codex_message_processor::CodexMessageProcessor;
use crate::config_api::ConfigApi;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::InitializeResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_core::default_client::USER_AGENT_SUFFIX;
use codex_core::default_client::get_codex_user_agent;
use codex_feedback::CodexFeedback;
use codex_protocol::protocol::SessionSource;
use toml::Value as TomlValue;
pub(crate) struct MessageProcessor {
outgoing: Arc<OutgoingMessageSender>,
codex_message_processor: CodexMessageProcessor,
config_api: ConfigApi,
initialized: bool,
}
impl MessageProcessor {
/// Create a new `MessageProcessor`, retaining a handle to the outgoing
/// `Sender` so handlers can enqueue messages to be written to stdout.
pub(crate) fn new(
outgoing: OutgoingMessageSender,
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
cli_overrides: Vec<(String, TomlValue)>,
feedback: CodexFeedback,
) -> Self {
let outgoing = Arc::new(outgoing);
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
false,
config.cli_auth_credentials_store_mode,
);
let conversation_manager = Arc::new(ConversationManager::new(
auth_manager.clone(),
SessionSource::VSCode,
));
let codex_message_processor = CodexMessageProcessor::new(
auth_manager,
conversation_manager,
outgoing.clone(),
codex_linux_sandbox_exe,
Arc::clone(&config),
feedback,
);
let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides);
Self {
outgoing,
codex_message_processor,
config_api,
initialized: false,
}
}
pub(crate) async fn process_request(&mut self, request: JSONRPCRequest) {
let request_id = request.id.clone();
let request_json = match serde_json::to_value(&request) {
Ok(request_json) => request_json,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("Invalid request: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let codex_request = match serde_json::from_value::<ClientRequest>(request_json) {
Ok(codex_request) => codex_request,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("Invalid request: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
match codex_request {
// Handle Initialize internally so CodexMessageProcessor does not have to concern
// itself with the `initialized` bool.
ClientRequest::Initialize { request_id, params } => {
if self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Already initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
} else {
let ClientInfo {
name,
title: _title,
version,
} = params.client_info;
let user_agent_suffix = format!("{name}; {version}");
if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
*suffix = Some(user_agent_suffix);
}
let user_agent = get_codex_user_agent();
let response = InitializeResponse { user_agent };
self.outgoing.send_response(request_id, response).await;
self.initialized = true;
return;
}
}
_ => {
if !self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Not initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
}
}
match codex_request {
ClientRequest::ConfigRead { request_id, params } => {
self.handle_config_read(request_id, params).await;
}
ClientRequest::ConfigValueWrite { request_id, params } => {
self.handle_config_value_write(request_id, params).await;
}
ClientRequest::ConfigBatchWrite { request_id, params } => {
self.handle_config_batch_write(request_id, params).await;
}
other => {
self.codex_message_processor.process_request(other).await;
}
}
}
pub(crate) async fn process_notification(&self, notification: JSONRPCNotification) {
// Currently, we do not expect to receive any notifications from the
// client, so we just log them.
tracing::info!("<- notification: {:?}", notification);
}
/// Handle a standalone JSON-RPC response originating from the peer.
pub(crate) async fn process_response(&mut self, response: JSONRPCResponse) {
tracing::info!("<- response: {:?}", response);
let JSONRPCResponse { id, result, .. } = response;
self.outgoing.notify_client_response(id, result).await
}
/// Handle an error object received from the peer.
pub(crate) fn process_error(&mut self, err: JSONRPCError) {
tracing::error!("<- error: {:?}", err);
}
async fn handle_config_read(&self, request_id: RequestId, params: ConfigReadParams) {
match self.config_api.read(params).await {
Ok(response) => self.outgoing.send_response(request_id, response).await,
Err(error) => self.outgoing.send_error(request_id, error).await,
}
}
async fn handle_config_value_write(
&self,
request_id: RequestId,
params: ConfigValueWriteParams,
) {
match self.config_api.write_value(params).await {
Ok(response) => self.outgoing.send_response(request_id, response).await,
Err(error) => self.outgoing.send_error(request_id, error).await,
}
}
async fn handle_config_batch_write(
&self,
request_id: RequestId,
params: ConfigBatchWriteParams,
) {
match self.config_api.batch_write(params).await {
Ok(response) => self.outgoing.send_response(request_id, response).await,
Err(error) => self.outgoing.send_error(request_id, error).await,
}
}
}
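// The two-phase match above reduces to a small state machine: `initialize`
// flips the flag exactly once, and every other request is rejected until it
// has. A hedged sketch with illustrative types (not the crate's real ones):
#[cfg(test)]
mod init_gate_sketch {
    enum Req {
        Initialize,
        Other,
    }

    struct Gate {
        initialized: bool,
    }

    impl Gate {
        fn handle(&mut self, req: Req) -> Result<(), &'static str> {
            match req {
                Req::Initialize if self.initialized => Err("Already initialized"),
                Req::Initialize => {
                    self.initialized = true;
                    Ok(())
                }
                Req::Other if !self.initialized => Err("Not initialized"),
                Req::Other => Ok(()),
            }
        }
    }

    #[test]
    fn requests_are_rejected_until_initialized() {
        let mut gate = Gate { initialized: false };
        assert!(gate.handle(Req::Other).is_err());
        assert!(gate.handle(Req::Initialize).is_ok());
        assert!(gate.handle(Req::Other).is_ok());
        assert!(gate.handle(Req::Initialize).is_err());
    }
}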

View File

@@ -1,39 +0,0 @@
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
builtin_model_presets(auth_mode)
.into_iter()
.map(model_from_preset)
.collect()
}
fn model_from_preset(preset: ModelPreset) -> Model {
Model {
id: preset.id.to_string(),
model: preset.model.to_string(),
display_name: preset.display_name.to_string(),
description: preset.description.to_string(),
supported_reasoning_efforts: reasoning_efforts_from_preset(
preset.supported_reasoning_efforts,
),
default_reasoning_effort: preset.default_reasoning_effort,
is_default: preset.is_default,
}
}
fn reasoning_efforts_from_preset(
efforts: &'static [ReasoningEffortPreset],
) -> Vec<ReasoningEffortOption> {
efforts
.iter()
.map(|preset| ReasoningEffortOption {
reasoning_effort: preset.effort,
description: preset.description.to_string(),
})
.collect()
}

View File

@@ -1,277 +0,0 @@
use std::collections::HashMap;
use std::sync::atomic::AtomicI64;
use std::sync::atomic::Ordering;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::Result;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ServerRequestPayload;
use serde::Serialize;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tracing::warn;
use crate::error_code::INTERNAL_ERROR_CODE;
/// Sends messages to the client and manages request callbacks.
pub(crate) struct OutgoingMessageSender {
next_request_id: AtomicI64,
sender: mpsc::Sender<OutgoingMessage>,
request_id_to_callback: Mutex<HashMap<RequestId, oneshot::Sender<Result>>>,
}
impl OutgoingMessageSender {
pub(crate) fn new(sender: mpsc::Sender<OutgoingMessage>) -> Self {
Self {
next_request_id: AtomicI64::new(0),
sender,
request_id_to_callback: Mutex::new(HashMap::new()),
}
}
pub(crate) async fn send_request(
&self,
request: ServerRequestPayload,
) -> oneshot::Receiver<Result> {
let id = RequestId::Integer(self.next_request_id.fetch_add(1, Ordering::Relaxed));
let outgoing_message_id = id.clone();
let (tx_approve, rx_approve) = oneshot::channel();
{
let mut request_id_to_callback = self.request_id_to_callback.lock().await;
request_id_to_callback.insert(id, tx_approve);
}
let outgoing_message =
OutgoingMessage::Request(request.request_with_id(outgoing_message_id.clone()));
if let Err(err) = self.sender.send(outgoing_message).await {
warn!("failed to send request {outgoing_message_id:?} to client: {err:?}");
let mut request_id_to_callback = self.request_id_to_callback.lock().await;
request_id_to_callback.remove(&outgoing_message_id);
}
rx_approve
}
pub(crate) async fn notify_client_response(&self, id: RequestId, result: Result) {
let entry = {
let mut request_id_to_callback = self.request_id_to_callback.lock().await;
request_id_to_callback.remove_entry(&id)
};
match entry {
Some((id, sender)) => {
if let Err(err) = sender.send(result) {
warn!("could not notify callback for {id:?} due to: {err:?}");
}
}
None => {
warn!("could not find callback for {id:?}");
}
}
}
pub(crate) async fn send_response<T: Serialize>(&self, id: RequestId, response: T) {
match serde_json::to_value(response) {
Ok(result) => {
let outgoing_message = OutgoingMessage::Response(OutgoingResponse { id, result });
if let Err(err) = self.sender.send(outgoing_message).await {
warn!("failed to send response to client: {err:?}");
}
}
Err(err) => {
self.send_error(
id,
JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to serialize response: {err}"),
data: None,
},
)
.await;
}
}
}
pub(crate) async fn send_server_notification(&self, notification: ServerNotification) {
if let Err(err) = self
.sender
.send(OutgoingMessage::AppServerNotification(notification))
.await
{
warn!("failed to send server notification to client: {err:?}");
}
}
/// All notifications should be migrated to [`ServerNotification`] and
/// [`OutgoingMessage::Notification`] should be removed.
pub(crate) async fn send_notification(&self, notification: OutgoingNotification) {
let outgoing_message = OutgoingMessage::Notification(notification);
if let Err(err) = self.sender.send(outgoing_message).await {
warn!("failed to send notification to client: {err:?}");
}
}
pub(crate) async fn send_error(&self, id: RequestId, error: JSONRPCErrorError) {
let outgoing_message = OutgoingMessage::Error(OutgoingError { id, error });
if let Err(err) = self.sender.send(outgoing_message).await {
warn!("failed to send error to client: {err:?}");
}
}
}
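// Self-contained sketch of the correlation pattern above: park a oneshot
// sender under the outgoing request id, then complete it when the peer's
// response for that id comes back. (Plain `i64` ids stand in for `RequestId`.)
#[cfg(test)]
mod request_callback_sketch {
    use std::collections::HashMap;
    use tokio::sync::oneshot;

    #[tokio::test]
    async fn response_completes_the_pending_request() {
        let mut pending: HashMap<i64, oneshot::Sender<&'static str>> = HashMap::new();
        let (tx, rx) = oneshot::channel();
        pending.insert(7, tx);
        // Later, a response arrives carrying id 7:
        if let Some(sender) = pending.remove(&7) {
            let _ = sender.send("result");
        }
        assert_eq!(rx.await.expect("sender completed"), "result");
    }
}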
/// Outgoing message from the server to the client.
#[derive(Debug, Clone, Serialize)]
#[serde(untagged)]
pub(crate) enum OutgoingMessage {
Request(ServerRequest),
Notification(OutgoingNotification),
/// AppServerNotification is specific to the case where this is run as an
/// "app server" as opposed to an MCP server.
AppServerNotification(ServerNotification),
Response(OutgoingResponse),
Error(OutgoingError),
}
#[derive(Debug, Clone, PartialEq, Serialize)]
pub(crate) struct OutgoingNotification {
pub method: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub params: Option<serde_json::Value>,
}
#[derive(Debug, Clone, PartialEq, Serialize)]
pub(crate) struct OutgoingResponse {
pub id: RequestId,
pub result: Result,
}
#[derive(Debug, Clone, PartialEq, Serialize)]
pub(crate) struct OutgoingError {
pub error: JSONRPCErrorError,
pub id: RequestId,
}
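// Sketch of what `#[serde(untagged)]` buys the enum above: variants serialize
// as their payload alone, with no discriminant field, so the peer tells
// responses and errors apart purely by shape. (`Wire` is a hypothetical type.)
#[cfg(test)]
mod untagged_shape_sketch {
    use serde::Serialize;
    use serde_json::json;

    #[derive(Serialize)]
    #[serde(untagged)]
    #[allow(dead_code)]
    enum Wire {
        Response { id: i64, result: String },
        Error { id: i64, error: String },
    }

    #[test]
    fn variants_serialize_without_a_tag() {
        let msg = Wire::Response {
            id: 1,
            result: "ok".to_string(),
        };
        assert_eq!(
            serde_json::to_value(&msg).expect("serializable"),
            json!({ "id": 1, "result": "ok" })
        );
    }
}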
#[cfg(test)]
mod tests {
use codex_app_server_protocol::AccountLoginCompletedNotification;
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
use codex_app_server_protocol::AccountUpdatedNotification;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use pretty_assertions::assert_eq;
use serde_json::json;
use uuid::Uuid;
use super::*;
#[test]
fn verify_server_notification_serialization() {
let notification =
ServerNotification::LoginChatGptComplete(LoginChatGptCompleteNotification {
login_id: Uuid::nil(),
success: true,
error: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "loginChatGptComplete",
"params": {
"loginId": Uuid::nil(),
"success": true,
"error": null,
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the strum macros serialize the method field correctly"),
"ensure the strum macros serialize the method field correctly"
);
}
#[test]
fn verify_account_login_completed_notification_serialization() {
let notification =
ServerNotification::AccountLoginCompleted(AccountLoginCompletedNotification {
login_id: Some(Uuid::nil().to_string()),
success: true,
error: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/login/completed",
"params": {
"loginId": Uuid::nil().to_string(),
"success": true,
"error": null,
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the notification serializes correctly"),
"ensure the notification serializes correctly"
);
}
#[test]
fn verify_account_rate_limits_notification_serialization() {
let notification =
ServerNotification::AccountRateLimitsUpdated(AccountRateLimitsUpdatedNotification {
rate_limits: RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 25,
window_duration_mins: Some(15),
resets_at: Some(123),
}),
secondary: None,
credits: None,
},
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/rateLimits/updated",
"params": {
"rateLimits": {
"primary": {
"usedPercent": 25,
"windowDurationMins": 15,
"resetsAt": 123
},
"secondary": null,
"credits": null
}
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the notification serializes correctly"),
"ensure the notification serializes correctly"
);
}
#[test]
fn verify_account_updated_notification_serialization() {
let notification = ServerNotification::AccountUpdated(AccountUpdatedNotification {
auth_mode: Some(AuthMode::ApiKey),
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/updated",
"params": {
"authMode": "apikey"
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the notification serializes correctly"),
"ensure the notification serializes correctly"
);
}
}

View File

@@ -1,29 +0,0 @@
[package]
name = "app_test_support"
version.workspace = true
edition.workspace = true
license.workspace = true
[lib]
path = "lib.rs"
[dependencies]
anyhow = { workspace = true }
assert_cmd = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true }
codex-protocol = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [
"io-std",
"macros",
"process",
"rt-multi-thread",
] }
uuid = { workspace = true }
wiremock = { workspace = true }
core_test_support = { path = "../../../core/tests/common" }
shlex = { workspace = true }

View File

@@ -1,135 +0,0 @@
use std::path::Path;
use anyhow::Context;
use anyhow::Result;
use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use chrono::DateTime;
use chrono::Utc;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::AuthDotJson;
use codex_core::auth::save_auth;
use codex_core::token_data::TokenData;
use codex_core::token_data::parse_id_token;
use serde_json::json;
/// Builder for writing a fake ChatGPT auth.json in tests.
#[derive(Debug, Clone)]
pub struct ChatGptAuthFixture {
access_token: String,
refresh_token: String,
account_id: Option<String>,
claims: ChatGptIdTokenClaims,
last_refresh: Option<Option<DateTime<Utc>>>,
}
impl ChatGptAuthFixture {
pub fn new(access_token: impl Into<String>) -> Self {
Self {
access_token: access_token.into(),
refresh_token: "refresh-token".to_string(),
account_id: None,
claims: ChatGptIdTokenClaims::default(),
last_refresh: None,
}
}
pub fn refresh_token(mut self, refresh_token: impl Into<String>) -> Self {
self.refresh_token = refresh_token.into();
self
}
pub fn account_id(mut self, account_id: impl Into<String>) -> Self {
self.account_id = Some(account_id.into());
self
}
pub fn plan_type(mut self, plan_type: impl Into<String>) -> Self {
self.claims.plan_type = Some(plan_type.into());
self
}
pub fn email(mut self, email: impl Into<String>) -> Self {
self.claims.email = Some(email.into());
self
}
pub fn last_refresh(mut self, last_refresh: Option<DateTime<Utc>>) -> Self {
self.last_refresh = Some(last_refresh);
self
}
pub fn claims(mut self, claims: ChatGptIdTokenClaims) -> Self {
self.claims = claims;
self
}
}
#[derive(Debug, Clone, Default)]
pub struct ChatGptIdTokenClaims {
pub email: Option<String>,
pub plan_type: Option<String>,
}
impl ChatGptIdTokenClaims {
pub fn new() -> Self {
Self::default()
}
pub fn email(mut self, email: impl Into<String>) -> Self {
self.email = Some(email.into());
self
}
pub fn plan_type(mut self, plan_type: impl Into<String>) -> Self {
self.plan_type = Some(plan_type.into());
self
}
}
pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> {
let header = json!({ "alg": "none", "typ": "JWT" });
let mut payload = serde_json::Map::new();
if let Some(email) = &claims.email {
payload.insert("email".to_string(), json!(email));
}
if let Some(plan_type) = &claims.plan_type {
payload.insert(
"https://api.openai.com/auth".to_string(),
json!({ "chatgpt_plan_type": plan_type }),
);
}
let payload = serde_json::Value::Object(payload);
let header_b64 =
URL_SAFE_NO_PAD.encode(serde_json::to_vec(&header).context("serialize jwt header")?);
let payload_b64 =
URL_SAFE_NO_PAD.encode(serde_json::to_vec(&payload).context("serialize jwt payload")?);
let signature_b64 = URL_SAFE_NO_PAD.encode(b"signature");
Ok(format!("{header_b64}.{payload_b64}.{signature_b64}"))
}
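// Round-trip sketch for the unsigned JWT assembled above: split on '.',
// base64url-decode the middle segment, and the claims come back as JSON.
#[cfg(test)]
mod id_token_sketch {
    use base64::Engine;
    use base64::engine::general_purpose::URL_SAFE_NO_PAD;

    #[test]
    fn payload_segment_decodes_to_claims() -> anyhow::Result<()> {
        let claims = super::ChatGptIdTokenClaims::new().email("user@example.com");
        let token = super::encode_id_token(&claims)?;
        let payload_b64 = token.split('.').nth(1).expect("jwt has three segments");
        let payload: serde_json::Value =
            serde_json::from_slice(&URL_SAFE_NO_PAD.decode(payload_b64)?)?;
        assert_eq!(payload["email"], "user@example.com");
        Ok(())
    }
}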
pub fn write_chatgpt_auth(
codex_home: &Path,
fixture: ChatGptAuthFixture,
cli_auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> Result<()> {
let id_token_raw = encode_id_token(&fixture.claims)?;
let id_token = parse_id_token(&id_token_raw).context("parse id token")?;
let tokens = TokenData {
id_token,
access_token: fixture.access_token,
refresh_token: fixture.refresh_token,
account_id: fixture.account_id,
};
let last_refresh = fixture.last_refresh.unwrap_or_else(|| Some(Utc::now()));
let auth = AuthDotJson {
openai_api_key: None,
tokens: Some(tokens),
last_refresh,
};
save_auth(codex_home, &auth, cli_auth_credentials_store_mode).context("write auth.json")
}

View File

@@ -1,28 +0,0 @@
mod auth_fixtures;
mod mcp_process;
mod mock_model_server;
mod responses;
mod rollout;
pub use auth_fixtures::ChatGptAuthFixture;
pub use auth_fixtures::ChatGptIdTokenClaims;
pub use auth_fixtures::encode_id_token;
pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use core_test_support::format_with_current_shell;
pub use core_test_support::format_with_current_shell_display;
pub use mcp_process::McpProcess;
pub use mock_model_server::create_mock_chat_completions_server;
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_exec_command_sse_response;
pub use responses::create_final_assistant_message_sse_response;
pub use responses::create_shell_command_sse_response;
pub use rollout::create_fake_rollout;
use serde::de::DeserializeOwned;
pub fn to_response<T: DeserializeOwned>(response: JSONRPCResponse) -> anyhow::Result<T> {
let value = serde_json::to_value(response.result)?;
let codex_response = serde_json::from_value(value)?;
Ok(codex_response)
}
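// A usage-shaped sketch of the deserialization step inside `to_response`: any
// `DeserializeOwned` target can be decoded from the JSON-RPC `result` value.
// (`PingResponse` is a hypothetical type, not part of the protocol.)
#[cfg(test)]
mod to_response_sketch {
    use serde::Deserialize;

    #[derive(Debug, Deserialize, PartialEq)]
    struct PingResponse {
        ok: bool,
    }

    #[test]
    fn result_value_decodes_into_typed_response() -> anyhow::Result<()> {
        let result = serde_json::json!({ "ok": true });
        let response: PingResponse = serde_json::from_value(result)?;
        assert_eq!(response, PingResponse { ok: true });
        Ok(())
    }
}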

View File

@@ -1,668 +0,0 @@
use std::collections::VecDeque;
use std::path::Path;
use std::process::Stdio;
use std::sync::atomic::AtomicI64;
use std::sync::atomic::Ordering;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::process::Child;
use tokio::process::ChildStdin;
use tokio::process::ChildStdout;
use anyhow::Context;
use assert_cmd::prelude::*;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientNotification;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::FeedbackUploadParams;
use codex_app_server_protocol::GetAccountParams;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::RemoveConversationListenerParams;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::ReviewStartParams;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use std::process::Command as StdCommand;
use tokio::process::Command;
pub struct McpProcess {
next_request_id: AtomicI64,
/// Retain this child process until the client is dropped. The Tokio runtime
/// will make a "best effort" to reap the process after it exits, but it is
/// not a guarantee. See the `kill_on_drop` documentation for details.
#[allow(dead_code)]
process: Child,
stdin: ChildStdin,
stdout: BufReader<ChildStdout>,
pending_user_messages: VecDeque<JSONRPCNotification>,
}
impl McpProcess {
pub async fn new(codex_home: &Path) -> anyhow::Result<Self> {
Self::new_with_env(codex_home, &[]).await
}
/// Creates a new MCP process, allowing tests to override or remove
/// specific environment variables for the child process only.
///
/// Pass a tuple of (key, Some(value)) to set/override, or (key, None) to
/// remove a variable from the child's environment.
pub async fn new_with_env(
codex_home: &Path,
env_overrides: &[(&str, Option<&str>)],
) -> anyhow::Result<Self> {
// Use assert_cmd to locate the binary path and then switch to tokio::process::Command
let std_cmd = StdCommand::cargo_bin("codex-app-server")
.context("should find binary for codex-app-server")?;
let program = std_cmd.get_program().to_owned();
let mut cmd = Command::new(program);
cmd.stdin(Stdio::piped());
cmd.stdout(Stdio::piped());
cmd.stderr(Stdio::piped());
cmd.env("CODEX_HOME", codex_home);
cmd.env("RUST_LOG", "debug");
for (k, v) in env_overrides {
match v {
Some(val) => {
cmd.env(k, val);
}
None => {
cmd.env_remove(k);
}
}
}
let mut process = cmd
.kill_on_drop(true)
.spawn()
.context("codex-mcp-server proc should start")?;
let stdin = process
.stdin
.take()
.ok_or_else(|| anyhow::format_err!("mcp should have stdin fd"))?;
let stdout = process
.stdout
.take()
.ok_or_else(|| anyhow::format_err!("mcp should have stdout fd"))?;
let stdout = BufReader::new(stdout);
// Forward child's stderr to our stderr so failures are visible even
// when stdout/stderr are captured by the test harness.
if let Some(stderr) = process.stderr.take() {
let mut stderr_reader = BufReader::new(stderr).lines();
tokio::spawn(async move {
while let Ok(Some(line)) = stderr_reader.next_line().await {
eprintln!("[mcp stderr] {line}");
}
});
}
Ok(Self {
next_request_id: AtomicI64::new(0),
process,
stdin,
stdout,
pending_user_messages: VecDeque::new(),
})
}
/// Performs the initialization handshake with the MCP server.
pub async fn initialize(&mut self) -> anyhow::Result<()> {
let params = Some(serde_json::to_value(InitializeParams {
client_info: ClientInfo {
name: "codex-app-server-tests".to_string(),
title: None,
version: "0.1.0".to_string(),
},
})?);
let req_id = self.send_request("initialize", params).await?;
let initialized = self.read_jsonrpc_message().await?;
let JSONRPCMessage::Response(response) = initialized else {
unreachable!("expected JSONRPCMessage::Response for initialize, got {initialized:?}");
};
if response.id != RequestId::Integer(req_id) {
anyhow::bail!(
"initialize response id mismatch: expected {}, got {:?}",
req_id,
response.id
);
}
// Send notifications/initialized to ack the response.
self.send_notification(ClientNotification::Initialized)
.await?;
Ok(())
}
/// Send a `newConversation` JSON-RPC request.
pub async fn send_new_conversation_request(
&mut self,
params: NewConversationParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("newConversation", params).await
}
/// Send an `archiveConversation` JSON-RPC request.
pub async fn send_archive_conversation_request(
&mut self,
params: ArchiveConversationParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("archiveConversation", params).await
}
/// Send an `addConversationListener` JSON-RPC request.
pub async fn send_add_conversation_listener_request(
&mut self,
params: AddConversationListenerParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("addConversationListener", params).await
}
/// Send a `sendUserMessage` JSON-RPC request with a single text item.
pub async fn send_send_user_message_request(
&mut self,
params: SendUserMessageParams,
) -> anyhow::Result<i64> {
// Wire format expects variants in camelCase; text item uses external tagging.
let params = Some(serde_json::to_value(params)?);
self.send_request("sendUserMessage", params).await
}
/// Send a `removeConversationListener` JSON-RPC request.
pub async fn send_remove_conversation_listener_request(
&mut self,
params: RemoveConversationListenerParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("removeConversationListener", params)
.await
}
/// Send a `sendUserTurn` JSON-RPC request.
pub async fn send_send_user_turn_request(
&mut self,
params: SendUserTurnParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("sendUserTurn", params).await
}
/// Send an `interruptConversation` JSON-RPC request.
pub async fn send_interrupt_conversation_request(
&mut self,
params: InterruptConversationParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("interruptConversation", params).await
}
/// Send a `getAuthStatus` JSON-RPC request.
pub async fn send_get_auth_status_request(
&mut self,
params: GetAuthStatusParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("getAuthStatus", params).await
}
/// Send a `getUserSavedConfig` JSON-RPC request.
pub async fn send_get_user_saved_config_request(&mut self) -> anyhow::Result<i64> {
self.send_request("getUserSavedConfig", None).await
}
/// Send a `getUserAgent` JSON-RPC request.
pub async fn send_get_user_agent_request(&mut self) -> anyhow::Result<i64> {
self.send_request("getUserAgent", None).await
}
/// Send an `account/rateLimits/read` JSON-RPC request.
pub async fn send_get_account_rate_limits_request(&mut self) -> anyhow::Result<i64> {
self.send_request("account/rateLimits/read", None).await
}
/// Send an `account/read` JSON-RPC request.
pub async fn send_get_account_request(
&mut self,
params: GetAccountParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("account/read", params).await
}
/// Send a `feedback/upload` JSON-RPC request.
pub async fn send_feedback_upload_request(
&mut self,
params: FeedbackUploadParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("feedback/upload", params).await
}
/// Send a `userInfo` JSON-RPC request.
pub async fn send_user_info_request(&mut self) -> anyhow::Result<i64> {
self.send_request("userInfo", None).await
}
/// Send a `setDefaultModel` JSON-RPC request.
pub async fn send_set_default_model_request(
&mut self,
params: SetDefaultModelParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("setDefaultModel", params).await
}
/// Send a `listConversations` JSON-RPC request.
pub async fn send_list_conversations_request(
&mut self,
params: ListConversationsParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("listConversations", params).await
}
/// Send a `thread/start` JSON-RPC request.
pub async fn send_thread_start_request(
&mut self,
params: ThreadStartParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/start", params).await
}
/// Send a `thread/resume` JSON-RPC request.
pub async fn send_thread_resume_request(
&mut self,
params: ThreadResumeParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/resume", params).await
}
/// Send a `thread/archive` JSON-RPC request.
pub async fn send_thread_archive_request(
&mut self,
params: ThreadArchiveParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/archive", params).await
}
/// Send a `thread/list` JSON-RPC request.
pub async fn send_thread_list_request(
&mut self,
params: ThreadListParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/list", params).await
}
/// Send a `model/list` JSON-RPC request.
pub async fn send_list_models_request(
&mut self,
params: ModelListParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("model/list", params).await
}
/// Send a `resumeConversation` JSON-RPC request.
pub async fn send_resume_conversation_request(
&mut self,
params: ResumeConversationParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("resumeConversation", params).await
}
/// Send a `loginApiKey` JSON-RPC request.
pub async fn send_login_api_key_request(
&mut self,
params: LoginApiKeyParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("loginApiKey", params).await
}
/// Send a `loginChatGpt` JSON-RPC request.
pub async fn send_login_chat_gpt_request(&mut self) -> anyhow::Result<i64> {
self.send_request("loginChatGpt", None).await
}
/// Send a `turn/start` JSON-RPC request (v2).
pub async fn send_turn_start_request(
&mut self,
params: TurnStartParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/start", params).await
}
/// Send a `turn/interrupt` JSON-RPC request (v2).
pub async fn send_turn_interrupt_request(
&mut self,
params: TurnInterruptParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/interrupt", params).await
}
/// Send a `review/start` JSON-RPC request (v2).
pub async fn send_review_start_request(
&mut self,
params: ReviewStartParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("review/start", params).await
}
/// Send a `cancelLoginChatGpt` JSON-RPC request.
pub async fn send_cancel_login_chat_gpt_request(
&mut self,
params: CancelLoginChatGptParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("cancelLoginChatGpt", params).await
}
/// Send a `logoutChatGpt` JSON-RPC request.
pub async fn send_logout_chat_gpt_request(&mut self) -> anyhow::Result<i64> {
self.send_request("logoutChatGpt", None).await
}
pub async fn send_config_read_request(
&mut self,
params: ConfigReadParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("config/read", params).await
}
pub async fn send_config_value_write_request(
&mut self,
params: ConfigValueWriteParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("config/value/write", params).await
}
pub async fn send_config_batch_write_request(
&mut self,
params: ConfigBatchWriteParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("config/batchWrite", params).await
}
/// Send an `account/logout` JSON-RPC request.
pub async fn send_logout_account_request(&mut self) -> anyhow::Result<i64> {
self.send_request("account/logout", None).await
}
/// Send an `account/login/start` JSON-RPC request for API key login.
pub async fn send_login_account_api_key_request(
&mut self,
api_key: &str,
) -> anyhow::Result<i64> {
let params = serde_json::json!({
"type": "apiKey",
"apiKey": api_key,
});
self.send_request("account/login/start", Some(params)).await
}
/// Send an `account/login/start` JSON-RPC request for ChatGPT login.
pub async fn send_login_account_chatgpt_request(&mut self) -> anyhow::Result<i64> {
let params = serde_json::json!({
"type": "chatgpt"
});
self.send_request("account/login/start", Some(params)).await
}
/// Send an `account/login/cancel` JSON-RPC request.
pub async fn send_cancel_login_account_request(
&mut self,
params: CancelLoginAccountParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("account/login/cancel", params).await
}
/// Send a `fuzzyFileSearch` JSON-RPC request.
pub async fn send_fuzzy_file_search_request(
&mut self,
query: &str,
roots: Vec<String>,
cancellation_token: Option<String>,
) -> anyhow::Result<i64> {
let mut params = serde_json::json!({
"query": query,
"roots": roots,
});
if let Some(token) = cancellation_token {
params["cancellationToken"] = serde_json::json!(token);
}
self.send_request("fuzzyFileSearch", Some(params)).await
}
async fn send_request(
&mut self,
method: &str,
params: Option<serde_json::Value>,
) -> anyhow::Result<i64> {
let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed);
let message = JSONRPCMessage::Request(JSONRPCRequest {
id: RequestId::Integer(request_id),
method: method.to_string(),
params,
});
self.send_jsonrpc_message(message).await?;
Ok(request_id)
}
pub async fn send_response(
&mut self,
id: RequestId,
result: serde_json::Value,
) -> anyhow::Result<()> {
self.send_jsonrpc_message(JSONRPCMessage::Response(JSONRPCResponse { id, result }))
.await
}
pub async fn send_notification(
&mut self,
notification: ClientNotification,
) -> anyhow::Result<()> {
let value = serde_json::to_value(notification)?;
self.send_jsonrpc_message(JSONRPCMessage::Notification(JSONRPCNotification {
method: value
.get("method")
.and_then(|m| m.as_str())
.ok_or_else(|| anyhow::format_err!("notification missing method field"))?
.to_string(),
params: value.get("params").cloned(),
}))
.await
}
async fn send_jsonrpc_message(&mut self, message: JSONRPCMessage) -> anyhow::Result<()> {
eprintln!("writing message to stdin: {message:?}");
let payload = serde_json::to_string(&message)?;
self.stdin.write_all(payload.as_bytes()).await?;
self.stdin.write_all(b"\n").await?;
self.stdin.flush().await?;
Ok(())
}
async fn read_jsonrpc_message(&mut self) -> anyhow::Result<JSONRPCMessage> {
let mut line = String::new();
self.stdout.read_line(&mut line).await?;
let message = serde_json::from_str::<JSONRPCMessage>(&line)?;
eprintln!("read message from stdout: {message:?}");
Ok(message)
}
pub async fn read_stream_until_request_message(&mut self) -> anyhow::Result<ServerRequest> {
eprintln!("in read_stream_until_request_message()");
loop {
let message = self.read_jsonrpc_message().await?;
match message {
JSONRPCMessage::Notification(notification) => {
eprintln!("notification: {notification:?}");
self.enqueue_user_message(notification);
}
JSONRPCMessage::Request(jsonrpc_request) => {
return jsonrpc_request.try_into().with_context(
|| "failed to deserialize ServerRequest from JSONRPCRequest",
);
}
JSONRPCMessage::Error(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Error: {message:?}");
}
JSONRPCMessage::Response(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Response: {message:?}");
}
}
}
}
pub async fn read_stream_until_response_message(
&mut self,
request_id: RequestId,
) -> anyhow::Result<JSONRPCResponse> {
eprintln!("in read_stream_until_response_message({request_id:?})");
loop {
let message = self.read_jsonrpc_message().await?;
match message {
JSONRPCMessage::Notification(notification) => {
eprintln!("notification: {notification:?}");
self.enqueue_user_message(notification);
}
JSONRPCMessage::Request(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}");
}
JSONRPCMessage::Error(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Error: {message:?}");
}
JSONRPCMessage::Response(jsonrpc_response) => {
if jsonrpc_response.id == request_id {
return Ok(jsonrpc_response);
}
}
}
}
}
pub async fn read_stream_until_error_message(
&mut self,
request_id: RequestId,
) -> anyhow::Result<JSONRPCError> {
loop {
let message = self.read_jsonrpc_message().await?;
match message {
JSONRPCMessage::Notification(notification) => {
eprintln!("notification: {notification:?}");
self.enqueue_user_message(notification);
}
JSONRPCMessage::Request(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}");
}
JSONRPCMessage::Response(_) => {
// Keep scanning; we're waiting for an error with matching id.
}
JSONRPCMessage::Error(err) => {
if err.id == request_id {
return Ok(err);
}
}
}
}
}
pub async fn read_stream_until_notification_message(
&mut self,
method: &str,
) -> anyhow::Result<JSONRPCNotification> {
eprintln!("in read_stream_until_notification_message({method})");
if let Some(notification) = self.take_pending_notification_by_method(method) {
return Ok(notification);
}
loop {
let message = self.read_jsonrpc_message().await?;
match message {
JSONRPCMessage::Notification(notification) => {
if notification.method == method {
return Ok(notification);
}
self.enqueue_user_message(notification);
}
JSONRPCMessage::Request(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}");
}
JSONRPCMessage::Error(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Error: {message:?}");
}
JSONRPCMessage::Response(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Response: {message:?}");
}
}
}
}
fn take_pending_notification_by_method(&mut self, method: &str) -> Option<JSONRPCNotification> {
if let Some(pos) = self
.pending_user_messages
.iter()
.position(|notification| notification.method == method)
{
return self.pending_user_messages.remove(pos);
}
None
}
fn enqueue_user_message(&mut self, notification: JSONRPCNotification) {
if notification.method == "codex/event/user_message" {
self.pending_user_messages.push_back(notification);
}
}
}
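// Sketch of the (key, Some(value)) / (key, None) override convention that
// `new_with_env` applies: `Some` sets the variable on the child only, `None`
// removes it. (`printenv` is just a placeholder program name.)
#[cfg(test)]
mod env_override_sketch {
    use std::ffi::OsStr;
    use std::process::Command;

    #[test]
    fn overrides_touch_only_the_child_environment() {
        let mut cmd = Command::new("printenv");
        let overrides: &[(&str, Option<&str>)] =
            &[("CODEX_HOME", Some("/tmp/home")), ("OPENAI_API_KEY", None)];
        for (k, v) in overrides {
            match v {
                Some(val) => {
                    cmd.env(k, val);
                }
                None => {
                    cmd.env_remove(k);
                }
            }
        }
        // The child's recorded env shows the explicit removal; the parent
        // process environment is untouched because nothing was spawned.
        assert!(
            cmd.get_envs()
                .any(|(k, v)| k == OsStr::new("OPENAI_API_KEY") && v.is_none())
        );
    }
}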

View File

@@ -1,66 +0,0 @@
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::Respond;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
/// Create a mock server that will provide the responses, in order, for
/// requests to the `/v1/chat/completions` endpoint.
pub async fn create_mock_chat_completions_server(responses: Vec<String>) -> MockServer {
let server = MockServer::start().await;
let num_calls = responses.len();
let seq_responder = SeqResponder {
num_calls: AtomicUsize::new(0),
responses,
};
Mock::given(method("POST"))
.and(path("/v1/chat/completions"))
.respond_with(seq_responder)
.expect(num_calls as u64)
.mount(&server)
.await;
server
}
/// Same as `create_mock_chat_completions_server` but does not enforce an
/// expectation on the number of calls.
pub async fn create_mock_chat_completions_server_unchecked(responses: Vec<String>) -> MockServer {
let server = MockServer::start().await;
let seq_responder = SeqResponder {
num_calls: AtomicUsize::new(0),
responses,
};
Mock::given(method("POST"))
.and(path("/v1/chat/completions"))
.respond_with(seq_responder)
.mount(&server)
.await;
server
}
struct SeqResponder {
num_calls: AtomicUsize,
responses: Vec<String>,
}
impl Respond for SeqResponder {
fn respond(&self, _: &wiremock::Request) -> ResponseTemplate {
let call_num = self.num_calls.fetch_add(1, Ordering::SeqCst);
match self.responses.get(call_num) {
Some(response) => ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(response.clone(), "text/event-stream"),
None => panic!("no response for {call_num}"),
}
}
}
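// Counter sketch for `SeqResponder`: `fetch_add` returns the pre-increment
// value, so the first request reads `responses[0]`, the second `responses[1]`,
// and so on.
#[cfg(test)]
mod seq_counter_sketch {
    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering;

    #[test]
    fn fetch_add_yields_sequential_indices() {
        let num_calls = AtomicUsize::new(0);
        assert_eq!(num_calls.fetch_add(1, Ordering::SeqCst), 0);
        assert_eq!(num_calls.fetch_add(1, Ordering::SeqCst), 1);
        assert_eq!(num_calls.fetch_add(1, Ordering::SeqCst), 2);
    }
}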

View File

@@ -1,135 +0,0 @@
use serde_json::json;
use std::path::Path;
pub fn create_shell_command_sse_response(
command: Vec<String>,
workdir: Option<&Path>,
timeout_ms: Option<u64>,
call_id: &str,
) -> anyhow::Result<String> {
// The `arguments` for the `shell_command` tool is a serialized JSON object.
let command_str = shlex::try_join(command.iter().map(String::as_str))?;
let tool_call_arguments = serde_json::to_string(&json!({
"command": command_str,
"workdir": workdir.map(|w| w.to_string_lossy()),
"timeout_ms": timeout_ms
}))?;
let tool_call = json!({
"choices": [
{
"delta": {
"tool_calls": [
{
"id": call_id,
"function": {
"name": "shell_command",
"arguments": tool_call_arguments
}
}
]
},
"finish_reason": "tool_calls"
}
]
});
let sse = format!(
"data: {}\n\ndata: DONE\n\n",
serde_json::to_string(&tool_call)?
);
Ok(sse)
}
pub fn create_final_assistant_message_sse_response(message: &str) -> anyhow::Result<String> {
let assistant_message = json!({
"choices": [
{
"delta": {
"content": message
},
"finish_reason": "stop"
}
]
});
let sse = format!(
"data: {}\n\ndata: DONE\n\n",
serde_json::to_string(&assistant_message)?
);
Ok(sse)
}
pub fn create_apply_patch_sse_response(
patch_content: &str,
call_id: &str,
) -> anyhow::Result<String> {
// Use shell_command to call apply_patch with heredoc format
let command = format!("apply_patch <<'EOF'\n{patch_content}\nEOF");
let tool_call_arguments = serde_json::to_string(&json!({
"command": command
}))?;
let tool_call = json!({
"choices": [
{
"delta": {
"tool_calls": [
{
"id": call_id,
"function": {
"name": "shell_command",
"arguments": tool_call_arguments
}
}
]
},
"finish_reason": "tool_calls"
}
]
});
let sse = format!(
"data: {}\n\ndata: DONE\n\n",
serde_json::to_string(&tool_call)?
);
Ok(sse)
}
pub fn create_exec_command_sse_response(call_id: &str) -> anyhow::Result<String> {
let (cmd, args) = if cfg!(windows) {
("cmd.exe", vec!["/d", "/c", "echo hi"])
} else {
("/bin/sh", vec!["-c", "echo hi"])
};
let command = std::iter::once(cmd.to_string())
.chain(args.into_iter().map(str::to_string))
.collect::<Vec<_>>();
let tool_call_arguments = serde_json::to_string(&json!({
"cmd": command.join(" "),
"yield_time_ms": 500
}))?;
let tool_call = json!({
"choices": [
{
"delta": {
"tool_calls": [
{
"id": call_id,
"function": {
"name": "exec_command",
"arguments": tool_call_arguments
}
}
]
},
"finish_reason": "tool_calls"
}
]
});
let sse = format!(
"data: {}\n\ndata: DONE\n\n",
serde_json::to_string(&tool_call)?
);
Ok(sse)
}
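// Shape sketch for the SSE bodies assembled above: a single `data:` line
// carrying the JSON chunk, a blank line, then this mock's DONE sentinel.
#[cfg(test)]
mod sse_shape_sketch {
    #[test]
    fn shell_command_sse_ends_with_done_sentinel() -> anyhow::Result<()> {
        let sse = super::create_shell_command_sse_response(
            vec!["ls".to_string()],
            None,
            Some(1000),
            "call-1",
        )?;
        assert!(sse.starts_with("data: "));
        assert!(sse.ends_with("data: DONE\n\n"));
        Ok(())
    }
}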

View File

@@ -1,89 +0,0 @@
use anyhow::Result;
use codex_protocol::ConversationId;
use codex_protocol::protocol::GitInfo;
use codex_protocol::protocol::SessionMeta;
use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::SessionSource;
use serde_json::json;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use uuid::Uuid;
/// Create a minimal rollout file under `CODEX_HOME/sessions/YYYY/MM/DD/`.
///
/// - `filename_ts` is the filename timestamp component in `YYYY-MM-DDThh-mm-ss` format.
/// - `meta_rfc3339` is the envelope timestamp used in JSON lines.
/// - `preview` is the user message preview text.
/// - `model_provider` optionally sets the provider in the session meta payload.
///
/// Returns the generated conversation/session UUID as a string.
pub fn create_fake_rollout(
codex_home: &Path,
filename_ts: &str,
meta_rfc3339: &str,
preview: &str,
model_provider: Option<&str>,
git_info: Option<GitInfo>,
) -> Result<String> {
let uuid = Uuid::new_v4();
let uuid_str = uuid.to_string();
let conversation_id = ConversationId::from_string(&uuid_str)?;
// sessions/YYYY/MM/DD derived from filename_ts (YYYY-MM-DDThh-mm-ss)
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
let dir = codex_home.join("sessions").join(year).join(month).join(day);
fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
// Build JSONL lines
let meta = SessionMeta {
id: conversation_id,
timestamp: meta_rfc3339.to_string(),
cwd: PathBuf::from("/"),
originator: "codex".to_string(),
cli_version: "0.0.0".to_string(),
instructions: None,
source: SessionSource::Cli,
model_provider: model_provider.map(str::to_string),
};
let payload = serde_json::to_value(SessionMetaLine {
meta,
git: git_info,
})?;
let lines = [
json!({
"timestamp": meta_rfc3339,
"type": "session_meta",
"payload": payload
})
.to_string(),
json!({
"timestamp": meta_rfc3339,
"type":"response_item",
"payload": {
"type":"message",
"role":"user",
"content":[{"type":"input_text","text": preview}]
}
})
.to_string(),
json!({
"timestamp": meta_rfc3339,
"type":"event_msg",
"payload": {
"type":"user_message",
"message": preview,
"kind": "plain"
}
})
.to_string(),
];
fs::write(file_path, lines.join("\n") + "\n")?;
Ok(uuid_str)
}
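// Path-derivation sketch: the first ten characters of `filename_ts`
// (YYYY-MM-DDThh-mm-ss) provide the year/month/day directory components used
// to build `sessions/YYYY/MM/DD` above.
#[cfg(test)]
mod rollout_path_sketch {
    #[test]
    fn filename_ts_slices_to_date_parts() {
        let filename_ts = "2025-09-10T23-42-57";
        assert_eq!(&filename_ts[0..4], "2025");
        assert_eq!(&filename_ts[5..7], "09");
        assert_eq!(&filename_ts[8..10], "10");
    }
}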

View File

@@ -1,230 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::GetAuthStatusResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::LoginApiKeyResponse;
use codex_app_server_protocol::RequestId;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
fn create_config_toml_custom_provider(
codex_home: &Path,
requires_openai_auth: bool,
) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let requires_line = if requires_openai_auth {
"requires_openai_auth = true\n"
} else {
""
};
let contents = format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "http://127.0.0.1:0/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
{requires_line}
"#
);
std::fs::write(config_toml, contents)
}
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
"#,
)
}
fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let contents = format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
forced_login_method = "{forced_method}"
"#
);
std::fs::write(config_toml, contents)
}
async fn login_with_api_key_via_request(mcp: &mut McpProcess, api_key: &str) -> Result<()> {
let request_id = mcp
.send_login_api_key_request(LoginApiKeyParams {
api_key: api_key.to_string(),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let _: LoginApiKeyResponse = to_response(resp)?;
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_no_auth() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_get_auth_status_request(GetAuthStatusParams {
include_token: Some(true),
refresh_token: Some(false),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(resp)?;
assert_eq!(status.auth_method, None, "expected no auth method");
assert_eq!(status.auth_token, None, "expected no token");
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
let request_id = mcp
.send_get_auth_status_request(GetAuthStatusParams {
include_token: Some(true),
refresh_token: Some(false),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(resp)?;
assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
assert_eq!(status.auth_token, Some("sk-test-key".to_string()));
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key_when_auth_not_required() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml_custom_provider(codex_home.path(), false)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
let request_id = mcp
.send_get_auth_status_request(GetAuthStatusParams {
include_token: Some(true),
refresh_token: Some(false),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(resp)?;
assert_eq!(status.auth_method, None, "expected no auth method");
assert_eq!(status.auth_token, None, "expected no token");
assert_eq!(
status.requires_openai_auth,
Some(false),
"requires_openai_auth should be false",
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key_no_include_token() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
// Build params via struct so None field is omitted in wire JSON.
let params = GetAuthStatusParams {
include_token: None,
refresh_token: Some(false),
};
let request_id = mcp.send_get_auth_status_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(resp)?;
assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
assert!(status.auth_token.is_none(), "token must be omitted");
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn login_api_key_rejected_when_forced_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml_forced_login(codex_home.path(), "chatgpt")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_login_api_key_request(LoginApiKeyParams {
api_key: "sk-test-key".to_string(),
})
.await?;
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(
err.error.message,
"API key login is disabled. Use ChatGPT login instead."
);
Ok(())
}

View File

@@ -1,508 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_command_sse_response;
use app_test_support::format_with_current_shell;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RemoveConversationListenerParams;
use codex_app_server_protocol::RemoveConversationSubscriptionResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::SendUserTurnResponse;
use codex_app_server_protocol::ServerRequest;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use pretty_assertions::assert_eq;
use std::env;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
}
let tmp = TempDir::new()?;
// Temporary Codex home with config pointing at the mock server.
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
let working_directory = tmp.path().join("workdir");
std::fs::create_dir(&working_directory)?;
// Create a mock model server with one canned response per model request.
// Two requests are expected for the single user message: a shell command
// tool call, then the final assistant message.
let responses = vec![
create_shell_command_sse_response(
vec!["ls".to_string()],
Some(&working_directory),
Some(5000),
"call1234",
)?,
create_final_assistant_message_sse_response("Enjoy your new git repo!")?,
];
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri())?;
// Start MCP server and initialize.
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// 1) newConversation
let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
cwd: Some(working_directory.to_string_lossy().into_owned()),
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let new_conv_resp = to_response::<NewConversationResponse>(new_conv_resp)?;
let NewConversationResponse {
conversation_id,
model,
reasoning_effort: _,
rollout_path: _,
} = new_conv_resp;
assert_eq!(model, "mock-model");
// 2) addConversationListener
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
let add_listener_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??;
let AddConversationSubscriptionResponse { subscription_id } =
to_response::<AddConversationSubscriptionResponse>(add_listener_resp)?;
// 3) sendUserMessage (should trigger notifications; we only validate an OK response)
let send_user_id = mcp
.send_send_user_message_request(SendUserMessageParams {
conversation_id,
            items: vec![InputItem::Text {
                text: "text".to_string(),
            }],
})
.await?;
let send_user_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_user_id)),
)
.await??;
let SendUserMessageResponse {} = to_response::<SendUserMessageResponse>(send_user_resp)?;
// Verify the task_finished notification is received.
// Note this also ensures that the final request to the server was made.
let task_finished_notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
let serde_json::Value::Object(map) = task_finished_notification
.params
.expect("notification should have params")
else {
panic!("task_finished_notification should have params");
};
assert_eq!(
map.get("conversationId")
.expect("should have conversationId"),
&serde_json::Value::String(conversation_id.to_string())
);
// 4) removeConversationListener
let remove_listener_id = mcp
.send_remove_conversation_listener_request(RemoveConversationListenerParams {
subscription_id,
})
.await?;
let remove_listener_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(remove_listener_id)),
)
.await??;
let RemoveConversationSubscriptionResponse {} = to_response(remove_listener_resp)?;
Ok(())
}
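// The send-request/await-response pairing above repeats throughout these
// tests. A minimal sketch of the shared pattern (illustrative only; it
// assumes `to_response` stays generic over the deserialized response type
// and that request ids are plain integers):
//
// async fn round_trip<R: serde::de::DeserializeOwned>(
//     mcp: &mut McpProcess,
//     request_id: i64,
// ) -> Result<R> {
//     let resp: JSONRPCResponse = timeout(
//         DEFAULT_READ_TIMEOUT,
//         mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
//     )
//     .await??;
//     to_response::<R>(resp)
// }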
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
}
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
let working_directory = tmp.path().join("workdir");
std::fs::create_dir(&working_directory)?;
    // Mock server requests a python shell call in each of the two turns,
    // each followed by a final assistant message.
let responses = vec![
create_shell_command_sse_response(
vec![
"python3".to_string(),
"-c".to_string(),
"print(42)".to_string(),
],
Some(&working_directory),
Some(5000),
"call1",
)?,
create_final_assistant_message_sse_response("done 1")?,
create_shell_command_sse_response(
vec![
"python3".to_string(),
"-c".to_string(),
"print(42)".to_string(),
],
Some(&working_directory),
Some(5000),
"call2",
)?,
create_final_assistant_message_sse_response("done 2")?,
];
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri())?;
// Start MCP server and initialize.
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    // 1) Start conversation; approval_policy is "untrusted" via config.toml
let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
cwd: Some(working_directory.to_string_lossy().into_owned()),
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
} = to_response::<NewConversationResponse>(new_conv_resp)?;
// 2) addConversationListener
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
let _: AddConversationSubscriptionResponse = to_response::<AddConversationSubscriptionResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??,
)?;
// 3) sendUserMessage triggers a shell call; approval policy is Untrusted so we should get an elicitation
let send_user_id = mcp
.send_send_user_message_request(SendUserMessageParams {
conversation_id,
            items: vec![InputItem::Text {
                text: "run python".to_string(),
            }],
})
.await?;
let _send_user_resp: SendUserMessageResponse = to_response::<SendUserMessageResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_user_id)),
)
.await??,
)?;
// Expect an ExecCommandApproval request (elicitation)
let request = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ExecCommandApproval { request_id, params } = request else {
panic!("expected ExecCommandApproval request, got: {request:?}");
};
assert_eq!(
ExecCommandApprovalParams {
conversation_id,
call_id: "call1".to_string(),
command: format_with_current_shell("python3 -c 'print(42)'"),
cwd: working_directory.clone(),
reason: None,
risk: None,
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "python3 -c 'print(42)'".to_string()
}],
},
params
);
// Approve so the first turn can complete
mcp.send_response(
request_id,
serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
)
.await?;
// Wait for first TaskComplete
let _ = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
// 4) sendUserTurn with approval_policy=never should run without elicitation
let send_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
            items: vec![InputItem::Text {
                text: "run python again".to_string(),
            }],
cwd: working_directory.clone(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
})
.await?;
// Acknowledge sendUserTurn
let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
)
.await??,
)?;
// Ensure we do NOT receive an ExecCommandApproval request before the task completes.
// If any Request is seen while waiting for task_complete, the helper will error and the test fails.
let _ = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
}
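// The elicitation reply above is a bare JSON `decision`. For contrast, a
// hedged sketch of the rejection path (assuming `ReviewDecision` exposes a
// `Denied` variant alongside `Approved`):
//
// mcp.send_response(
//     request_id,
//     serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Denied }),
// )
// .await?;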
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<()> {
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
}
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
let workspace_root = tmp.path().join("workspace");
std::fs::create_dir(&workspace_root)?;
let first_cwd = workspace_root.join("turn1");
let second_cwd = workspace_root.join("turn2");
std::fs::create_dir(&first_cwd)?;
std::fs::create_dir(&second_cwd)?;
let responses = vec![
create_shell_command_sse_response(
vec!["echo".to_string(), "first".to_string(), "turn".to_string()],
None,
Some(5000),
"call-first",
)?,
create_final_assistant_message_sse_response("done first")?,
create_shell_command_sse_response(
vec!["echo".to_string(), "second".to_string(), "turn".to_string()],
None,
Some(5000),
"call-second",
)?,
create_final_assistant_message_sse_response("done second")?,
];
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri())?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
cwd: Some(first_cwd.to_string_lossy().into_owned()),
approval_policy: Some(AskForApproval::Never),
sandbox: Some(SandboxMode::WorkspaceWrite),
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id,
model,
..
} = to_response::<NewConversationResponse>(new_conv_resp)?;
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??;
let first_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "first turn".to_string(),
}],
cwd: first_cwd.clone(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::WorkspaceWrite {
writable_roots: vec![first_cwd.clone()],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
},
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
let second_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "second turn".to_string(),
}],
cwd: second_cwd.clone(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::DangerFullAccess,
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_turn_id)),
)
.await??;
let exec_begin_notification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/exec_command_begin"),
)
.await??;
let params = exec_begin_notification
.params
.clone()
.expect("exec_command_begin params");
let event: Event = serde_json::from_value(params).expect("deserialize exec begin event");
let exec_begin = match event.msg {
EventMsg::ExecCommandBegin(exec_begin) => exec_begin,
other => panic!("expected ExecCommandBegin event, got {other:?}"),
};
assert_eq!(
exec_begin.cwd, second_cwd,
"exec turn should run from updated cwd"
);
let expected_command = format_with_current_shell("echo second turn");
assert_eq!(
exec_begin.command, expected_command,
"exec turn should run expected command"
);
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
}
// Helper: minimal config.toml pointing at the mock provider.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "untrusted"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}
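// For a mock server at, say, `http://127.0.0.1:4545` (hypothetical URI), the
// helper above renders roughly:
//
// model = "mock-model"
// approval_policy = "untrusted"
// model_provider = "mock_provider"
// [model_providers.mock_provider]
// name = "Mock provider for test"
// base_url = "http://127.0.0.1:4545/v1"
// wire_api = "chat"
// request_max_retries = 0
// stream_max_retries = 0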

View File

@@ -1,128 +0,0 @@
use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use pretty_assertions::assert_eq;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
// Prepare a temporary Codex home and a separate root with test files.
let codex_home = TempDir::new()?;
let root = TempDir::new()?;
// Create files designed to have deterministic ordering for query "abe".
std::fs::write(root.path().join("abc"), "x")?;
std::fs::write(root.path().join("abcde"), "x")?;
std::fs::write(root.path().join("abexy"), "x")?;
std::fs::write(root.path().join("zzz.txt"), "x")?;
let sub_dir = root.path().join("sub");
std::fs::create_dir_all(&sub_dir)?;
let sub_abce_path = sub_dir.join("abce");
std::fs::write(&sub_abce_path, "x")?;
let sub_abce_rel = sub_abce_path
.strip_prefix(root.path())?
.to_string_lossy()
.to_string();
// Start MCP server and initialize.
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let root_path = root.path().to_string_lossy().to_string();
// Send fuzzyFileSearch request.
let request_id = mcp
.send_fuzzy_file_search_request("abe", vec![root_path.clone()], None)
.await?;
// Read response and verify shape and ordering.
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let value = resp.result;
// The path separator on Windows affects the score.
let expected_score = if cfg!(windows) { 69 } else { 72 };
assert_eq!(
value,
json!({
"files": [
{
"root": root_path.clone(),
"path": "abexy",
"file_name": "abexy",
"score": 88,
"indices": [0, 1, 2],
},
{
"root": root_path.clone(),
"path": "abcde",
"file_name": "abcde",
"score": 74,
"indices": [0, 1, 4],
},
{
"root": root_path.clone(),
"path": sub_abce_rel,
"file_name": "abce",
"score": expected_score,
"indices": [4, 5, 7],
},
]
})
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fuzzy_file_search_accepts_cancellation_token() -> Result<()> {
let codex_home = TempDir::new()?;
let root = TempDir::new()?;
std::fs::write(root.path().join("alpha.txt"), "contents")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let root_path = root.path().to_string_lossy().to_string();
let request_id = mcp
.send_fuzzy_file_search_request("alp", vec![root_path.clone()], None)
.await?;
let request_id_2 = mcp
.send_fuzzy_file_search_request(
"alp",
vec![root_path.clone()],
Some(request_id.to_string()),
)
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id_2)),
)
.await??;
let files = resp
.result
.get("files")
.ok_or_else(|| anyhow!("files key missing"))?
.as_array()
.ok_or_else(|| anyhow!("files not array"))?
.clone();
assert_eq!(files.len(), 1);
assert_eq!(files[0]["root"], root_path);
assert_eq!(files[0]["path"], "alpha.txt");
Ok(())
}
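// The `indices` arrays above mark which characters of `path` matched the
// query. A small self-contained check of that contract (illustrative, not
// part of this file):
fn matched_chars(path: &str, indices: &[usize]) -> String {
    let chars: Vec<char> = path.chars().collect();
    indices.iter().map(|&i| chars[i]).collect()
}
// matched_chars("abexy", &[0, 1, 2]) == "abe"
// matched_chars("abcde", &[0, 1, 4]) == "abe"
// matched_chars("sub/abce", &[4, 5, 7]) == "abe"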

View File

@@ -1,360 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::ListConversationsResponse;
use codex_app_server_protocol::NewConversationParams; // reused for overrides shape
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::ResumeConversationResponse;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::SessionConfiguredNotification;
use codex_core::protocol::EventMsg;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_list_and_resume_conversations() -> Result<()> {
// Prepare a temporary CODEX_HOME with a few fake rollout files.
let codex_home = TempDir::new()?;
create_fake_rollout(
codex_home.path(),
"2025-01-02T12-00-00",
"2025-01-02T12:00:00Z",
"Hello A",
Some("openai"),
None,
)?;
create_fake_rollout(
codex_home.path(),
"2025-01-01T13-00-00",
"2025-01-01T13:00:00Z",
"Hello B",
Some("openai"),
None,
)?;
create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello C",
None,
None,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Request first page with size 2
let req_id = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(2),
cursor: None,
model_providers: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let ListConversationsResponse { items, next_cursor } =
to_response::<ListConversationsResponse>(resp)?;
assert_eq!(items.len(), 2);
// Newest first; preview text should match
assert_eq!(items[0].preview, "Hello A");
assert_eq!(items[1].preview, "Hello B");
assert_eq!(items[0].model_provider, "openai");
assert_eq!(items[1].model_provider, "openai");
assert!(items[0].path.is_absolute());
assert!(next_cursor.is_some());
// Request the next page using the cursor
let req_id2 = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(2),
cursor: next_cursor,
model_providers: None,
})
.await?;
let resp2: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id2)),
)
.await??;
let ListConversationsResponse {
items: items2,
next_cursor: next2,
..
} = to_response::<ListConversationsResponse>(resp2)?;
assert_eq!(items2.len(), 1);
assert_eq!(items2[0].preview, "Hello C");
assert_eq!(items2[0].model_provider, "openai");
assert_eq!(next2, None);
// Add a conversation with an explicit non-OpenAI provider for filter tests.
create_fake_rollout(
codex_home.path(),
"2025-01-01T11-30-00",
"2025-01-01T11:30:00Z",
"Hello TP",
Some("test-provider"),
None,
)?;
// Filtering by model provider should return only matching sessions.
let filter_req_id = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(10),
cursor: None,
model_providers: Some(vec!["test-provider".to_string()]),
})
.await?;
let filter_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(filter_req_id)),
)
.await??;
let ListConversationsResponse {
items: filtered_items,
next_cursor: filtered_next,
} = to_response::<ListConversationsResponse>(filter_resp)?;
assert_eq!(filtered_items.len(), 1);
assert_eq!(filtered_next, None);
assert_eq!(filtered_items[0].preview, "Hello TP");
assert_eq!(filtered_items[0].model_provider, "test-provider");
// Empty filter should include every session regardless of provider metadata.
let unfiltered_req_id = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(10),
cursor: None,
model_providers: Some(Vec::new()),
})
.await?;
let unfiltered_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(unfiltered_req_id)),
)
.await??;
let ListConversationsResponse {
items: unfiltered_items,
next_cursor: unfiltered_next,
} = to_response::<ListConversationsResponse>(unfiltered_resp)?;
assert_eq!(unfiltered_items.len(), 4);
assert!(unfiltered_next.is_none());
let empty_req_id = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(10),
cursor: None,
model_providers: Some(vec!["other".to_string()]),
})
.await?;
let empty_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(empty_req_id)),
)
.await??;
let ListConversationsResponse {
items: empty_items,
next_cursor: empty_next,
} = to_response::<ListConversationsResponse>(empty_resp)?;
assert!(empty_items.is_empty());
assert!(empty_next.is_none());
let first_item = &items[0];
// Now resume one of the sessions from an explicit rollout path.
let resume_req_id = mcp
.send_resume_conversation_request(ResumeConversationParams {
path: Some(first_item.path.clone()),
conversation_id: None,
history: None,
overrides: Some(NewConversationParams {
model: Some("o3".to_string()),
..Default::default()
}),
})
.await?;
// Expect a codex/event notification with msg.type == sessionConfigured
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("sessionConfigured"),
)
.await??;
let session_configured: ServerNotification = notification.try_into()?;
let ServerNotification::SessionConfigured(SessionConfiguredNotification {
model,
rollout_path,
initial_messages: session_initial_messages,
..
}) = session_configured
else {
unreachable!("expected sessionConfigured notification");
};
assert_eq!(model, "o3");
assert_eq!(rollout_path, first_item.path.clone());
let session_initial_messages = session_initial_messages
.expect("expected initial messages when resuming from rollout path");
match session_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, first_item.preview.clone());
}
other => panic!("unexpected initial messages from rollout resume: {other:#?}"),
}
// Then the response for resumeConversation
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_req_id)),
)
.await??;
let ResumeConversationResponse {
conversation_id,
model: resume_model,
initial_messages: response_initial_messages,
..
} = to_response::<ResumeConversationResponse>(resume_resp)?;
// conversation id should be a valid UUID
assert!(!conversation_id.to_string().is_empty());
assert_eq!(resume_model, "o3");
let response_initial_messages =
response_initial_messages.expect("expected initial messages in resume response");
match response_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, first_item.preview.clone());
}
other => panic!("unexpected initial messages in resume response: {other:#?}"),
}
// Resuming with only a conversation id should locate the rollout automatically.
let resume_by_id_req_id = mcp
.send_resume_conversation_request(ResumeConversationParams {
path: None,
conversation_id: Some(first_item.conversation_id),
history: None,
overrides: Some(NewConversationParams {
model: Some("o3".to_string()),
..Default::default()
}),
})
.await?;
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("sessionConfigured"),
)
.await??;
let session_configured: ServerNotification = notification.try_into()?;
let ServerNotification::SessionConfigured(SessionConfiguredNotification {
model,
rollout_path,
initial_messages: session_initial_messages,
..
}) = session_configured
else {
unreachable!("expected sessionConfigured notification");
};
assert_eq!(model, "o3");
assert_eq!(rollout_path, first_item.path.clone());
let session_initial_messages = session_initial_messages
.expect("expected initial messages when resuming from conversation id");
match session_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, first_item.preview.clone());
}
other => panic!("unexpected initial messages from conversation id resume: {other:#?}"),
}
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_by_id_req_id)),
)
.await??;
let ResumeConversationResponse {
conversation_id: by_id_conversation_id,
model: by_id_model,
initial_messages: by_id_initial_messages,
..
} = to_response::<ResumeConversationResponse>(resume_resp)?;
assert!(!by_id_conversation_id.to_string().is_empty());
assert_eq!(by_id_model, "o3");
let by_id_initial_messages = by_id_initial_messages
.expect("expected initial messages when resuming from conversation id response");
match by_id_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, first_item.preview.clone());
}
other => {
panic!("unexpected initial messages in conversation id resume response: {other:#?}")
}
}
// Resuming with explicit history should succeed even without a stored rollout.
let fork_history_text = "Hello from history";
let history = vec![ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: fork_history_text.to_string(),
}],
}];
let resume_with_history_req_id = mcp
.send_resume_conversation_request(ResumeConversationParams {
path: None,
conversation_id: None,
history: Some(history),
overrides: Some(NewConversationParams {
model: Some("o3".to_string()),
..Default::default()
}),
})
.await?;
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("sessionConfigured"),
)
.await??;
let session_configured: ServerNotification = notification.try_into()?;
let ServerNotification::SessionConfigured(SessionConfiguredNotification {
model,
initial_messages: session_initial_messages,
..
}) = session_configured
else {
unreachable!("expected sessionConfigured notification");
};
assert_eq!(model, "o3");
assert!(
session_initial_messages.as_ref().is_none_or(Vec::is_empty),
"expected no initial messages when resuming from explicit history but got {session_initial_messages:#?}"
);
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_with_history_req_id)),
)
.await??;
let ResumeConversationResponse {
conversation_id: history_conversation_id,
model: history_model,
initial_messages: history_initial_messages,
..
} = to_response::<ResumeConversationResponse>(resume_resp)?;
assert!(!history_conversation_id.to_string().is_empty());
assert_eq!(history_model, "o3");
assert!(
history_initial_messages.as_ref().is_none_or(Vec::is_empty),
"expected no initial messages in resume response when history is provided but got {history_initial_messages:#?}"
);
Ok(())
}
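// Cursor pagination sketch (illustrative): feed each page's `next_cursor`
// back into the next request until the server returns `None`.
//
// let mut cursor = None;
// loop {
//     let req_id = mcp
//         .send_list_conversations_request(ListConversationsParams {
//             page_size: Some(2),
//             cursor: cursor.take(),
//             model_providers: None,
//         })
//         .await?;
//     let resp: JSONRPCResponse = timeout(
//         DEFAULT_READ_TIMEOUT,
//         mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
//     )
//     .await??;
//     let ListConversationsResponse { items, next_cursor } = to_response(resp)?;
//     // ...consume `items`...
//     match next_cursor {
//         Some(next) => cursor = Some(next),
//         None => break,
//     }
// }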

View File

@@ -1,206 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::CancelLoginChatGptResponse;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::GetAuthStatusResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::LogoutChatGptResponse;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
// Helper to create a config.toml; mirrors create_conversation.rs
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "http://127.0.0.1:0/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#,
)
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn logout_chatgpt_removes_auth() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
login_with_api_key(
codex_home.path(),
"sk-test-key",
AuthCredentialsStoreMode::File,
)?;
assert!(codex_home.path().join("auth.json").exists());
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let id = mcp.send_logout_chat_gpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(id)),
)
.await??;
let _ok: LogoutChatGptResponse = to_response(resp)?;
assert!(
!codex_home.path().join("auth.json").exists(),
"auth.json should be deleted"
);
// Verify status reflects signed-out state.
let status_id = mcp
.send_get_auth_status_request(GetAuthStatusParams {
include_token: Some(true),
refresh_token: Some(false),
})
.await?;
let status_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(status_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(status_resp)?;
assert_eq!(status.auth_method, None);
assert_eq!(status.auth_token, None);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_and_cancel_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let login_id = mcp.send_login_chat_gpt_request().await?;
let login_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(login_id)),
)
.await??;
let login: LoginChatGptResponse = to_response(login_resp)?;
let cancel_id = mcp
.send_cancel_login_chat_gpt_request(CancelLoginChatGptParams {
login_id: login.login_id,
})
.await?;
let cancel_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
)
.await??;
let _ok: CancelLoginChatGptResponse = to_response(cancel_resp)?;
// Optionally observe the completion notification; do not fail if it races.
let maybe_note = timeout(
Duration::from_secs(2),
mcp.read_stream_until_notification_message("codex/event/login_chat_gpt_complete"),
)
.await;
if maybe_note.is_err() {
eprintln!("warning: did not observe login_chat_gpt_complete notification after cancel");
}
Ok(())
}
fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let contents = format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
forced_login_method = "{forced_method}"
"#
);
std::fs::write(config_toml, contents)
}
fn create_config_toml_forced_workspace(
codex_home: &Path,
workspace_id: &str,
) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let contents = format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
forced_chatgpt_workspace_id = "{workspace_id}"
"#
);
std::fs::write(config_toml, contents)
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn login_chatgpt_rejected_when_forced_api() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml_forced_login(codex_home.path(), "api")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_chat_gpt_request().await?;
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(
err.error.message,
"ChatGPT login is disabled. Use API key login instead."
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_chatgpt_includes_forced_workspace_query_param() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml_forced_workspace(codex_home.path(), "ws-forced")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_chat_gpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let login: LoginChatGptResponse = to_response(resp)?;
assert!(
login.auth_url.contains("allowed_workspace_id=ws-forced"),
"auth URL should include forced workspace"
);
Ok(())
}

View File

@@ -1,14 +0,0 @@
mod archive_conversation;
mod auth;
mod codex_message_processor_flow;
mod config;
mod create_conversation;
mod fuzzy_file_search;
mod interrupt;
mod list_resume;
mod login;
mod send_message;
mod set_default_model;
mod user_agent;
mod user_info;
mod v2;

View File

@@ -1,396 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_protocol::ConversationId;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::RawResponseItemEvent;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn test_send_message_success() -> Result<()> {
    // Spin up a mock completions server that immediately ends each Codex turn.
    // Two turns hit the mock model (one per sendUserMessage call below), so
    // provide two SSE responses.
let responses = vec![
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
];
let server = create_mock_chat_completions_server(responses).await;
// Create a temporary Codex home with config pointing at the mock server.
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
// Start MCP server process and initialize.
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a conversation using the new wire API.
let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
    } = to_response(new_conv_resp)?;
// 2) addConversationListener
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
let add_listener_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??;
let AddConversationSubscriptionResponse { subscription_id: _ } =
        to_response(add_listener_resp)?;
// Now exercise sendUserMessage twice.
send_message("Hello", conversation_id, &mut mcp).await?;
send_message("Hello again", conversation_id, &mut mcp).await?;
Ok(())
}
#[expect(clippy::expect_used)]
async fn send_message(
message: &str,
conversation_id: ConversationId,
mcp: &mut McpProcess,
) -> Result<()> {
// Now exercise sendUserMessage.
let send_id = mcp
.send_send_user_message_request(SendUserMessageParams {
conversation_id,
items: vec![InputItem::Text {
text: message.to_string(),
}],
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_id)),
)
.await??;
let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(response)?;
// Verify the task_finished notification is received.
// Note this also ensures that the final request to the server was made.
let task_finished_notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
let serde_json::Value::Object(map) = task_finished_notification
.params
.expect("notification should have params")
else {
panic!("task_finished_notification should have params");
};
assert_eq!(
map.get("conversationId")
.expect("should have conversationId"),
&serde_json::Value::String(conversation_id.to_string())
);
let raw_attempt = tokio::time::timeout(
std::time::Duration::from_millis(200),
mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
)
.await;
assert!(
raw_attempt.is_err(),
"unexpected raw item notification when not opted in"
);
Ok(())
}
#[tokio::test]
async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_chat_completions_server(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
developer_instructions: Some("Use the test harness tools.".to_string()),
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
    } = to_response(new_conv_resp)?;
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: true,
})
.await?;
let add_listener_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??;
let AddConversationSubscriptionResponse { subscription_id: _ } =
        to_response(add_listener_resp)?;
let send_id = mcp
.send_send_user_message_request(SendUserMessageParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
}],
})
.await?;
let developer = read_raw_response_item(&mut mcp, conversation_id).await;
assert_developer_message(&developer, "Use the test harness tools.");
let instructions = read_raw_response_item(&mut mcp, conversation_id).await;
assert_instructions_message(&instructions);
let environment = read_raw_response_item(&mut mcp, conversation_id).await;
assert_environment_message(&environment);
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_id)),
)
.await??;
let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(response)?;
let user_message = read_raw_response_item(&mut mcp, conversation_id).await;
assert_user_message(&user_message, "Hello");
let assistant_message = read_raw_response_item(&mut mcp, conversation_id).await;
assert_assistant_message(&assistant_message, "Done");
let _ = tokio::time::timeout(
std::time::Duration::from_millis(250),
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await;
Ok(())
}
#[tokio::test]
async fn test_send_message_session_not_found() -> Result<()> {
// Start MCP without creating a Codex session
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let unknown = ConversationId::new();
let req_id = mcp
.send_send_user_message_request(SendUserMessageParams {
conversation_id: unknown,
items: vec![InputItem::Text {
text: "ping".to_string(),
}],
})
.await?;
// Expect an error response for unknown conversation.
let err = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(req_id)),
)
.await??;
assert_eq!(err.id, RequestId::Integer(req_id));
Ok(())
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}
#[expect(clippy::expect_used)]
async fn read_raw_response_item(
mcp: &mut McpProcess,
conversation_id: ConversationId,
) -> ResponseItem {
loop {
let raw_notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
)
.await
.expect("codex/event/raw_response_item notification timeout")
.expect("codex/event/raw_response_item notification resp");
let serde_json::Value::Object(params) = raw_notification
.params
.expect("codex/event/raw_response_item should have params")
else {
panic!("codex/event/raw_response_item should have params");
};
let conversation_id_value = params
.get("conversationId")
.and_then(|value| value.as_str())
.expect("raw response item should include conversationId");
assert_eq!(
conversation_id_value,
conversation_id.to_string(),
"raw response item conversation mismatch"
);
let msg_value = params
.get("msg")
.cloned()
.expect("raw response item should include msg payload");
// Ghost snapshots are produced concurrently and may arrive before the model reply.
let event: RawResponseItemEvent =
serde_json::from_value(msg_value).expect("deserialize raw response item");
if !matches!(event.item, ResponseItem::GhostSnapshot { .. }) {
return event.item;
}
}
}
fn assert_instructions_message(item: &ResponseItem) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "user");
let texts = content_texts(content);
let is_instructions = texts
.iter()
.any(|text| text.starts_with("# AGENTS.md instructions for "));
assert!(
is_instructions,
"expected instructions message, got {texts:?}"
);
}
other => panic!("expected instructions message, got {other:?}"),
}
}
fn assert_developer_message(item: &ResponseItem, expected_text: &str) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "developer");
let texts = content_texts(content);
assert_eq!(
texts,
vec![expected_text],
"expected developer instructions message, got {texts:?}"
);
}
other => panic!("expected developer instructions message, got {other:?}"),
}
}
fn assert_environment_message(item: &ResponseItem) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "user");
let texts = content_texts(content);
assert!(
texts
.iter()
.any(|text| text.contains("<environment_context>")),
"expected environment context message, got {texts:?}"
);
}
other => panic!("expected environment message, got {other:?}"),
}
}
fn assert_user_message(item: &ResponseItem, expected_text: &str) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "user");
let texts = content_texts(content);
assert_eq!(texts, vec![expected_text]);
}
other => panic!("expected user message, got {other:?}"),
}
}
fn assert_assistant_message(item: &ResponseItem, expected_text: &str) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "assistant");
let texts = content_texts(content);
assert_eq!(texts, vec![expected_text]);
}
other => panic!("expected assistant message, got {other:?}"),
}
}
fn content_texts(content: &[ContentItem]) -> Vec<&str> {
content
.iter()
.filter_map(|item| match item {
ContentItem::InputText { text } | ContentItem::OutputText { text } => {
Some(text.as_str())
}
_ => None,
})
.collect()
}

View File

@@ -1,64 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::SetDefaultModelResponse;
use codex_core::config::ConfigToml;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn set_default_model_persists_overrides() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let params = SetDefaultModelParams {
model: Some("gpt-4.1".to_string()),
reasoning_effort: None,
};
let request_id = mcp.send_set_default_model_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let _: SetDefaultModelResponse = to_response(resp)?;
let config_path = codex_home.path().join("config.toml");
let config_contents = tokio::fs::read_to_string(&config_path).await?;
let config_toml: ConfigToml = toml::from_str(&config_contents)?;
assert_eq!(
ConfigToml {
model: Some("gpt-4.1".to_string()),
model_reasoning_effort: None,
..Default::default()
},
config_toml,
);
Ok(())
}
// Helper to create a config.toml; mirrors create_conversation.rs
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
r#"
model = "gpt-5.1-codex-max"
model_reasoning_effort = "medium"
"#,
)
}

View File

@@ -1,41 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::GetUserAgentResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_user_agent_returns_current_codex_user_agent() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_get_user_agent_request().await?;
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let os_info = os_info::get();
let user_agent = format!(
"codex_cli_rs/0.0.0 ({} {}; {}) {} (codex-app-server-tests; 0.1.0)",
os_info.os_type(),
os_info.version(),
os_info.architecture().unwrap_or("unknown"),
codex_core::terminal::user_agent()
);
let received: GetUserAgentResponse = to_response(response)?;
let expected = GetUserAgentResponse { user_agent };
assert_eq!(received, expected);
Ok(())
}

View File

@@ -1,46 +0,0 @@
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::UserInfoResponse;
use codex_core::auth::AuthCredentialsStoreMode;
use pretty_assertions::assert_eq;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_info_returns_email_from_auth_json() -> Result<()> {
let codex_home = TempDir::new()?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("access")
.refresh_token("refresh")
.email("user@example.com"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_user_info_request().await?;
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: UserInfoResponse = to_response(response)?;
let expected = UserInfoResponse {
alleged_user_email: Some("user@example.com".to_string()),
};
assert_eq!(received, expected);
Ok(())
}

View File

@@ -1,492 +0,0 @@
use anyhow::Result;
use anyhow::bail;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use codex_app_server_protocol::Account;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginAccountResponse;
use codex_app_server_protocol::GetAccountParams;
use codex_app_server_protocol::GetAccountResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginAccountResponse;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use codex_protocol::account::PlanType as AccountPlanType;
use pretty_assertions::assert_eq;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
// Helper to create a minimal config.toml for the app server
#[derive(Default)]
struct CreateConfigTomlParams {
forced_method: Option<String>,
forced_workspace_id: Option<String>,
requires_openai_auth: Option<bool>,
}
fn create_config_toml(codex_home: &Path, params: CreateConfigTomlParams) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let forced_line = if let Some(method) = params.forced_method {
format!("forced_login_method = \"{method}\"\n")
} else {
String::new()
};
let forced_workspace_line = if let Some(ws) = params.forced_workspace_id {
format!("forced_chatgpt_workspace_id = \"{ws}\"\n")
} else {
String::new()
};
    let requires_line = match params.requires_openai_auth {
        Some(true) => "requires_openai_auth = true\n".to_string(),
        _ => String::new(),
    };
let contents = format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
{forced_line}
{forced_workspace_line}
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "http://127.0.0.1:0/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
{requires_line}
"#
);
std::fs::write(config_toml, contents)
}
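// Usage sketch (illustrative): forcing ChatGPT-only login adds a single line
// to the generated config.
//
// create_config_toml(
//     codex_home.path(),
//     CreateConfigTomlParams {
//         forced_method: Some("chatgpt".to_string()),
//         ..Default::default()
//     },
// )?;
// // config.toml now contains: forced_login_method = "chatgpt"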
#[tokio::test]
async fn logout_account_removes_auth_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
login_with_api_key(
codex_home.path(),
"sk-test-key",
AuthCredentialsStoreMode::File,
)?;
assert!(codex_home.path().join("auth.json").exists());
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let id = mcp.send_logout_account_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(id)),
)
.await??;
let _ok: LogoutAccountResponse = to_response(resp)?;
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
assert!(
payload.auth_mode.is_none(),
"auth_method should be None after logout"
);
assert!(
!codex_home.path().join("auth.json").exists(),
"auth.json should be deleted"
);
let get_id = mcp
.send_get_account_request(GetAccountParams {
refresh_token: false,
})
.await?;
let get_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(get_id)),
)
.await??;
let account: GetAccountResponse = to_response(get_resp)?;
assert_eq!(account.account, None);
Ok(())
}
#[tokio::test]
async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let req_id = mcp
.send_login_account_api_key_request("sk-test-key")
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
assert_eq!(login, LoginAccountResponse::ApiKey {});
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/login/completed"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountLoginCompleted(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
    assert_eq!(payload.login_id, None);
    assert!(payload.success);
    assert_eq!(payload.error, None);
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
    assert_eq!(payload.auth_mode, Some(AuthMode::ApiKey));
assert!(codex_home.path().join("auth.json").exists());
Ok(())
}
#[tokio::test]
async fn login_account_api_key_rejected_when_forced_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
forced_method: Some("chatgpt".to_string()),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_login_account_api_key_request("sk-test-key")
.await?;
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(
err.error.message,
"API key login is disabled. Use ChatGPT login instead."
);
Ok(())
}
#[tokio::test]
async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
forced_method: Some("api".to_string()),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_account_chatgpt_request().await?;
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(
err.error.message,
"ChatGPT login is disabled. Use API key login instead."
);
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_account_chatgpt_start() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_account_chatgpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
let LoginAccountResponse::Chatgpt { login_id, auth_url } = login else {
bail!("unexpected login response: {login:?}");
};
assert!(
auth_url.contains("redirect_uri=http%3A%2F%2Flocalhost"),
"auth_url should contain a redirect_uri to localhost"
);
let cancel_id = mcp
.send_cancel_login_account_request(CancelLoginAccountParams {
login_id: login_id.clone(),
})
.await?;
let cancel_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
)
.await??;
let _ok: CancelLoginAccountResponse = to_response(cancel_resp)?;
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/login/completed"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountLoginCompleted(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
pretty_assertions::assert_eq!(payload.login_id, Some(login_id));
    assert!(!payload.success);
assert!(
payload.error.is_some(),
"expected a non-empty error on cancel"
);
let maybe_updated = timeout(
Duration::from_millis(500),
mcp.read_stream_until_notification_message("account/updated"),
)
.await;
assert!(
maybe_updated.is_err(),
"account/updated should not be emitted when login is cancelled"
);
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_account_chatgpt_includes_forced_workspace_query_param() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
forced_workspace_id: Some("ws-forced".to_string()),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_account_chatgpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
let LoginAccountResponse::Chatgpt { auth_url, .. } = login else {
bail!("unexpected login response: {login:?}");
};
assert!(
auth_url.contains("allowed_workspace_id=ws-forced"),
"auth URL should include forced workspace"
);
Ok(())
}
#[tokio::test]
async fn get_account_no_auth() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
..Default::default()
},
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let account: GetAccountResponse = to_response(resp)?;
assert_eq!(account.account, None, "expected no account");
    assert!(account.requires_openai_auth);
Ok(())
}
#[tokio::test]
async fn get_account_with_api_key() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let req_id = mcp
.send_login_account_api_key_request("sk-test-key")
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let _login_ok = to_response::<LoginAccountResponse>(resp)?;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountResponse = to_response(resp)?;
let expected = GetAccountResponse {
account: Some(Account::ApiKey {}),
requires_openai_auth: true,
};
assert_eq!(received, expected);
Ok(())
}
#[tokio::test]
async fn get_account_when_auth_not_required() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(false),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountResponse = to_response(resp)?;
let expected = GetAccountResponse {
account: None,
requires_openai_auth: false,
};
assert_eq!(received, expected);
Ok(())
}
#[tokio::test]
async fn get_account_with_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
..Default::default()
},
)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("access-chatgpt")
.email("user@example.com")
.plan_type("pro"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountResponse = to_response(resp)?;
let expected = GetAccountResponse {
account: Some(Account::Chatgpt {
email: "user@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
requires_openai_auth: true,
};
assert_eq!(received, expected);
Ok(())
}
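// Hypothetical condensed helper, not part of the original file: it distills
// the API-key login handshake exercised above into a single call. Every
// request and stream helper it uses appears verbatim in the tests in this
// file; only the function name is invented for illustration.
async fn login_with_api_key_and_wait(mcp: &mut McpProcess, api_key: &str) -> Result<()> {
    // Kick off the login and wait for the JSON-RPC response acknowledging it.
    let req_id = mcp.send_login_account_api_key_request(api_key).await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
    )
    .await??;
    let _ack: LoginAccountResponse = to_response(resp)?;
    // API-key logins complete synchronously, so both notifications follow.
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("account/login/completed"),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("account/updated"),
    )
    .await??;
    Ok(())
}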

View File

@@ -1,347 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigEdit;
use codex_app_server_protocol::ConfigLayerName;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigReadResponse;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::MergeStrategy;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::WriteStatus;
use pretty_assertions::assert_eq;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
fn write_config(codex_home: &TempDir, contents: &str) -> Result<()> {
Ok(std::fs::write(
codex_home.path().join("config.toml"),
contents,
)?)
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_returns_effective_and_layers() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(
&codex_home,
r#"
model = "gpt-user"
sandbox_mode = "workspace-write"
"#,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ConfigReadResponse {
config,
origins,
layers,
} = to_response(resp)?;
assert_eq!(config.get("model"), Some(&json!("gpt-user")));
assert_eq!(
origins.get("model").expect("origin").name,
ConfigLayerName::User
);
let layers = layers.expect("layers present");
assert_eq!(layers.len(), 2);
assert_eq!(layers[0].name, ConfigLayerName::SessionFlags);
assert_eq!(layers[1].name, ConfigLayerName::User);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(
&codex_home,
r#"
model = "gpt-user"
approval_policy = "on-request"
sandbox_mode = "workspace-write"
[sandbox_workspace_write]
writable_roots = ["/user"]
network_access = true
"#,
)?;
let managed_path = codex_home.path().join("managed_config.toml");
std::fs::write(
&managed_path,
r#"
model = "gpt-system"
approval_policy = "never"
[sandbox_workspace_write]
writable_roots = ["/system"]
"#,
)?;
let managed_path_str = managed_path.display().to_string();
let mut mcp = McpProcess::new_with_env(
codex_home.path(),
&[("CODEX_MANAGED_CONFIG_PATH", Some(&managed_path_str))],
)
.await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ConfigReadResponse {
config,
origins,
layers,
} = to_response(resp)?;
assert_eq!(config.get("model"), Some(&json!("gpt-system")));
assert_eq!(
origins.get("model").expect("origin").name,
ConfigLayerName::System
);
assert_eq!(config.get("approval_policy"), Some(&json!("never")));
assert_eq!(
origins.get("approval_policy").expect("origin").name,
ConfigLayerName::System
);
assert_eq!(config.get("sandbox_mode"), Some(&json!("workspace-write")));
assert_eq!(
origins.get("sandbox_mode").expect("origin").name,
ConfigLayerName::User
);
assert_eq!(
config
.get("sandbox_workspace_write")
.and_then(|v| v.get("writable_roots")),
Some(&json!(["/system"]))
);
assert_eq!(
origins
.get("sandbox_workspace_write.writable_roots.0")
.expect("origin")
.name,
ConfigLayerName::System
);
assert_eq!(
config
.get("sandbox_workspace_write")
.and_then(|v| v.get("network_access")),
Some(&json!(true))
);
assert_eq!(
origins
.get("sandbox_workspace_write.network_access")
.expect("origin")
.name,
ConfigLayerName::User
);
let layers = layers.expect("layers present");
assert_eq!(layers.len(), 3);
assert_eq!(layers[0].name, ConfigLayerName::System);
assert_eq!(layers[1].name, ConfigLayerName::SessionFlags);
assert_eq!(layers[2].name, ConfigLayerName::User);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_value_write_replaces_value() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(
&codex_home,
r#"
model = "gpt-old"
"#,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let read_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
)
.await??;
let read: ConfigReadResponse = to_response(read_resp)?;
let expected_version = read.origins.get("model").map(|m| m.version.clone());
let write_id = mcp
.send_config_value_write_request(ConfigValueWriteParams {
file_path: codex_home.path().join("config.toml").display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-new"),
merge_strategy: MergeStrategy::Replace,
expected_version,
})
.await?;
let write_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(write_id)),
)
.await??;
let write: ConfigWriteResponse = to_response(write_resp)?;
assert_eq!(write.status, WriteStatus::Ok);
assert!(write.overridden_metadata.is_none());
let verify_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
})
.await?;
let verify_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(verify_id)),
)
.await??;
let verify: ConfigReadResponse = to_response(verify_resp)?;
assert_eq!(verify.config.get("model"), Some(&json!("gpt-new")));
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_value_write_rejects_version_conflict() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(
&codex_home,
r#"
model = "gpt-old"
"#,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let write_id = mcp
.send_config_value_write_request(ConfigValueWriteParams {
file_path: codex_home.path().join("config.toml").display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-new"),
merge_strategy: MergeStrategy::Replace,
expected_version: Some("sha256:stale".to_string()),
})
.await?;
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(write_id)),
)
.await??;
let code = err
.error
.data
.as_ref()
.and_then(|d| d.get("config_write_error_code"))
.and_then(|v| v.as_str());
assert_eq!(code, Some("configVersionConflict"));
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_batch_write_applies_multiple_edits() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(&codex_home, "")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let batch_id = mcp
.send_config_batch_write_request(ConfigBatchWriteParams {
file_path: codex_home.path().join("config.toml").display().to_string(),
edits: vec![
ConfigEdit {
key_path: "sandbox_mode".to_string(),
value: json!("workspace-write"),
merge_strategy: MergeStrategy::Replace,
},
ConfigEdit {
key_path: "sandbox_workspace_write".to_string(),
value: json!({
"writable_roots": ["/tmp"],
"network_access": false
}),
merge_strategy: MergeStrategy::Replace,
},
],
expected_version: None,
})
.await?;
let batch_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(batch_id)),
)
.await??;
let batch_write: ConfigWriteResponse = to_response(batch_resp)?;
assert_eq!(batch_write.status, WriteStatus::Ok);
let read_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
)
.await??;
let read: ConfigReadResponse = to_response(read_resp)?;
assert_eq!(
read.config.get("sandbox_mode"),
Some(&json!("workspace-write"))
);
assert_eq!(
read.config
.get("sandbox_workspace_write")
.and_then(|v| v.get("writable_roots")),
Some(&json!(["/tmp"]))
);
assert_eq!(
read.config
.get("sandbox_workspace_write")
.and_then(|v| v.get("network_access")),
Some(&json!(false))
);
Ok(())
}
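// Hypothetical sketch, not part of the original file: a read-then-write round
// trip that threads the freshly read version back as expected_version, so a
// concurrent edit fails with configVersionConflict (as tested above) instead
// of silently clobbering the file. Only the function name is new; every call
// mirrors the tests in this file.
async fn write_model_checked(
    mcp: &mut McpProcess,
    codex_home: &TempDir,
    value: serde_json::Value,
) -> Result<ConfigWriteResponse> {
    // Read the current config to capture the version recorded for `model`.
    let read_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: false,
        })
        .await?;
    let read_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
    )
    .await??;
    let read: ConfigReadResponse = to_response(read_resp)?;
    let expected_version = read.origins.get("model").map(|m| m.version.clone());
    // Write conditionally on that version.
    let write_id = mcp
        .send_config_value_write_request(ConfigValueWriteParams {
            file_path: codex_home.path().join("config.toml").display().to_string(),
            key_path: "model".to_string(),
            value,
            merge_strategy: MergeStrategy::Replace,
            expected_version,
        })
        .await?;
    let write_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(write_id)),
    )
    .await??;
    let write: ConfigWriteResponse = to_response(write_resp)?;
    Ok(write)
}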

View File

@@ -1,11 +0,0 @@
mod account;
mod config_rpc;
mod model_list;
mod rate_limits;
mod review;
mod thread_archive;
mod thread_list;
mod thread_resume;
mod thread_start;
mod turn_interrupt;
mod turn_start;

View File

@@ -1,272 +0,0 @@
use std::time::Duration;
use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_app_server_protocol::RequestId;
use codex_protocol::config_types::ReasoningEffort;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test]
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: Some(100),
cursor: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ModelListResponse {
data: items,
next_cursor,
} = to_response::<ModelListResponse>(response)?;
let expected_models = vec![
Model {
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
display_name: "gpt-5.1-codex-max".to_string(),
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex problems".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: true,
},
Model {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
display_name: "gpt-5.1-codex".to_string(),
description: "Optimized for codex.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "gpt-5.1-codex-mini".to_string(),
model: "gpt-5.1-codex-mini".to_string(),
display_name: "gpt-5.1-codex-mini".to_string(),
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward \
queries and short explanations"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for \
general-purpose tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
];
assert_eq!(items, expected_models);
assert!(next_cursor.is_none());
Ok(())
}
#[tokio::test]
async fn list_models_pagination_works() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: None,
})
.await?;
let first_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
)
.await??;
let ModelListResponse {
data: first_items,
next_cursor: first_cursor,
} = to_response::<ModelListResponse>(first_response)?;
assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "gpt-5.1-codex-max");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
let second_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(next_cursor.clone()),
})
.await?;
let second_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
)
.await??;
let ModelListResponse {
data: second_items,
next_cursor: second_cursor,
} = to_response::<ModelListResponse>(second_response)?;
assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, "gpt-5.1-codex");
let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;
let third_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(third_cursor.clone()),
})
.await?;
let third_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(third_request)),
)
.await??;
let ModelListResponse {
data: third_items,
next_cursor: third_cursor,
} = to_response::<ModelListResponse>(third_response)?;
assert_eq!(third_items.len(), 1);
assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;
let fourth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(fourth_cursor.clone()),
})
.await?;
let fourth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(fourth_request)),
)
.await??;
let ModelListResponse {
data: fourth_items,
next_cursor: fourth_cursor,
} = to_response::<ModelListResponse>(fourth_response)?;
assert_eq!(fourth_items.len(), 1);
assert_eq!(fourth_items[0].id, "gpt-5.1");
assert!(fourth_cursor.is_none());
Ok(())
}
#[tokio::test]
async fn list_models_rejects_invalid_cursor() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: None,
cursor: Some("invalid".to_string()),
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.id, RequestId::Integer(request_id));
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(error.error.message, "invalid cursor: invalid");
Ok(())
}
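// Hypothetical usage sketch, not part of the original file: follow
// next_cursor until the server returns None, collecting every page. It
// reuses only the request and stream helpers exercised by the tests above;
// the function name itself is invented for illustration.
async fn list_all_models(mcp: &mut McpProcess) -> Result<Vec<Model>> {
    let mut models = Vec::new();
    let mut cursor: Option<String> = None;
    loop {
        let request_id = mcp
            .send_list_models_request(ModelListParams {
                limit: Some(1),
                cursor: cursor.clone(),
            })
            .await?;
        let response: JSONRPCResponse = timeout(
            DEFAULT_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
        )
        .await??;
        let ModelListResponse { data, next_cursor } =
            to_response::<ModelListResponse>(response)?;
        models.extend(data);
        match next_cursor {
            Some(next) => cursor = Some(next),
            // A missing cursor marks the last page.
            None => break,
        }
    }
    Ok(models)
}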

View File

@@ -1,182 +0,0 @@
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::header;
use wiremock::matchers::method;
use wiremock::matchers::path;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test]
async fn get_account_rate_limits_requires_auth() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_get_account_rate_limits_request().await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.id, RequestId::Integer(request_id));
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(
error.error.message,
"codex account authentication required to read rate limits"
);
Ok(())
}
#[tokio::test]
async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
login_with_api_key(&mut mcp, "sk-test-key").await?;
let request_id = mcp.send_get_account_rate_limits_request().await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.id, RequestId::Integer(request_id));
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(
error.error.message,
"chatgpt authentication required to read rate limits"
);
Ok(())
}
#[tokio::test]
async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
let codex_home = TempDir::new()?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("chatgpt-token")
.account_id("account-123")
.plan_type("pro"),
AuthCredentialsStoreMode::File,
)?;
let server = MockServer::start().await;
let server_url = server.uri();
write_chatgpt_base_url(codex_home.path(), &server_url)?;
let primary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T00:02:00Z")
.expect("parse primary reset timestamp")
.timestamp();
let secondary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T01:00:00Z")
.expect("parse secondary reset timestamp")
.timestamp();
let response_body = json!({
"plan_type": "pro",
"rate_limit": {
"allowed": true,
"limit_reached": false,
"primary_window": {
"used_percent": 42,
"limit_window_seconds": 3600,
"reset_after_seconds": 120,
"reset_at": primary_reset_timestamp,
},
"secondary_window": {
"used_percent": 5,
"limit_window_seconds": 86400,
"reset_after_seconds": 43200,
"reset_at": secondary_reset_timestamp,
}
}
});
Mock::given(method("GET"))
.and(path("/api/codex/usage"))
.and(header("authorization", "Bearer chatgpt-token"))
.and(header("chatgpt-account-id", "account-123"))
.respond_with(ResponseTemplate::new(200).set_body_json(response_body))
.mount(&server)
.await;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_get_account_rate_limits_request().await?;
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountRateLimitsResponse = to_response(response)?;
let expected = GetAccountRateLimitsResponse {
rate_limits: RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 42,
window_duration_mins: Some(60),
resets_at: Some(primary_reset_timestamp),
}),
secondary: Some(RateLimitWindow {
used_percent: 5,
window_duration_mins: Some(1440),
resets_at: Some(secondary_reset_timestamp),
}),
credits: None,
},
};
assert_eq!(received, expected);
Ok(())
}
async fn login_with_api_key(mcp: &mut McpProcess, api_key: &str) -> Result<()> {
let request_id = mcp
.send_login_api_key_request(LoginApiKeyParams {
api_key: api_key.to_string(),
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
Ok(())
}
fn write_chatgpt_base_url(codex_home: &Path, base_url: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(config_toml, format!("chatgpt_base_url = \"{base_url}\"\n"))
}
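// A minimal sketch of the unit conversion implied by the expected snapshot
// above: the usage endpoint reports window sizes in seconds, while
// RateLimitWindow expresses them in minutes. The helper name and the i64
// types are assumptions for illustration only.
fn window_duration_mins(limit_window_seconds: i64) -> i64 {
    // 3600 -> 60 (hourly window), 86400 -> 1440 (daily window), as asserted above.
    limit_window_seconds / 60
}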

View File

@@ -1,329 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server_unchecked;
use app_test_support::to_response;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ReviewDelivery;
use codex_app_server_protocol::ReviewStartParams;
use codex_app_server_protocol::ReviewStartResponse;
use codex_app_server_protocol::ReviewTarget;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStatus;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test]
async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()> {
let review_payload = json!({
"findings": [
{
"title": "Prefer Stylize helpers",
"body": "Use .dim()/.bold() chaining instead of manual Style.",
"confidence_score": 0.9,
"priority": 1,
"code_location": {
"absolute_file_path": "/tmp/file.rs",
"line_range": {"start": 10, "end": 20}
}
}
],
"overall_correctness": "good",
"overall_explanation": "Looks solid overall with minor polish suggested.",
"overall_confidence_score": 0.75
})
.to_string();
let responses = vec![create_final_assistant_message_sse_response(
&review_payload,
)?];
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_default_thread(&mut mcp).await?;
let review_req = mcp
.send_review_start_request(ReviewStartParams {
thread_id: thread_id.clone(),
delivery: Some(ReviewDelivery::Inline),
target: ReviewTarget::Commit {
sha: "1234567deadbeef".to_string(),
title: Some("Tidy UI colors".to_string()),
},
})
.await?;
let review_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(review_req)),
)
.await??;
let ReviewStartResponse {
turn,
review_thread_id,
} = to_response::<ReviewStartResponse>(review_resp)?;
assert_eq!(review_thread_id, thread_id.clone());
let turn_id = turn.id.clone();
assert_eq!(turn.status, TurnStatus::InProgress);
// Confirm we see the EnteredReviewMode marker on the main thread.
let mut saw_entered_review_mode = false;
for _ in 0..10 {
let item_started: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("item/started"),
)
.await??;
let started: ItemStartedNotification =
serde_json::from_value(item_started.params.expect("params must be present"))?;
match started.item {
ThreadItem::EnteredReviewMode { id, review } => {
assert_eq!(id, turn_id);
assert_eq!(review, "commit 1234567: Tidy UI colors");
saw_entered_review_mode = true;
break;
}
_ => continue,
}
}
assert!(
saw_entered_review_mode,
"did not observe enteredReviewMode item"
);
// Confirm we see the ExitedReviewMode marker (with review text)
// on the same turn. Ignore any other items the stream surfaces.
let mut review_body: Option<String> = None;
for _ in 0..10 {
let review_notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("item/completed"),
)
.await??;
let completed: ItemCompletedNotification =
serde_json::from_value(review_notif.params.expect("params must be present"))?;
match completed.item {
ThreadItem::ExitedReviewMode { id, review } => {
assert_eq!(id, turn_id);
review_body = Some(review);
break;
}
_ => continue,
}
}
let review = review_body.expect("did not observe a code review item");
assert!(review.contains("Prefer Stylize helpers"));
assert!(review.contains("/tmp/file.rs:10-20"));
Ok(())
}
#[tokio::test]
async fn review_start_rejects_empty_base_branch() -> Result<()> {
let server = create_mock_chat_completions_server_unchecked(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_default_thread(&mut mcp).await?;
let request_id = mcp
.send_review_start_request(ReviewStartParams {
thread_id,
delivery: Some(ReviewDelivery::Inline),
target: ReviewTarget::BaseBranch {
branch: " ".to_string(),
},
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert!(
error.error.message.contains("branch must not be empty"),
"unexpected message: {}",
error.error.message
);
Ok(())
}
#[tokio::test]
async fn review_start_with_detached_delivery_returns_new_thread_id() -> Result<()> {
let review_payload = json!({
"findings": [],
"overall_correctness": "ok",
"overall_explanation": "detached review",
"overall_confidence_score": 0.5
})
.to_string();
let responses = vec![create_final_assistant_message_sse_response(
&review_payload,
)?];
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_default_thread(&mut mcp).await?;
let review_req = mcp
.send_review_start_request(ReviewStartParams {
thread_id: thread_id.clone(),
delivery: Some(ReviewDelivery::Detached),
target: ReviewTarget::Custom {
instructions: "detached review".to_string(),
},
})
.await?;
let review_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(review_req)),
)
.await??;
let ReviewStartResponse {
turn,
review_thread_id,
} = to_response::<ReviewStartResponse>(review_resp)?;
assert_eq!(turn.status, TurnStatus::InProgress);
assert_ne!(
review_thread_id, thread_id,
"detached review should run on a different thread"
);
Ok(())
}
#[tokio::test]
async fn review_start_rejects_empty_commit_sha() -> Result<()> {
let server = create_mock_chat_completions_server_unchecked(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_default_thread(&mut mcp).await?;
let request_id = mcp
.send_review_start_request(ReviewStartParams {
thread_id,
delivery: Some(ReviewDelivery::Inline),
target: ReviewTarget::Commit {
sha: "\t".to_string(),
title: None,
},
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert!(
error.error.message.contains("sha must not be empty"),
"unexpected message: {}",
error.error.message
);
Ok(())
}
#[tokio::test]
async fn review_start_rejects_empty_custom_instructions() -> Result<()> {
let server = create_mock_chat_completions_server_unchecked(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_default_thread(&mut mcp).await?;
let request_id = mcp
.send_review_start_request(ReviewStartParams {
thread_id,
delivery: Some(ReviewDelivery::Inline),
target: ReviewTarget::Custom {
instructions: "\n\n".to_string(),
},
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert!(
error
.error
.message
.contains("instructions must not be empty"),
"unexpected message: {}",
error.error.message
);
Ok(())
}
async fn start_default_thread(mcp: &mut McpProcess) -> Result<String> {
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
Ok(thread.id)
}
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}
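// Hypothetical helper, not part of the original file: it generalizes the
// polling loops in the inline-review test above, scanning at most
// `max_items` item/completed notifications for the ExitedReviewMode marker
// on the given turn. Only the function name is invented.
async fn wait_for_review_text(
    mcp: &mut McpProcess,
    turn_id: &str,
    max_items: usize,
) -> Result<Option<String>> {
    for _ in 0..max_items {
        let note: JSONRPCNotification = timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_notification_message("item/completed"),
        )
        .await??;
        let completed: ItemCompletedNotification =
            serde_json::from_value(note.params.expect("params must be present"))?;
        match completed.item {
            // Stop as soon as the review for this turn completes.
            ThreadItem::ExitedReviewMode { id, review } if id == turn_id => {
                return Ok(Some(review));
            }
            _ => continue,
        }
    }
    Ok(None)
}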

View File

@@ -1,93 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadArchiveResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_core::find_conversation_path_by_id_str;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a thread.
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
assert!(!thread.id.is_empty());
// Locate the rollout path recorded for this thread id.
let rollout_path = find_conversation_path_by_id_str(codex_home.path(), &thread.id)
.await?
.expect("expected rollout path for thread id to exist");
assert!(
rollout_path.exists(),
"expected {} to exist",
rollout_path.display()
);
// Archive the thread.
let archive_id = mcp
.send_thread_archive_request(ThreadArchiveParams {
thread_id: thread.id.clone(),
})
.await?;
let archive_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(archive_id)),
)
.await??;
let _: ThreadArchiveResponse = to_response::<ThreadArchiveResponse>(archive_resp)?;
// Verify file moved.
let archived_directory = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
// The archived file keeps the original filename (rollout-...-<id>.jsonl).
let archived_rollout_path =
archived_directory.join(rollout_path.file_name().expect("rollout file name"));
assert!(
!rollout_path.exists(),
"expected rollout path {} to be moved",
rollout_path.display()
);
assert!(
archived_rollout_path.exists(),
"expected archived rollout path {} to exist",
archived_rollout_path.display()
);
Ok(())
}
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(config_toml, config_contents())
}
fn config_contents() -> &'static str {
r#"model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
"#
}
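// Illustrative sketch of the path relationship this test verifies: archiving
// moves the rollout into ARCHIVED_SESSIONS_SUBDIR under CODEX_HOME while
// keeping the original rollout filename. The function name is invented.
fn archived_path_for(codex_home: &Path, rollout_path: &Path) -> std::path::PathBuf {
    codex_home
        .join(ARCHIVED_SESSIONS_SUBDIR)
        .join(rollout_path.file_name().expect("rollout file name"))
}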

View File

@@ -1,259 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::to_response;
use codex_app_server_protocol::GitInfo as ApiGitInfo;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadListResponse;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// List threads in an empty CODEX_HOME; should return an empty page with nextCursor: null.
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let list_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(list_resp)?;
assert!(data.is_empty());
assert_eq!(next_cursor, None);
Ok(())
}
// Minimal config.toml for listing.
fn create_minimal_config(codex_home: &std::path::Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
r#"
model = "mock-model"
approval_policy = "never"
"#,
)
}
#[tokio::test]
async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
// Create three rollouts so we can paginate with limit=2.
let _a = create_fake_rollout(
codex_home.path(),
"2025-01-02T12-00-00",
"2025-01-02T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let _b = create_fake_rollout(
codex_home.path(),
"2025-01-01T13-00-00",
"2025-01-01T13:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let _c = create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Page 1: limit 2 → expect next_cursor Some.
let page1_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(2),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let page1_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(page1_id)),
)
.await??;
let ThreadListResponse {
data: data1,
next_cursor: cursor1,
} = to_response::<ThreadListResponse>(page1_resp)?;
assert_eq!(data1.len(), 2);
for thread in &data1 {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
assert_eq!(thread.git_info, None);
}
let cursor1 = cursor1.expect("expected nextCursor on first page");
// Page 2: with cursor → expect next_cursor None when no more results.
let page2_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: Some(cursor1),
limit: Some(2),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let page2_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(page2_id)),
)
.await??;
let ThreadListResponse {
data: data2,
next_cursor: cursor2,
} = to_response::<ThreadListResponse>(page2_resp)?;
assert!(data2.len() <= 2);
for thread in &data2 {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
assert_eq!(thread.git_info, None);
}
assert_eq!(cursor2, None, "expected nextCursor to be null on last page");
Ok(())
}
#[tokio::test]
async fn thread_list_respects_provider_filter() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
// Create rollouts under two providers.
let _a = create_fake_rollout(
codex_home.path(),
"2025-01-02T10-00-00",
"2025-01-02T10:00:00Z",
"X",
Some("mock_provider"),
None,
)?; // mock_provider
let _b = create_fake_rollout(
codex_home.path(),
"2025-01-02T11-00-00",
"2025-01-02T11:00:00Z",
"X",
Some("other_provider"),
None,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Filter to only other_provider; expect 1 item, nextCursor None.
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: Some(vec!["other_provider".to_string()]),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(resp)?;
assert_eq!(data.len(), 1);
assert_eq!(next_cursor, None);
let thread = &data[0];
assert_eq!(thread.preview, "X");
assert_eq!(thread.model_provider, "other_provider");
let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-02T11:00:00Z")?.timestamp();
assert_eq!(thread.created_at, expected_ts);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
assert_eq!(thread.git_info, None);
Ok(())
}
#[tokio::test]
async fn thread_list_includes_git_info() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let git_info = CoreGitInfo {
commit_hash: Some("abc123".to_string()),
branch: Some("main".to_string()),
repository_url: Some("https://example.com/repo.git".to_string()),
};
let conversation_id = create_fake_rollout(
codex_home.path(),
"2025-02-01T09-00-00",
"2025-02-01T09:00:00Z",
"Git info preview",
Some("mock_provider"),
Some(git_info),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadListResponse { data, .. } = to_response::<ThreadListResponse>(resp)?;
let thread = data
.iter()
.find(|t| t.id == conversation_id)
.expect("expected thread for created rollout");
let expected_git = ApiGitInfo {
sha: Some("abc123".to_string()),
branch: Some("main".to_string()),
origin_url: Some("https://example.com/repo.git".to_string()),
};
assert_eq!(thread.git_info, Some(expected_git));
assert_eq!(thread.source, SessionSource::Cli);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
Ok(())
}
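// A minimal mapping sketch, assuming only the field correspondence asserted
// above: core rollout git metadata is renamed onto the API shape
// (commit_hash -> sha, repository_url -> origin_url). Illustrative only.
fn to_api_git_info(core: CoreGitInfo) -> ApiGitInfo {
    ApiGitInfo {
        sha: core.commit_hash,
        branch: core.branch,
        origin_url: core.repository_url,
    }
}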

Some files were not shown because too many files have changed in this diff.