mirror of https://github.com/openai/codex.git
synced 2026-02-17 14:23:48 +00:00

Compare commits: latest-alp… … remove/pre…

4 commits:

- cea73e3566
- c7868ff39e
- 0ceee61ad3
- f730efcea4

@@ -1,5 +1,3 @@
 iTerm
 iTerm2
 psuedo
-te
-TE

@@ -3,4 +3,4 @@
 skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new,*meriyah.umd.min.js
 check-hidden = true
 ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
-ignore-words-list = ratatui,ser,iTerm,iterm2,iterm,te,TE
+ignore-words-list = ratatui,ser,iTerm,iterm2,iterm

7 changes: .github/ISSUE_TEMPLATE/1-codex-app.yml (vendored)

@@ -21,13 +21,6 @@ body:
       label: What subscription do you have?
     validations:
       required: true
-  - type: input
-    id: platform
-    attributes:
-      label: What platform is your computer?
-      description: |
-        For macOS and Linux: copy the output of `uname -mprs`
-        For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
   - type: textarea
     id: actual
     attributes:

18 changes: .github/prompts/issue-deduplicator.txt (vendored, new file)

@@ -0,0 +1,18 @@
+You are an assistant that triages new GitHub issues by identifying potential duplicates.
+
+You will receive the following JSON files located in the current working directory:
+- `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
+- `codex-existing-issues.json`: JSON array of recent issues (each element includes number, title, body, createdAt).
+
+Instructions:
+- Load both files as JSON and review their contents carefully. The codex-existing-issues.json file is large, ensure you explore all of it.
+- Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
+- Only consider an issue a potential duplicate if there is a clear overlap in symptoms, feature requests, reproduction steps, or error messages.
+- Prioritize newer issues when similarity is comparable.
+- Ignore pull requests and issues whose similarity is tenuous.
+- When unsure, prefer returning fewer matches.
+
+Output requirements:
+- Respond with a JSON array of issue numbers (integers), ordered from most likely duplicate to least.
+- Include at most five numbers.
+- If you find no plausible duplicates, respond with `[]`.
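
The prompt's output contract (a bare JSON array of at most five issue numbers) is easy to check mechanically. A minimal sketch of such a check, assuming the model's reply has been saved to reply.json (a hypothetical file name):

  # Hypothetical validation: exit non-zero unless the reply is a JSON
  # array of at most five numbers, e.g. [123, 456].
  jq -e 'type == "array" and length <= 5 and all(.[]; type == "number")' reply.json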

5 changes: .github/workflows/bazel.yml (vendored)

@@ -65,11 +65,6 @@ jobs:
       - name: Set up Bazel
         uses: bazelbuild/setup-bazelisk@v3
 
-      - name: Check MODULE.bazel.lock is up to date
-        if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu'
-        shell: bash
-        run: ./scripts/check-module-bazel-lock.sh
-
       # TODO(mbolin): Bring this back once we have caching working. Currently,
       # we never seem to get a cache hit but we still end up paying the cost of
       # uploading at the end of the build, which takes over a minute!
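
The step removed here failed CI whenever MODULE.bazel.lock drifted from the Rust dependency set; the AGENTS.md hunk below drops the matching contributor guidance. A local equivalent of the same check, assuming the `just bazel-lock-update` recipe referenced in AGENTS.md:

  # Refresh the Bazel lockfile, then fail if it changed on disk.
  just bazel-lock-update
  git diff --exit-code MODULE.bazel.lock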

248 changes: .github/workflows/issue-deduplicator.yml (vendored)

@@ -15,68 +15,34 @@ jobs:
     permissions:
       contents: read
     outputs:
-      codex_output: ${{ steps.select-final.outputs.codex_output }}
+      codex_output: ${{ steps.codex.outputs.final-message }}
     steps:
       - uses: actions/checkout@v6
 
       - name: Prepare Codex inputs
         env:
           GH_TOKEN: ${{ github.token }}
-          REPO: ${{ github.repository }}
-          ISSUE_NUMBER: ${{ github.event.issue.number }}
         run: |
           set -eo pipefail
 
           CURRENT_ISSUE_FILE=codex-current-issue.json
-          EXISTING_ALL_FILE=codex-existing-issues-all.json
-          EXISTING_OPEN_FILE=codex-existing-issues-open.json
+          EXISTING_ISSUES_FILE=codex-existing-issues.json
 
-          gh issue list --repo "$REPO" \
-            --json number,title,body,createdAt,updatedAt,state,labels \
+          gh issue list --repo "${{ github.repository }}" \
+            --json number,title,body,createdAt \
             --limit 1000 \
             --state all \
             --search "sort:created-desc" \
-            | jq '[.[] | {
-                number,
-                title,
-                body: ((.body // "")[0:4000]),
-                createdAt,
-                updatedAt,
-                state,
-                labels: ((.labels // []) | map(.name))
-              }]' \
-            > "$EXISTING_ALL_FILE"
+            | jq '.' \
+            > "$EXISTING_ISSUES_FILE"
 
-          gh issue list --repo "$REPO" \
-            --json number,title,body,createdAt,updatedAt,state,labels \
-            --limit 1000 \
-            --state open \
-            --search "sort:created-desc" \
-            | jq '[.[] | {
-                number,
-                title,
-                body: ((.body // "")[0:4000]),
-                createdAt,
-                updatedAt,
-                state,
-                labels: ((.labels // []) | map(.name))
-              }]' \
-            > "$EXISTING_OPEN_FILE"
-
-          gh issue view "$ISSUE_NUMBER" \
-            --repo "$REPO" \
+          gh issue view "${{ github.event.issue.number }}" \
+            --repo "${{ github.repository }}" \
             --json number,title,body \
-            | jq '{number, title, body: ((.body // "")[0:4000])}' \
+            | jq '.' \
             > "$CURRENT_ISSUE_FILE"
 
           echo "Prepared duplicate detection input files."
-          echo "all_issue_count=$(jq 'length' "$EXISTING_ALL_FILE")"
-          echo "open_issue_count=$(jq 'length' "$EXISTING_OPEN_FILE")"
 
-      # Prompt instructions are intentionally inline in this workflow. The old
-      # .github/prompts/issue-deduplicator.txt file is obsolete and removed.
-      - id: codex-all
-        name: Find duplicates (pass 1, all issues)
+      - id: codex
         uses: openai/codex-action@main
         with:
           openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
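
The removed pass-1 preparation shaped issues with a jq projection that truncated each body to 4,000 characters and flattened labels to plain names, while the restored step passes `gh` output through unchanged. The shaping filter on its own, with OWNER/REPO standing in for a real slug:

  # Standalone run of the removed shaping filter (OWNER/REPO is a placeholder).
  gh issue list --repo OWNER/REPO \
    --json number,title,body,createdAt,updatedAt,state,labels \
    | jq '[.[] | {number, title, body: ((.body // "")[0:4000]),
                  createdAt, updatedAt, state,
                  labels: ((.labels // []) | map(.name))}]' \
    > codex-existing-issues-all.json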

@@ -86,17 +52,14 @@
 
           You will receive the following JSON files located in the current working directory:
           - `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
-          - `codex-existing-issues-all.json`: JSON array of recent issues with states, timestamps, and labels.
+          - `codex-existing-issues.json`: JSON array of recent issues (each element includes number, title, body, createdAt).
 
           Instructions:
           - Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
-          - Prioritize concrete overlap in symptoms, reproduction details, error signatures, and user intent.
-          - Prefer active unresolved issues when confidence is similar.
-          - Closed issues can still be valid duplicates if they clearly match.
-          - Return fewer matches rather than speculative ones.
-          - If confidence is low, return an empty list.
-          - Include at most five issue numbers.
-          - After analysis, provide a short reason for your decision.
+          - Focus on the underlying intent and context of each issue—such as reported symptoms, feature requests, reproduction steps, or error messages—rather than relying solely on string similarity or synthetic metrics.
+          - After your analysis, validate your results in 1-2 lines explaining your decision to return the selected matches.
+          - When unsure, prefer returning fewer matches.
+          - Include at most five numbers.
 
         output-schema: |
           {

@@ -114,179 +77,6 @@
             "additionalProperties": false
           }
 
-      - id: normalize-all
-        name: Normalize pass 1 output
-        env:
-          CODEX_OUTPUT: ${{ steps.codex-all.outputs.final-message }}
-          CURRENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
-        run: |
-          set -eo pipefail
-
-          raw=${CODEX_OUTPUT//$'\r'/}
-          parsed=false
-          issues='[]'
-          reason=''
-
-          if [ -n "$raw" ] && printf '%s' "$raw" | jq -e 'type == "object" and (.issues | type == "array")' >/dev/null 2>&1; then
-            parsed=true
-            issues=$(printf '%s' "$raw" | jq -c '[.issues[] | tostring]')
-            reason=$(printf '%s' "$raw" | jq -r '.reason // ""')
-          else
-            reason='Pass 1 output was empty or invalid JSON.'
-          fi
-
-          filtered=$(jq -cn --argjson issues "$issues" --arg current "$CURRENT_ISSUE_NUMBER" '[
-            $issues[]
-            | tostring
-            | select(. != $current)
-          ] | reduce .[] as $issue ([]; if index($issue) then . else . + [$issue] end) | .[:5]')
-
-          has_matches=false
-          if [ "$(jq 'length' <<< "$filtered")" -gt 0 ]; then
-            has_matches=true
-          fi
-
-          echo "Pass 1 parsed: $parsed"
-          echo "Pass 1 matches after filtering: $(jq 'length' <<< "$filtered")"
-          echo "Pass 1 reason: $reason"
-
-          {
-            echo "issues_json=$filtered"
-            echo "reason<<EOF"
-            echo "$reason"
-            echo "EOF"
-            echo "has_matches=$has_matches"
-          } >> "$GITHUB_OUTPUT"
-
-      - id: codex-open
-        name: Find duplicates (pass 2, open issues)
-        if: ${{ steps.normalize-all.outputs.has_matches != 'true' }}
-        uses: openai/codex-action@main
-        with:
-          openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
-          allow-users: "*"
-          prompt: |
-            You are an assistant that triages new GitHub issues by identifying potential duplicates.
-
-            This is a fallback pass because a broad search did not find convincing matches.
-
-            You will receive the following JSON files located in the current working directory:
-            - `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
-            - `codex-existing-issues-open.json`: JSON array of open issues only.
-
-            Instructions:
-            - Search only these active unresolved issues for duplicates of the current issue.
-            - Prioritize concrete overlap in symptoms, reproduction details, error signatures, and user intent.
-            - Prefer fewer, higher-confidence matches.
-            - If confidence is low, return an empty list.
-            - Include at most five issue numbers.
-            - After analysis, provide a short reason for your decision.
-
-          output-schema: |
-            {
-              "type": "object",
-              "properties": {
-                "issues": {
-                  "type": "array",
-                  "items": {
-                    "type": "string"
-                  }
-                },
-                "reason": { "type": "string" }
-              },
-              "required": ["issues", "reason"],
-              "additionalProperties": false
-            }
-
-      - id: normalize-open
-        name: Normalize pass 2 output
-        if: ${{ steps.normalize-all.outputs.has_matches != 'true' }}
-        env:
-          CODEX_OUTPUT: ${{ steps.codex-open.outputs.final-message }}
-          CURRENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
-        run: |
-          set -eo pipefail
-
-          raw=${CODEX_OUTPUT//$'\r'/}
-          parsed=false
-          issues='[]'
-          reason=''
-
-          if [ -n "$raw" ] && printf '%s' "$raw" | jq -e 'type == "object" and (.issues | type == "array")' >/dev/null 2>&1; then
-            parsed=true
-            issues=$(printf '%s' "$raw" | jq -c '[.issues[] | tostring]')
-            reason=$(printf '%s' "$raw" | jq -r '.reason // ""')
-          else
-            reason='Pass 2 output was empty or invalid JSON.'
-          fi
-
-          filtered=$(jq -cn --argjson issues "$issues" --arg current "$CURRENT_ISSUE_NUMBER" '[
-            $issues[]
-            | tostring
-            | select(. != $current)
-          ] | reduce .[] as $issue ([]; if index($issue) then . else . + [$issue] end) | .[:5]')
-
-          has_matches=false
-          if [ "$(jq 'length' <<< "$filtered")" -gt 0 ]; then
-            has_matches=true
-          fi
-
-          echo "Pass 2 parsed: $parsed"
-          echo "Pass 2 matches after filtering: $(jq 'length' <<< "$filtered")"
-          echo "Pass 2 reason: $reason"
-
-          {
-            echo "issues_json=$filtered"
-            echo "reason<<EOF"
-            echo "$reason"
-            echo "EOF"
-            echo "has_matches=$has_matches"
-          } >> "$GITHUB_OUTPUT"
-
-      - id: select-final
-        name: Select final duplicate set
-        env:
-          PASS1_ISSUES: ${{ steps.normalize-all.outputs.issues_json }}
-          PASS1_REASON: ${{ steps.normalize-all.outputs.reason }}
-          PASS2_ISSUES: ${{ steps.normalize-open.outputs.issues_json }}
-          PASS2_REASON: ${{ steps.normalize-open.outputs.reason }}
-          PASS1_HAS_MATCHES: ${{ steps.normalize-all.outputs.has_matches }}
-          PASS2_HAS_MATCHES: ${{ steps.normalize-open.outputs.has_matches }}
-        run: |
-          set -eo pipefail
-
-          selected_issues='[]'
-          selected_reason='No plausible duplicates found.'
-          selected_pass='none'
-
-          if [ "$PASS1_HAS_MATCHES" = "true" ]; then
-            selected_issues=${PASS1_ISSUES:-'[]'}
-            selected_reason=${PASS1_REASON:-'Pass 1 found duplicates.'}
-            selected_pass='all'
-          fi
-
-          if [ "$PASS2_HAS_MATCHES" = "true" ]; then
-            selected_issues=${PASS2_ISSUES:-'[]'}
-            selected_reason=${PASS2_REASON:-'Pass 2 found duplicates.'}
-            selected_pass='open-fallback'
-          fi
-
-          final_json=$(jq -cn \
-            --argjson issues "$selected_issues" \
-            --arg reason "$selected_reason" \
-            --arg pass "$selected_pass" \
-            '{issues: $issues, reason: $reason, pass: $pass}')
-
-          echo "Final pass used: $selected_pass"
-          echo "Final duplicate count: $(jq '.issues | length' <<< "$final_json")"
-          echo "Final reason: $(jq -r '.reason' <<< "$final_json")"
-
-          {
-            echo "codex_output<<EOF"
-            echo "$final_json"
-            echo "EOF"
-          } >> "$GITHUB_OUTPUT"
 
   comment-on-issue:
     name: Comment with potential duplicates
     needs: gather-duplicates
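
Both removed normalize steps reduce to one jq expression: stringify each reported issue number, drop the current issue, de-duplicate while preserving order, and cap the list at five. That expression in isolation, with made-up inputs:

  # Made-up inputs: the model repeated 123 and echoed the current issue 999.
  issues='["123","999","123","456"]'
  jq -cn --argjson issues "$issues" --arg current "999" '[
    $issues[] | tostring | select(. != $current)
  ] | reduce .[] as $issue ([]; if index($issue) then . else . + [$issue] end) | .[:5]'
  # Prints ["123","456"].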

@@ -315,17 +105,11 @@ jobs:
 
             const issues = Array.isArray(parsed?.issues) ? parsed.issues : [];
             const currentIssueNumber = String(context.payload.issue.number);
-            const passUsed = typeof parsed?.pass === 'string' ? parsed.pass : 'unknown';
-            const reason = typeof parsed?.reason === 'string' ? parsed.reason : '';
 
             console.log(`Current issue number: ${currentIssueNumber}`);
-            console.log(`Pass used: ${passUsed}`);
-            if (reason) {
-              console.log(`Reason: ${reason}`);
-            }
             console.log(issues);
 
-            const filteredIssues = [...new Set(issues.map((value) => String(value)))].filter((value) => value !== currentIssueNumber).slice(0, 5);
+            const filteredIssues = issues.filter((value) => String(value) !== currentIssueNumber);
 
             if (filteredIssues.length === 0) {
               core.info('Codex reported no potential duplicates.');

8 changes: .github/workflows/rust-ci.yml (vendored)

@@ -59,7 +59,7 @@ jobs:
       working-directory: codex-rs
     steps:
       - uses: actions/checkout@v6
-      - uses: dtolnay/rust-toolchain@1.93.0
+      - uses: dtolnay/rust-toolchain@1.93
         with:
           components: rustfmt
       - name: cargo fmt

@@ -75,7 +75,7 @@ jobs:
       working-directory: codex-rs
     steps:
       - uses: actions/checkout@v6
-      - uses: dtolnay/rust-toolchain@1.93.0
+      - uses: dtolnay/rust-toolchain@1.93
       - uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
         with:
           tool: cargo-shear

@@ -196,7 +196,7 @@ jobs:
             fi
             sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends "${packages[@]}"
           fi
-      - uses: dtolnay/rust-toolchain@1.93.0
+      - uses: dtolnay/rust-toolchain@1.93
         with:
           targets: ${{ matrix.target }}
           components: clippy

@@ -513,7 +513,7 @@ jobs:
       - name: Install DotSlash
         uses: facebook/install-dotslash@v2
 
-      - uses: dtolnay/rust-toolchain@1.93.0
+      - uses: dtolnay/rust-toolchain@1.93
        with:
          targets: ${{ matrix.target }}

2 changes: .github/workflows/rust-release-windows.yml (vendored)

@@ -82,7 +82,7 @@ jobs:
           Write-Host "Total RAM: $ramGiB GiB"
           Write-Host "Disk usage:"
           Get-PSDrive -PSProvider FileSystem | Format-Table -AutoSize Name, @{Name='Size(GB)';Expression={[math]::Round(($_.Used + $_.Free) / 1GB, 1)}}, @{Name='Free(GB)';Expression={[math]::Round($_.Free / 1GB, 1)}}
-      - uses: dtolnay/rust-toolchain@1.93.0
+      - uses: dtolnay/rust-toolchain@1.93
        with:
          targets: ${{ matrix.target }}

2 changes: .github/workflows/rust-release.yml (vendored)

@@ -123,7 +123,7 @@ jobs:
             sudo apt-get update -y
             sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
           fi
-      - uses: dtolnay/rust-toolchain@1.93.0
+      - uses: dtolnay/rust-toolchain@1.93
        with:
          targets: ${{ matrix.target }}

2 changes: .github/workflows/sdk.yml (vendored)

@@ -31,7 +31,7 @@ jobs:
           node-version: 22
           cache: pnpm
 
-      - uses: dtolnay/rust-toolchain@1.93.0
+      - uses: dtolnay/rust-toolchain@1.93
 
       - name: build codex
         run: cargo build --bin codex

219 changes: .github/workflows/shell-tool-mcp.yml (vendored)

@@ -105,7 +105,7 @@ jobs:
             sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
           fi
 
-      - uses: dtolnay/rust-toolchain@1.93.0
+      - uses: dtolnay/rust-toolchain@1.93
         with:
           targets: ${{ matrix.target }}

@@ -251,11 +251,11 @@ jobs:
           set -euo pipefail
           if command -v apt-get >/dev/null 2>&1; then
             apt-get update
-            DEBIAN_FRONTEND=noninteractive apt-get install -y git build-essential bison autoconf gettext libncursesw5-dev
+            DEBIAN_FRONTEND=noninteractive apt-get install -y git build-essential bison autoconf gettext
           elif command -v dnf >/dev/null 2>&1; then
-            dnf install -y git gcc gcc-c++ make bison autoconf gettext ncurses-devel
+            dnf install -y git gcc gcc-c++ make bison autoconf gettext
           elif command -v yum >/dev/null 2>&1; then
-            yum install -y git gcc gcc-c++ make bison autoconf gettext ncurses-devel
+            yum install -y git gcc gcc-c++ make bison autoconf gettext
           else
             echo "Unsupported package manager in container"
             exit 1

@@ -329,212 +329,6 @@
           path: artifacts/**
           if-no-files-found: error
 
-  zsh-linux:
-    name: Build zsh (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
-    needs: metadata
-    runs-on: ${{ matrix.runner }}
-    timeout-minutes: 30
-    container:
-      image: ${{ matrix.image }}
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - runner: ubuntu-24.04
-            target: x86_64-unknown-linux-musl
-            variant: ubuntu-24.04
-            image: ubuntu:24.04
-          - runner: ubuntu-24.04
-            target: x86_64-unknown-linux-musl
-            variant: ubuntu-22.04
-            image: ubuntu:22.04
-          - runner: ubuntu-24.04
-            target: x86_64-unknown-linux-musl
-            variant: debian-12
-            image: debian:12
-          - runner: ubuntu-24.04
-            target: x86_64-unknown-linux-musl
-            variant: debian-11
-            image: debian:11
-          - runner: ubuntu-24.04
-            target: x86_64-unknown-linux-musl
-            variant: centos-9
-            image: quay.io/centos/centos:stream9
-          - runner: ubuntu-24.04-arm
-            target: aarch64-unknown-linux-musl
-            variant: ubuntu-24.04
-            image: arm64v8/ubuntu:24.04
-          - runner: ubuntu-24.04-arm
-            target: aarch64-unknown-linux-musl
-            variant: ubuntu-22.04
-            image: arm64v8/ubuntu:22.04
-          - runner: ubuntu-24.04-arm
-            target: aarch64-unknown-linux-musl
-            variant: ubuntu-20.04
-            image: arm64v8/ubuntu:20.04
-          - runner: ubuntu-24.04-arm
-            target: aarch64-unknown-linux-musl
-            variant: debian-12
-            image: arm64v8/debian:12
-          - runner: ubuntu-24.04-arm
-            target: aarch64-unknown-linux-musl
-            variant: debian-11
-            image: arm64v8/debian:11
-          - runner: ubuntu-24.04-arm
-            target: aarch64-unknown-linux-musl
-            variant: centos-9
-            image: quay.io/centos/centos:stream9
-    steps:
-      - name: Install build prerequisites
-        shell: bash
-        run: |
-          set -euo pipefail
-          if command -v apt-get >/dev/null 2>&1; then
-            apt-get update
-            DEBIAN_FRONTEND=noninteractive apt-get install -y git build-essential bison autoconf gettext libncursesw5-dev
-          elif command -v dnf >/dev/null 2>&1; then
-            dnf install -y git gcc gcc-c++ make bison autoconf gettext ncurses-devel
-          elif command -v yum >/dev/null 2>&1; then
-            yum install -y git gcc gcc-c++ make bison autoconf gettext ncurses-devel
-          else
-            echo "Unsupported package manager in container"
-            exit 1
-          fi
-
-      - name: Checkout repository
-        uses: actions/checkout@v6
-
-      - name: Build patched zsh
-        shell: bash
-        run: |
-          set -euo pipefail
-          git clone --depth 1 https://git.code.sf.net/p/zsh/code /tmp/zsh
-          cd /tmp/zsh
-          git fetch --depth 1 origin 77045ef899e53b9598bebc5a41db93a548a40ca6
-          git checkout 77045ef899e53b9598bebc5a41db93a548a40ca6
-          git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/zsh-exec-wrapper.patch"
-          ./Util/preconfig
-          ./configure
-          cores="$(command -v nproc >/dev/null 2>&1 && nproc || getconf _NPROCESSORS_ONLN)"
-          make -j"${cores}"
-
-          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/zsh/${{ matrix.variant }}"
-          mkdir -p "$dest"
-          cp Src/zsh "$dest/zsh"
-
-      - name: Smoke test zsh exec wrapper
-        shell: bash
-        run: |
-          set -euo pipefail
-          tmpdir="$(mktemp -d)"
-          cat > "$tmpdir/exec-wrapper" <<'EOF'
-          #!/usr/bin/env bash
-          set -euo pipefail
-          : "${CODEX_WRAPPER_LOG:?missing CODEX_WRAPPER_LOG}"
-          printf '%s\n' "$@" > "$CODEX_WRAPPER_LOG"
-          file="$1"
-          shift
-          if [[ "$#" -eq 0 ]]; then
-            exec "$file"
-          fi
-          arg0="$1"
-          shift
-          exec -a "$arg0" "$file" "$@"
-          EOF
-          chmod +x "$tmpdir/exec-wrapper"
-
-          CODEX_WRAPPER_LOG="$tmpdir/wrapper.log" \
-          EXEC_WRAPPER="$tmpdir/exec-wrapper" \
-          /tmp/zsh/Src/zsh -fc '/bin/echo smoke-zsh' > "$tmpdir/stdout.txt"
-
-          grep -Fx "smoke-zsh" "$tmpdir/stdout.txt"
-          grep -Fx "/bin/echo" "$tmpdir/wrapper.log"
-
-      - uses: actions/upload-artifact@v6
-        with:
-          name: shell-tool-mcp-zsh-${{ matrix.target }}-${{ matrix.variant }}
-          path: artifacts/**
-          if-no-files-found: error
-
-  zsh-darwin:
-    name: Build zsh (macOS) - ${{ matrix.variant }} - ${{ matrix.target }}
-    needs: metadata
-    runs-on: ${{ matrix.runner }}
-    timeout-minutes: 30
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - runner: macos-15-xlarge
-            target: aarch64-apple-darwin
-            variant: macos-15
-          - runner: macos-14
-            target: aarch64-apple-darwin
-            variant: macos-14
-    steps:
-      - name: Install build prerequisites
-        shell: bash
-        run: |
-          set -euo pipefail
-          if ! command -v autoconf >/dev/null 2>&1; then
-            brew install autoconf
-          fi
-
-      - name: Checkout repository
-        uses: actions/checkout@v6
-
-      - name: Build patched zsh
-        shell: bash
-        run: |
-          set -euo pipefail
-          git clone --depth 1 https://git.code.sf.net/p/zsh/code /tmp/zsh
-          cd /tmp/zsh
-          git fetch --depth 1 origin 77045ef899e53b9598bebc5a41db93a548a40ca6
-          git checkout 77045ef899e53b9598bebc5a41db93a548a40ca6
-          git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/zsh-exec-wrapper.patch"
-          ./Util/preconfig
-          ./configure
-          cores="$(getconf _NPROCESSORS_ONLN)"
-          make -j"${cores}"
-
-          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/zsh/${{ matrix.variant }}"
-          mkdir -p "$dest"
-          cp Src/zsh "$dest/zsh"
-
-      - name: Smoke test zsh exec wrapper
-        shell: bash
-        run: |
-          set -euo pipefail
-          tmpdir="$(mktemp -d)"
-          cat > "$tmpdir/exec-wrapper" <<'EOF'
-          #!/usr/bin/env bash
-          set -euo pipefail
-          : "${CODEX_WRAPPER_LOG:?missing CODEX_WRAPPER_LOG}"
-          printf '%s\n' "$@" > "$CODEX_WRAPPER_LOG"
-          file="$1"
-          shift
-          if [[ "$#" -eq 0 ]]; then
-            exec "$file"
-          fi
-          arg0="$1"
-          shift
-          exec -a "$arg0" "$file" "$@"
-          EOF
-          chmod +x "$tmpdir/exec-wrapper"
-
-          CODEX_WRAPPER_LOG="$tmpdir/wrapper.log" \
-          EXEC_WRAPPER="$tmpdir/exec-wrapper" \
-          /tmp/zsh/Src/zsh -fc '/bin/echo smoke-zsh' > "$tmpdir/stdout.txt"
-
-          grep -Fx "smoke-zsh" "$tmpdir/stdout.txt"
-          grep -Fx "/bin/echo" "$tmpdir/wrapper.log"
-
-      - uses: actions/upload-artifact@v6
-        with:
-          name: shell-tool-mcp-zsh-${{ matrix.target }}-${{ matrix.variant }}
-          path: artifacts/**
-          if-no-files-found: error
-
   package:
     name: Package npm module
     needs:
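
Both removed smoke tests exercise the same mechanism: the patched zsh consults EXEC_WRAPPER and re-execs external commands through it, so the wrapper observes the resolved path plus argv. The wrapper logic on its own, outside any patched shell; the log path default is a placeholder:

  #!/usr/bin/env bash
  # Hypothetical standalone wrapper: record the resolved command and its
  # argv, then exec it with argv[0] preserved.
  set -euo pipefail
  printf '%s\n' "$@" > "${CODEX_WRAPPER_LOG:-/tmp/wrapper.log}"
  file="$1"; shift
  if [[ "$#" -eq 0 ]]; then
    exec "$file"
  fi
  arg0="$1"; shift
  exec -a "$arg0" "$file" "$@"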

@@ -542,8 +336,6 @@
       - rust-binaries
       - bash-linux
       - bash-darwin
-      - zsh-linux
-      - zsh-darwin
     runs-on: ubuntu-latest
     env:
       PACKAGE_VERSION: ${{ needs.metadata.outputs.version }}

@@ -617,8 +409,7 @@
           chmod +x \
             "$staging"/vendor/*/codex-exec-mcp-server \
             "$staging"/vendor/*/codex-execve-wrapper \
-            "$staging"/vendor/*/bash/*/bash \
-            "$staging"/vendor/*/zsh/*/zsh
+            "$staging"/vendor/*/bash/*/bash
 
       - name: Create npm tarball
         shell: bash

15 changes: AGENTS.md

@@ -15,10 +15,6 @@ In the codex-rs folder where the rust code lives:
 - When writing tests, prefer comparing the equality of entire objects over fields one by one.
 - When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
 - If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.
-- If you change Rust dependencies (`Cargo.toml` or `Cargo.lock`), run `just bazel-lock-update` from the
-  repo root to refresh `MODULE.bazel.lock`, and include that lockfile update in the same change.
-- After dependency changes, run `just bazel-lock-check` from the repo root so lockfile drift is caught
-  locally before CI.
 - Do not create small helper methods that are referenced only once.
 
 Run `just fmt` (in `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests:

@@ -64,14 +60,7 @@ See `codex-rs/tui/styles.md`.
 
 ### Snapshot tests
 
-This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to validate rendered output.
-
-**Requirement:** any change that affects user-visible UI (including adding new UI) must include
-corresponding `insta` snapshot coverage (add a new snapshot test if one doesn't exist yet, or
-update the existing snapshot). Review and accept snapshot updates as part of the PR so UI impact
-is easy to review and future diffs stay visual.
-
-When UI or text output changes intentionally, update the snapshots as follows:
+This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to validate rendered output. When UI or text output changes intentionally, update the snapshots as follows:
 
 - Run tests to generate any updated snapshots:
   - `cargo test -p codex-tui`

@@ -169,5 +158,3 @@ These guidelines apply to app-server protocol work in `codex-rs`, especially:
   `just write-app-server-schema`
   (and `just write-app-server-schema --experimental` when experimental API fixtures are affected).
 - Validate with `cargo test -p codex-app-server-protocol`.
-- Avoid boilerplate tests that only assert experimental field markers for individual
-  request fields in `common.rs`; rely on schema generation/tests and behavioral coverage instead.
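
The restored AGENTS.md wording points at the snapshot-update loop without the removed requirement paragraph. A typical run of that loop, assuming the cargo-insta helper is installed (`cargo install cargo-insta`):

  # Regenerate snapshots, then accept the pending ones.
  cargo test -p codex-tui
  cargo insta accept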

4 changes: MODULE.bazel.lock (generated)

File diff suppressed because one or more lines are too long

@@ -5,7 +5,6 @@
   </p>
 </br>
 If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE.</a>
-</br>If you want the desktop app experience, run <code>codex app</code> or visit <a href="https://chatgpt.com/codex?app-landing-page=true">the Codex App page</a>.
 </br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>
 
 ---

15 changes: codex-rs/Cargo.lock (generated)

@@ -1401,7 +1401,6 @@ dependencies = [
  "schemars 0.8.22",
  "serde",
  "serde_json",
  "shlex",
  "similar",
  "strum_macros 0.27.2",
  "tempfile",

@@ -1420,8 +1419,6 @@
  "codex-protocol",
  "serde",
  "serde_json",
  "tungstenite",
  "url",
  "uuid",
 ]

@@ -1712,7 +1709,6 @@
  "include_dir",
  "indexmap 2.13.0",
  "indoc",
  "insta",
  "keyring",
  "landlock",
  "libc",

@@ -2038,7 +2034,6 @@ version = "0.0.0"
 dependencies = [
  "anyhow",
  "async-trait",
  "base64 0.22.1",
  "clap",
  "codex-utils-absolute-path",
  "codex-utils-rustls-provider",

@@ -2298,7 +2293,6 @@
  "codex-utils-oss",
  "codex-utils-pty",
  "codex-utils-sandbox-summary",
- "codex-utils-sleep-inhibitor",
  "codex-windows-sandbox",
  "color-eyre",
  "crossterm",

@@ -2498,15 +2492,6 @@
  "regex",
 ]
 
-[[package]]
-name = "codex-utils-sleep-inhibitor"
-version = "0.0.0"
-dependencies = [
- "core-foundation 0.9.4",
- "libc",
- "tracing",
-]
-
 [[package]]
 name = "codex-utils-string"
 version = "0.0.0"

@@ -54,7 +54,6 @@ members = [
     "utils/elapsed",
     "utils/sandbox-summary",
     "utils/sanitizer",
-    "utils/sleep-inhibitor",
     "utils/approval-presets",
     "utils/oss",
     "utils/fuzzy-match",

@@ -66,7 +65,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.102.0-alpha.8"
+version = "0.0.0"
 # Track the edition for all workspace crates in one place. Individual
 # crates can still override this value, but keeping it here means new
 # crates created with `cargo new -w ...` automatically inherit the 2024

@@ -132,7 +131,6 @@ codex-utils-readiness = { path = "utils/readiness" }
 codex-utils-rustls-provider = { path = "utils/rustls-provider" }
 codex-utils-sandbox-summary = { path = "utils/sandbox-summary" }
 codex-utils-sanitizer = { path = "utils/sanitizer" }
-codex-utils-sleep-inhibitor = { path = "utils/sleep-inhibitor" }
 codex-utils-string = { path = "utils/string" }
 codex-windows-sandbox = { path = "windows-sandbox-rs" }
 core_test_support = { path = "core/tests/common" }

@@ -20,7 +20,6 @@ codex-utils-absolute-path = { workspace = true }
 schemars = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
 serde_json = { workspace = true }
 shlex = { workspace = true }
 strum_macros = { workspace = true }
 thiserror = { workspace = true }
 ts-rs = { workspace = true }

@@ -88,7 +88,7 @@
       "type": "string"
     },
     {
-      "description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
+      "description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
       "enum": [
         "on-failure"
       ],

@@ -1128,13 +1128,6 @@
         "null"
       ]
     },
-    "includeHidden": {
-      "description": "When true, include models that are hidden from the default picker list.",
-      "type": [
-        "boolean",
-        "null"
-      ]
-    },
     "limit": {
       "description": "Optional page size; defaults to a reasonable server-side value.",
       "format": "uint32",

@@ -2572,13 +2565,6 @@
         "null"
       ]
     },
-    "cwd": {
-      "description": "Optional cwd filter; when set, only threads whose session cwd exactly matches this path are returned.",
-      "type": [
-        "string",
-        "null"
-      ]
-    },
     "limit": {
       "description": "Optional page size; defaults to a reasonable server-side value.",
       "format": "uint32",

@@ -104,7 +104,7 @@
       "type": "string"
     },
     {
-      "description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
+      "description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
       "enum": [
         "on-failure"
       ],

@@ -1349,14 +1349,6 @@
       "default": "agent",
       "description": "Where the command originated. Defaults to Agent for backward compatibility."
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/ExecCommandStatus"
-        }
-      ],
-      "description": "Completion status for this command execution."
-    },
     "stderr": {
       "description": "Captured stderr",
       "type": "string"

@@ -1385,7 +1377,6 @@
     "exit_code",
     "formatted_output",
     "parsed_cmd",
-    "status",
     "stderr",
     "stdout",
     "turn_id",

@@ -1438,17 +1429,6 @@
       "description": "The command's working directory.",
       "type": "string"
     },
-    "network_approval_context": {
-      "anyOf": [
-        {
-          "$ref": "#/definitions/NetworkApprovalContext"
-        },
-        {
-          "type": "null"
-        }
-      ],
-      "description": "Optional network context for a blocked request that can be approved."
-    },
     "parsed_cmd": {
       "items": {
         "$ref": "#/definitions/ParsedCommand"

@@ -1825,14 +1805,6 @@
       "description": "The changes that were applied (mirrors PatchApplyBeginEvent::changes).",
       "type": "object"
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/PatchApplyStatus"
-        }
-      ],
-      "description": "Completion status for this patch application."
-    },
     "stderr": {
       "description": "Captured stderr (parser errors, IO failures, etc.).",
       "type": "string"

@@ -1860,7 +1832,6 @@
     },
     "required": [
       "call_id",
-      "status",
       "stderr",
       "stdout",
       "success",

@@ -2902,14 +2873,6 @@
       ],
       "type": "string"
     },
-    "ExecCommandStatus": {
-      "enum": [
-        "completed",
-        "failed",
-        "declined"
-      ],
-      "type": "string"
-    },
     "ExecOutputStream": {
       "enum": [
         "stdout",

@@ -3326,30 +3289,6 @@
       ],
       "type": "string"
     },
-    "NetworkApprovalContext": {
-      "properties": {
-        "host": {
-          "type": "string"
-        },
-        "protocol": {
-          "$ref": "#/definitions/NetworkApprovalProtocol"
-        }
-      },
-      "required": [
-        "host",
-        "protocol"
-      ],
-      "type": "object"
-    },
-    "NetworkApprovalProtocol": {
-      "enum": [
-        "http",
-        "https",
-        "socks5_tcp",
-        "socks5_udp"
-      ],
-      "type": "string"
-    },
     "ParsedCommand": {
       "oneOf": [
         {

@@ -3461,14 +3400,6 @@
         }
       ]
     },
-    "PatchApplyStatus": {
-      "enum": [
-        "completed",
-        "failed",
-        "declined"
-      ],
-      "type": "string"
-    },
     "PlanItemArg": {
       "additionalProperties": false,
       "properties": {

@@ -6254,14 +6185,6 @@
       "default": "agent",
       "description": "Where the command originated. Defaults to Agent for backward compatibility."
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/ExecCommandStatus"
-        }
-      ],
-      "description": "Completion status for this command execution."
-    },
     "stderr": {
       "description": "Captured stderr",
       "type": "string"

@@ -6290,7 +6213,6 @@
     "exit_code",
     "formatted_output",
     "parsed_cmd",
-    "status",
     "stderr",
     "stdout",
     "turn_id",

@@ -6343,17 +6265,6 @@
       "description": "The command's working directory.",
       "type": "string"
     },
-    "network_approval_context": {
-      "anyOf": [
-        {
-          "$ref": "#/definitions/NetworkApprovalContext"
-        },
-        {
-          "type": "null"
-        }
-      ],
-      "description": "Optional network context for a blocked request that can be approved."
-    },
     "parsed_cmd": {
       "items": {
         "$ref": "#/definitions/ParsedCommand"

@@ -6730,14 +6641,6 @@
       "description": "The changes that were applied (mirrors PatchApplyBeginEvent::changes).",
       "type": "object"
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/PatchApplyStatus"
-        }
-      ],
-      "description": "Completion status for this patch application."
-    },
     "stderr": {
       "description": "Captured stderr (parser errors, IO failures, etc.).",
       "type": "string"

@@ -6765,7 +6668,6 @@
     },
     "required": [
       "call_id",
-      "status",
      "stderr",
      "stdout",
      "success",

@@ -1,13 +0,0 @@
-{
-  "$schema": "http://json-schema.org/draft-07/schema#",
-  "properties": {
-    "sessionId": {
-      "type": "string"
-    }
-  },
-  "required": [
-    "sessionId"
-  ],
-  "title": "FuzzyFileSearchSessionCompletedNotification",
-  "type": "object"
-}

@@ -193,11 +193,6 @@
       "default": false,
       "type": "boolean"
     },
-    "isEnabled": {
-      "default": true,
-      "description": "Whether this app is enabled in config.toml. Example: ```toml [apps.bad_app] enabled = false ```",
-      "type": "boolean"
-    },
     "logoUrl": {
       "type": [
         "string",

@@ -246,7 +241,7 @@
       "type": "string"
     },
     {
-      "description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
+      "description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
       "enum": [
         "on-failure"
       ],

@@ -1970,14 +1965,6 @@
       "default": "agent",
       "description": "Where the command originated. Defaults to Agent for backward compatibility."
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/ExecCommandStatus"
-        }
-      ],
-      "description": "Completion status for this command execution."
-    },
     "stderr": {
       "description": "Captured stderr",
       "type": "string"

@@ -2006,7 +1993,6 @@
     "exit_code",
     "formatted_output",
     "parsed_cmd",
-    "status",
     "stderr",
     "stdout",
     "turn_id",

@@ -2059,17 +2045,6 @@
       "description": "The command's working directory.",
       "type": "string"
     },
-    "network_approval_context": {
-      "anyOf": [
-        {
-          "$ref": "#/definitions/NetworkApprovalContext"
-        },
-        {
-          "type": "null"
-        }
-      ],
-      "description": "Optional network context for a blocked request that can be approved."
-    },
     "parsed_cmd": {
       "items": {
         "$ref": "#/definitions/ParsedCommand"

@@ -2446,14 +2421,6 @@
       "description": "The changes that were applied (mirrors PatchApplyBeginEvent::changes).",
       "type": "object"
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/PatchApplyStatus2"
-        }
-      ],
-      "description": "Completion status for this patch application."
-    },
     "stderr": {
       "description": "Captured stderr (parser errors, IO failures, etc.).",
       "type": "string"

@@ -2481,7 +2448,6 @@
     },
     "required": [
      "call_id",
-      "status",
      "stderr",
      "stdout",
      "success",

@@ -3523,14 +3489,6 @@
       ],
       "type": "string"
     },
-    "ExecCommandStatus": {
-      "enum": [
-        "completed",
-        "failed",
-        "declined"
-      ],
-      "type": "string"
-    },
     "ExecOutputStream": {
       "enum": [
         "stdout",

@@ -3763,17 +3721,6 @@
       ],
       "type": "object"
     },
-    "FuzzyFileSearchSessionCompletedNotification": {
-      "properties": {
-        "sessionId": {
-          "type": "string"
-        }
-      },
-      "required": [
-        "sessionId"
-      ],
-      "type": "object"
-    },
     "FuzzyFileSearchSessionUpdatedNotification": {
       "properties": {
         "files": {

@@ -4218,30 +4165,6 @@
       ],
       "type": "string"
     },
-    "NetworkApprovalContext": {
-      "properties": {
-        "host": {
-          "type": "string"
-        },
-        "protocol": {
-          "$ref": "#/definitions/NetworkApprovalProtocol"
-        }
-      },
-      "required": [
-        "host",
-        "protocol"
-      ],
-      "type": "object"
-    },
-    "NetworkApprovalProtocol": {
-      "enum": [
-        "http",
-        "https",
-        "socks5_tcp",
-        "socks5_udp"
-      ],
-      "type": "string"
-    },
     "ParsedCommand": {
       "oneOf": [
         {

@@ -4362,14 +4285,6 @@
       ],
       "type": "string"
     },
-    "PatchApplyStatus2": {
-      "enum": [
-        "completed",
-        "failed",
-        "declined"
-      ],
-      "type": "string"
-    },
     "PatchChangeKind": {
       "oneOf": [
         {

@@ -6043,8 +5958,7 @@
     {
       "enum": [
         "review",
-        "compact",
-        "memory_consolidation"
+        "compact"
       ],
       "type": "string"
     },

@@ -8336,26 +8250,6 @@
       "title": "FuzzyFileSearch/sessionUpdatedNotification",
       "type": "object"
     },
-    {
-      "properties": {
-        "method": {
-          "enum": [
-            "fuzzyFileSearch/sessionCompleted"
-          ],
-          "title": "FuzzyFileSearch/sessionCompletedNotificationMethod",
-          "type": "string"
-        },
-        "params": {
-          "$ref": "#/definitions/FuzzyFileSearchSessionCompletedNotification"
-        }
-      },
-      "required": [
-        "method",
-        "params"
-      ],
-      "title": "FuzzyFileSearch/sessionCompletedNotification",
-      "type": "object"
-    },
     {
       "description": "Notifies the user of world-writable directories on Windows, which cannot be protected by the sandbox.",
       "properties": {

@@ -208,7 +208,7 @@
       "type": "string"
     },
     {
-      "description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
+      "description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
       "enum": [
         "on-failure"
       ],

@@ -3362,14 +3362,6 @@
       "default": "agent",
       "description": "Where the command originated. Defaults to Agent for backward compatibility."
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/ExecCommandStatus"
-        }
-      ],
-      "description": "Completion status for this command execution."
-    },
     "stderr": {
       "description": "Captured stderr",
       "type": "string"

@@ -3398,7 +3390,6 @@
     "exit_code",
     "formatted_output",
     "parsed_cmd",
-    "status",
     "stderr",
     "stdout",
     "turn_id",

@@ -3451,17 +3442,6 @@
       "description": "The command's working directory.",
       "type": "string"
     },
-    "network_approval_context": {
-      "anyOf": [
-        {
-          "$ref": "#/definitions/NetworkApprovalContext"
-        },
-        {
-          "type": "null"
-        }
-      ],
-      "description": "Optional network context for a blocked request that can be approved."
-    },
     "parsed_cmd": {
       "items": {
         "$ref": "#/definitions/ParsedCommand"

@@ -3838,14 +3818,6 @@
       "description": "The changes that were applied (mirrors PatchApplyBeginEvent::changes).",
       "type": "object"
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/v2/PatchApplyStatus"
-        }
-      ],
-      "description": "Completion status for this patch application."
-    },
     "stderr": {
       "description": "Captured stderr (parser errors, IO failures, etc.).",
       "type": "string"

@@ -3873,7 +3845,6 @@
     },
     "required": [
       "call_id",
-      "status",
       "stderr",
       "stdout",
       "success",

@@ -4971,14 +4942,6 @@
       ],
       "type": "string"
     },
-    "ExecCommandStatus": {
-      "enum": [
-        "completed",
-        "failed",
-        "declined"
-      ],
-      "type": "string"
-    },
     "ExecOneOffCommandParams": {
       "$schema": "http://json-schema.org/draft-07/schema#",
       "properties": {

@@ -5423,19 +5386,6 @@
       ],
       "type": "object"
     },
-    "FuzzyFileSearchSessionCompletedNotification": {
-      "$schema": "http://json-schema.org/draft-07/schema#",
-      "properties": {
-        "sessionId": {
-          "type": "string"
-        }
-      },
-      "required": [
-        "sessionId"
-      ],
-      "title": "FuzzyFileSearchSessionCompletedNotification",
-      "type": "object"
-    },
     "FuzzyFileSearchSessionUpdatedNotification": {
       "$schema": "http://json-schema.org/draft-07/schema#",
       "properties": {

@@ -6274,30 +6224,6 @@
       ],
       "type": "string"
     },
-    "NetworkApprovalContext": {
-      "properties": {
-        "host": {
-          "type": "string"
-        },
-        "protocol": {
-          "$ref": "#/definitions/NetworkApprovalProtocol"
-        }
-      },
-      "required": [
-        "host",
-        "protocol"
-      ],
-      "type": "object"
-    },
-    "NetworkApprovalProtocol": {
-      "enum": [
-        "http",
-        "https",
-        "socks5_tcp",
-        "socks5_udp"
-      ],
-      "type": "string"
-    },
     "NewConversationParams": {
       "properties": {
         "approvalPolicy": {

@@ -6520,14 +6446,6 @@
       }
      ]
    },
-    "PatchApplyStatus": {
-      "enum": [
-        "completed",
-        "failed",
-        "declined"
-      ],
-      "type": "string"
-    },
     "PlanItemArg": {
       "additionalProperties": false,
       "properties": {

@@ -8551,26 +8469,6 @@
       "title": "FuzzyFileSearch/sessionUpdatedNotification",
       "type": "object"
     },
-    {
-      "properties": {
-        "method": {
-          "enum": [
-            "fuzzyFileSearch/sessionCompleted"
-          ],
-          "title": "FuzzyFileSearch/sessionCompletedNotificationMethod",
-          "type": "string"
-        },
-        "params": {
-          "$ref": "#/definitions/FuzzyFileSearchSessionCompletedNotification"
-        }
-      },
-      "required": [
-        "method",
-        "params"
-      ],
-      "title": "FuzzyFileSearch/sessionCompletedNotification",
-      "type": "object"
-    },
     {
       "description": "Notifies the user of world-writable directories on Windows, which cannot be protected by the sandbox.",
       "properties": {

@@ -9194,8 +9092,7 @@
     {
       "enum": [
         "review",
-        "compact",
-        "memory_consolidation"
+        "compact"
       ],
       "type": "string"
     },

@@ -10270,11 +10167,6 @@
       "default": false,
       "type": "boolean"
     },
-    "isEnabled": {
-      "default": true,
-      "description": "Whether this app is enabled in config.toml. Example: ```toml [apps.bad_app] enabled = false ```",
-      "type": "boolean"
-    },
     "logoUrl": {
       "type": [
         "string",

@@ -12646,9 +12538,6 @@
     "displayName": {
       "type": "string"
     },
-    "hidden": {
-      "type": "boolean"
-    },
     "id": {
       "type": "string"
     },

@@ -12689,7 +12578,6 @@
     "defaultReasoningEffort",
     "description",
     "displayName",
-    "hidden",
     "id",
     "isDefault",
     "model",

@@ -12707,13 +12595,6 @@
         "null"
       ]
     },
-    "includeHidden": {
-      "description": "When true, include models that are hidden from the default picker list.",
-      "type": [
-        "boolean",
-        "null"
-      ]
-    },
     "limit": {
       "description": "Optional page size; defaults to a reasonable server-side value.",
       "format": "uint32",

@@ -14560,8 +14441,7 @@
     {
       "enum": [
         "review",
-        "compact",
-        "memory_consolidation"
+        "compact"
      ],
      "type": "string"
    },

@@ -15415,13 +15295,6 @@
         "null"
       ]
     },
-    "cwd": {
-      "description": "Optional cwd filter; when set, only threads whose session cwd exactly matches this path are returned.",
-      "type": [
-        "string",
-        "null"
-      ]
-    },
     "limit": {
       "description": "Optional page size; defaults to a reasonable server-side value.",
       "format": "uint32",

@@ -12,7 +12,7 @@
       "type": "string"
     },
     {
-      "description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
+      "description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
       "enum": [
         "on-failure"
       ],

@@ -104,7 +104,7 @@
       "type": "string"
     },
     {
-      "description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
+      "description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
       "enum": [
         "on-failure"
       ],

@@ -1349,14 +1349,6 @@
       "default": "agent",
       "description": "Where the command originated. Defaults to Agent for backward compatibility."
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/ExecCommandStatus"
-        }
-      ],
-      "description": "Completion status for this command execution."
-    },
     "stderr": {
       "description": "Captured stderr",
       "type": "string"

@@ -1385,7 +1377,6 @@
     "exit_code",
     "formatted_output",
     "parsed_cmd",
-    "status",
     "stderr",
     "stdout",
     "turn_id",

@@ -1438,17 +1429,6 @@
       "description": "The command's working directory.",
       "type": "string"
     },
-    "network_approval_context": {
-      "anyOf": [
-        {
-          "$ref": "#/definitions/NetworkApprovalContext"
-        },
-        {
-          "type": "null"
-        }
-      ],
-      "description": "Optional network context for a blocked request that can be approved."
-    },
     "parsed_cmd": {
       "items": {
         "$ref": "#/definitions/ParsedCommand"

@@ -1825,14 +1805,6 @@
       "description": "The changes that were applied (mirrors PatchApplyBeginEvent::changes).",
       "type": "object"
     },
-    "status": {
-      "allOf": [
-        {
-          "$ref": "#/definitions/PatchApplyStatus"
-        }
-      ],
-      "description": "Completion status for this patch application."
-    },
     "stderr": {
       "description": "Captured stderr (parser errors, IO failures, etc.).",
       "type": "string"

@@ -1860,7 +1832,6 @@
     },
     "required": [
       "call_id",
-      "status",
       "stderr",
       "stdout",
       "success",

@@ -2902,14 +2873,6 @@
       ],
       "type": "string"
     },
-    "ExecCommandStatus": {
-      "enum": [
-        "completed",
-        "failed",
-        "declined"
-      ],
-      "type": "string"
-    },
     "ExecOutputStream": {
       "enum": [
         "stdout",

@@ -3326,30 +3289,6 @@
       ],
       "type": "string"
     },
-    "NetworkApprovalContext": {
-      "properties": {
-        "host": {
-          "type": "string"
-        },
-        "protocol": {
-          "$ref": "#/definitions/NetworkApprovalProtocol"
-        }
-      },
-      "required": [
-        "host",
-        "protocol"
-      ],
-      "type": "object"
-    },
-    "NetworkApprovalProtocol": {
-      "enum": [
-        "http",
-        "https",
-        "socks5_tcp",
-        "socks5_udp"
-      ],
-      "type": "string"
-    },
     "ParsedCommand": {
       "oneOf": [
         {

@@ -3461,14 +3400,6 @@
       }
     ]
   },
-    "PatchApplyStatus": {
-      "enum": [
-        "completed",
-        "failed",
-        "declined"
-      ],
-      "type": "string"
-    },
     "PlanItemArg": {
       "additionalProperties": false,
       "properties": {

@@ -113,8 +113,7 @@
     {
       "enum": [
         "review",
-        "compact",
-        "memory_consolidation"
+        "compact"
       ],
       "type": "string"
     },

@@ -16,7 +16,7 @@
       "type": "string"
     },
     {
-      "description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
+      "description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
       "enum": [
         "on-failure"
       ],
@@ -113,8 +113,7 @@
|
||||
{
|
||||
"enum": [
|
||||
"review",
|
||||
"compact",
|
||||
"memory_consolidation"
|
||||
"compact"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
|
||||
"description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
|
||||
"enum": [
|
||||
"on-failure"
|
||||
],
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
|
||||
"description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
|
||||
"enum": [
|
||||
"on-failure"
|
||||
],
|
||||
|
||||
@@ -104,7 +104,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
|
||||
"description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
|
||||
"enum": [
|
||||
"on-failure"
|
||||
],
|
||||
@@ -1349,14 +1349,6 @@
|
||||
"default": "agent",
|
||||
"description": "Where the command originated. Defaults to Agent for backward compatibility."
|
||||
},
|
||||
"status": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ExecCommandStatus"
|
||||
}
|
||||
],
|
||||
"description": "Completion status for this command execution."
|
||||
},
|
||||
"stderr": {
|
||||
"description": "Captured stderr",
|
||||
"type": "string"
|
||||
@@ -1385,7 +1377,6 @@
|
||||
"exit_code",
|
||||
"formatted_output",
|
||||
"parsed_cmd",
|
||||
"status",
|
||||
"stderr",
|
||||
"stdout",
|
||||
"turn_id",
|
||||
@@ -1438,17 +1429,6 @@
|
||||
"description": "The command's working directory.",
|
||||
"type": "string"
|
||||
},
|
||||
"network_approval_context": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/NetworkApprovalContext"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional network context for a blocked request that can be approved."
|
||||
},
|
||||
"parsed_cmd": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/ParsedCommand"
|
||||
@@ -1825,14 +1805,6 @@
|
||||
"description": "The changes that were applied (mirrors PatchApplyBeginEvent::changes).",
|
||||
"type": "object"
|
||||
},
|
||||
"status": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PatchApplyStatus"
|
||||
}
|
||||
],
|
||||
"description": "Completion status for this patch application."
|
||||
},
|
||||
"stderr": {
|
||||
"description": "Captured stderr (parser errors, IO failures, etc.).",
|
||||
"type": "string"
|
||||
@@ -1860,7 +1832,6 @@
|
||||
},
|
||||
"required": [
|
||||
"call_id",
|
||||
"status",
|
||||
"stderr",
|
||||
"stdout",
|
||||
"success",
|
||||
@@ -2902,14 +2873,6 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ExecCommandStatus": {
|
||||
"enum": [
|
||||
"completed",
|
||||
"failed",
|
||||
"declined"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ExecOutputStream": {
|
||||
"enum": [
|
||||
"stdout",
|
||||
@@ -3326,30 +3289,6 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"NetworkApprovalContext": {
|
||||
"properties": {
|
||||
"host": {
|
||||
"type": "string"
|
||||
},
|
||||
"protocol": {
|
||||
"$ref": "#/definitions/NetworkApprovalProtocol"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"host",
|
||||
"protocol"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"NetworkApprovalProtocol": {
|
||||
"enum": [
|
||||
"http",
|
||||
"https",
|
||||
"socks5_tcp",
|
||||
"socks5_udp"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ParsedCommand": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -3461,14 +3400,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"PatchApplyStatus": {
|
||||
"enum": [
|
||||
"completed",
|
||||
"failed",
|
||||
"declined"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PlanItemArg": {
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
|
||||
"description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
|
||||
"enum": [
|
||||
"on-failure"
|
||||
],
|
||||
|
||||
@@ -104,7 +104,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
|
||||
"description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
|
||||
"enum": [
|
||||
"on-failure"
|
||||
],
|
||||
@@ -1349,14 +1349,6 @@
|
||||
"default": "agent",
|
||||
"description": "Where the command originated. Defaults to Agent for backward compatibility."
|
||||
},
|
||||
"status": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ExecCommandStatus"
|
||||
}
|
||||
],
|
||||
"description": "Completion status for this command execution."
|
||||
},
|
||||
"stderr": {
|
||||
"description": "Captured stderr",
|
||||
"type": "string"
|
||||
@@ -1385,7 +1377,6 @@
|
||||
"exit_code",
|
||||
"formatted_output",
|
||||
"parsed_cmd",
|
||||
"status",
|
||||
"stderr",
|
||||
"stdout",
|
||||
"turn_id",
|
||||
@@ -1438,17 +1429,6 @@
|
||||
"description": "The command's working directory.",
|
||||
"type": "string"
|
||||
},
|
||||
"network_approval_context": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/NetworkApprovalContext"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional network context for a blocked request that can be approved."
|
||||
},
|
||||
"parsed_cmd": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/ParsedCommand"
|
||||
@@ -1825,14 +1805,6 @@
|
||||
"description": "The changes that were applied (mirrors PatchApplyBeginEvent::changes).",
|
||||
"type": "object"
|
||||
},
|
||||
"status": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PatchApplyStatus"
|
||||
}
|
||||
],
|
||||
"description": "Completion status for this patch application."
|
||||
},
|
||||
"stderr": {
|
||||
"description": "Captured stderr (parser errors, IO failures, etc.).",
|
||||
"type": "string"
|
||||
@@ -1860,7 +1832,6 @@
|
||||
},
|
||||
"required": [
|
||||
"call_id",
|
||||
"status",
|
||||
"stderr",
|
||||
"stdout",
|
||||
"success",
|
||||
@@ -2902,14 +2873,6 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ExecCommandStatus": {
|
||||
"enum": [
|
||||
"completed",
|
||||
"failed",
|
||||
"declined"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ExecOutputStream": {
|
||||
"enum": [
|
||||
"stdout",
|
||||
@@ -3326,30 +3289,6 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"NetworkApprovalContext": {
|
||||
"properties": {
|
||||
"host": {
|
||||
"type": "string"
|
||||
},
|
||||
"protocol": {
|
||||
"$ref": "#/definitions/NetworkApprovalProtocol"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"host",
|
||||
"protocol"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"NetworkApprovalProtocol": {
|
||||
"enum": [
|
||||
"http",
|
||||
"https",
|
||||
"socks5_tcp",
|
||||
"socks5_udp"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ParsedCommand": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -3461,14 +3400,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"PatchApplyStatus": {
|
||||
"enum": [
|
||||
"completed",
|
||||
"failed",
|
||||
"declined"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PlanItemArg": {
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
|
||||
@@ -29,11 +29,6 @@
|
||||
"default": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
"isEnabled": {
|
||||
"default": true,
|
||||
"description": "Whether this app is enabled in config.toml. Example: ```toml [apps.bad_app] enabled = false ```",
|
||||
"type": "boolean"
|
||||
},
|
||||
"logoUrl": {
|
||||
"type": [
|
||||
"string",
|
||||
|
||||
@@ -29,11 +29,6 @@
|
||||
"default": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
"isEnabled": {
|
||||
"default": true,
|
||||
"description": "Whether this app is enabled in config.toml. Example: ```toml [apps.bad_app] enabled = false ```",
|
||||
"type": "boolean"
|
||||
},
|
||||
"logoUrl": {
|
||||
"type": [
|
||||
"string",
|
||||
|
||||
@@ -8,13 +8,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"includeHidden": {
|
||||
"description": "When true, include models that are hidden from the default picker list.",
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional page size; defaults to a reasonable server-side value.",
|
||||
"format": "uint32",
|
||||
|
||||
@@ -31,9 +31,6 @@
|
||||
"displayName": {
|
||||
"type": "string"
|
||||
},
|
||||
"hidden": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -74,7 +71,6 @@
|
||||
"defaultReasoningEffort",
|
||||
"description",
|
||||
"displayName",
|
||||
"hidden",
|
||||
"id",
|
||||
"isDefault",
|
||||
"model",
|
||||
|
||||
@@ -666,8 +666,7 @@
|
||||
{
|
||||
"enum": [
|
||||
"review",
|
||||
"compact",
|
||||
"memory_consolidation"
|
||||
"compact"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
@@ -39,13 +39,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"cwd": {
|
||||
"description": "Optional cwd filter; when set, only threads whose session cwd exactly matches this path are returned.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional page size; defaults to a reasonable server-side value.",
|
||||
"format": "uint32",
|
||||
|
||||
@@ -472,8 +472,7 @@
|
||||
{
|
||||
"enum": [
|
||||
"review",
|
||||
"compact",
|
||||
"memory_consolidation"
|
||||
"compact"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
@@ -472,8 +472,7 @@
|
||||
{
|
||||
"enum": [
|
||||
"review",
|
||||
"compact",
|
||||
"memory_consolidation"
|
||||
"compact"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
@@ -666,8 +666,7 @@
|
||||
{
|
||||
"enum": [
|
||||
"review",
|
||||
"compact",
|
||||
"memory_consolidation"
|
||||
"compact"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
@@ -472,8 +472,7 @@
|
||||
{
|
||||
"enum": [
|
||||
"review",
|
||||
"compact",
|
||||
"memory_consolidation"
|
||||
"compact"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
@@ -666,8 +666,7 @@
|
||||
{
|
||||
"enum": [
|
||||
"review",
|
||||
"compact",
|
||||
"memory_consolidation"
|
||||
"compact"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
@@ -472,8 +472,7 @@
|
||||
{
|
||||
"enum": [
|
||||
"review",
|
||||
"compact",
|
||||
"memory_consolidation"
|
||||
"compact"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
@@ -472,8 +472,7 @@
|
||||
{
|
||||
"enum": [
|
||||
"review",
|
||||
"compact",
|
||||
"memory_consolidation"
|
||||
"compact"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ExecPolicyAmendment } from "./ExecPolicyAmendment";
|
||||
import type { NetworkApprovalContext } from "./NetworkApprovalContext";
|
||||
import type { ParsedCommand } from "./ParsedCommand";
|
||||
|
||||
export type ExecApprovalRequestEvent = {
|
||||
@@ -27,10 +26,6 @@ cwd: string,
|
||||
* Optional human-readable reason for the approval (e.g. retry without sandbox).
|
||||
*/
|
||||
reason: string | null,
|
||||
/**
|
||||
* Optional network context for a blocked request that can be approved.
|
||||
*/
|
||||
network_approval_context?: NetworkApprovalContext,
|
||||
/**
|
||||
* Proposed execpolicy amendment that can be applied to allow future runs.
|
||||
*/
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ExecCommandSource } from "./ExecCommandSource";
|
||||
import type { ExecCommandStatus } from "./ExecCommandStatus";
|
||||
import type { ParsedCommand } from "./ParsedCommand";
|
||||
|
||||
export type ExecCommandEndEvent = {
|
||||
@@ -57,8 +56,4 @@ duration: string,
|
||||
/**
|
||||
* Formatted output from the command, as seen by the model.
|
||||
*/
|
||||
formatted_output: string,
|
||||
/**
|
||||
* Completion status for this command execution.
|
||||
*/
|
||||
status: ExecCommandStatus, };
|
||||
formatted_output: string, };
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type ExecCommandStatus = "completed" | "failed" | "declined";
|
||||
@@ -1,5 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type FuzzyFileSearchSessionCompletedNotification = { sessionId: string, };
|
||||
@@ -1,6 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { NetworkApprovalProtocol } from "./NetworkApprovalProtocol";
|
||||
|
||||
export type NetworkApprovalContext = { host: string, protocol: NetworkApprovalProtocol, };
|
||||
@@ -1,5 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type NetworkApprovalProtocol = "http" | "https" | "socks5_tcp" | "socks5_udp";
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { FileChange } from "./FileChange";
|
||||
import type { PatchApplyStatus } from "./PatchApplyStatus";
|
||||
|
||||
export type PatchApplyEndEvent = {
|
||||
/**
|
||||
@@ -29,8 +28,4 @@ success: boolean,
|
||||
/**
|
||||
* The changes that were applied (mirrors PatchApplyBeginEvent::changes).
|
||||
*/
|
||||
changes: { [key in string]?: FileChange },
|
||||
/**
|
||||
* Completion status for this patch application.
|
||||
*/
|
||||
status: PatchApplyStatus, };
|
||||
changes: { [key in string]?: FileChange }, };
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type PatchApplyStatus = "completed" | "failed" | "declined";
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { AuthStatusChangeNotification } from "./AuthStatusChangeNotification";
|
||||
import type { FuzzyFileSearchSessionCompletedNotification } from "./FuzzyFileSearchSessionCompletedNotification";
|
||||
import type { FuzzyFileSearchSessionUpdatedNotification } from "./FuzzyFileSearchSessionUpdatedNotification";
|
||||
import type { LoginChatGptCompleteNotification } from "./LoginChatGptCompleteNotification";
|
||||
import type { SessionConfiguredNotification } from "./SessionConfiguredNotification";
|
||||
@@ -39,4 +38,4 @@ import type { WindowsWorldWritableWarningNotification } from "./v2/WindowsWorldW
|
||||
/**
|
||||
* Notification sent from the server to the client.
|
||||
*/
|
||||
export type ServerNotification = { "method": "error", "params": ErrorNotification } | { "method": "thread/started", "params": ThreadStartedNotification } | { "method": "thread/name/updated", "params": ThreadNameUpdatedNotification } | { "method": "thread/tokenUsage/updated", "params": ThreadTokenUsageUpdatedNotification } | { "method": "turn/started", "params": TurnStartedNotification } | { "method": "turn/completed", "params": TurnCompletedNotification } | { "method": "turn/diff/updated", "params": TurnDiffUpdatedNotification } | { "method": "turn/plan/updated", "params": TurnPlanUpdatedNotification } | { "method": "item/started", "params": ItemStartedNotification } | { "method": "item/completed", "params": ItemCompletedNotification } | { "method": "rawResponseItem/completed", "params": RawResponseItemCompletedNotification } | { "method": "item/agentMessage/delta", "params": AgentMessageDeltaNotification } | { "method": "item/plan/delta", "params": PlanDeltaNotification } | { "method": "item/commandExecution/outputDelta", "params": CommandExecutionOutputDeltaNotification } | { "method": "item/commandExecution/terminalInteraction", "params": TerminalInteractionNotification } | { "method": "item/fileChange/outputDelta", "params": FileChangeOutputDeltaNotification } | { "method": "item/mcpToolCall/progress", "params": McpToolCallProgressNotification } | { "method": "mcpServer/oauthLogin/completed", "params": McpServerOauthLoginCompletedNotification } | { "method": "account/updated", "params": AccountUpdatedNotification } | { "method": "account/rateLimits/updated", "params": AccountRateLimitsUpdatedNotification } | { "method": "app/list/updated", "params": AppListUpdatedNotification } | { "method": "item/reasoning/summaryTextDelta", "params": ReasoningSummaryTextDeltaNotification } | { "method": "item/reasoning/summaryPartAdded", "params": ReasoningSummaryPartAddedNotification } | { "method": "item/reasoning/textDelta", "params": ReasoningTextDeltaNotification } | { "method": "thread/compacted", "params": ContextCompactedNotification } | { "method": "deprecationNotice", "params": DeprecationNoticeNotification } | { "method": "configWarning", "params": ConfigWarningNotification } | { "method": "fuzzyFileSearch/sessionUpdated", "params": FuzzyFileSearchSessionUpdatedNotification } | { "method": "fuzzyFileSearch/sessionCompleted", "params": FuzzyFileSearchSessionCompletedNotification } | { "method": "windows/worldWritableWarning", "params": WindowsWorldWritableWarningNotification } | { "method": "account/login/completed", "params": AccountLoginCompletedNotification } | { "method": "authStatusChange", "params": AuthStatusChangeNotification } | { "method": "loginChatGptComplete", "params": LoginChatGptCompleteNotification } | { "method": "sessionConfigured", "params": SessionConfiguredNotification };
|
||||
export type ServerNotification = { "method": "error", "params": ErrorNotification } | { "method": "thread/started", "params": ThreadStartedNotification } | { "method": "thread/name/updated", "params": ThreadNameUpdatedNotification } | { "method": "thread/tokenUsage/updated", "params": ThreadTokenUsageUpdatedNotification } | { "method": "turn/started", "params": TurnStartedNotification } | { "method": "turn/completed", "params": TurnCompletedNotification } | { "method": "turn/diff/updated", "params": TurnDiffUpdatedNotification } | { "method": "turn/plan/updated", "params": TurnPlanUpdatedNotification } | { "method": "item/started", "params": ItemStartedNotification } | { "method": "item/completed", "params": ItemCompletedNotification } | { "method": "rawResponseItem/completed", "params": RawResponseItemCompletedNotification } | { "method": "item/agentMessage/delta", "params": AgentMessageDeltaNotification } | { "method": "item/plan/delta", "params": PlanDeltaNotification } | { "method": "item/commandExecution/outputDelta", "params": CommandExecutionOutputDeltaNotification } | { "method": "item/commandExecution/terminalInteraction", "params": TerminalInteractionNotification } | { "method": "item/fileChange/outputDelta", "params": FileChangeOutputDeltaNotification } | { "method": "item/mcpToolCall/progress", "params": McpToolCallProgressNotification } | { "method": "mcpServer/oauthLogin/completed", "params": McpServerOauthLoginCompletedNotification } | { "method": "account/updated", "params": AccountUpdatedNotification } | { "method": "account/rateLimits/updated", "params": AccountRateLimitsUpdatedNotification } | { "method": "app/list/updated", "params": AppListUpdatedNotification } | { "method": "item/reasoning/summaryTextDelta", "params": ReasoningSummaryTextDeltaNotification } | { "method": "item/reasoning/summaryPartAdded", "params": ReasoningSummaryPartAddedNotification } | { "method": "item/reasoning/textDelta", "params": ReasoningTextDeltaNotification } | { "method": "thread/compacted", "params": ContextCompactedNotification } | { "method": "deprecationNotice", "params": DeprecationNoticeNotification } | { "method": "configWarning", "params": ConfigWarningNotification } | { "method": "fuzzyFileSearch/sessionUpdated", "params": FuzzyFileSearchSessionUpdatedNotification } | { "method": "windows/worldWritableWarning", "params": WindowsWorldWritableWarningNotification } | { "method": "account/login/completed", "params": AccountLoginCompletedNotification } | { "method": "authStatusChange", "params": AuthStatusChangeNotification } | { "method": "loginChatGptComplete", "params": LoginChatGptCompleteNotification } | { "method": "sessionConfigured", "params": SessionConfiguredNotification };
|
||||
|
||||
@@ -3,4 +3,4 @@
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ThreadId } from "./ThreadId";
|
||||
|
||||
export type SubAgentSource = "review" | "compact" | { "thread_spawn": { parent_thread_id: ThreadId, depth: number, } } | "memory_consolidation" | { "other": string };
|
||||
export type SubAgentSource = "review" | "compact" | { "thread_spawn": { parent_thread_id: ThreadId, depth: number, } } | { "other": string };
|
||||
|
||||
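The `SubAgentSource` hunk above narrows the generated union by dropping `memory_consolidation`. A sketch of what a consumer-side match looks like against the narrowed type, assuming a Rust enum mirroring the TS output (the serde attributes and the `String` stand-in for `ThreadId` are illustrative):

```rust
use serde::{Deserialize, Serialize};

// Assumed mirror of: "review" | "compact" | { "thread_spawn": {...} } | { "other": string }
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
enum SubAgentSource {
    Review,
    Compact,
    ThreadSpawn { parent_thread_id: String, depth: u32 },
    Other(String),
}

fn describe(source: &SubAgentSource) -> &'static str {
    // A previously exhaustive match must drop its memory_consolidation arm
    // once the variant disappears from the union.
    match source {
        SubAgentSource::Review => "review sub-agent",
        SubAgentSource::Compact => "compaction sub-agent",
        SubAgentSource::ThreadSpawn { .. } => "spawned child thread",
        SubAgentSource::Other(_) => "other",
    }
}
```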
@@ -62,7 +62,6 @@ export type { ExecCommandBeginEvent } from "./ExecCommandBeginEvent";
export type { ExecCommandEndEvent } from "./ExecCommandEndEvent";
export type { ExecCommandOutputDeltaEvent } from "./ExecCommandOutputDeltaEvent";
export type { ExecCommandSource } from "./ExecCommandSource";
export type { ExecCommandStatus } from "./ExecCommandStatus";
export type { ExecOneOffCommandParams } from "./ExecOneOffCommandParams";
export type { ExecOneOffCommandResponse } from "./ExecOneOffCommandResponse";
export type { ExecOutputStream } from "./ExecOutputStream";
@@ -78,7 +77,6 @@ export type { FunctionCallOutputPayload } from "./FunctionCallOutputPayload";
export type { FuzzyFileSearchParams } from "./FuzzyFileSearchParams";
export type { FuzzyFileSearchResponse } from "./FuzzyFileSearchResponse";
export type { FuzzyFileSearchResult } from "./FuzzyFileSearchResult";
export type { FuzzyFileSearchSessionCompletedNotification } from "./FuzzyFileSearchSessionCompletedNotification";
export type { FuzzyFileSearchSessionUpdatedNotification } from "./FuzzyFileSearchSessionUpdatedNotification";
export type { GetAuthStatusParams } from "./GetAuthStatusParams";
export type { GetAuthStatusResponse } from "./GetAuthStatusResponse";
@@ -126,14 +124,11 @@ export type { McpToolCallEndEvent } from "./McpToolCallEndEvent";
export type { MessagePhase } from "./MessagePhase";
export type { ModeKind } from "./ModeKind";
export type { NetworkAccess } from "./NetworkAccess";
export type { NetworkApprovalContext } from "./NetworkApprovalContext";
export type { NetworkApprovalProtocol } from "./NetworkApprovalProtocol";
export type { NewConversationParams } from "./NewConversationParams";
export type { NewConversationResponse } from "./NewConversationResponse";
export type { ParsedCommand } from "./ParsedCommand";
export type { PatchApplyBeginEvent } from "./PatchApplyBeginEvent";
export type { PatchApplyEndEvent } from "./PatchApplyEndEvent";
export type { PatchApplyStatus } from "./PatchApplyStatus";
export type { Personality } from "./Personality";
export type { PlanDeltaEvent } from "./PlanDeltaEvent";
export type { PlanItem } from "./PlanItem";

@@ -5,13 +5,4 @@
/**
* EXPERIMENTAL - app metadata returned by app-list APIs.
*/
export type AppInfo = { id: string, name: string, description: string | null, logoUrl: string | null, logoUrlDark: string | null, distributionChannel: string | null, installUrl: string | null, isAccessible: boolean,
/**
* Whether this app is enabled in config.toml.
* Example:
* ```toml
* [apps.bad_app]
* enabled = false
* ```
*/
isEnabled: boolean, };
export type AppInfo = { id: string, name: string, description: string | null, logoUrl: string | null, logoUrlDark: string | null, distributionChannel: string | null, installUrl: string | null, isAccessible: boolean, };

@@ -5,4 +5,4 @@ import type { InputModality } from "../InputModality";
import type { ReasoningEffort } from "../ReasoningEffort";
import type { ReasoningEffortOption } from "./ReasoningEffortOption";

export type Model = { id: string, model: string, upgrade: string | null, displayName: string, description: string, hidden: boolean, supportedReasoningEfforts: Array<ReasoningEffortOption>, defaultReasoningEffort: ReasoningEffort, inputModalities: Array<InputModality>, supportsPersonality: boolean, isDefault: boolean, };
export type Model = { id: string, model: string, upgrade: string | null, displayName: string, description: string, supportedReasoningEfforts: Array<ReasoningEffortOption>, defaultReasoningEffort: ReasoningEffort, inputModalities: Array<InputModality>, supportsPersonality: boolean, isDefault: boolean, };

@@ -10,8 +10,4 @@ cursor?: string | null,
/**
* Optional page size; defaults to a reasonable server-side value.
*/
limit?: number | null,
/**
* When true, include models that are hidden from the default picker list.
*/
includeHidden?: boolean | null, };
limit?: number | null, };

@@ -21,8 +21,4 @@ export type ThreadForkParams = {threadId: string, /**
path?: string | null, /**
* Configuration overrides for the forked thread, if any.
*/
model?: string | null, modelProvider?: string | null, cwd?: string | null, approvalPolicy?: AskForApproval | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, /**
* If true, persist additional rollout EventMsg variants required to
* reconstruct a richer thread history on subsequent resume/fork/read.
*/
persistExtendedHistory: boolean};
model?: string | null, modelProvider?: string | null, cwd?: string | null, approvalPolicy?: AskForApproval | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null};

@@ -31,9 +31,4 @@ sourceKinds?: Array<ThreadSourceKind> | null,
* Optional archived filter; when set to true, only archived threads are returned.
* If false or null, only non-archived threads are returned.
*/
archived?: boolean | null,
/**
* Optional cwd filter; when set, only threads whose session cwd exactly
* matches this path are returned.
*/
cwd?: string | null, };
archived?: boolean | null, };

@@ -30,8 +30,4 @@ history?: Array<ResponseItem> | null, /**
path?: string | null, /**
* Configuration overrides for the resumed thread, if any.
*/
model?: string | null, modelProvider?: string | null, cwd?: string | null, approvalPolicy?: AskForApproval | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null, /**
* If true, persist additional rollout EventMsg variants required to
* reconstruct a richer thread history on subsequent resume/fork/read.
*/
persistExtendedHistory: boolean};
model?: string | null, modelProvider?: string | null, cwd?: string | null, approvalPolicy?: AskForApproval | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null};

@@ -10,8 +10,4 @@ export type ThreadStartParams = {model?: string | null, modelProvider?: string |
* If true, opt into emitting raw Responses API items on the event stream.
* This is for internal use only (e.g. Codex Cloud).
*/
experimentalRawEvents: boolean, /**
* If true, persist additional rollout EventMsg variants required to
* reconstruct a richer thread history on resume/fork/read.
*/
persistExtendedHistory: boolean};
experimentalRawEvents: boolean};

@@ -758,13 +758,6 @@ pub struct FuzzyFileSearchSessionUpdatedNotification {
pub files: Vec<FuzzyFileSearchResult>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
pub struct FuzzyFileSearchSessionCompletedNotification {
pub session_id: String,
}

server_notification_definitions! {
/// NEW NOTIFICATIONS
Error => "error" (v2::ErrorNotification),
@@ -798,7 +791,6 @@ server_notification_definitions! {
DeprecationNotice => "deprecationNotice" (v2::DeprecationNoticeNotification),
ConfigWarning => "configWarning" (v2::ConfigWarningNotification),
FuzzyFileSearchSessionUpdated => "fuzzyFileSearch/sessionUpdated" (FuzzyFileSearchSessionUpdatedNotification),
FuzzyFileSearchSessionCompleted => "fuzzyFileSearch/sessionCompleted" (FuzzyFileSearchSessionCompletedNotification),

/// Notifies the user of world-writable directories on Windows, which cannot be protected by the sandbox.
WindowsWorldWritableWarning => "windows/worldWritableWarning" (v2::WindowsWorldWritableWarningNotification),
@@ -1235,8 +1227,7 @@ mod tests {
"id": 6,
"params": {
"limit": null,
"cursor": null,
"includeHidden": null
"cursor": null
}
}),
serde_json::to_value(&request)?,
@@ -1332,4 +1323,17 @@ mod tests {
let reason = crate::experimental_api::ExperimentalApi::experimental_reason(&request);
assert_eq!(reason, Some("mock/experimentalMethod"));
}

#[test]
fn thread_start_mock_field_is_marked_experimental() {
let request = ClientRequest::ThreadStart {
request_id: RequestId::Integer(1),
params: v2::ThreadStartParams {
mock_experimental_field: Some("mock".to_string()),
..Default::default()
},
};
let reason = crate::experimental_api::ExperimentalApi::experimental_reason(&request);
assert_eq!(reason, Some("thread/start.mockExperimentalField"));
}
}

File diff suppressed because it is too large
@@ -29,9 +29,7 @@ use codex_protocol::protocol::AgentStatus as CoreAgentStatus;
use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
use codex_protocol::protocol::CodexErrorInfo as CoreCodexErrorInfo;
use codex_protocol::protocol::CreditsSnapshot as CoreCreditsSnapshot;
use codex_protocol::protocol::ExecCommandStatus as CoreExecCommandStatus;
use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
use codex_protocol::protocol::PatchApplyStatus as CorePatchApplyStatus;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::protocol::ReadOnlyAccess as CoreReadOnlyAccess;
@@ -1110,9 +1108,6 @@ pub struct ModelListParams {
/// Optional page size; defaults to a reasonable server-side value.
#[ts(optional = nullable)]
pub limit: Option<u32>,
/// When true, include models that are hidden from the default picker list.
#[ts(optional = nullable)]
pub include_hidden: Option<bool>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
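Tying the struct change to the wire format: with `include_hidden` gone, a `model/list` request carries only the remaining fields, which is what the updated test expectation earlier in this diff asserts. A minimal sketch (the struct is redeclared locally for illustration):

```rust
use serde::Serialize;

// Local stand-in for the trimmed ModelListParams above.
#[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")]
struct ModelListParams {
    cursor: Option<String>,
    limit: Option<u32>,
}

fn main() -> serde_json::Result<()> {
    let params = ModelListParams::default();
    // Prints {"cursor":null,"limit":null}; the "includeHidden" key is gone.
    println!("{}", serde_json::to_string(&params)?);
    Ok(())
}
```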
@@ -1124,7 +1119,6 @@ pub struct Model {
pub upgrade: Option<String>,
pub display_name: String,
pub description: String,
pub hidden: bool,
pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
pub default_reasoning_effort: ReasoningEffort,
#[serde(default = "default_input_modalities")]
@@ -1294,14 +1288,6 @@ pub struct AppInfo {
pub install_url: Option<String>,
#[serde(default)]
pub is_accessible: bool,
/// Whether this app is enabled in config.toml.
/// Example:
/// ```toml
/// [apps.bad_app]
/// enabled = false
/// ```
#[serde(default = "default_enabled")]
pub is_enabled: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1436,11 +1422,6 @@ pub struct ThreadStartParams {
#[experimental("thread/start.experimentalRawEvents")]
#[serde(default)]
pub experimental_raw_events: bool,
/// If true, persist additional rollout EventMsg variants required to
/// reconstruct a richer thread history on resume/fork/read.
#[experimental("thread/start.persistFullHistory")]
#[serde(default)]
pub persist_extended_history: bool,
}

#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, JsonSchema, TS)]
@@ -1522,11 +1503,6 @@ pub struct ThreadResumeParams {
pub developer_instructions: Option<String>,
#[ts(optional = nullable)]
pub personality: Option<Personality>,
/// If true, persist additional rollout EventMsg variants required to
/// reconstruct a richer thread history on subsequent resume/fork/read.
#[experimental("thread/resume.persistFullHistory")]
#[serde(default)]
pub persist_extended_history: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1580,11 +1556,6 @@ pub struct ThreadForkParams {
pub base_instructions: Option<String>,
#[ts(optional = nullable)]
pub developer_instructions: Option<String>,
/// If true, persist additional rollout EventMsg variants required to
/// reconstruct a richer thread history on subsequent resume/fork/read.
#[experimental("thread/fork.persistFullHistory")]
#[serde(default)]
pub persist_extended_history: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1712,10 +1683,6 @@ pub struct ThreadListParams {
/// If false or null, only non-archived threads are returned.
#[ts(optional = nullable)]
pub archived: Option<bool>,
/// Optional cwd filter; when set, only threads whose session cwd exactly
/// matches this path are returned.
#[ts(optional = nullable)]
pub cwd: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
@@ -2655,22 +2622,6 @@ pub enum CommandExecutionStatus {
Declined,
}

impl From<CoreExecCommandStatus> for CommandExecutionStatus {
fn from(value: CoreExecCommandStatus) -> Self {
Self::from(&value)
}
}

impl From<&CoreExecCommandStatus> for CommandExecutionStatus {
fn from(value: &CoreExecCommandStatus) -> Self {
match value {
CoreExecCommandStatus::Completed => CommandExecutionStatus::Completed,
CoreExecCommandStatus::Failed => CommandExecutionStatus::Failed,
CoreExecCommandStatus::Declined => CommandExecutionStatus::Declined,
}
}
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2711,22 +2662,6 @@ pub enum PatchApplyStatus {
Declined,
}

impl From<CorePatchApplyStatus> for PatchApplyStatus {
fn from(value: CorePatchApplyStatus) -> Self {
Self::from(&value)
}
}

impl From<&CorePatchApplyStatus> for PatchApplyStatus {
fn from(value: &CorePatchApplyStatus) -> Self {
match value {
CorePatchApplyStatus::Completed => PatchApplyStatus::Completed,
CorePatchApplyStatus::Failed => PatchApplyStatus::Failed,
CorePatchApplyStatus::Declined => PatchApplyStatus::Declined,
}
}
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]

@@ -14,6 +14,4 @@ codex-app-server-protocol = { workspace = true }
codex-protocol = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tungstenite = { workspace = true }
url = { workspace = true }
uuid = { workspace = true, features = ["v4"] }

@@ -1,49 +1,2 @@
# App Server Test Client
Quickstart for running and hitting `codex app-server`.

## Quickstart

Run from `<reporoot>/codex-rs`.

```bash
# 1) Build debug codex binary
cargo build -p codex-cli --bin codex

# 2) Start websocket app-server in background
cargo run -p codex-app-server-test-client -- \
  --codex-bin ./target/debug/codex \
  serve --listen ws://127.0.0.1:4222 --kill

# 3) Call app-server (defaults to ws://127.0.0.1:4222)
cargo run -p codex-app-server-test-client -- model-list
```

## Testing Thread Rejoin Behavior

Build and start an app server using commands above. The app-server log is written to `/tmp/codex-app-server-test-client/app-server.log`

### 1) Get a thread id

Create at least one thread, then list threads:

```bash
cargo run -p codex-app-server-test-client -- send-message-v2 "seed thread for rejoin test"
cargo run -p codex-app-server-test-client -- thread-list --limit 5
```

Copy a thread id from the `thread-list` output.

### 2) Rejoin while a turn is in progress (two terminals)

Terminal A:

```bash
cargo run --bin codex-app-server-test-client -- \
  resume-message-v2 <THREAD_ID> "respond with thorough docs on the rust core"
```

Terminal B (while Terminal A is still streaming):

```bash
cargo run --bin codex-app-server-test-client -- thread-resume <THREAD_ID>
```
Exercises simple `codex app-server` flows end-to-end, logging JSON-RPC messages sent between client and server to stdout.
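The replacement README one-liner says the client logs the JSON-RPC traffic between client and server. For readers unfamiliar with the framing, a rough sketch of a logged request/response pair, assuming standard JSON-RPC 2.0 envelopes (the `initialize` method matches the client code below; the params shown are placeholders, not the real schema):

```rust
use serde_json::json;

fn main() {
    // Shape of a request the test client might log on stdout.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "initialize",
        "params": { "clientInfo": { "name": "codex-app-server-test-client" } }
    });
    // And the matching response envelope, keyed by the same id.
    let response = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "result": {}
    });
    println!("> {request}");
    println!("< {response}");
}
```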
@@ -1,10 +1,8 @@
use std::collections::VecDeque;
use std::fs;
use std::fs::OpenOptions;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Write;
use std::net::TcpStream;
use std::path::Path;
use std::path::PathBuf;
use std::process::Child;
@@ -55,8 +53,6 @@ use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadListResponse;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
@@ -71,28 +67,15 @@ use codex_protocol::protocol::EventMsg;
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json::Value;
use tungstenite::Message;
use tungstenite::WebSocket;
use tungstenite::connect;
use tungstenite::stream::MaybeTlsStream;
use url::Url;
use uuid::Uuid;

/// Minimal launcher that initializes the Codex app-server and logs the handshake.
#[derive(Parser)]
#[command(author = "Codex", version, about = "Bootstrap Codex app-server", long_about = None)]
struct Cli {
/// Path to the `codex` CLI binary. When set, requests use stdio by
/// spawning `codex app-server` as a child process.
#[arg(long, env = "CODEX_BIN", global = true)]
codex_bin: Option<PathBuf>,

/// Existing websocket server URL to connect to.
///
/// If neither `--codex-bin` nor `--url` is provided, defaults to
/// `ws://127.0.0.1:4222`.
#[arg(long, env = "CODEX_APP_SERVER_URL", global = true)]
url: Option<String>,
/// Path to the `codex` CLI binary.
#[arg(long, env = "CODEX_BIN", default_value = "codex")]
codex_bin: PathBuf,

/// Forwarded to the `codex` CLI as `--config key=value`. Repeatable.
///
@@ -122,18 +105,6 @@ struct Cli {

#[derive(Subcommand)]
enum CliCommand {
/// Start `codex app-server` on a websocket endpoint in the background.
///
/// Logs are written to:
/// `/tmp/codex-app-server-test-client/`
Serve {
/// WebSocket listen URL passed to `codex app-server --listen`.
#[arg(long, default_value = "ws://127.0.0.1:4222")]
listen: String,
/// Kill any process listening on the same port before starting.
#[arg(long, default_value_t = false)]
kill: bool,
},
/// Send a user message through the Codex app-server.
SendMessage {
/// User message to send to Codex.
@@ -151,13 +122,6 @@ enum CliCommand {
/// User message to send to Codex.
user_message: String,
},
/// Resume a V2 thread and continuously stream notifications/events.
///
/// This command does not auto-exit; stop it with SIGINT/SIGTERM/SIGKILL.
ThreadResume {
/// Existing thread id to resume.
thread_id: String,
},
/// Start a V2 turn that elicits an ExecCommand approval.
#[command(name = "trigger-cmd-approval")]
TriggerCmdApproval {
@@ -187,19 +151,11 @@ enum CliCommand {
/// List the available models from the Codex app-server.
#[command(name = "model-list")]
ModelList,
/// List stored threads from the Codex app-server.
#[command(name = "thread-list")]
ThreadList {
/// Number of threads to return.
#[arg(long, default_value_t = 20)]
limit: u32,
},
}

pub fn run() -> Result<()> {
let Cli {
codex_bin,
url,
config_overrides,
dynamic_tools,
command,
@@ -208,222 +164,59 @@ pub fn run() -> Result<()> {
let dynamic_tools = parse_dynamic_tools_arg(&dynamic_tools)?;

match command {
CliCommand::Serve { listen, kill } => {
ensure_dynamic_tools_unused(&dynamic_tools, "serve")?;
let codex_bin = codex_bin.unwrap_or_else(|| PathBuf::from("codex"));
serve(&codex_bin, &config_overrides, &listen, kill)
}
CliCommand::SendMessage { user_message } => {
ensure_dynamic_tools_unused(&dynamic_tools, "send-message")?;
let endpoint = resolve_endpoint(codex_bin, url)?;
send_message(&endpoint, &config_overrides, user_message)
send_message(&codex_bin, &config_overrides, user_message)
}
CliCommand::SendMessageV2 { user_message } => {
let endpoint = resolve_endpoint(codex_bin, url)?;
send_message_v2_endpoint(&endpoint, &config_overrides, user_message, &dynamic_tools)
send_message_v2(&codex_bin, &config_overrides, user_message, &dynamic_tools)
}
CliCommand::ResumeMessageV2 {
thread_id,
user_message,
} => {
let endpoint = resolve_endpoint(codex_bin, url)?;
resume_message_v2(
&endpoint,
&config_overrides,
thread_id,
user_message,
&dynamic_tools,
)
}
CliCommand::ThreadResume { thread_id } => {
ensure_dynamic_tools_unused(&dynamic_tools, "thread-resume")?;
let endpoint = resolve_endpoint(codex_bin, url)?;
thread_resume_follow(&endpoint, &config_overrides, thread_id)
}
} => resume_message_v2(
&codex_bin,
&config_overrides,
thread_id,
user_message,
&dynamic_tools,
),
CliCommand::TriggerCmdApproval { user_message } => {
let endpoint = resolve_endpoint(codex_bin, url)?;
trigger_cmd_approval(&endpoint, &config_overrides, user_message, &dynamic_tools)
trigger_cmd_approval(&codex_bin, &config_overrides, user_message, &dynamic_tools)
}
CliCommand::TriggerPatchApproval { user_message } => {
let endpoint = resolve_endpoint(codex_bin, url)?;
trigger_patch_approval(&endpoint, &config_overrides, user_message, &dynamic_tools)
trigger_patch_approval(&codex_bin, &config_overrides, user_message, &dynamic_tools)
}
CliCommand::NoTriggerCmdApproval => {
let endpoint = resolve_endpoint(codex_bin, url)?;
no_trigger_cmd_approval(&endpoint, &config_overrides, &dynamic_tools)
no_trigger_cmd_approval(&codex_bin, &config_overrides, &dynamic_tools)
}
CliCommand::SendFollowUpV2 {
first_message,
follow_up_message,
} => {
let endpoint = resolve_endpoint(codex_bin, url)?;
send_follow_up_v2(
&endpoint,
&config_overrides,
first_message,
follow_up_message,
&dynamic_tools,
)
}
} => send_follow_up_v2(
&codex_bin,
&config_overrides,
first_message,
follow_up_message,
&dynamic_tools,
),
CliCommand::TestLogin => {
ensure_dynamic_tools_unused(&dynamic_tools, "test-login")?;
let endpoint = resolve_endpoint(codex_bin, url)?;
test_login(&endpoint, &config_overrides)
test_login(&codex_bin, &config_overrides)
}
CliCommand::GetAccountRateLimits => {
ensure_dynamic_tools_unused(&dynamic_tools, "get-account-rate-limits")?;
let endpoint = resolve_endpoint(codex_bin, url)?;
get_account_rate_limits(&endpoint, &config_overrides)
get_account_rate_limits(&codex_bin, &config_overrides)
}
CliCommand::ModelList => {
ensure_dynamic_tools_unused(&dynamic_tools, "model-list")?;
let endpoint = resolve_endpoint(codex_bin, url)?;
model_list(&endpoint, &config_overrides)
}
CliCommand::ThreadList { limit } => {
ensure_dynamic_tools_unused(&dynamic_tools, "thread-list")?;
let endpoint = resolve_endpoint(codex_bin, url)?;
thread_list(&endpoint, &config_overrides, limit)
model_list(&codex_bin, &config_overrides)
}
}
}

enum Endpoint {
SpawnCodex(PathBuf),
ConnectWs(String),
}

fn resolve_endpoint(codex_bin: Option<PathBuf>, url: Option<String>) -> Result<Endpoint> {
if codex_bin.is_some() && url.is_some() {
bail!("--codex-bin and --url are mutually exclusive");
}
if let Some(codex_bin) = codex_bin {
return Ok(Endpoint::SpawnCodex(codex_bin));
}
if let Some(url) = url {
return Ok(Endpoint::ConnectWs(url));
}
Ok(Endpoint::ConnectWs("ws://127.0.0.1:4222".to_string()))
}
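`resolve_endpoint` above encodes the CLI's connection precedence: `--codex-bin` and `--url` are mutually exclusive, either one alone wins, and with neither the client falls back to the default websocket address. A quick sketch of the four outcomes, reusing the definitions above (the `anyhow::Result` alias is assumed from the surrounding crate):

```rust
// Sketch only; relies on Endpoint and resolve_endpoint as defined above.
fn demo() -> anyhow::Result<()> {
    // Explicit binary: spawn `codex app-server` over stdio.
    let _stdio = resolve_endpoint(Some("./target/debug/codex".into()), None)?;
    // Explicit URL: connect to an already-running websocket server.
    let _ws = resolve_endpoint(None, Some("ws://127.0.0.1:4222".to_string()))?;
    // Neither flag: falls back to ws://127.0.0.1:4222.
    let _default = resolve_endpoint(None, None)?;
    // Both flags together: the "mutually exclusive" error.
    assert!(resolve_endpoint(Some("codex".into()), Some("ws://x:1".into())).is_err());
    Ok(())
}
```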
fn serve(codex_bin: &Path, config_overrides: &[String], listen: &str, kill: bool) -> Result<()> {
let runtime_dir = PathBuf::from("/tmp/codex-app-server-test-client");
fs::create_dir_all(&runtime_dir)
.with_context(|| format!("failed to create runtime dir {}", runtime_dir.display()))?;
let log_path = runtime_dir.join("app-server.log");
if kill {
kill_listeners_on_same_port(listen)?;
}

let log_file = OpenOptions::new()
.create(true)
.append(true)
.open(&log_path)
.with_context(|| format!("failed to open log file {}", log_path.display()))?;
let log_file_stderr = log_file
.try_clone()
.with_context(|| format!("failed to clone log file handle {}", log_path.display()))?;

let mut cmdline = format!(
"tail -f /dev/null | RUST_BACKTRACE=full RUST_LOG=warn,codex_=trace {}",
shell_quote(&codex_bin.display().to_string())
);
for override_kv in config_overrides {
cmdline.push_str(&format!(" --config {}", shell_quote(override_kv)));
}
cmdline.push_str(&format!(" app-server --listen {}", shell_quote(listen)));

let child = Command::new("nohup")
.arg("sh")
.arg("-c")
.arg(cmdline)
.stdin(Stdio::null())
.stdout(Stdio::from(log_file))
.stderr(Stdio::from(log_file_stderr))
.spawn()
.with_context(|| format!("failed to start `{}` app-server", codex_bin.display()))?;

let pid = child.id();

println!("started codex app-server");
println!("listen: {listen}");
println!("pid: {pid} (launcher process)");
println!("log: {}", log_path.display());

Ok(())
}

fn kill_listeners_on_same_port(listen: &str) -> Result<()> {
let url = Url::parse(listen).with_context(|| format!("invalid --listen URL `{listen}`"))?;
let port = url
.port_or_known_default()
.with_context(|| format!("unable to infer port from --listen URL `{listen}`"))?;

let output = Command::new("lsof")
.arg("-nP")
.arg(format!("-tiTCP:{port}"))
.arg("-sTCP:LISTEN")
.output()
.with_context(|| format!("failed to run lsof for port {port}"))?;

if !output.status.success() {
return Ok(());
}

let pids: Vec<u32> = String::from_utf8_lossy(&output.stdout)
.lines()
.filter_map(|line| line.trim().parse::<u32>().ok())
.collect();

if pids.is_empty() {
return Ok(());
}

for pid in pids {
println!("killing listener pid {pid} on port {port}");
let pid_str = pid.to_string();
let term_status = Command::new("kill")
.arg(&pid_str)
.status()
.with_context(|| format!("failed to send SIGTERM to pid {pid}"))?;
if !term_status.success() {
continue;
}
}

thread::sleep(Duration::from_millis(300));

let output = Command::new("lsof")
.arg("-nP")
.arg(format!("-tiTCP:{port}"))
.arg("-sTCP:LISTEN")
.output()
.with_context(|| format!("failed to re-check listeners on port {port}"))?;
if !output.status.success() {
return Ok(());
}
let remaining: Vec<u32> = String::from_utf8_lossy(&output.stdout)
.lines()
.filter_map(|line| line.trim().parse::<u32>().ok())
.collect();
for pid in remaining {
println!("force killing remaining listener pid {pid} on port {port}");
let _ = Command::new("kill").arg("-9").arg(pid.to_string()).status();
}

Ok(())
}

fn shell_quote(input: &str) -> String {
format!("'{}'", input.replace('\'', "'\\''"))
}
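`shell_quote` uses the classic POSIX trick: close the single-quoted string, emit an escaped quote, and reopen it. To make the escaping concrete, a small check whose expected values follow directly from the `replace` call above:

```rust
// `it's` becomes `'it'\''s'`: close quote, literal ', reopen quote.
fn shell_quote_examples() {
    assert_eq!(shell_quote("plain"), "'plain'");
    assert_eq!(shell_quote("it's"), r#"'it'\''s'"#);
}
```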
fn send_message(
    endpoint: &Endpoint,
    config_overrides: &[String],
    user_message: String,
) -> Result<()> {
    let mut client = CodexClient::connect(endpoint, config_overrides)?;
fn send_message(codex_bin: &Path, config_overrides: &[String], user_message: String) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
@@ -449,19 +242,9 @@ pub fn send_message_v2(
    config_overrides: &[String],
    user_message: String,
    dynamic_tools: &Option<Vec<DynamicToolSpec>>,
) -> Result<()> {
    let endpoint = Endpoint::SpawnCodex(codex_bin.to_path_buf());
    send_message_v2_endpoint(&endpoint, config_overrides, user_message, dynamic_tools)
}

fn send_message_v2_endpoint(
    endpoint: &Endpoint,
    config_overrides: &[String],
    user_message: String,
    dynamic_tools: &Option<Vec<DynamicToolSpec>>,
) -> Result<()> {
    send_message_v2_with_policies(
        endpoint,
        codex_bin,
        config_overrides,
        user_message,
        None,
@@ -471,7 +254,7 @@ fn send_message_v2_endpoint(
}

fn resume_message_v2(
    endpoint: &Endpoint,
    codex_bin: &Path,
    config_overrides: &[String],
    thread_id: String,
    user_message: String,
@@ -479,7 +262,7 @@ fn resume_message_v2(
) -> Result<()> {
    ensure_dynamic_tools_unused(dynamic_tools, "resume-message-v2")?;

    let mut client = CodexClient::connect(endpoint, config_overrides)?;
    let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
@@ -505,28 +288,8 @@ fn resume_message_v2(
    Ok(())
}

fn thread_resume_follow(
    endpoint: &Endpoint,
    config_overrides: &[String],
    thread_id: String,
) -> Result<()> {
    let mut client = CodexClient::connect(endpoint, config_overrides)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");

    let resume_response = client.thread_resume(ThreadResumeParams {
        thread_id,
        ..Default::default()
    })?;
    println!("< thread/resume response: {resume_response:?}");
    println!("< streaming notifications until process is terminated");

    client.stream_notifications_forever()
}

fn trigger_cmd_approval(
    endpoint: &Endpoint,
    codex_bin: &Path,
    config_overrides: &[String],
    user_message: Option<String>,
    dynamic_tools: &Option<Vec<DynamicToolSpec>>,
@@ -535,7 +298,7 @@ fn trigger_cmd_approval(
        "Run `touch /tmp/should-trigger-approval` so I can confirm the file exists.";
    let message = user_message.unwrap_or_else(|| default_prompt.to_string());
    send_message_v2_with_policies(
        endpoint,
        codex_bin,
        config_overrides,
        message,
        Some(AskForApproval::OnRequest),
@@ -547,7 +310,7 @@ fn trigger_cmd_approval(
}

fn trigger_patch_approval(
    endpoint: &Endpoint,
    codex_bin: &Path,
    config_overrides: &[String],
    user_message: Option<String>,
    dynamic_tools: &Option<Vec<DynamicToolSpec>>,
@@ -556,7 +319,7 @@ fn trigger_patch_approval(
        "Create a file named APPROVAL_DEMO.txt containing a short hello message using apply_patch.";
    let message = user_message.unwrap_or_else(|| default_prompt.to_string());
    send_message_v2_with_policies(
        endpoint,
        codex_bin,
        config_overrides,
        message,
        Some(AskForApproval::OnRequest),
@@ -568,13 +331,13 @@ fn trigger_patch_approval(
}

fn no_trigger_cmd_approval(
    endpoint: &Endpoint,
    codex_bin: &Path,
    config_overrides: &[String],
    dynamic_tools: &Option<Vec<DynamicToolSpec>>,
) -> Result<()> {
    let prompt = "Run `touch should_not_trigger_approval.txt`";
    send_message_v2_with_policies(
        endpoint,
        codex_bin,
        config_overrides,
        prompt.to_string(),
        None,
@@ -584,14 +347,14 @@ fn no_trigger_cmd_approval(
}

fn send_message_v2_with_policies(
    endpoint: &Endpoint,
    codex_bin: &Path,
    config_overrides: &[String],
    user_message: String,
    approval_policy: Option<AskForApproval>,
    sandbox_policy: Option<SandboxPolicy>,
    dynamic_tools: &Option<Vec<DynamicToolSpec>>,
) -> Result<()> {
    let mut client = CodexClient::connect(endpoint, config_overrides)?;
    let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
@@ -622,13 +385,13 @@ fn send_message_v2_with_policies(
}

fn send_follow_up_v2(
    endpoint: &Endpoint,
    codex_bin: &Path,
    config_overrides: &[String],
    first_message: String,
    follow_up_message: String,
    dynamic_tools: &Option<Vec<DynamicToolSpec>>,
) -> Result<()> {
    let mut client = CodexClient::connect(endpoint, config_overrides)?;
    let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
@@ -668,8 +431,8 @@ fn send_follow_up_v2(
    Ok(())
}

fn test_login(endpoint: &Endpoint, config_overrides: &[String]) -> Result<()> {
    let mut client = CodexClient::connect(endpoint, config_overrides)?;
fn test_login(codex_bin: &Path, config_overrides: &[String]) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
@@ -698,8 +461,8 @@ fn test_login(endpoint: &Endpoint, config_overrides: &[String]) -> Result<()> {
    }
}

fn get_account_rate_limits(endpoint: &Endpoint, config_overrides: &[String]) -> Result<()> {
    let mut client = CodexClient::connect(endpoint, config_overrides)?;
fn get_account_rate_limits(codex_bin: &Path, config_overrides: &[String]) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
@@ -710,8 +473,8 @@ fn get_account_rate_limits(endpoint: &Endpoint, config_overrides: &[String]) ->
    Ok(())
}

fn model_list(endpoint: &Endpoint, config_overrides: &[String]) -> Result<()> {
    let mut client = CodexClient::connect(endpoint, config_overrides)?;
fn model_list(codex_bin: &Path, config_overrides: &[String]) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
@@ -722,26 +485,6 @@ fn model_list(endpoint: &Endpoint, config_overrides: &[String]) -> Result<()> {
    Ok(())
}

fn thread_list(endpoint: &Endpoint, config_overrides: &[String], limit: u32) -> Result<()> {
    let mut client = CodexClient::connect(endpoint, config_overrides)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");

    let response = client.thread_list(ThreadListParams {
        cursor: None,
        limit: Some(limit),
        sort_key: None,
        model_providers: None,
        source_kinds: None,
        archived: None,
        cwd: None,
    })?;
    println!("< thread/list response: {response:?}");

    Ok(())
}

fn ensure_dynamic_tools_unused(
    dynamic_tools: &Option<Vec<DynamicToolSpec>>,
    command: &str,
@@ -776,32 +519,15 @@ fn parse_dynamic_tools_arg(dynamic_tools: &Option<String>) -> Result<Option<Vec<
    Ok(Some(tools))
}

enum ClientTransport {
    Stdio {
        child: Child,
        stdin: Option<ChildStdin>,
        stdout: BufReader<ChildStdout>,
    },
    WebSocket {
        url: String,
        socket: Box<WebSocket<MaybeTlsStream<TcpStream>>>,
    },
}

struct CodexClient {
    transport: ClientTransport,
    child: Child,
    stdin: Option<ChildStdin>,
    stdout: BufReader<ChildStdout>,
    pending_notifications: VecDeque<JSONRPCNotification>,
}

impl CodexClient {
    fn connect(endpoint: &Endpoint, config_overrides: &[String]) -> Result<Self> {
        match endpoint {
            Endpoint::SpawnCodex(codex_bin) => Self::spawn_stdio(codex_bin, config_overrides),
            Endpoint::ConnectWs(url) => Self::connect_websocket(url),
        }
    }

    fn spawn_stdio(codex_bin: &Path, config_overrides: &[String]) -> Result<Self> {
    fn spawn(codex_bin: &Path, config_overrides: &[String]) -> Result<Self> {
        let codex_bin_display = codex_bin.display();
        let mut cmd = Command::new(codex_bin);
        for override_kv in config_overrides {
@@ -825,27 +551,9 @@ impl CodexClient {
            .context("codex app-server stdout unavailable")?;

        Ok(Self {
            transport: ClientTransport::Stdio {
                child: codex_app_server,
                stdin: Some(stdin),
                stdout: BufReader::new(stdout),
            },
            pending_notifications: VecDeque::new(),
        })
    }

    fn connect_websocket(url: &str) -> Result<Self> {
        let parsed = Url::parse(url).with_context(|| format!("invalid websocket URL `{url}`"))?;
        let (socket, _response) = connect(parsed.as_str()).with_context(|| {
            format!(
                "failed to connect to websocket app-server at `{url}`; if no server is running, start one with `codex-app-server-test-client serve --listen {url}`"
            )
        })?;
        Ok(Self {
            transport: ClientTransport::WebSocket {
                url: url.to_string(),
                socket: Box::new(socket),
            },
            child: codex_app_server,
            stdin: Some(stdin),
            stdout: BufReader::new(stdout),
            pending_notifications: VecDeque::new(),
        })
    }
@@ -867,16 +575,7 @@ impl CodexClient {
            },
        };

        let response: InitializeResponse = self.send_request(request, request_id, "initialize")?;

        // Complete the initialize handshake.
        let initialized = JSONRPCMessage::Notification(JSONRPCNotification {
            method: "initialized".to_string(),
            params: None,
        });
        self.write_jsonrpc_message(initialized)?;

        Ok(response)
        self.send_request(request, request_id, "initialize")
    }

    fn start_thread(&mut self) -> Result<NewConversationResponse> {
@@ -1002,16 +701,6 @@ impl CodexClient {
        self.send_request(request, request_id, "model/list")
    }

    fn thread_list(&mut self, params: ThreadListParams) -> Result<ThreadListResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::ThreadList {
            request_id: request_id.clone(),
            params,
        };

        self.send_request(request, request_id, "thread/list")
    }

    fn stream_conversation(&mut self, conversation_id: &ThreadId) -> Result<()> {
        loop {
            let notification = self.next_notification()?;
@@ -1146,12 +835,6 @@ impl CodexClient {
        Ok(())
    }

    fn stream_notifications_forever(&mut self) -> Result<()> {
        loop {
            let _ = self.next_notification()?;
        }
    }

    fn extract_event(
        &self,
        notification: JSONRPCNotification,
@@ -1199,7 +882,17 @@ impl CodexClient {
        let request_json = serde_json::to_string(request)?;
        let request_pretty = serde_json::to_string_pretty(request)?;
        print_multiline_with_prefix("> ", &request_pretty);
        self.write_payload(&request_json)

        if let Some(stdin) = self.stdin.as_mut() {
            writeln!(stdin, "{request_json}")?;
            stdin
                .flush()
                .context("failed to flush request to codex app-server")?;
        } else {
            bail!("codex app-server stdin closed");
        }

        Ok(())
    }

    fn wait_for_response<T>(&mut self, request_id: RequestId, method: &str) -> Result<T>
@@ -1254,8 +947,17 @@ impl CodexClient {

    fn read_jsonrpc_message(&mut self) -> Result<JSONRPCMessage> {
        loop {
            let raw = self.read_payload()?;
            let trimmed = raw.trim();
            let mut response_line = String::new();
            let bytes = self
                .stdout
                .read_line(&mut response_line)
                .context("failed to read from codex app-server")?;

            if bytes == 0 {
                bail!("codex app-server closed stdout");
            }

            let trimmed = response_line.trim();
            if trimmed.is_empty() {
                continue;
            }
@@ -1384,56 +1086,16 @@ impl CodexClient {
        let payload = serde_json::to_string(&message)?;
        let pretty = serde_json::to_string_pretty(&message)?;
        print_multiline_with_prefix("> ", &pretty);
        self.write_payload(&payload)
    }

    fn write_payload(&mut self, payload: &str) -> Result<()> {
        match &mut self.transport {
            ClientTransport::Stdio { stdin, .. } => {
                if let Some(stdin) = stdin.as_mut() {
                    writeln!(stdin, "{payload}")?;
                    stdin
                        .flush()
                        .context("failed to flush payload to codex app-server")?;
                    return Ok(());
                }
                bail!("codex app-server stdin closed")
            }
            ClientTransport::WebSocket { socket, url } => {
                socket
                    .send(Message::Text(payload.to_string().into()))
                    .with_context(|| format!("failed to write websocket message to `{url}`"))?;
                Ok(())
            }
        if let Some(stdin) = self.stdin.as_mut() {
            writeln!(stdin, "{payload}")?;
            stdin
                .flush()
                .context("failed to flush response to codex app-server")?;
            return Ok(());
        }
    }

    fn read_payload(&mut self) -> Result<String> {
        match &mut self.transport {
            ClientTransport::Stdio { stdout, .. } => {
                let mut response_line = String::new();
                let bytes = stdout
                    .read_line(&mut response_line)
                    .context("failed to read from codex app-server")?;
                if bytes == 0 {
                    bail!("codex app-server closed stdout");
                }
                Ok(response_line)
            }
            ClientTransport::WebSocket { socket, url } => loop {
                let frame = socket
                    .read()
                    .with_context(|| format!("failed to read websocket message from `{url}`"))?;
                match frame {
                    Message::Text(text) => return Ok(text.to_string()),
                    Message::Binary(_) | Message::Ping(_) | Message::Pong(_) => continue,
                    Message::Close(_) => {
                        bail!("websocket app-server at `{url}` closed the connection")
                    }
                    Message::Frame(_) => continue,
                }
            },
        }
        bail!("codex app-server stdin closed")
    }
}

@@ -1445,25 +1107,21 @@ fn print_multiline_with_prefix(prefix: &str, payload: &str) {

impl Drop for CodexClient {
    fn drop(&mut self) {
        let ClientTransport::Stdio { child, stdin, .. } = &mut self.transport else {
            return;
        };
        let _ = self.stdin.take();

        let _ = stdin.take();

        if let Ok(Some(status)) = child.try_wait() {
        if let Ok(Some(status)) = self.child.try_wait() {
            println!("[codex app-server exited: {status}]");
            return;
        }

        thread::sleep(Duration::from_millis(100));

        if let Ok(Some(status)) = child.try_wait() {
        if let Ok(Some(status)) = self.child.try_wait() {
            println!("[codex app-server exited: {status}]");
            return;
        }

        let _ = child.kill();
        let _ = child.wait();
        let _ = self.child.kill();
        let _ = self.child.wait();
    }
}

@@ -117,7 +117,7 @@ Example with notification opt-out:
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/fork` — fork an existing thread into a new thread id by copying the stored history; emits `thread/started` and auto-subscribes you to turn/item events for the new thread.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders`, `sourceKinds`, `archived`, and `cwd` filters.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
- `thread/loaded/list` — list the thread ids currently loaded in memory.
- `thread/read` — read a stored thread by id without resuming it; optionally include turns via `includeTurns`.
- `thread/archive` — move a thread’s rollout file into the archived directory; returns `{}` on success.
@@ -131,7 +131,7 @@ Example with notification opt-out:
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
- `review/start` — kick off Codex’s automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `model/list` — list available models (set `includeHidden: true` to include entries with `hidden: true`), with reasoning effort options and optional `upgrade` model ids.
- `model/list` — list available models (with reasoning effort options and optional `upgrade` model ids).
- `experimentalFeature/list` — list feature flags with stage metadata (`beta`, `underDevelopment`, `stable`, etc.), enabled/default-enabled state, and cursor pagination. For non-beta flags, `displayName`/`description`/`announcement` are `null`.
- `collaborationMode/list` — list available collaboration mode presets (experimental, no pagination).
- `skills/list` — list skills for one or more `cwd` values (optional `forceReload`).
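
As a rough sketch of the request shape these methods share, a minimal `model/list` call might look like the following; the `id` value and the exact placement of `includeHidden` are assumptions based on the descriptions above, not verified against the server:

```json
{ "method": "model/list", "id": 12, "params": { "includeHidden": true } }
```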
@@ -209,8 +209,6 @@ To branch from a stored session, call `thread/fork` with the `thread.id`. This c
{ "method": "thread/started", "params": { "thread": { … } } }
```

Experimental API: `thread/start`, `thread/resume`, and `thread/fork` accept `persistExtendedHistory: true` to persist a richer subset of ThreadItems for non-lossy history when calling `thread/read`, `thread/resume`, and `thread/fork` later. This does not backfill events that were not persisted previously.

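For concreteness, a minimal sketch of opting in at thread creation, assuming the flag sits alongside the other `thread/start` params:

```json
{ "method": "thread/start", "id": 20, "params": { "persistExtendedHistory": true } }
```
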
### Example: List threads (with pagination & filters)

`thread/list` lets you render a history UI. Results default to `createdAt` descending (newest first). Pass any combination of:
@@ -221,7 +219,6 @@ Experimental API: `thread/start`, `thread/resume`, and `thread/fork` accept `per
- `modelProviders` — restrict results to specific providers; unset, null, or an empty array will include all providers.
- `sourceKinds` — restrict results to specific sources; omit or pass `[]` for interactive sessions only (`cli`, `vscode`).
- `archived` — when `true`, list archived threads only. When `false` or `null`, list non-archived threads (default).
- `cwd` — restrict results to threads whose session cwd exactly matches this path.

Example:

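The concrete example is elided by the diff context here; a plausible request combining the filters listed above (field casing and `id` assumed from the surrounding docs) would be:

```json
{ "method": "thread/list", "id": 30, "params": {
    "limit": 25,
    "sourceKinds": ["cli", "vscode"],
    "archived": false,
    "cwd": "/home/user/project"
} }
```
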
@@ -535,13 +532,6 @@ Examples:
- Opt out of legacy session setup event: `codex/event/session_configured`
- Opt out of streamed agent text deltas: `item/agentMessage/delta`

### Fuzzy file search events (experimental)

The fuzzy file search session API emits per-query notifications:

- `fuzzyFileSearch/sessionUpdated` — `{ sessionId, query, files }` with the current matching files for the active query.
- `fuzzyFileSearch/sessionCompleted` — `{ sessionId, query }` once indexing/matching for that query has completed.

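A sketch of what these notifications might look like on the wire, assuming the payload shapes quoted above (the element shape of `files` is an assumption):

```json
{ "method": "fuzzyFileSearch/sessionUpdated", "params": { "sessionId": "s1", "query": "main", "files": [{ "path": "src/main.rs" }] } }
{ "method": "fuzzyFileSearch/sessionCompleted", "params": { "sessionId": "s1", "query": "main" } }
```
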
### Turn events

The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` status). Token usage events stream separately via `thread/tokenUsage/updated`. Clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.
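
Read literally, that lifecycle implies a notification sequence like the following for a single streamed assistant message; the ids, the field names inside `turn`/`item`, and the delta payload are illustrative assumptions only:

```json
{ "method": "turn/started", "params": { "turn": { "id": "t1" } } }
{ "method": "item/started", "params": { "item": { "id": "i1", "type": "agentMessage" } } }
{ "method": "item/agentMessage/delta", "params": { "itemId": "i1", "delta": "Hello" } }
{ "method": "item/completed", "params": { "item": { "id": "i1", "type": "agentMessage" } } }
{ "method": "turn/completed", "params": { "turn": { "id": "t1", "status": "completed" } } }
```
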
@@ -771,7 +761,7 @@ To enable or disable a skill by path:

## Apps

Use `app/list` to fetch available apps (connectors). Each entry includes metadata like the app `id`, display `name`, `installUrl`, whether it is currently accessible, and whether it is enabled in config.
Use `app/list` to fetch available apps (connectors). Each entry includes metadata like the app `id`, display `name`, `installUrl`, and whether it is currently accessible.

```json
{ "method": "app/list", "id": 50, "params": {
@@ -790,8 +780,7 @@ Use `app/list` to fetch available apps (connectors). Each entry includes metadat
        "logoUrlDark": null,
        "distributionChannel": null,
        "installUrl": "https://chatgpt.com/apps/demo-app/demo-app",
        "isAccessible": true,
        "isEnabled": true
        "isAccessible": true
      }
    ],
    "nextCursor": null
@@ -817,8 +806,7 @@ The server also emits `app/list/updated` notifications whenever either source (a
        "logoUrlDark": null,
        "distributionChannel": null,
        "installUrl": "https://chatgpt.com/apps/demo-app/demo-app",
        "isAccessible": true,
        "isEnabled": true
        "isAccessible": true
      }
    ]
}

@@ -4,7 +4,6 @@ use crate::codex_message_processor::read_summary_from_rollout;
use crate::codex_message_processor::summary_to_thread;
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::ClientRequestResult;
use crate::outgoing_message::ThreadScopedOutgoingMessageSender;
use crate::thread_state::ThreadState;
use crate::thread_state::TurnSummary;
@@ -206,7 +205,6 @@ pub(crate) async fn apply_bespoke_event_handling(
            reason,
            proposed_execpolicy_amendment,
            parsed_cmd,
            ..
        }) => match api_version {
            ApiVersion::V1 => {
                let params = ExecCommandApprovalParams {
@@ -719,10 +717,6 @@ pub(crate) async fn apply_bespoke_event_handling(
                .await;
            };

            if !ev.affects_turn_status() {
                return;
            }

            let turn_error = TurnError {
                message: ev.message,
                codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
@@ -893,7 +887,11 @@ pub(crate) async fn apply_bespoke_event_handling(
            // and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
            let item_id = patch_end_event.call_id.clone();

            let status: PatchApplyStatus = (&patch_end_event.status).into();
            let status = if patch_end_event.success {
                PatchApplyStatus::Completed
            } else {
                PatchApplyStatus::Failed
            };
            let changes = convert_patch_changes(&patch_end_event.changes);
            complete_file_change_item(
                conversation_id,
@@ -1000,11 +998,14 @@ pub(crate) async fn apply_bespoke_event_handling(
                aggregated_output,
                exit_code,
                duration,
                status,
                ..
            } = exec_command_end_event;

            let status: CommandExecutionStatus = (&status).into();
            let status = if exit_code == 0 {
                CommandExecutionStatus::Completed
            } else {
                CommandExecutionStatus::Failed
            };
            let command_actions = parsed_cmd
                .into_iter()
                .map(V2ParsedCommand::from)
@@ -1410,25 +1411,12 @@ async fn handle_error(

async fn on_patch_approval_response(
    call_id: String,
    receiver: oneshot::Receiver<ClientRequestResult>,
    receiver: oneshot::Receiver<JsonValue>,
    codex: Arc<CodexThread>,
) {
    let response = receiver.await;
    let value = match response {
        Ok(Ok(value)) => value,
        Ok(Err(err)) => {
            error!("request failed with client error: {err:?}");
            if let Err(submit_err) = codex
                .submit(Op::PatchApproval {
                    id: call_id.clone(),
                    decision: ReviewDecision::Denied,
                })
                .await
            {
                error!("failed to submit denied PatchApproval after request failure: {submit_err}");
            }
            return;
        }
        Ok(value) => value,
        Err(err) => {
            error!("request failed: {err:?}");
            if let Err(submit_err) = codex
@@ -1466,16 +1454,12 @@ async fn on_patch_approval_response(
async fn on_exec_approval_response(
    call_id: String,
    turn_id: String,
    receiver: oneshot::Receiver<ClientRequestResult>,
    receiver: oneshot::Receiver<JsonValue>,
    conversation: Arc<CodexThread>,
) {
    let response = receiver.await;
    let value = match response {
        Ok(Ok(value)) => value,
        Ok(Err(err)) => {
            error!("request failed with client error: {err:?}");
            return;
        }
        Ok(value) => value,
        Err(err) => {
            error!("request failed: {err:?}");
            return;
@@ -1507,28 +1491,12 @@ async fn on_exec_approval_response(

async fn on_request_user_input_response(
    event_turn_id: String,
    receiver: oneshot::Receiver<ClientRequestResult>,
    receiver: oneshot::Receiver<JsonValue>,
    conversation: Arc<CodexThread>,
) {
    let response = receiver.await;
    let value = match response {
        Ok(Ok(value)) => value,
        Ok(Err(err)) => {
            error!("request failed with client error: {err:?}");
            let empty = CoreRequestUserInputResponse {
                answers: HashMap::new(),
            };
            if let Err(err) = conversation
                .submit(Op::UserInputAnswer {
                    id: event_turn_id,
                    response: empty,
                })
                .await
            {
                error!("failed to submit UserInputAnswer: {err}");
            }
            return;
        }
        Ok(value) => value,
        Err(err) => {
            error!("request failed: {err:?}");
            let empty = CoreRequestUserInputResponse {
@@ -1663,14 +1631,14 @@ async fn on_file_change_request_approval_response(
    conversation_id: ThreadId,
    item_id: String,
    changes: Vec<FileUpdateChange>,
    receiver: oneshot::Receiver<ClientRequestResult>,
    receiver: oneshot::Receiver<JsonValue>,
    codex: Arc<CodexThread>,
    outgoing: ThreadScopedOutgoingMessageSender,
    thread_state: Arc<Mutex<ThreadState>>,
) {
    let response = receiver.await;
    let (decision, completion_status) = match response {
        Ok(Ok(value)) => {
        Ok(value) => {
            let response = serde_json::from_value::<FileChangeRequestApprovalResponse>(value)
                .unwrap_or_else(|err| {
                    error!("failed to deserialize FileChangeRequestApprovalResponse: {err}");
@@ -1685,10 +1653,6 @@ async fn on_file_change_request_approval_response(
            // Only short-circuit on declines/cancels/failures.
            (decision, completion_status)
        }
        Ok(Err(err)) => {
            error!("request failed with client error: {err:?}");
            (ReviewDecision::Denied, Some(PatchApplyStatus::Failed))
        }
        Err(err) => {
            error!("request failed: {err:?}");
            (ReviewDecision::Denied, Some(PatchApplyStatus::Failed))
@@ -1727,13 +1691,13 @@ async fn on_command_execution_request_approval_response(
    command: String,
    cwd: PathBuf,
    command_actions: Vec<V2ParsedCommand>,
    receiver: oneshot::Receiver<ClientRequestResult>,
    receiver: oneshot::Receiver<JsonValue>,
    conversation: Arc<CodexThread>,
    outgoing: ThreadScopedOutgoingMessageSender,
) {
    let response = receiver.await;
    let (decision, completion_status) = match response {
        Ok(Ok(value)) => {
        Ok(value) => {
            let response = serde_json::from_value::<CommandExecutionRequestApprovalResponse>(value)
                .unwrap_or_else(|err| {
                    error!("failed to deserialize CommandExecutionRequestApprovalResponse: {err}");
@@ -1768,10 +1732,6 @@ async fn on_command_execution_request_approval_response(
            };
            (decision, completion_status)
        }
        Ok(Err(err)) => {
            error!("request failed with client error: {err:?}");
            (ReviewDecision::Denied, Some(CommandExecutionStatus::Failed))
        }
        Err(err) => {
            error!("request failed: {err:?}");
            (ReviewDecision::Denied, Some(CommandExecutionStatus::Failed))

@@ -266,13 +266,6 @@ use crate::thread_state::ThreadStateManager;
const THREAD_LIST_DEFAULT_LIMIT: usize = 25;
const THREAD_LIST_MAX_LIMIT: usize = 100;

struct ThreadListFilters {
    model_providers: Option<Vec<String>>,
    source_kinds: Option<Vec<ThreadSourceKind>>,
    archived: bool,
    cwd: Option<PathBuf>,
}

// Duration before a ChatGPT login attempt is abandoned.
const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60);
const APP_LIST_LOAD_TIMEOUT: Duration = Duration::from_secs(90);
@@ -1684,22 +1677,12 @@ impl CodexMessageProcessor {
        }

        let cwd = params.cwd.unwrap_or_else(|| self.config.cwd.clone());
        let env = create_env(&self.config.permissions.shell_environment_policy, None);
        let env = create_env(&self.config.shell_environment_policy, None);
        let timeout_ms = params
            .timeout_ms
            .and_then(|timeout_ms| u64::try_from(timeout_ms).ok());
        let managed_network_requirements_enabled =
            self.config.managed_network_requirements_enabled();
        let started_network_proxy = match self.config.permissions.network.as_ref() {
            Some(spec) => match spec
                .start_proxy(
                    self.config.permissions.sandbox_policy.get(),
                    None,
                    None,
                    managed_network_requirements_enabled,
                )
                .await
            {
        let started_network_proxy = match self.config.network.as_ref() {
            Some(spec) => match spec.start_proxy().await {
                Ok(started) => Some(started),
                Err(err) => {
                    let error = JSONRPCErrorError {
@@ -1722,7 +1705,6 @@ impl CodexMessageProcessor {
            network: started_network_proxy
                .as_ref()
                .map(codex_core::config::StartedNetworkProxy::proxy),
            network_attempt_id: None,
            sandbox_permissions: SandboxPermissions::UseDefault,
            windows_sandbox_level,
            justification: None,
@@ -1731,7 +1713,7 @@ impl CodexMessageProcessor {

        let requested_policy = params.sandbox_policy.map(|policy| policy.to_core());
        let effective_policy = match requested_policy {
            Some(policy) => match self.config.permissions.sandbox_policy.can_set(&policy) {
            Some(policy) => match self.config.sandbox_policy.can_set(&policy) {
                Ok(()) => policy,
                Err(err) => {
                    let error = JSONRPCErrorError {
@@ -1743,7 +1725,7 @@ impl CodexMessageProcessor {
                    return;
                }
            },
            None => self.config.permissions.sandbox_policy.get().clone(),
            None => self.config.sandbox_policy.get().clone(),
        };

        let codex_linux_sandbox_exe = self.config.codex_linux_sandbox_exe.clone();
@@ -1912,7 +1894,6 @@ impl CodexMessageProcessor {
            experimental_raw_events,
            personality,
            ephemeral,
            persist_extended_history,
        } = params;
        let mut typesafe_overrides = self.build_thread_config_overrides(
            model,
@@ -1972,7 +1953,7 @@ impl CodexMessageProcessor {

        match self
            .thread_manager
            .start_thread_with_tools(config, core_dynamic_tools, persist_extended_history)
            .start_thread_with_tools(config, core_dynamic_tools)
            .await
        {
            Ok(new_conv) => {
@@ -2448,7 +2429,6 @@ impl CodexMessageProcessor {
            model_providers,
            source_kinds,
            archived,
            cwd,
        } = params;

        let requested_page_size = limit
@@ -2463,13 +2443,10 @@ impl CodexMessageProcessor {
            .list_threads_common(
                requested_page_size,
                cursor,
                model_providers,
                source_kinds,
                core_sort_key,
                ThreadListFilters {
                    model_providers,
                    source_kinds,
                    archived: archived.unwrap_or(false),
                    cwd: cwd.map(PathBuf::from),
                },
                archived.unwrap_or(false),
            )
            .await
        {
@@ -2700,13 +2677,6 @@ impl CodexMessageProcessor {
    }

    async fn thread_resume(&mut self, request_id: ConnectionRequestId, params: ThreadResumeParams) {
        if self
            .resume_running_thread(request_id.clone(), &params)
            .await
        {
            return;
        }

        let ThreadResumeParams {
            thread_id,
            history,
@@ -2720,25 +2690,80 @@ impl CodexMessageProcessor {
            base_instructions,
            developer_instructions,
            personality,
            persist_extended_history,
        } = params;

        let thread_history = if let Some(history) = history {
            let Some(thread_history) = self
                .resume_thread_from_history(request_id.clone(), history.as_slice())
                .await
            else {
            if history.is_empty() {
                self.send_invalid_request_error(
                    request_id,
                    "history must not be empty".to_string(),
                )
                .await;
                return;
            };
            thread_history
            }
            InitialHistory::Forked(history.into_iter().map(RolloutItem::ResponseItem).collect())
        } else if let Some(path) = path {
            match RolloutRecorder::get_rollout_history(&path).await {
                Ok(initial_history) => initial_history,
                Err(err) => {
                    self.send_invalid_request_error(
                        request_id,
                        format!("failed to load rollout `{}`: {err}", path.display()),
                    )
                    .await;
                    return;
                }
            }
        } else {
            let Some(thread_history) = self
                .resume_thread_from_rollout(request_id.clone(), &thread_id, path.as_ref())
                .await
            else {
                return;
            let existing_thread_id = match ThreadId::from_string(&thread_id) {
                Ok(id) => id,
                Err(err) => {
                    let error = JSONRPCErrorError {
                        code: INVALID_REQUEST_ERROR_CODE,
                        message: format!("invalid thread id: {err}"),
                        data: None,
                    };
                    self.outgoing.send_error(request_id, error).await;
                    return;
                }
            };
            thread_history

            let path = match find_thread_path_by_id_str(
                &self.config.codex_home,
                &existing_thread_id.to_string(),
            )
            .await
            {
                Ok(Some(p)) => p,
                Ok(None) => {
                    self.send_invalid_request_error(
                        request_id,
                        format!("no rollout found for thread id {existing_thread_id}"),
                    )
                    .await;
                    return;
                }
                Err(err) => {
                    self.send_invalid_request_error(
                        request_id,
                        format!("failed to locate thread id {existing_thread_id}: {err}"),
                    )
                    .await;
                    return;
                }
            };

            match RolloutRecorder::get_rollout_history(&path).await {
                Ok(initial_history) => initial_history,
                Err(err) => {
                    self.send_invalid_request_error(
                        request_id,
                        format!("failed to load rollout `{}`: {err}", path.display()),
                    )
                    .await;
                    return;
                }
            }
        };

        let history_cwd = thread_history.session_cwd();
@@ -2780,12 +2805,7 @@ impl CodexMessageProcessor {

        match self
            .thread_manager
            .resume_thread_with_history(
                config,
                thread_history,
                self.auth_manager.clone(),
                persist_extended_history,
            )
            .resume_thread_with_history(config, thread_history, self.auth_manager.clone())
            .await
        {
            Ok(NewThread {
@@ -2819,17 +2839,41 @@ impl CodexMessageProcessor {
            );
        }

        let Some(thread) = self
            .load_thread_from_rollout_or_send_internal(
                request_id.clone(),
                thread_id,
                rollout_path.as_path(),
                fallback_model_provider.as_str(),
            )
            .await
        else {
            return;
        let mut thread = match read_summary_from_rollout(
            rollout_path.as_path(),
            fallback_model_provider.as_str(),
        )
        .await
        {
            Ok(summary) => summary_to_thread(summary),
            Err(err) => {
                self.send_internal_error(
                    request_id,
                    format!(
                        "failed to load rollout `{}` for thread {thread_id}: {err}",
                        rollout_path.display()
                    ),
                )
                .await;
                return;
            }
        };
        match read_rollout_items_from_rollout(rollout_path.as_path()).await {
            Ok(items) => {
                thread.turns = build_turns_from_rollout_items(&items);
            }
            Err(err) => {
                self.send_internal_error(
                    request_id,
                    format!(
                        "failed to load rollout `{}` for thread {thread_id}: {err}",
                        rollout_path.display()
                    ),
                )
                .await;
                return;
            }
        }

        let response = ThreadResumeResponse {
            thread,
@@ -2854,278 +2898,6 @@ impl CodexMessageProcessor {
        }
    }

    async fn resume_running_thread(
        &mut self,
        request_id: ConnectionRequestId,
        params: &ThreadResumeParams,
    ) -> bool {
        if let Ok(existing_thread_id) = ThreadId::from_string(&params.thread_id)
            && let Ok(existing_thread) = self.thread_manager.get_thread(existing_thread_id).await
        {
            if params.history.is_some() {
                self.send_invalid_request_error(
                    request_id,
                    format!(
                        "cannot resume thread {existing_thread_id} with history while it is already running"
                    ),
                )
                .await;
                return true;
            }

            let rollout_path = if let Some(path) = existing_thread.rollout_path() {
                if path.exists() {
                    path
                } else {
                    match find_thread_path_by_id_str(
                        &self.config.codex_home,
                        &existing_thread_id.to_string(),
                    )
                    .await
                    {
                        Ok(Some(path)) => path,
                        Ok(None) => {
                            self.send_invalid_request_error(
                                request_id,
                                format!("no rollout found for thread id {existing_thread_id}"),
                            )
                            .await;
                            return true;
                        }
                        Err(err) => {
                            self.send_invalid_request_error(
                                request_id,
                                format!("failed to locate thread id {existing_thread_id}: {err}"),
                            )
                            .await;
                            return true;
                        }
                    }
                }
            } else {
                match find_thread_path_by_id_str(
                    &self.config.codex_home,
                    &existing_thread_id.to_string(),
                )
                .await
                {
                    Ok(Some(path)) => path,
                    Ok(None) => {
                        self.send_invalid_request_error(
                            request_id,
                            format!("no rollout found for thread id {existing_thread_id}"),
                        )
                        .await;
                        return true;
                    }
                    Err(err) => {
                        self.send_invalid_request_error(
                            request_id,
                            format!("failed to locate thread id {existing_thread_id}: {err}"),
                        )
                        .await;
                        return true;
                    }
                }
            };

            if let Some(requested_path) = params.path.as_ref()
                && requested_path != &rollout_path
            {
                self.send_invalid_request_error(
                    request_id,
                    format!(
                        "cannot resume running thread {existing_thread_id} with mismatched path: requested `{}`, active `{}`",
                        requested_path.display(),
                        rollout_path.display()
                    ),
                )
                .await;
                return true;
            }

            if let Err(err) = self
                .ensure_conversation_listener(
                    existing_thread_id,
                    request_id.connection_id,
                    false,
                    ApiVersion::V2,
                )
                .await
            {
                tracing::warn!(
                    "failed to attach listener for thread {}: {}",
                    existing_thread_id,
                    err.message
                );
            }

            let config_snapshot = existing_thread.config_snapshot().await;
            let mismatch_details = collect_resume_override_mismatches(params, &config_snapshot);
            if !mismatch_details.is_empty() {
                tracing::warn!(
                    "thread/resume overrides ignored for running thread {}: {}",
                    existing_thread_id,
                    mismatch_details.join("; ")
                );
            }

            let Some(thread) = self
                .load_thread_from_rollout_or_send_internal(
                    request_id.clone(),
                    existing_thread_id,
                    rollout_path.as_path(),
                    config_snapshot.model_provider_id.as_str(),
                )
                .await
            else {
                return true;
            };

            let ThreadConfigSnapshot {
                model,
                model_provider_id,
                approval_policy,
                sandbox_policy,
                cwd,
                reasoning_effort,
                ..
            } = config_snapshot;
            let response = ThreadResumeResponse {
                thread,
                model,
                model_provider: model_provider_id,
                cwd,
                approval_policy: approval_policy.into(),
                sandbox: sandbox_policy.into(),
                reasoning_effort,
            };
            self.outgoing.send_response(request_id, response).await;
            return true;
        }
        false
    }

    async fn resume_thread_from_history(
        &self,
        request_id: ConnectionRequestId,
        history: &[ResponseItem],
    ) -> Option<InitialHistory> {
        if history.is_empty() {
            self.send_invalid_request_error(request_id, "history must not be empty".to_string())
                .await;
            return None;
        }
        Some(InitialHistory::Forked(
            history
                .iter()
                .cloned()
                .map(RolloutItem::ResponseItem)
                .collect(),
        ))
    }

    async fn resume_thread_from_rollout(
        &self,
        request_id: ConnectionRequestId,
        thread_id: &str,
        path: Option<&PathBuf>,
    ) -> Option<InitialHistory> {
        let rollout_path = if let Some(path) = path {
            path.clone()
        } else {
            let existing_thread_id = match ThreadId::from_string(thread_id) {
                Ok(id) => id,
                Err(err) => {
                    let error = JSONRPCErrorError {
                        code: INVALID_REQUEST_ERROR_CODE,
                        message: format!("invalid thread id: {err}"),
                        data: None,
                    };
                    self.outgoing.send_error(request_id, error).await;
                    return None;
                }
            };

            match find_thread_path_by_id_str(
                &self.config.codex_home,
                &existing_thread_id.to_string(),
            )
            .await
            {
                Ok(Some(path)) => path,
                Ok(None) => {
                    self.send_invalid_request_error(
                        request_id,
                        format!("no rollout found for thread id {existing_thread_id}"),
                    )
                    .await;
                    return None;
                }
                Err(err) => {
                    self.send_invalid_request_error(
                        request_id,
                        format!("failed to locate thread id {existing_thread_id}: {err}"),
                    )
                    .await;
                    return None;
                }
            }
        };

        match RolloutRecorder::get_rollout_history(&rollout_path).await {
            Ok(initial_history) => Some(initial_history),
            Err(err) => {
                self.send_invalid_request_error(
                    request_id,
                    format!("failed to load rollout `{}`: {err}", rollout_path.display()),
                )
                .await;
                None
            }
        }
    }

    async fn load_thread_from_rollout_or_send_internal(
        &self,
        request_id: ConnectionRequestId,
        thread_id: ThreadId,
        rollout_path: &Path,
        fallback_provider: &str,
    ) -> Option<Thread> {
        let mut thread = match read_summary_from_rollout(rollout_path, fallback_provider).await {
            Ok(summary) => summary_to_thread(summary),
            Err(err) => {
                self.send_internal_error(
                    request_id,
                    format!(
                        "failed to load rollout `{}` for thread {thread_id}: {err}",
                        rollout_path.display()
                    ),
                )
                .await;
                return None;
            }
        };
        match read_rollout_items_from_rollout(rollout_path).await {
            Ok(items) => {
                thread.turns = build_turns_from_rollout_items(&items);
                Some(thread)
            }
            Err(err) => {
                self.send_internal_error(
                    request_id,
                    format!(
                        "failed to load rollout `{}` for thread {thread_id}: {err}",
                        rollout_path.display()
                    ),
                )
                .await;
                None
            }
        }
    }

    async fn thread_fork(&mut self, request_id: ConnectionRequestId, params: ThreadForkParams) {
        let ThreadForkParams {
            thread_id,
@@ -3138,7 +2910,6 @@ impl CodexMessageProcessor {
            config: cli_overrides,
            base_instructions,
            developer_instructions,
            persist_extended_history,
        } = params;

        let (rollout_path, source_thread_id) = if let Some(path) = path {
@@ -3250,12 +3021,7 @@ impl CodexMessageProcessor {
            ..
        } = match self
            .thread_manager
            .fork_thread(
                usize::MAX,
                config,
                rollout_path.clone(),
                persist_extended_history,
            )
            .fork_thread(usize::MAX, config, rollout_path.clone())
            .await
        {
            Ok(thread) => thread,
@@ -3442,13 +3208,10 @@ impl CodexMessageProcessor {
            .list_threads_common(
                requested_page_size,
                cursor,
                model_providers,
                None,
                CoreThreadSortKey::UpdatedAt,
                ThreadListFilters {
                    model_providers,
                    source_kinds: None,
                    archived: false,
                    cwd: None,
                },
                false,
            )
            .await
        {
@@ -3466,15 +3229,11 @@ impl CodexMessageProcessor {
        &self,
        requested_page_size: usize,
        cursor: Option<String>,
        model_providers: Option<Vec<String>>,
        source_kinds: Option<Vec<ThreadSourceKind>>,
        sort_key: CoreThreadSortKey,
        filters: ThreadListFilters,
        archived: bool,
    ) -> Result<(Vec<ConversationSummary>, Option<String>), JSONRPCErrorError> {
        let ThreadListFilters {
            model_providers,
            source_kinds,
            archived,
            cwd,
        } = filters;
        let mut cursor_obj: Option<RolloutCursor> = match cursor.as_ref() {
            Some(cursor_str) => {
                Some(parse_cursor(cursor_str).ok_or_else(|| JSONRPCErrorError {
@@ -3555,9 +3314,6 @@ impl CodexMessageProcessor {
                if source_kind_filter
                    .as_ref()
                    .is_none_or(|filter| source_kind_matches(&summary.source, filter))
                    && cwd
                        .as_ref()
                        .is_none_or(|expected_cwd| &summary.cwd == expected_cwd)
                {
                    filtered.push(summary);
                    if filtered.len() >= remaining {
@@ -3603,15 +3359,10 @@ impl CodexMessageProcessor {
        request_id: ConnectionRequestId,
        params: ModelListParams,
    ) {
        let ModelListParams {
            limit,
            cursor,
            include_hidden,
        } = params;
        let ModelListParams { limit, cursor } = params;
        let mut config = (*config).clone();
        config.features.enable(Feature::RemoteModels);
        let models =
            supported_models(thread_manager, &config, include_hidden.unwrap_or(false)).await;
        let models = supported_models(thread_manager, &config).await;
        let total = models.len();

        if total == 0 {
@@ -4211,7 +3962,7 @@ impl CodexMessageProcessor {

        match self
            .thread_manager
            .resume_thread_with_history(config, thread_history, self.auth_manager.clone(), false)
            .resume_thread_with_history(config, thread_history, self.auth_manager.clone())
            .await
        {
            Ok(NewThread {
@@ -4411,7 +4162,7 @@ impl CodexMessageProcessor {
            ..
        } = match self
            .thread_manager
            .fork_thread(usize::MAX, config, rollout_path.clone(), false)
            .fork_thread(usize::MAX, config, rollout_path.clone())
            .await
        {
            Ok(thread) => thread,
@@ -4825,11 +4576,6 @@ impl CodexMessageProcessor {
            None => 0,
        };

        let (mut accessible_connectors, mut all_connectors) = tokio::join!(
            connectors::list_cached_accessible_connectors_from_mcp_tools(&config),
            connectors::list_cached_all_connectors(&config)
        );

        let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();

        let accessible_config = config.clone();
@@ -4844,17 +4590,16 @@ impl CodexMessageProcessor {
            let _ = accessible_tx.send(AppListLoadResult::Accessible(result));
        });

        let all_config = config.clone();
        tokio::spawn(async move {
            let result = connectors::list_all_connectors_with_options(&all_config, force_refetch)
            let result = connectors::list_all_connectors_with_options(&config, force_refetch)
                .await
                .map_err(|err| format!("failed to list apps: {err}"));
            let _ = tx.send(AppListLoadResult::Directory(result));
        });

        let mut accessible_connectors: Option<Vec<AppInfo>> = None;
        let mut all_connectors: Option<Vec<AppInfo>> = None;
        let app_list_deadline = tokio::time::Instant::now() + APP_LIST_LOAD_TIMEOUT;
        let mut accessible_loaded = false;
        let mut all_loaded = false;

        loop {
            let result = match tokio::time::timeout_at(app_list_deadline, rx.recv()).await {
@@ -4885,7 +4630,6 @@ impl CodexMessageProcessor {
            match result {
                AppListLoadResult::Accessible(Ok(connectors)) => {
                    accessible_connectors = Some(connectors);
                    accessible_loaded = true;
                }
                AppListLoadResult::Accessible(Err(err)) => {
                    let error = JSONRPCErrorError {
@@ -4898,7 +4642,6 @@ impl CodexMessageProcessor {
                }
                AppListLoadResult::Directory(Ok(connectors)) => {
                    all_connectors = Some(connectors);
                    all_loaded = true;
                }
                AppListLoadResult::Directory(Err(err)) => {
                    let error = JSONRPCErrorError {
@@ -4911,16 +4654,13 @@ impl CodexMessageProcessor {
                }
            }

            let merged = connectors::with_app_enabled_state(
                Self::merge_loaded_apps(
                    all_connectors.as_deref(),
                    accessible_connectors.as_deref(),
                ),
                &config,
            let merged = Self::merge_loaded_apps(
                all_connectors.as_deref(),
                accessible_connectors.as_deref(),
            );
            Self::send_app_list_updated_notification(&outgoing, merged.clone()).await;

            if accessible_loaded && all_loaded {
            if accessible_connectors.is_some() && all_connectors.is_some() {
                match Self::paginate_apps(merged.as_slice(), start, limit) {
                    Ok(response) => {
                        outgoing.send_response(request_id, response).await;
@@ -4939,10 +4679,9 @@ impl CodexMessageProcessor {
        all_connectors: Option<&[AppInfo]>,
        accessible_connectors: Option<&[AppInfo]>,
    ) -> Vec<AppInfo> {
        let all_connectors_loaded = all_connectors.is_some();
        let all = all_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
        let accessible = accessible_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
        connectors::merge_connectors_with_accessible(all, accessible, all_connectors_loaded)
        connectors::merge_connectors_with_accessible(all, accessible)
    }

    fn paginate_apps(
@@ -5400,13 +5139,10 @@ impl CodexMessageProcessor {
        &mut self,
        request_id: &ConnectionRequestId,
        parent_thread_id: ThreadId,
        parent_thread: Arc<CodexThread>,
        review_request: ReviewRequest,
        display_text: &str,
    ) -> std::result::Result<(), JSONRPCErrorError> {
        let rollout_path = if let Some(path) = parent_thread.rollout_path() {
            path
        } else {
            let rollout_path =
                find_thread_path_by_id_str(&self.config.codex_home, &parent_thread_id.to_string())
                    .await
                    .map_err(|err| JSONRPCErrorError {
@@ -5418,8 +5154,7 @@ impl CodexMessageProcessor {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!("no rollout found for thread id {parent_thread_id}"),
                data: None,
            })?
        };
            })?;

        let mut config = self.config.as_ref().clone();
        if let Some(review_model) = &config.review_model {
@@ -5433,7 +5168,7 @@ impl CodexMessageProcessor {
            ..
        } = self
            .thread_manager
            .fork_thread(usize::MAX, config, rollout_path, false)
            .fork_thread(usize::MAX, config, rollout_path)
            .await
            .map_err(|err| JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
@@ -5542,7 +5277,6 @@ impl CodexMessageProcessor {
            .start_detached_review(
                &request_id,
                parent_thread_id,
                parent_thread,
                review_request,
                display_text.as_str(),
            )
@@ -6023,101 +5757,6 @@ impl CodexMessageProcessor {
    }
}

fn collect_resume_override_mismatches(
    request: &ThreadResumeParams,
    config_snapshot: &ThreadConfigSnapshot,
) -> Vec<String> {
    let mut mismatch_details = Vec::new();

    if let Some(requested_model) = request.model.as_deref()
        && requested_model != config_snapshot.model
    {
        mismatch_details.push(format!(
            "model requested={requested_model} active={}",
            config_snapshot.model
        ));
    }
    if let Some(requested_provider) = request.model_provider.as_deref()
        && requested_provider != config_snapshot.model_provider_id
    {
        mismatch_details.push(format!(
            "model_provider requested={requested_provider} active={}",
            config_snapshot.model_provider_id
        ));
    }
    if let Some(requested_cwd) = request.cwd.as_deref() {
        let requested_cwd_path = std::path::PathBuf::from(requested_cwd);
        if requested_cwd_path != config_snapshot.cwd {
            mismatch_details.push(format!(
                "cwd requested={} active={}",
                requested_cwd_path.display(),
                config_snapshot.cwd.display()
            ));
        }
    }
    if let Some(requested_approval) = request.approval_policy.as_ref() {
        let active_approval: AskForApproval = config_snapshot.approval_policy.into();
        if requested_approval != &active_approval {
            mismatch_details.push(format!(
                "approval_policy requested={requested_approval:?} active={active_approval:?}"
            ));
        }
    }
    if let Some(requested_sandbox) = request.sandbox.as_ref() {
        let sandbox_matches = matches!(
            (requested_sandbox, &config_snapshot.sandbox_policy),
            (
                SandboxMode::ReadOnly,
                codex_protocol::protocol::SandboxPolicy::ReadOnly { .. }
            ) | (
                SandboxMode::WorkspaceWrite,
                codex_protocol::protocol::SandboxPolicy::WorkspaceWrite { .. }
            ) | (
                SandboxMode::DangerFullAccess,
                codex_protocol::protocol::SandboxPolicy::DangerFullAccess
            ) | (
                SandboxMode::DangerFullAccess,
                codex_protocol::protocol::SandboxPolicy::ExternalSandbox { .. }
            )
        );
        if !sandbox_matches {
            mismatch_details.push(format!(
                "sandbox requested={requested_sandbox:?} active={:?}",
                config_snapshot.sandbox_policy
            ));
        }
    }
    if let Some(requested_personality) = request.personality.as_ref()
        && config_snapshot.personality.as_ref() != Some(requested_personality)
    {
        mismatch_details.push(format!(
            "personality requested={requested_personality:?} active={:?}",
            config_snapshot.personality
        ));
    }

    if request.config.is_some() {
        mismatch_details
            .push("config overrides were provided and ignored while running".to_string());
    }
    if request.base_instructions.is_some() {
        mismatch_details
            .push("baseInstructions override was provided and ignored while running".to_string());
    }
    if request.developer_instructions.is_some() {
        mismatch_details.push(
            "developerInstructions override was provided and ignored while running".to_string(),
        );
    }
    if request.persist_extended_history {
        mismatch_details.push(
            "persistExtendedHistory override was provided and ignored while running".to_string(),
        );
    }

    mismatch_details
}

fn skills_to_info(
    skills: &[codex_core::skills::SkillMetadata],
    disabled_paths: &std::collections::HashSet<PathBuf>,

@@ -7,35 +7,14 @@ use std::sync::Arc;
|
||||
use tokio::sync::oneshot;
|
||||
use tracing::error;
|
||||
|
||||
use crate::outgoing_message::ClientRequestResult;
|
||||
|
||||
pub(crate) async fn on_call_response(
|
||||
call_id: String,
|
||||
receiver: oneshot::Receiver<ClientRequestResult>,
|
||||
receiver: oneshot::Receiver<serde_json::Value>,
|
||||
conversation: Arc<CodexThread>,
|
||||
) {
|
||||
let response = receiver.await;
|
||||
let value = match response {
|
||||
Ok(Ok(value)) => value,
|
||||
Ok(Err(err)) => {
|
||||
error!("request failed with client error: {err:?}");
|
||||
let fallback = CoreDynamicToolResponse {
|
||||
content_items: vec![CoreDynamicToolCallOutputContentItem::InputText {
|
||||
text: "dynamic tool request failed".to_string(),
|
||||
}],
|
||||
success: false,
|
||||
};
|
||||
if let Err(err) = conversation
|
||||
.submit(Op::DynamicToolResponse {
|
||||
id: call_id.clone(),
|
||||
response: fallback,
|
||||
})
|
||||
.await
|
||||
{
|
||||
error!("failed to submit DynamicToolResponse: {err}");
|
||||
}
|
||||
return;
|
||||
}
|
||||
Ok(value) => value,
|
||||
Err(err) => {
|
||||
error!("request failed: {err:?}");
|
||||
let fallback = CoreDynamicToolResponse {
|
||||
|
||||
@@ -6,7 +6,6 @@ use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;

use codex_app_server_protocol::FuzzyFileSearchResult;
use codex_app_server_protocol::FuzzyFileSearchSessionCompletedNotification;
use codex_app_server_protocol::FuzzyFileSearchSessionUpdatedNotification;
use codex_app_server_protocol::ServerNotification;
use codex_file_search as file_search;
@@ -196,20 +195,6 @@ impl SessionReporterImpl {
            outgoing.send_server_notification(notification).await;
        });
    }

    fn send_complete(&self) {
        if self.shared.canceled.load(Ordering::Relaxed) {
            return;
        }
        let session_id = self.shared.session_id.clone();
        let outgoing = self.shared.outgoing.clone();
        self.shared.runtime.spawn(async move {
            let notification = ServerNotification::FuzzyFileSearchSessionCompleted(
                FuzzyFileSearchSessionCompletedNotification { session_id },
            );
            outgoing.send_server_notification(notification).await;
        });
    }
}

impl file_search::SessionReporter for SessionReporterImpl {
@@ -217,9 +202,7 @@ impl file_search::SessionReporter for SessionReporterImpl {
        self.send_snapshot(snapshot);
    }

    fn on_complete(&self) {
        self.send_complete();
    }
    fn on_complete(&self) {}
}

fn collect_files(snapshot: &file_search::FileSearchSnapshot) -> Vec<FuzzyFileSearchResult> {

@@ -86,20 +86,9 @@ impl ExternalAuthRefresher for ExternalAuthRefreshBridge {
            .await;

        let result = match timeout(EXTERNAL_AUTH_REFRESH_TIMEOUT, rx).await {
            Ok(result) => {
                // Two failure scenarios:
                // 1) `oneshot::Receiver` failed (sender dropped) => request canceled/channel closed.
                // 2) client answered with JSON-RPC error payload => propagate code/message.
                let result = result.map_err(|err| {
                    std::io::Error::other(format!("auth refresh request canceled: {err}"))
                })?;
                result.map_err(|err| {
                    std::io::Error::other(format!(
                        "auth refresh request failed: code={} message={}",
                        err.code, err.message
                    ))
                })?
            }
            Ok(result) => result.map_err(|err| {
                std::io::Error::other(format!("auth refresh request canceled: {err}"))
            })?,
            Err(_) => {
                let _canceled = self.outgoing.cancel_request(&request_id).await;
                return Err(std::io::Error::other(format!(

@@ -8,16 +8,12 @@ use codex_core::models_manager::manager::RefreshStrategy;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffortPreset;

pub async fn supported_models(
    thread_manager: Arc<ThreadManager>,
    config: &Config,
    include_hidden: bool,
) -> Vec<Model> {
pub async fn supported_models(thread_manager: Arc<ThreadManager>, config: &Config) -> Vec<Model> {
    thread_manager
        .list_models(config, RefreshStrategy::OnlineIfUncached)
        .await
        .into_iter()
        .filter(|preset| include_hidden || preset.show_in_picker)
        .filter(|preset| preset.show_in_picker)
        .map(model_from_preset)
        .collect()
}
@@ -29,7 +25,6 @@ fn model_from_preset(preset: ModelPreset) -> Model {
        upgrade: preset.upgrade.map(|upgrade| upgrade.id),
        display_name: preset.display_name.to_string(),
        description: preset.description.to_string(),
        hidden: !preset.show_in_picker,
        supported_reasoning_efforts: reasoning_efforts_from_preset(
            preset.supported_reasoning_efforts,
        ),

@@ -20,8 +20,6 @@ use crate::error_code::INTERNAL_ERROR_CODE;
#[cfg(test)]
use codex_protocol::account::PlanType;

pub(crate) type ClientRequestResult = std::result::Result<Result, JSONRPCErrorError>;

/// Stable identifier for a transport connection.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ConnectionId(pub(crate) u64);
@@ -48,7 +46,7 @@ pub(crate) enum OutgoingEnvelope {
pub(crate) struct OutgoingMessageSender {
    next_server_request_id: AtomicI64,
    sender: mpsc::Sender<OutgoingEnvelope>,
    request_id_to_callback: Mutex<HashMap<RequestId, oneshot::Sender<ClientRequestResult>>>,
    request_id_to_callback: Mutex<HashMap<RequestId, oneshot::Sender<Result>>>,
}

#[derive(Clone)]
@@ -71,7 +69,7 @@ impl ThreadScopedOutgoingMessageSender {
    pub(crate) async fn send_request(
        &self,
        payload: ServerRequestPayload,
    ) -> oneshot::Receiver<ClientRequestResult> {
    ) -> oneshot::Receiver<Result> {
        if self.connection_ids.is_empty() {
            let (_tx, rx) = oneshot::channel();
            return rx;
@@ -120,7 +118,7 @@ impl OutgoingMessageSender {
        &self,
        connection_ids: &[ConnectionId],
        request: ServerRequestPayload,
    ) -> oneshot::Receiver<ClientRequestResult> {
    ) -> oneshot::Receiver<Result> {
        let (_id, rx) = self
            .send_request_with_id_to_connections(connection_ids, request)
            .await;
@@ -130,7 +128,7 @@ impl OutgoingMessageSender {
    pub(crate) async fn send_request_with_id(
        &self,
        request: ServerRequestPayload,
    ) -> (RequestId, oneshot::Receiver<ClientRequestResult>) {
    ) -> (RequestId, oneshot::Receiver<Result>) {
        self.send_request_with_id_to_connections(&[], request).await
    }

@@ -138,7 +136,7 @@ impl OutgoingMessageSender {
        &self,
        connection_ids: &[ConnectionId],
        request: ServerRequestPayload,
    ) -> (RequestId, oneshot::Receiver<ClientRequestResult>) {
    ) -> (RequestId, oneshot::Receiver<Result>) {
        let id = RequestId::Integer(self.next_server_request_id.fetch_add(1, Ordering::Relaxed));
        let outgoing_message_id = id.clone();
        let (tx_approve, rx_approve) = oneshot::channel();
@@ -192,7 +190,7 @@ impl OutgoingMessageSender {

        match entry {
            Some((id, sender)) => {
                if let Err(err) = sender.send(Ok(result)) {
                if let Err(err) = sender.send(result) {
                    warn!("could not notify callback for {id:?} due to: {err:?}");
                }
            }
@@ -209,11 +207,8 @@ impl OutgoingMessageSender {
        };

        match entry {
            Some((id, sender)) => {
            Some((id, _sender)) => {
                warn!("client responded with error for {id:?}: {error:?}");
                if let Err(err) = sender.send(Err(error)) {
                    warn!("could not notify callback for {id:?} due to: {err:?}");
                }
            }
            None => {
                warn!("could not find callback for {id:?}");
@@ -395,13 +390,11 @@ mod tests {
    use codex_app_server_protocol::AccountLoginCompletedNotification;
    use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
    use codex_app_server_protocol::AccountUpdatedNotification;
    use codex_app_server_protocol::ApplyPatchApprovalParams;
    use codex_app_server_protocol::AuthMode;
    use codex_app_server_protocol::ConfigWarningNotification;
    use codex_app_server_protocol::LoginChatGptCompleteNotification;
    use codex_app_server_protocol::RateLimitSnapshot;
    use codex_app_server_protocol::RateLimitWindow;
    use codex_protocol::ThreadId;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use tokio::time::timeout;
@@ -616,38 +609,4 @@ mod tests {
            other => panic!("expected targeted error envelope, got: {other:?}"),
        }
    }

    #[tokio::test]
    async fn notify_client_error_forwards_error_to_waiter() {
        let (tx, _rx) = mpsc::channel::<OutgoingEnvelope>(4);
        let outgoing = OutgoingMessageSender::new(tx);

        let (request_id, wait_for_result) = outgoing
            .send_request_with_id(ServerRequestPayload::ApplyPatchApproval(
                ApplyPatchApprovalParams {
                    conversation_id: ThreadId::new(),
                    call_id: "call-id".to_string(),
                    file_changes: HashMap::new(),
                    reason: None,
                    grant_root: None,
                },
            ))
            .await;

        let error = JSONRPCErrorError {
            code: INTERNAL_ERROR_CODE,
            message: "refresh failed".to_string(),
            data: None,
        };

        outgoing
            .notify_client_error(request_id, error.clone())
            .await;

        let result = timeout(Duration::from_secs(1), wait_for_result)
            .await
            .expect("wait should not time out")
            .expect("waiter should receive a callback");
        assert_eq!(result, Err(error));
    }
}

@@ -41,7 +41,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
        experimental_supported_tools: Vec::new(),
        input_modalities: default_input_modalities(),
        prefer_websockets: false,
        used_fallback_model_metadata: false,
    }
}


@@ -1,7 +1,6 @@
use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use codex_app_server_protocol::FuzzyFileSearchSessionCompletedNotification;
use codex_app_server_protocol::FuzzyFileSearchSessionUpdatedNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
@@ -14,7 +13,6 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
const SHORT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(500);
const STOP_GRACE_PERIOD: std::time::Duration = std::time::Duration::from_millis(250);
const SESSION_UPDATED_METHOD: &str = "fuzzyFileSearch/sessionUpdated";
const SESSION_COMPLETED_METHOD: &str = "fuzzyFileSearch/sessionCompleted";

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum FileExpectation {
@@ -62,29 +60,6 @@ async fn wait_for_session_updated(
    );
}

async fn wait_for_session_completed(
    mcp: &mut McpProcess,
    session_id: &str,
) -> Result<FuzzyFileSearchSessionCompletedNotification> {
    for _ in 0..20 {
        let notification = timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_notification_message(SESSION_COMPLETED_METHOD),
        )
        .await??;
        let params = notification
            .params
            .ok_or_else(|| anyhow!("missing notification params"))?;
        let payload =
            serde_json::from_value::<FuzzyFileSearchSessionCompletedNotification>(params)?;
        if payload.session_id == session_id {
            return Ok(payload);
        }
    }

    anyhow::bail!("did not receive expected session completion for sessionId={session_id}");
}

async fn assert_update_request_fails_for_missing_session(
    mcp: &mut McpProcess,
    session_id: &str,
@@ -296,41 +271,12 @@ async fn test_fuzzy_file_search_session_streams_updates() -> Result<()> {
    assert_eq!(payload.files.len(), 1);
    assert_eq!(payload.files[0].root, root_path);
    assert_eq!(payload.files[0].path, "alpha.txt");
    let completed = wait_for_session_completed(&mut mcp, session_id).await?;
    assert_eq!(completed.session_id, session_id);

    mcp.stop_fuzzy_file_search_session(session_id).await?;

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fuzzy_file_search_session_no_updates_after_complete_until_query_edited() -> Result<()>
{
    let codex_home = TempDir::new()?;
    let root = TempDir::new()?;
    std::fs::write(root.path().join("alpha.txt"), "contents")?;
    let mut mcp = initialized_mcp(&codex_home).await?;

    let root_path = root.path().to_string_lossy().to_string();
    let session_id = "session-complete-invariant";
    mcp.start_fuzzy_file_search_session(session_id, vec![root_path])
        .await?;

    mcp.update_fuzzy_file_search_session(session_id, "alp")
        .await?;
    wait_for_session_updated(&mut mcp, session_id, "alp", FileExpectation::NonEmpty).await?;
    wait_for_session_completed(&mut mcp, session_id).await?;
    assert_no_session_updates_for(&mut mcp, session_id, STOP_GRACE_PERIOD, SHORT_READ_TIMEOUT)
        .await?;

    mcp.update_fuzzy_file_search_session(session_id, "alpha")
        .await?;
    wait_for_session_updated(&mut mcp, session_id, "alpha", FileExpectation::NonEmpty).await?;

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fuzzy_file_search_session_update_before_start_errors() -> Result<()> {
    let codex_home = TempDir::new()?;
@@ -399,7 +345,6 @@ async fn test_fuzzy_file_search_session_multiple_query_updates_work() -> Result<
        alp_payload.files.iter().all(|file| file.root == root_path),
        true
    );
    wait_for_session_completed(&mut mcp, session_id).await?;

    mcp.update_fuzzy_file_search_session(session_id, "zzzz")
        .await?;
@@ -407,7 +352,6 @@ async fn test_fuzzy_file_search_session_multiple_query_updates_work() -> Result<
    wait_for_session_updated(&mut mcp, session_id, "zzzz", FileExpectation::Any).await?;
    assert_eq!(zzzz_payload.query, "zzzz");
    assert_eq!(zzzz_payload.files.is_empty(), true);
    wait_for_session_completed(&mut mcp, session_id).await?;

    Ok(())
}

@@ -564,7 +564,6 @@ fn append_rollout_turn_context(path: &Path, timestamp: &str, model: &str) -> std
        cwd: PathBuf::from("/"),
        approval_policy: AskForApproval::Never,
        sandbox_policy: SandboxPolicy::DangerFullAccess,
        network: None,
        model: model.to_string(),
        personality: None,
        collaboration_mode: None,

@@ -1,6 +1,5 @@
use std::borrow::Cow;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::time::Duration;

use anyhow::Result;
@@ -87,7 +86,6 @@ async fn list_apps_uses_thread_feature_flag_when_thread_id_is_provided() -> Resu
        distribution_channel: None,
        install_url: None,
        is_accessible: false,
        is_enabled: true,
    }];
    let tools = vec![connector_tool("beta", "Beta App")?];
    let (server_url, server_handle) =
@@ -175,78 +173,6 @@ connectors = false
    Ok(())
}

#[tokio::test]
async fn list_apps_reports_is_enabled_from_config() -> Result<()> {
    let connectors = vec![AppInfo {
        id: "beta".to_string(),
        name: "Beta".to_string(),
        description: Some("Beta connector".to_string()),
        logo_url: None,
        logo_url_dark: None,
        distribution_channel: None,
        install_url: None,
        is_accessible: false,
        is_enabled: true,
    }];
    let tools = vec![connector_tool("beta", "Beta App")?];
    let (server_url, server_handle) =
        start_apps_server_with_delays(connectors, tools, Duration::ZERO, Duration::ZERO).await?;

    let codex_home = TempDir::new()?;
    std::fs::write(
        codex_home.path().join("config.toml"),
        format!(
            r#"
chatgpt_base_url = "{server_url}"

[features]
connectors = true

[apps.beta]
enabled = false
"#
        ),
    )?;
    write_chatgpt_auth(
        codex_home.path(),
        ChatGptAuthFixture::new("chatgpt-token")
            .account_id("account-123")
            .chatgpt_user_id("user-123")
            .chatgpt_account_id("account-123"),
        AuthCredentialsStoreMode::File,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;

    let request_id = mcp
        .send_apps_list_request(AppsListParams {
            limit: None,
            cursor: None,
            thread_id: None,
            force_refetch: false,
        })
        .await?;

    let response: JSONRPCResponse = timeout(
        DEFAULT_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let AppsListResponse {
        data: response_data,
        next_cursor,
    } = to_response(response)?;
    assert!(next_cursor.is_none());
    assert_eq!(response_data.len(), 1);
    assert_eq!(response_data[0].id, "beta");
    assert!(!response_data[0].is_enabled);

    server_handle.abort();
    let _ = server_handle.await;
    Ok(())
}

#[tokio::test]
async fn list_apps_emits_updates_and_returns_after_both_lists_load() -> Result<()> {
    let connectors = vec![
@@ -259,7 +185,6 @@ async fn list_apps_emits_updates_and_returns_after_both_lists_load() -> Result<(
            distribution_channel: None,
            install_url: None,
            is_accessible: false,
            is_enabled: true,
        },
        AppInfo {
            id: "beta".to_string(),
@@ -270,7 +195,6 @@ async fn list_apps_emits_updates_and_returns_after_both_lists_load() -> Result<(
            distribution_channel: None,
            install_url: None,
            is_accessible: false,
            is_enabled: true,
        },
    ];

@@ -315,7 +239,6 @@ async fn list_apps_emits_updates_and_returns_after_both_lists_load() -> Result<(
        distribution_channel: None,
        install_url: Some("https://chatgpt.com/apps/beta-app/beta".to_string()),
        is_accessible: true,
        is_enabled: true,
    }];

    let first_update = read_app_list_updated_notification(&mut mcp).await?;
@@ -331,7 +254,6 @@ async fn list_apps_emits_updates_and_returns_after_both_lists_load() -> Result<(
            distribution_channel: None,
            install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
            is_accessible: true,
            is_enabled: true,
        },
        AppInfo {
            id: "alpha".to_string(),
@@ -342,7 +264,6 @@ async fn list_apps_emits_updates_and_returns_after_both_lists_load() -> Result<(
            distribution_channel: None,
            install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
            is_accessible: false,
            is_enabled: true,
        },
    ];

@@ -379,7 +300,6 @@ async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
            distribution_channel: None,
            install_url: None,
            is_accessible: false,
            is_enabled: true,
        },
        AppInfo {
            id: "beta".to_string(),
@@ -390,7 +310,6 @@ async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
            distribution_channel: None,
            install_url: None,
            is_accessible: false,
            is_enabled: true,
        },
    ];

@@ -439,7 +358,6 @@ async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
                distribution_channel: None,
                install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
                is_accessible: false,
                is_enabled: true,
            },
            AppInfo {
                id: "beta".to_string(),
@@ -450,7 +368,6 @@ async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
                distribution_channel: None,
                install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
                is_accessible: false,
                is_enabled: true,
            },
        ]
    );
@@ -465,7 +382,6 @@ async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
            distribution_channel: None,
            install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
            is_accessible: true,
            is_enabled: true,
        },
        AppInfo {
            id: "alpha".to_string(),
@@ -476,7 +392,6 @@ async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
            distribution_channel: None,
            install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
            is_accessible: false,
            is_enabled: true,
        },
    ];

@@ -508,7 +423,6 @@ async fn list_apps_paginates_results() -> Result<()> {
            distribution_channel: None,
            install_url: None,
            is_accessible: false,
            is_enabled: true,
        },
        AppInfo {
            id: "beta".to_string(),
@@ -519,7 +433,6 @@ async fn list_apps_paginates_results() -> Result<()> {
            distribution_channel: None,
            install_url: None,
            is_accessible: false,
            is_enabled: true,
        },
    ];

@@ -573,7 +486,6 @@ async fn list_apps_paginates_results() -> Result<()> {
        distribution_channel: None,
        install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
        is_accessible: true,
        is_enabled: true,
    }];

    assert_eq!(first_page, expected_first);
@@ -613,7 +525,6 @@ async fn list_apps_paginates_results() -> Result<()> {
        distribution_channel: None,
        install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
        is_accessible: false,
        is_enabled: true,
    }];

    assert_eq!(second_page, expected_second);
@@ -634,7 +545,6 @@ async fn list_apps_force_refetch_preserves_previous_cache_on_failure() -> Result
        distribution_channel: None,
        install_url: None,
        is_accessible: false,
        is_enabled: true,
    }];
    let tools = vec![connector_tool("beta", "Beta App")?];
    let (server_url, server_handle) =
@@ -723,201 +633,6 @@ async fn list_apps_force_refetch_preserves_previous_cache_on_failure() -> Result
    Ok(())
}

#[tokio::test]
async fn list_apps_force_refetch_patches_updates_from_cached_snapshots() -> Result<()> {
    let initial_connectors = vec![
        AppInfo {
            id: "alpha".to_string(),
            name: "Alpha".to_string(),
            description: Some("Alpha v1".to_string()),
            logo_url: None,
            logo_url_dark: None,
            distribution_channel: None,
            install_url: None,
            is_accessible: false,
            is_enabled: true,
        },
        AppInfo {
            id: "beta".to_string(),
            name: "Beta App".to_string(),
            description: Some("Beta v1".to_string()),
            logo_url: None,
            logo_url_dark: None,
            distribution_channel: None,
            install_url: None,
            is_accessible: false,
            is_enabled: true,
        },
    ];
    let initial_tools = vec![connector_tool("beta", "Beta App")?];
    let (server_url, server_handle, server_control) = start_apps_server_with_delays_and_control(
        initial_connectors,
        initial_tools,
        Duration::from_millis(300),
        Duration::ZERO,
    )
    .await?;

    let codex_home = TempDir::new()?;
    write_connectors_config(codex_home.path(), &server_url)?;
    write_chatgpt_auth(
        codex_home.path(),
        ChatGptAuthFixture::new("chatgpt-token")
            .account_id("account-123")
            .chatgpt_user_id("user-123")
            .chatgpt_account_id("account-123"),
        AuthCredentialsStoreMode::File,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;

    let warm_request = mcp
        .send_apps_list_request(AppsListParams {
            limit: None,
            cursor: None,
            thread_id: None,
            force_refetch: false,
        })
        .await?;
    let warm_first_update = read_app_list_updated_notification(&mut mcp).await?;
    assert_eq!(
        warm_first_update.data,
        vec![AppInfo {
            id: "beta".to_string(),
            name: "Beta App".to_string(),
            description: None,
            logo_url: None,
            logo_url_dark: None,
            distribution_channel: None,
            install_url: Some("https://chatgpt.com/apps/beta-app/beta".to_string()),
            is_accessible: true,
            is_enabled: true,
        }]
    );

    let warm_second_update = read_app_list_updated_notification(&mut mcp).await?;
    assert_eq!(
        warm_second_update.data,
        vec![
            AppInfo {
                id: "beta".to_string(),
                name: "Beta App".to_string(),
                description: Some("Beta v1".to_string()),
                logo_url: None,
                logo_url_dark: None,
                distribution_channel: None,
                install_url: Some("https://chatgpt.com/apps/beta-app/beta".to_string()),
                is_accessible: true,
                is_enabled: true,
            },
            AppInfo {
                id: "alpha".to_string(),
                name: "Alpha".to_string(),
                description: Some("Alpha v1".to_string()),
                logo_url: None,
                logo_url_dark: None,
                distribution_channel: None,
                install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
                is_accessible: false,
                is_enabled: true,
            },
        ]
    );

    let warm_response: JSONRPCResponse = timeout(
        DEFAULT_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(warm_request)),
    )
    .await??;
    let AppsListResponse {
        data: warm_data,
        next_cursor: warm_next_cursor,
    } = to_response(warm_response)?;
    assert_eq!(warm_data, warm_second_update.data);
    assert!(warm_next_cursor.is_none());

    server_control.set_connectors(vec![AppInfo {
        id: "alpha".to_string(),
        name: "Alpha".to_string(),
        description: Some("Alpha v2".to_string()),
        logo_url: None,
        logo_url_dark: None,
        distribution_channel: None,
        install_url: None,
        is_accessible: false,
        is_enabled: true,
    }]);
    server_control.set_tools(Vec::new());

    let refetch_request = mcp
        .send_apps_list_request(AppsListParams {
            limit: None,
            cursor: None,
            thread_id: None,
            force_refetch: true,
        })
        .await?;

    let first_update = read_app_list_updated_notification(&mut mcp).await?;
    assert_eq!(
        first_update.data,
        vec![
            AppInfo {
                id: "alpha".to_string(),
                name: "Alpha".to_string(),
                description: Some("Alpha v1".to_string()),
                logo_url: None,
                logo_url_dark: None,
                distribution_channel: None,
                install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
                is_accessible: false,
                is_enabled: true,
            },
            AppInfo {
                id: "beta".to_string(),
                name: "Beta App".to_string(),
                description: Some("Beta v1".to_string()),
                logo_url: None,
                logo_url_dark: None,
                distribution_channel: None,
                install_url: Some("https://chatgpt.com/apps/beta-app/beta".to_string()),
                is_accessible: false,
                is_enabled: true,
            },
        ]
    );

    let expected_final = vec![AppInfo {
        id: "alpha".to_string(),
        name: "Alpha".to_string(),
        description: Some("Alpha v2".to_string()),
        logo_url: None,
        logo_url_dark: None,
        distribution_channel: None,
        install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
        is_accessible: false,
        is_enabled: true,
    }];
    let second_update = read_app_list_updated_notification(&mut mcp).await?;
    assert_eq!(second_update.data, expected_final);

    let refetch_response: JSONRPCResponse = timeout(
        DEFAULT_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(refetch_request)),
    )
    .await??;
    let AppsListResponse {
        data: refetch_data,
        next_cursor: refetch_next_cursor,
    } = to_response(refetch_response)?;
    assert_eq!(refetch_data, expected_final);
    assert!(refetch_next_cursor.is_none());

    server_handle.abort();
    Ok(())
}

async fn read_app_list_updated_notification(
    mcp: &mut McpProcess,
) -> Result<AppListUpdatedNotification> {
@@ -937,46 +652,22 @@ async fn read_app_list_updated_notification(
struct AppsServerState {
    expected_bearer: String,
    expected_account_id: String,
    response: Arc<StdMutex<serde_json::Value>>,
    response: serde_json::Value,
    directory_delay: Duration,
}

#[derive(Clone)]
struct AppListMcpServer {
    tools: Arc<StdMutex<Vec<Tool>>>,
    tools: Arc<Vec<Tool>>,
    tools_delay: Duration,
}

impl AppListMcpServer {
    fn new(tools: Arc<StdMutex<Vec<Tool>>>, tools_delay: Duration) -> Self {
    fn new(tools: Arc<Vec<Tool>>, tools_delay: Duration) -> Self {
        Self { tools, tools_delay }
    }
}

#[derive(Clone)]
struct AppsServerControl {
    response: Arc<StdMutex<serde_json::Value>>,
    tools: Arc<StdMutex<Vec<Tool>>>,
}

impl AppsServerControl {
    fn set_connectors(&self, connectors: Vec<AppInfo>) {
        let mut response_guard = self
            .response
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        *response_guard = json!({ "apps": connectors, "next_token": null });
    }

    fn set_tools(&self, tools: Vec<Tool>) {
        let mut tools_guard = self
            .tools
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        *tools_guard = tools;
    }
}

impl ServerHandler for AppListMcpServer {
    fn get_info(&self) -> ServerInfo {
        ServerInfo {
@@ -997,12 +688,8 @@ impl ServerHandler for AppListMcpServer {
        if tools_delay > Duration::ZERO {
            tokio::time::sleep(tools_delay).await;
        }
        let tools = tools
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner)
            .clone();
        Ok(ListToolsResult {
            tools,
            tools: (*tools).clone(),
            next_cursor: None,
            meta: None,
        })
@@ -1016,33 +703,14 @@ async fn start_apps_server_with_delays(
    directory_delay: Duration,
    tools_delay: Duration,
) -> Result<(String, JoinHandle<()>)> {
    let (server_url, server_handle, _server_control) =
        start_apps_server_with_delays_and_control(connectors, tools, directory_delay, tools_delay)
            .await?;
    Ok((server_url, server_handle))
}

async fn start_apps_server_with_delays_and_control(
    connectors: Vec<AppInfo>,
    tools: Vec<Tool>,
    directory_delay: Duration,
    tools_delay: Duration,
) -> Result<(String, JoinHandle<()>, AppsServerControl)> {
    let response = Arc::new(StdMutex::new(
        json!({ "apps": connectors, "next_token": null }),
    ));
    let tools = Arc::new(StdMutex::new(tools));
    let state = AppsServerState {
        expected_bearer: "Bearer chatgpt-token".to_string(),
        expected_account_id: "account-123".to_string(),
        response: response.clone(),
        response: json!({ "apps": connectors, "next_token": null }),
        directory_delay,
    };
    let state = Arc::new(state);
    let server_control = AppsServerControl {
        response,
        tools: tools.clone(),
    };
    let tools = Arc::new(tools);

    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let addr = listener.local_addr()?;
@@ -1069,7 +737,7 @@ async fn start_apps_server_with_delays_and_control(
        let _ = axum::serve(listener, router).await;
    });

    Ok((format!("http://{addr}"), handle, server_control))
    Ok((format!("http://{addr}"), handle))
}

async fn list_directory_connectors(
@@ -1090,12 +758,7 @@ async fn list_directory_connectors(
        .is_some_and(|value| value == state.expected_account_id);

    if bearer_ok && account_ok {
        let response = state
            .response
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner)
            .clone();
        Ok(Json(response))
        Ok(Json(state.response.clone()))
    } else {
        Err(StatusCode::UNAUTHORIZED)
    }

@@ -33,7 +33,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
        .send_list_models_request(ModelListParams {
            limit: Some(100),
            cursor: None,
            include_hidden: None,
        })
        .await?;

@@ -55,7 +54,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
            upgrade: None,
            display_name: "gpt-5.2-codex".to_string(),
            description: "Latest frontier agentic coding model.".to_string(),
            hidden: false,
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Low,
@@ -86,7 +84,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
            upgrade: Some("gpt-5.2-codex".to_string()),
            display_name: "gpt-5.1-codex-max".to_string(),
            description: "Codex-optimized flagship for deep and fast reasoning.".to_string(),
            hidden: false,
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Low,
@@ -117,7 +114,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
            upgrade: Some("gpt-5.2-codex".to_string()),
            display_name: "gpt-5.1-codex-mini".to_string(),
            description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
            hidden: false,
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Medium,
@@ -142,7 +138,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
            description:
                "Latest frontier model with improvements across knowledge, reasoning and coding"
                    .to_string(),
            hidden: false,
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Low,
@@ -178,38 +173,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
    Ok(())
}

#[tokio::test]
async fn list_models_includes_hidden_models() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_models_cache(codex_home.path())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;

    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;

    let request_id = mcp
        .send_list_models_request(ModelListParams {
            limit: Some(100),
            cursor: None,
            include_hidden: Some(true),
        })
        .await?;

    let response: JSONRPCResponse = timeout(
        DEFAULT_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;

    let ModelListResponse {
        data: items,
        next_cursor,
    } = to_response::<ModelListResponse>(response)?;

    assert!(items.iter().any(|item| item.hidden));
    assert!(next_cursor.is_none());
    Ok(())
}

#[tokio::test]
async fn list_models_pagination_works() -> Result<()> {
    let codex_home = TempDir::new()?;
@@ -222,7 +185,6 @@ async fn list_models_pagination_works() -> Result<()> {
        .send_list_models_request(ModelListParams {
            limit: Some(1),
            cursor: None,
            include_hidden: None,
        })
        .await?;

@@ -245,7 +207,6 @@ async fn list_models_pagination_works() -> Result<()> {
        .send_list_models_request(ModelListParams {
            limit: Some(1),
            cursor: Some(next_cursor.clone()),
            include_hidden: None,
        })
        .await?;

@@ -268,7 +229,6 @@ async fn list_models_pagination_works() -> Result<()> {
        .send_list_models_request(ModelListParams {
            limit: Some(1),
            cursor: Some(third_cursor.clone()),
            include_hidden: None,
        })
        .await?;

@@ -291,7 +251,6 @@ async fn list_models_pagination_works() -> Result<()> {
        .send_list_models_request(ModelListParams {
            limit: Some(1),
            cursor: Some(fourth_cursor.clone()),
            include_hidden: None,
        })
        .await?;

@@ -324,7 +283,6 @@ async fn list_models_rejects_invalid_cursor() -> Result<()> {
        .send_list_models_request(ModelListParams {
            limit: None,
            cursor: Some("invalid".to_string()),
            include_hidden: None,
        })
        .await?;


@@ -255,7 +255,6 @@ async fn review_start_rejects_empty_base_branch() -> Result<()> {
    Ok(())
}

#[cfg_attr(target_os = "windows", ignore = "flaky on windows CI")]
#[tokio::test]
async fn review_start_with_detached_delivery_returns_new_thread_id() -> Result<()> {
    let review_payload = json!({
@@ -438,7 +437,6 @@ model_provider = "mock_provider"

[features]
remote_models = false
shell_snapshot = false

[model_providers.mock_provider]
name = "Mock provider"

@@ -17,8 +17,6 @@ use codex_app_server_protocol::ThreadSourceKind;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_protocol::ThreadId;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SubAgentSource;
use pretty_assertions::assert_eq;
@@ -68,7 +66,6 @@ async fn list_threads_with_sort(
            model_providers: providers,
            source_kinds,
            archived,
            cwd: None,
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
@@ -130,26 +127,6 @@ fn set_rollout_mtime(path: &Path, updated_at_rfc3339: &str) -> Result<()> {
    Ok(())
}

fn set_rollout_cwd(path: &Path, cwd: &Path) -> Result<()> {
    let content = fs::read_to_string(path)?;
    let mut lines: Vec<String> = content.lines().map(str::to_string).collect();
    let first_line = lines
        .first_mut()
        .ok_or_else(|| anyhow::anyhow!("rollout at {} is empty", path.display()))?;
    let mut rollout_line: RolloutLine = serde_json::from_str(first_line)?;
    let RolloutItem::SessionMeta(mut session_meta_line) = rollout_line.item else {
        return Err(anyhow::anyhow!(
            "rollout at {} does not start with session metadata",
            path.display()
        ));
    };
    session_meta_line.meta.cwd = cwd.to_path_buf();
    rollout_line.item = RolloutItem::SessionMeta(session_meta_line);
    *first_line = serde_json::to_string(&rollout_line)?;
    fs::write(path, lines.join("\n") + "\n")?;
    Ok(())
}

#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
    let codex_home = TempDir::new()?;
@@ -323,63 +300,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
    Ok(())
}

#[tokio::test]
async fn thread_list_respects_cwd_filter() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

    let filtered_id = create_fake_rollout(
        codex_home.path(),
        "2025-01-02T10-00-00",
        "2025-01-02T10:00:00Z",
        "filtered",
        Some("mock_provider"),
        None,
    )?;
    let unfiltered_id = create_fake_rollout(
        codex_home.path(),
        "2025-01-02T11-00-00",
        "2025-01-02T11:00:00Z",
        "unfiltered",
        Some("mock_provider"),
        None,
    )?;

    let target_cwd = codex_home.path().join("target-cwd");
    fs::create_dir_all(&target_cwd)?;
    set_rollout_cwd(
        rollout_path(codex_home.path(), "2025-01-02T10-00-00", &filtered_id).as_path(),
        &target_cwd,
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;
    let request_id = mcp
        .send_thread_list_request(codex_app_server_protocol::ThreadListParams {
            cursor: None,
            limit: Some(10),
            sort_key: None,
            model_providers: Some(vec!["mock_provider".to_string()]),
            source_kinds: None,
            archived: None,
            cwd: Some(target_cwd.to_string_lossy().into_owned()),
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(resp)?;

    assert_eq!(next_cursor, None);
    assert_eq!(data.len(), 1);
    assert_eq!(data[0].id, filtered_id);
    assert_ne!(data[0].id, unfiltered_id);
    assert_eq!(data[0].cwd, target_cwd);

    Ok(())
}

#[tokio::test]
async fn thread_list_empty_source_kinds_defaults_to_interactive_only() -> Result<()> {
    let codex_home = TempDir::new()?;
@@ -1187,7 +1107,6 @@ async fn thread_list_invalid_cursor_returns_error() -> Result<()> {
            model_providers: Some(vec!["mock_provider".to_string()]),
            source_kinds: None,
            archived: None,
            cwd: None,
        })
        .await?;
    let error: JSONRPCError = timeout(

@@ -209,412 +209,18 @@ async fn thread_resume_without_overrides_does_not_change_updated_at_or_mtime() -
    Ok(())
}

#[tokio::test]
async fn thread_resume_keeps_in_flight_turn_streaming() -> Result<()> {
    let server = create_mock_responses_server_repeating_assistant("Done").await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let mut primary = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, primary.initialize()).await??;

    let start_id = primary
        .send_thread_start_request(ThreadStartParams {
            model: Some("gpt-5.1-codex-max".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;

    let seed_turn_id = primary
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "seed history".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(seed_turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/completed"),
    )
    .await??;
    primary.clear_message_buffer();

    let mut secondary = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, secondary.initialize()).await??;

    let turn_id = primary
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "respond with docs".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/started"),
    )
    .await??;

    let resume_id = secondary
        .send_thread_resume_request(ThreadResumeParams {
            thread_id: thread.id,
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        secondary.read_stream_until_response_message(RequestId::Integer(resume_id)),
    )
    .await??;

    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

    Ok(())
}

#[tokio::test]
async fn thread_resume_rejects_history_when_thread_is_running() -> Result<()> {
    let server = responses::start_mock_server().await;
    let first_body = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "Done"),
        responses::ev_completed("resp-1"),
    ]);
    let second_body = responses::sse(vec![responses::ev_response_created("resp-2")]);
    let _first_response_mock = responses::mount_sse_once(&server, first_body).await;
    let _second_response_mock = responses::mount_sse_once(&server, second_body).await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let mut primary = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, primary.initialize()).await??;

    let start_id = primary
        .send_thread_start_request(ThreadStartParams {
            model: Some("gpt-5.1-codex-max".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;

    let seed_turn_id = primary
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "seed history".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(seed_turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/completed"),
    )
    .await??;
    primary.clear_message_buffer();

    let running_turn_id = primary
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "keep running".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(running_turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/started"),
    )
    .await??;

    let resume_id = primary
        .send_thread_resume_request(ThreadResumeParams {
            thread_id: thread.id,
            history: Some(vec![ResponseItem::Message {
                id: None,
                role: "user".to_string(),
                content: vec![ContentItem::InputText {
                    text: "history override".to_string(),
                }],
                end_turn: None,
                phase: None,
            }]),
            ..Default::default()
        })
        .await?;
    let resume_err: JSONRPCError = timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_error_message(RequestId::Integer(resume_id)),
    )
    .await??;
    assert!(
        resume_err.error.message.contains("cannot resume thread")
            && resume_err.error.message.contains("with history")
            && resume_err.error.message.contains("running"),
        "unexpected resume error: {}",
        resume_err.error.message
    );

    Ok(())
}

#[tokio::test]
async fn thread_resume_rejects_mismatched_path_when_thread_is_running() -> Result<()> {
    let server = responses::start_mock_server().await;
    let first_body = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "Done"),
        responses::ev_completed("resp-1"),
    ]);
    let second_body = responses::sse(vec![responses::ev_response_created("resp-2")]);
    let _first_response_mock = responses::mount_sse_once(&server, first_body).await;
    let _second_response_mock = responses::mount_sse_once(&server, second_body).await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let mut primary = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, primary.initialize()).await??;

    let start_id = primary
        .send_thread_start_request(ThreadStartParams {
            model: Some("gpt-5.1-codex-max".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;

    let seed_turn_id = primary
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "seed history".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(seed_turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/completed"),
    )
    .await??;
    primary.clear_message_buffer();

    let running_turn_id = primary
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "keep running".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(running_turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/started"),
    )
    .await??;

    let resume_id = primary
        .send_thread_resume_request(ThreadResumeParams {
            thread_id: thread.id,
            path: Some(PathBuf::from("/tmp/does-not-match-running-rollout.jsonl")),
            ..Default::default()
        })
        .await?;
    let resume_err: JSONRPCError = timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_error_message(RequestId::Integer(resume_id)),
    )
    .await??;
    assert!(
        resume_err.error.message.contains("mismatched path"),
        "unexpected resume error: {}",
        resume_err.error.message
    );

    Ok(())
}

#[tokio::test]
async fn thread_resume_rejoins_running_thread_even_with_override_mismatch() -> Result<()> {
    let server = responses::start_mock_server().await;
    let first_body = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "Done"),
        responses::ev_completed("resp-1"),
    ]);
    let second_body = responses::sse(vec![responses::ev_response_created("resp-2")]);
    let _response_mock =
        responses::mount_sse_sequence(&server, vec![first_body, second_body]).await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let mut primary = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, primary.initialize()).await??;

    let start_id = primary
        .send_thread_start_request(ThreadStartParams {
            model: Some("gpt-5.1-codex-max".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;

    let seed_turn_id = primary
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "seed history".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(seed_turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/completed"),
    )
    .await??;
    primary.clear_message_buffer();

    let running_turn_id = primary
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "keep running".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(running_turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/started"),
    )
    .await??;

    let resume_id = primary
        .send_thread_resume_request(ThreadResumeParams {
            thread_id: thread.id.clone(),
            model: Some("not-the-running-model".to_string()),
            cwd: Some("/tmp".to_string()),
            ..Default::default()
        })
        .await?;
    let resume_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_response_message(RequestId::Integer(resume_id)),
    )
    .await??;
    let ThreadResumeResponse { model, .. } = to_response::<ThreadResumeResponse>(resume_resp)?;
    assert_eq!(model, "gpt-5.1-codex-max");

    timeout(
        DEFAULT_READ_TIMEOUT,
        primary.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

    Ok(())
}

#[tokio::test]
async fn thread_resume_with_overrides_defers_updated_at_until_turn_start() -> Result<()> {
    let server = create_mock_responses_server_repeating_assistant("Done").await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;
    let rollout = setup_rollout_fixture(codex_home.path(), &server.uri())?;

    let RestartedThreadFixture {
        mut mcp,
        thread_id,
        rollout_file_path,
    } = start_materialized_thread_and_restart(codex_home.path(), "materialize").await?;
    let expected_updated_at_rfc3339 = "2025-01-07T00:00:00Z";
    set_rollout_mtime(rollout_file_path.as_path(), expected_updated_at_rfc3339)?;
    let before_modified = std::fs::metadata(&rollout_file_path)?.modified()?;
    let expected_updated_at = chrono::DateTime::parse_from_rfc3339(expected_updated_at_rfc3339)?
        .with_timezone(&Utc)
        .timestamp();
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let resume_id = mcp
        .send_thread_resume_request(ThreadResumeParams {
            thread_id,
            thread_id: rollout.conversation_id.clone(),
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
@@ -624,19 +230,16 @@ async fn thread_resume_with_overrides_defers_updated_at_until_turn_start() -> Re
        mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
    )
    .await??;
    let ThreadResumeResponse {
        thread: resumed_thread,
        ..
    } = to_response::<ThreadResumeResponse>(resume_resp)?;
    let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(resume_resp)?;

    assert_eq!(resumed_thread.updated_at, expected_updated_at);
    assert_eq!(thread.updated_at, rollout.expected_updated_at);

    let after_resume_modified = std::fs::metadata(&rollout_file_path)?.modified()?;
    assert_eq!(after_resume_modified, before_modified);
    let after_resume_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
    assert_eq!(after_resume_modified, rollout.before_modified);

    let turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: resumed_thread.id,
            thread_id: rollout.conversation_id,
            input: vec![UserInput::Text {
                text: "Hello".to_string(),
                text_elements: Vec::new(),
@@ -655,8 +258,8 @@ async fn thread_resume_with_overrides_defers_updated_at_until_turn_start() -> Re
    )
    .await??;

    let after_turn_modified = std::fs::metadata(&rollout_file_path)?.modified()?;
    assert!(after_turn_modified > before_modified);
    let after_turn_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
    assert!(after_turn_modified > rollout.before_modified);

    Ok(())
}
@@ -771,9 +374,22 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let RestartedThreadFixture {
        mut mcp, thread_id, ..
    } = start_materialized_thread_and_restart(codex_home.path(), "seed history").await?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Start a thread.
    let start_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("gpt-5.1-codex-max".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;

    let history_text = "Hello from history";
    let history = vec![ResponseItem::Message {
@@ -789,7 +405,7 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
    // Resume with explicit history and override the model.
    let resume_id = mcp
        .send_thread_resume_request(ThreadResumeParams {
            thread_id,
            thread_id: thread.id,
            history: Some(history),
            model: Some("mock-model".to_string()),
            model_provider: Some("mock_provider".to_string()),
@@ -813,70 +429,6 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
    Ok(())
}

struct RestartedThreadFixture {
    mcp: McpProcess,
    thread_id: String,
    rollout_file_path: PathBuf,
}

async fn start_materialized_thread_and_restart(
    codex_home: &Path,
    seed_text: &str,
) -> Result<RestartedThreadFixture> {
    let mut first_mcp = McpProcess::new(codex_home).await?;
    timeout(DEFAULT_READ_TIMEOUT, first_mcp.initialize()).await??;

    let start_id = first_mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("gpt-5.1-codex-max".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        first_mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;

    let materialize_turn_id = first_mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: seed_text.to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        first_mcp.read_stream_until_response_message(RequestId::Integer(materialize_turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        first_mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

    let thread_id = thread.id;
    let rollout_file_path = thread
        .path
        .ok_or_else(|| anyhow::anyhow!("thread path missing from thread/start response"))?;
||||
|
||||
drop(first_mcp);
|
||||
|
||||
let mut second_mcp = McpProcess::new(codex_home).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, second_mcp.initialize()).await??;
|
||||
|
||||
Ok(RestartedThreadFixture {
|
||||
mcp: second_mcp,
|
||||
thread_id,
|
||||
rollout_file_path,
|
||||
})
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn thread_resume_accepts_personality_override() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
@@ -897,10 +449,10 @@ async fn thread_resume_accepts_personality_override() -> Result<()> {
|
||||
let codex_home = TempDir::new()?;
|
||||
create_config_toml(codex_home.path(), &server.uri())?;
|
||||
|
||||
let mut primary = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, primary.initialize()).await??;
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let start_id = primary
|
||||
let start_id = mcp
|
||||
.send_thread_start_request(ThreadStartParams {
|
||||
model: Some("gpt-5.2-codex".to_string()),
|
||||
..Default::default()
|
||||
@@ -908,12 +460,12 @@ async fn thread_resume_accepts_personality_override() -> Result<()> {
|
||||
.await?;
|
||||
let start_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
primary.read_stream_until_response_message(RequestId::Integer(start_id)),
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
|
||||
|
||||
let materialize_id = primary
|
||||
let materialize_id = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![UserInput::Text {
|
||||
@@ -925,19 +477,16 @@ async fn thread_resume_accepts_personality_override() -> Result<()> {
|
||||
.await?;
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
primary.read_stream_until_response_message(RequestId::Integer(materialize_id)),
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(materialize_id)),
|
||||
)
|
||||
.await??;
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
primary.read_stream_until_notification_message("turn/completed"),
|
||||
mcp.read_stream_until_notification_message("turn/completed"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
let mut secondary = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, secondary.initialize()).await??;
|
||||
|
||||
let resume_id = secondary
|
||||
let resume_id = mcp
|
||||
.send_thread_resume_request(ThreadResumeParams {
|
||||
thread_id: thread.id,
|
||||
model: Some("gpt-5.2-codex".to_string()),
|
||||
@@ -947,12 +496,12 @@ async fn thread_resume_accepts_personality_override() -> Result<()> {
|
||||
.await?;
|
||||
let resume_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
secondary.read_stream_until_response_message(RequestId::Integer(resume_id)),
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
|
||||
)
|
||||
.await??;
|
||||
let resume: ThreadResumeResponse = to_response::<ThreadResumeResponse>(resume_resp)?;
|
||||
|
||||
let turn_id = secondary
|
||||
let turn_id = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: resume.thread.id,
|
||||
input: vec![UserInput::Text {
|
||||
@@ -964,13 +513,13 @@ async fn thread_resume_accepts_personality_override() -> Result<()> {
|
||||
.await?;
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
secondary.read_stream_until_response_message(RequestId::Integer(turn_id)),
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
|
||||
)
|
||||
.await??;
|
||||
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
secondary.read_stream_until_notification_message("turn/completed"),
|
||||
mcp.read_stream_until_notification_message("turn/completed"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
|
||||
@@ -12,8 +12,6 @@ const LINUX_SANDBOX_ARG0: &str = "codex-linux-sandbox";
const APPLY_PATCH_ARG0: &str = "apply_patch";
const MISSPELLED_APPLY_PATCH_ARG0: &str = "applypatch";
const LOCK_FILENAME: &str = ".lock";
#[cfg(target_os = "windows")]
const WINDOWS_TOKIO_WORKER_STACK_SIZE_BYTES: usize = 16 * 1024 * 1024;

/// Keeps the per-session PATH entry alive and locked for the process lifetime.
pub struct Arg0PathEntryGuard {
@@ -114,7 +112,7 @@ where

    // Regular invocation – create a Tokio runtime and execute the provided
    // async entry-point.
    let runtime = build_runtime()?;
    let runtime = tokio::runtime::Runtime::new()?;
    runtime.block_on(async move {
        let codex_linux_sandbox_exe: Option<PathBuf> = if cfg!(target_os = "linux") {
            std::env::current_exe().ok()
@@ -126,18 +124,6 @@ where
    })
}

fn build_runtime() -> anyhow::Result<tokio::runtime::Runtime> {
    let mut builder = tokio::runtime::Builder::new_multi_thread();
    builder.enable_all();
    #[cfg(target_os = "windows")]
    {
        // Defensive hardening: Windows worker threads have lower effective
        // stack headroom, so use a larger stack for runtime workers.
        builder.thread_stack_size(WINDOWS_TOKIO_WORKER_STACK_SIZE_BYTES);
    }
    Ok(builder.build()?)
}

const ILLEGAL_ENV_VAR_PREFIX: &str = "CODEX_";

/// Load env vars from ~/.codex/.env.

@@ -1,5 +1,4 @@
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::LazyLock;
use std::sync::Mutex as StdMutex;

@@ -20,9 +19,7 @@ pub use codex_core::connectors::connector_display_label;
use codex_core::connectors::connector_install_url;
pub use codex_core::connectors::list_accessible_connectors_from_mcp_tools;
pub use codex_core::connectors::list_accessible_connectors_from_mcp_tools_with_options;
pub use codex_core::connectors::list_cached_accessible_connectors_from_mcp_tools;
use codex_core::connectors::merge_connectors;
pub use codex_core::connectors::with_app_enabled_state;

#[derive(Debug, Deserialize)]
struct DirectoryListResponse {
@@ -75,32 +72,13 @@ pub async fn list_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>> {
    );
    let connectors = connectors_result?;
    let accessible = accessible_result?;
    Ok(with_app_enabled_state(
        merge_connectors_with_accessible(connectors, accessible, true),
        config,
    ))
    Ok(merge_connectors_with_accessible(connectors, accessible))
}

pub async fn list_all_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>> {
    list_all_connectors_with_options(config, false).await
}

pub async fn list_cached_all_connectors(config: &Config) -> Option<Vec<AppInfo>> {
    if !config.features.enabled(Feature::Apps) {
        return Some(Vec::new());
    }

    if init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode)
        .await
        .is_err()
    {
        return None;
    }
    let token_data = get_chatgpt_token_data()?;
    let cache_key = all_connectors_cache_key(config, &token_data);
    read_cached_all_connectors(&cache_key)
}

pub async fn list_all_connectors_with_options(
    config: &Config,
    force_refetch: bool,
@@ -186,20 +164,7 @@ fn write_cached_all_connectors(cache_key: AllConnectorsCacheKey, connectors: &[A
pub fn merge_connectors_with_accessible(
    connectors: Vec<AppInfo>,
    accessible_connectors: Vec<AppInfo>,
    all_connectors_loaded: bool,
) -> Vec<AppInfo> {
    let accessible_connectors = if all_connectors_loaded {
        let connector_ids: HashSet<&str> = connectors
            .iter()
            .map(|connector| connector.id.as_str())
            .collect();
        accessible_connectors
            .into_iter()
            .filter(|connector| connector_ids.contains(connector.id.as_str()))
            .collect()
    } else {
        accessible_connectors
    };
    let merged = merge_connectors(connectors, accessible_connectors);
    filter_disallowed_connectors(merged)
}
@@ -318,7 +283,6 @@ fn directory_app_to_app_info(app: DirectoryApp) -> AppInfo {
        distribution_channel: app.distribution_channel,
        install_url: None,
        is_accessible: false,
        is_enabled: true,
    }
}

@@ -377,7 +341,6 @@ mod tests {
            distribution_channel: None,
            install_url: None,
            is_accessible: false,
            is_enabled: true,
        }
    }

@@ -420,41 +383,4 @@ mod tests {
        ]);
        assert_eq!(filtered, vec![app("delta")]);
    }

    fn merged_app(id: &str, is_accessible: bool) -> AppInfo {
        AppInfo {
            id: id.to_string(),
            name: id.to_string(),
            description: None,
            logo_url: None,
            logo_url_dark: None,
            distribution_channel: None,
            install_url: Some(connector_install_url(id, id)),
            is_accessible,
            is_enabled: true,
        }
    }

    #[test]
    fn excludes_accessible_connectors_not_in_all_when_all_loaded() {
        let merged = merge_connectors_with_accessible(
            vec![app("alpha")],
            vec![app("alpha"), app("beta")],
            true,
        );
        assert_eq!(merged, vec![merged_app("alpha", true)]);
    }

    #[test]
    fn keeps_accessible_connectors_not_in_all_while_all_loading() {
        let merged = merge_connectors_with_accessible(
            vec![app("alpha")],
            vec![app("alpha"), app("beta")],
            false,
        );
        assert_eq!(
            merged,
            vec![merged_app("alpha", true), merged_app("beta", true)]
        );
    }
}

@@ -130,7 +130,7 @@ async fn run_command_under_sandbox(
    let sandbox_policy_cwd = cwd.clone();

    let stdio_policy = StdioPolicy::Inherit;
    let env = create_env(&config.permissions.shell_environment_policy, None);
    let env = create_env(&config.shell_environment_policy, None);

    // Special-case Windows sandbox: execute and exit the process to emulate inherited stdio.
    if let SandboxType::Windows = sandbox_type {
@@ -141,7 +141,7 @@ async fn run_command_under_sandbox(
        use codex_windows_sandbox::run_windows_sandbox_capture;
        use codex_windows_sandbox::run_windows_sandbox_capture_elevated;

        let policy_str = serde_json::to_string(config.permissions.sandbox_policy.get())?;
        let policy_str = serde_json::to_string(config.sandbox_policy.get())?;

        let sandbox_cwd = sandbox_policy_cwd.clone();
        let cwd_clone = cwd.clone();
@@ -213,19 +213,12 @@ async fn run_command_under_sandbox(
    #[cfg(not(target_os = "macos"))]
    let _ = log_denials;

    let managed_network_requirements_enabled = config.managed_network_requirements_enabled();

    // This proxy should only live for the lifetime of the child process.
    let network_proxy = match config.permissions.network.as_ref() {
    let network_proxy = match config.network.as_ref() {
        Some(spec) => Some(
            spec.start_proxy(
                config.permissions.sandbox_policy.get(),
                None,
                None,
                managed_network_requirements_enabled,
            )
            .await
            .map_err(|err| anyhow::anyhow!("failed to start managed network proxy: {err}"))?,
            spec.start_proxy()
                .await
                .map_err(|err| anyhow::anyhow!("failed to start managed network proxy: {err}"))?,
        ),
        None => None,
    };
@@ -239,7 +232,7 @@ async fn run_command_under_sandbox(
            spawn_command_under_seatbelt(
                command,
                cwd,
                config.permissions.sandbox_policy.get(),
                config.sandbox_policy.get(),
                sandbox_policy_cwd.as_path(),
                stdio_policy,
                network.as_ref(),
@@ -258,7 +251,7 @@ async fn run_command_under_sandbox(
                codex_linux_sandbox_exe,
                command,
                cwd,
                config.permissions.sandbox_policy.get(),
                config.sandbox_policy.get(),
                sandbox_policy_cwd.as_path(),
                use_bwrap_sandbox,
                stdio_policy,

@@ -93,10 +93,10 @@ enum Subcommand {
    /// Remove stored authentication credentials.
    Logout(LogoutCommand),

    /// Manage external MCP servers for Codex.
    /// [experimental] Run Codex as an MCP server and manage MCP servers.
    Mcp(McpCli),

    /// Start Codex as an MCP server (stdio).
    /// [experimental] Run the Codex MCP server (stdio transport).
    McpServer,

    /// [experimental] Run the app server or related tooling.

@@ -17,9 +17,6 @@ pub(crate) fn subagent_header(source: &Option<SessionSource>) -> Option<String>
    match sub {
        codex_protocol::protocol::SubAgentSource::Review => Some("review".to_string()),
        codex_protocol::protocol::SubAgentSource::Compact => Some("compact".to_string()),
        codex_protocol::protocol::SubAgentSource::MemoryConsolidation => {
            Some("memory_consolidation".to_string())
        }
        codex_protocol::protocol::SubAgentSource::ThreadSpawn { .. } => {
            Some("collab_spawn".to_string())
        }

@@ -89,7 +89,6 @@ async fn models_client_hits_models_endpoint() {
            experimental_supported_tools: Vec::new(),
            input_modalities: default_input_modalities(),
            prefer_websockets: false,
            used_fallback_model_metadata: false,
        }],
    };

@@ -152,7 +152,6 @@ codex-utils-cargo-bin = { workspace = true }
core_test_support = { workspace = true }
ctor = { workspace = true }
image = { workspace = true, features = ["jpeg", "png"] }
insta = { workspace = true }
maplit = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }

@@ -14,30 +14,6 @@ When using the workspace-write sandbox policy, the Seatbelt profile allows
writes under the configured writable roots while keeping `.git` (directory or
pointer file), the resolved `gitdir:` target, and `.codex` read-only.

Network access and filesystem read/write roots are controlled by
`SandboxPolicy`. Seatbelt consumes the resolved policy and enforces it.

Seatbelt also supports macOS permission-profile extensions layered on top of
`SandboxPolicy` (a combined sketch follows this list):

- no extension profile provided:
  keeps legacy default preferences read access (`user-preference-read`).
- extension profile provided with no `macos_preferences` grant:
  does not add preferences access clauses.
- `macos_preferences = "readonly"`:
  enables cfprefs read clauses and `user-preference-read`.
- `macos_preferences = "readwrite"`:
  includes readonly clauses plus `user-preference-write` and cfprefs shm write
  clauses.
- `macos_automation = true`:
  enables broad Apple Events send permissions.
- `macos_automation = ["com.apple.Notes", ...]`:
  enables Apple Events send only to listed bundle IDs.
- `macos_accessibility = true`:
  enables `com.apple.axserver` mach lookup.
- `macos_calendar = true`:
  enables `com.apple.CalendarAgent` mach lookup.

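Taken together, these grants combine into a single extension profile. A minimal
TOML sketch of such a profile (the enclosing `[sandbox_permissions]` table name
is an assumption for illustration; only the keys come from the list above):

```toml
# Hypothetical extension profile combining the grants described above.
[sandbox_permissions]
macos_preferences = "readonly"           # cfprefs read clauses + user-preference-read
macos_automation = ["com.apple.Notes"]   # Apple Events send only to listed bundle IDs
macos_accessibility = true               # com.apple.axserver mach lookup
macos_calendar = false                   # no com.apple.CalendarAgent mach lookup
```
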
### Linux

Expects the binary containing `codex-core` to run the equivalent of `codex sandbox linux` (legacy alias: `codex debug landlock`) when `arg0` is `codex-linux-sandbox`. See the `codex-arg0` crate for details.

@@ -100,7 +100,7 @@
          "type": "string"
        },
        {
          "description": "DEPRECATED: *All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox. Prefer `OnRequest` for interactive runs or `Never` for non-interactive runs.",
          "description": "*All* commands are auto‑approved, but they are expected to run inside a sandbox where network access is disabled and writes are confined to a specific set of paths. If the command fails, it will be escalated to the user to approve execution without a sandbox.",
          "enum": [
            "on-failure"
          ],
@@ -188,9 +188,6 @@
        "apps": {
          "type": "boolean"
        },
        "apps_mcp_gateway": {
          "type": "boolean"
        },
        "child_agents_md": {
          "type": "boolean"
        },
@@ -227,24 +224,15 @@
        "js_repl": {
          "type": "boolean"
        },
        "js_repl_tools_only": {
          "type": "boolean"
        },
        "memory_tool": {
          "type": "boolean"
        },
        "multi_agent": {
          "type": "boolean"
        },
        "personality": {
          "type": "boolean"
        },
        "powershell_utf8": {
          "type": "boolean"
        },
        "prevent_idle_sleep": {
          "type": "boolean"
        },
        "remote_models": {
          "type": "boolean"
        },
@@ -440,43 +428,6 @@
      }
    ]
  },
  "MemoriesToml": {
    "additionalProperties": false,
    "description": "Memories settings loaded from config.toml.",
    "properties": {
      "max_raw_memories_for_global": {
        "description": "Maximum number of recent raw memories retained for global consolidation.",
        "format": "uint",
        "minimum": 0.0,
        "type": "integer"
      },
      "max_rollout_age_days": {
        "description": "Maximum age of the threads used for memories.",
        "format": "int64",
        "type": "integer"
      },
      "max_rollouts_per_startup": {
        "description": "Maximum number of rollout candidates processed per pass.",
        "format": "uint",
        "minimum": 0.0,
        "type": "integer"
      },
      "min_rollout_idle_hours": {
        "description": "Minimum idle time between last thread activity and memory creation (hours). > 12h recommended.",
        "format": "int64",
        "type": "integer"
      },
      "phase_1_model": {
        "description": "Model used for thread summarisation.",
        "type": "string"
      },
      "phase_2_model": {
        "description": "Model used for memory consolidation.",
        "type": "string"
      }
    },
    "type": "object"
  },
  "ModeKind": {
    "description": "Initial collaboration mode to use when the TUI starts.",
    "enum": [
@@ -1321,9 +1272,6 @@
        "apps": {
          "type": "boolean"
        },
        "apps_mcp_gateway": {
          "type": "boolean"
        },
        "child_agents_md": {
          "type": "boolean"
        },
@@ -1360,24 +1308,15 @@
        "js_repl": {
          "type": "boolean"
        },
        "js_repl_tools_only": {
          "type": "boolean"
        },
        "memory_tool": {
          "type": "boolean"
        },
        "multi_agent": {
          "type": "boolean"
        },
        "personality": {
          "type": "boolean"
        },
        "powershell_utf8": {
          "type": "boolean"
        },
        "prevent_idle_sleep": {
          "type": "boolean"
        },
        "remote_models": {
          "type": "boolean"
        },
@@ -1530,14 +1469,6 @@
    "description": "Definition for MCP servers that Codex can reach out to for tool calls.",
    "type": "object"
  },
  "memories": {
    "allOf": [
      {
        "$ref": "#/definitions/MemoriesToml"
      }
    ],
    "description": "Memories subsystem settings."
  },
  "model": {
    "description": "Optional override of model selection.",
    "type": "string"

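For reference, the removed `MemoriesToml` definition corresponds to a
`[memories]` table in `config.toml`. A sketch matching the field names and
types above (all values are illustrative, not documented defaults):

```toml
[memories]
max_raw_memories_for_global = 200   # recent raw memories kept for global consolidation
max_rollout_age_days = 30           # skip threads older than this
max_rollouts_per_startup = 10       # rollout candidates processed per pass
min_rollout_idle_hours = 24         # the schema notes > 12h is recommended
phase_1_model = "gpt-5.1-codex-mini"  # thread summarisation (model choice is illustrative)
phase_2_model = "gpt-5.1-codex-mini"  # memory consolidation (model choice is illustrative)
```
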
@@ -1,2 +0,0 @@
model = "gpt-5.1-codex-mini"
model_reasoning_effort = "medium"
@@ -1,26 +0,0 @@
version = 1

[agents.default]
description = "Default agent."

[agents.worker]
description = """Use for execution and production work.
Typical tasks:
- Implement part of a feature
- Fix tests or bugs
- Split large refactors into independent chunks
Rules:
- Explicitly assign **ownership** of the task (files / responsibility).
- Always tell workers they are **not alone in the codebase**, and they should ignore edits made by others without touching them."""

[agents.explorer]
description = """Use `explorer` for all codebase questions.
Explorers are fast and authoritative.
Always prefer them over manual search or file reading.
Rules:
- Ask explorers first and precisely.
- Do not re-read or re-search code they cover.
- Trust explorer results without verification.
- Run explorers in parallel when useful.
- Reuse existing explorers for related questions."""
config_file = "explorer.toml"
@@ -50,7 +50,7 @@ impl AgentControl {
        let new_thread = match session_source {
            Some(session_source) => {
                state
                    .spawn_new_thread_with_source(config, self.clone(), session_source, false)
                    .spawn_new_thread_with_source(config, self.clone(), session_source)
                    .await?
            }
            None => state.spawn_new_thread(config, self.clone()).await?,

@@ -8,4 +8,5 @@ pub(crate) use control::AgentControl;
pub(crate) use guards::MAX_THREAD_SPAWN_DEPTH;
pub(crate) use guards::exceeds_thread_spawn_depth_limit;
pub(crate) use guards::next_thread_spawn_depth;
pub(crate) use role::AgentRole;
pub(crate) use status::agent_status_from_event;

@@ -1,656 +1,131 @@
use crate::config::Config;
use crate::config::ConfigOverrides;
use crate::config::deserialize_config_toml_with_base;
use crate::config::find_codex_home;
use crate::config_loader::ConfigLayerEntry;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigLayerStackOrdering;
use codex_app_server_protocol::ConfigLayerSource;
use crate::protocol::SandboxPolicy;
use codex_protocol::openai_models::ReasoningEffort;
use serde::Deserialize;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::path::Path;
use std::path::PathBuf;
use std::sync::LazyLock;
use toml::Value as TomlValue;
use serde::Serialize;

const BUILT_IN_AGENTS_CONFIG: &str = include_str!("builtins_agents_config.toml");
const BUILT_IN_EXPLORER_CONFIG: &str = include_str!("builtins/explorer.toml");
/// Base instructions for the orchestrator role.
const ORCHESTRATOR_PROMPT: &str = include_str!("../../templates/agents/orchestrator.md");
/// Default model override used.
// TODO(jif) update when we have something smarter.
const EXPLORER_MODEL: &str = "gpt-5.1-codex-mini";

const AGENTS_CONFIG_FILENAME: &str = "agents_config.toml";
const AGENTS_CONFIG_SCHEMA_VERSION: u32 = 1;
const DEFAULT_ROLE_NAME: &str = "default";
const AGENT_TYPE_UNAVAILABLE_ERROR: &str = "agent type is currently not available";
/// Enumerated list of all supported agent roles.
const ALL_ROLES: [AgentRole; 3] = [
    AgentRole::Default,
    AgentRole::Explorer,
    AgentRole::Worker,
    // TODO(jif) add when we have stable prompts + models
    // AgentRole::Orchestrator,
];

#[derive(Debug, Clone, Default, Deserialize)]
#[serde(deny_unknown_fields)]
struct AgentsConfigToml {
    version: Option<u32>,
    #[serde(default)]
    agents: BTreeMap<String, AgentDeclarationToml>,
/// Hard-coded agent role selection used when spawning sub-agents.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AgentRole {
    /// Inherit the parent agent's configuration unchanged.
    Default,
    /// Coordination-only agent that delegates to workers.
    Orchestrator,
    /// Task-executing agent with a fixed model override.
    Worker,
    /// Task-executing agent with a fixed model override.
    Explorer,
}

#[derive(Debug, Clone, Default, Deserialize)]
#[serde(deny_unknown_fields)]
struct AgentDeclarationToml {
    /// Human-facing role documentation used in spawn tool guidance.
    description: Option<String>,
    /// Path to a role-specific config layer.
    config_file: Option<PathBuf>,
/// Immutable profile data that drives per-agent configuration overrides.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct AgentProfile {
    /// Optional base instructions override.
    pub base_instructions: Option<&'static str>,
    /// Optional model override.
    pub model: Option<&'static str>,
    /// Optional reasoning effort override.
    pub reasoning_effort: Option<ReasoningEffort>,
    /// Whether to force a read-only sandbox policy.
    pub read_only: bool,
    /// Description to include in the tool specs.
    pub description: &'static str,
}

/// Applies a role config layer to a mutable config and preserves unspecified keys.
pub(crate) async fn apply_role_to_config(
    config: &mut Config,
    role_name: Option<&str>,
) -> Result<(), String> {
    let role_name = role_name.unwrap_or(DEFAULT_ROLE_NAME);
    let built_in_agents_config = built_in::configs();
    let user_agents_config =
        user_defined::config(config.codex_home.as_path()).unwrap_or_else(|err| {
            tracing::warn!(
                agent_type = role_name,
                error = %err,
                "failed to load user-defined agents config; falling back to built-in roles"
            );
            AgentsConfigToml::default()
        });

    let agent_config = if let Some(role) = user_agents_config.agents.get(role_name) {
        if let Some(config_file) = &role.config_file {
            let content = tokio::fs::read_to_string(config_file)
                .await
                .map_err(|err| {
                    tracing::warn!("failed to read user-defined role config_file: {err:?}");
                    AGENT_TYPE_UNAVAILABLE_ERROR.to_string()
                })?;
            let parsed: TomlValue = toml::from_str(&content).map_err(|err| {
                tracing::warn!("failed to read user-defined role config_file: {err:?}");
                AGENT_TYPE_UNAVAILABLE_ERROR.to_string()
            })?;
            Some(parsed)
        } else {
            None
        }
    } else if let Some(role) = built_in_agents_config.agents.get(role_name) {
        if let Some(config_file) = &role.config_file {
            let content = built_in::config_file(config_file).ok_or_else(|| {
                tracing::warn!("failed to read built-in role config_file.");
                AGENT_TYPE_UNAVAILABLE_ERROR.to_string()
            })?;
            let parsed: TomlValue = toml::from_str(content).map_err(|err| {
                tracing::warn!("failed to read built-in role config_file: {err:?}");
                AGENT_TYPE_UNAVAILABLE_ERROR.to_string()
            })?;
            Some(parsed)
        } else {
            None
        }
    } else {
        return Err(format!("unknown agent_type '{role_name}'"));
    };

    let Some(agent_config) = agent_config else {
        return Ok(());
    };

    let original = config.clone();
    let original_stack = &original.config_layer_stack;
    let mut layers = original
        .config_layer_stack
        .get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, true)
        .into_iter()
        .cloned()
        .collect::<Vec<_>>();

    let role_layer = ConfigLayerEntry::new(ConfigLayerSource::SessionFlags, agent_config);
    let role_layer_precedence = role_layer.name.precedence();
    let role_layer_index =
        layers.partition_point(|layer| layer.name.precedence() <= role_layer_precedence);
    layers.insert(role_layer_index, role_layer);
    let layered_stack = ConfigLayerStack::new(
        layers,
        original_stack.requirements().clone(),
        original_stack.requirements_toml().clone(),
    )
    .map_err(|err| {
        tracing::warn!(
            agent_type = role_name,
            error = %err,
            "failed to build layered config stack for role"
        );
        AGENT_TYPE_UNAVAILABLE_ERROR.to_string()
    })?;
    let layered_config =
        deserialize_config_toml_with_base(layered_stack.effective_config(), &original.codex_home)
            .map_err(|err| {
                tracing::warn!(
                    agent_type = role_name,
                    error = %err,
                    "failed to deserialize layered config for role"
                );
                AGENT_TYPE_UNAVAILABLE_ERROR.to_string()
            })?;

    *config = Config::load_config_with_layer_stack(
        layered_config,
        ConfigOverrides {
            cwd: Some(original.cwd.clone()),
            codex_linux_sandbox_exe: original.codex_linux_sandbox_exe.clone(),
            ..Default::default()
        },
        original.codex_home.clone(),
        layered_stack,
    )
    .map_err(|err| {
        tracing::warn!(
            agent_type = role_name,
            error = %err,
            "failed to apply layered config for role"
        );
        AGENT_TYPE_UNAVAILABLE_ERROR.to_string()
    })?;
    Ok(())
}

pub(crate) mod spawn_tool_spec {
    use super::*;

    /// Builds the spawn-agent tool description text from built-in and configured roles.
    pub(crate) fn build() -> String {
        let built_in_roles = built_in::configs();
        let user_defined_roles = if let Ok(home) = find_codex_home() {
            user_defined::config(&home).unwrap_or_default()
        } else {
            Default::default()
        };

        build_from_configs(built_in_roles, &user_defined_roles)
    }

    fn build_from_configs(
        built_in_roles: &AgentsConfigToml,
        user_defined_roles: &AgentsConfigToml,
    ) -> String {
        let mut seen = BTreeSet::new();
        let mut formatted_roles = Vec::new();
        for (name, declaration) in &user_defined_roles.agents {
            if seen.insert(name.as_str()) {
                formatted_roles.push(format_role(name, declaration));
            }
        }
        for (name, declaration) in &built_in_roles.agents {
            if seen.insert(name.as_str()) {
                formatted_roles.push(format_role(name, declaration));
            }
        }

        format!(
            r#"Optional type name for the new agent. If omitted, `{DEFAULT_ROLE_NAME}` is used.
Available roles:
{}
"#,
            formatted_roles.join("\n"),
        )
    }

    fn format_role(name: &str, declaration: &AgentDeclarationToml) -> String {
        if let Some(description) = &declaration.description {
            format!("{name}: {{\n{description}\n}}")
        } else {
            format!("{name}: no description")
        }
    }

    #[cfg(test)]
    pub(super) fn build_for_test(
        built_in_roles: &AgentsConfigToml,
        user_defined_roles: &AgentsConfigToml,
    ) -> String {
        build_from_configs(built_in_roles, user_defined_roles)
    }
}

mod built_in {
    use super::*;

    /// Returns the cached built-in role declarations parsed from
    /// `builtins_agents_config.toml`.
    ///
    /// Panics are safe because of the [`tests::built_in_config`] test.
    pub(super) fn configs() -> &'static AgentsConfigToml {
        static CONFIG: LazyLock<AgentsConfigToml> = LazyLock::new(|| {
            let parsed =
                parse_agents_config(BUILT_IN_AGENTS_CONFIG, "embedded built-in agents config")
                    .unwrap_or_else(|err| panic!("invalid embedded built-in agents config: {err}"));
            validate_config(&parsed)
                .unwrap_or_else(|err| panic!("invalid built-in role declarations: {err}"));
            parsed
        });
        &CONFIG
    }

    /// Validates metadata rules for built-in role declarations.
    fn validate_config(agents_config: &AgentsConfigToml) -> Result<(), String> {
        if !agents_config.agents.contains_key(DEFAULT_ROLE_NAME) {
            return Err(format!(
                "built-ins must include the '{DEFAULT_ROLE_NAME}' role"
            ));
        }

        let unknown_embedded_config_files = agents_config
            .agents
impl AgentRole {
    /// Returns the string values used by JSON schema enums.
    pub fn enum_values() -> Vec<String> {
        ALL_ROLES
            .iter()
            .filter_map(|(name, role)| {
                role.config_file
                    .as_deref()
                    .filter(|cf| config_file(cf).is_none())
                    .map(|_| name.clone())
            .filter_map(|role| {
                let description = role.profile().description;
                serde_json::to_string(role)
                    .map(|role| {
                        let description = if !description.is_empty() {
                            format!(r#", "description": {description}"#)
                        } else {
                            String::new()
                        };
                        format!(r#"{{ "name": {role}{description}}}"#)
                    })
                    .ok()
            })
            .collect::<Vec<_>>();
            .collect()
    }

        if !unknown_embedded_config_files.is_empty() {
            return Err(format!(
                "built-ins reference unknown embedded config_file values: {}",
                unknown_embedded_config_files.join(", ")
            ));
    /// Returns the hard-coded profile for this role.
    pub fn profile(self) -> AgentProfile {
        match self {
            AgentRole::Default => AgentProfile::default(),
            AgentRole::Orchestrator => AgentProfile {
                base_instructions: Some(ORCHESTRATOR_PROMPT),
                ..Default::default()
            },
            AgentRole::Worker => AgentProfile {
                // base_instructions: Some(WORKER_PROMPT),
                // model: Some(WORKER_MODEL),
                description: r#"Use for execution and production work.
Typical tasks:
- Implement part of a feature
- Fix tests or bugs
- Split large refactors into independent chunks
Rules:
- Explicitly assign **ownership** of the task (files / responsibility).
- Always tell workers they are **not alone in the codebase**, and they should ignore edits made by others without touching them"#,
                ..Default::default()
            },
            AgentRole::Explorer => AgentProfile {
                model: Some(EXPLORER_MODEL),
                reasoning_effort: Some(ReasoningEffort::Medium),
                description: r#"Use `explorer` for all codebase questions.
Explorers are fast and authoritative.
Always prefer them over manual search or file reading.
Rules:
- Ask explorers first and precisely.
- Do not re-read or re-search code they cover.
- Trust explorer results without verification.
- Run explorers in parallel when useful.
- Reuse existing explorers for related questions.
"#,
                ..Default::default()
            },
        }
    }

    /// Applies this role's profile onto the provided config.
    pub fn apply_to_config(self, config: &mut Config) -> Result<(), String> {
        let profile = self.profile();
        if let Some(base_instructions) = profile.base_instructions {
            config.base_instructions = Some(base_instructions.to_string());
        }
        if let Some(model) = profile.model {
            config.model = Some(model.to_string());
        }
        if let Some(reasoning_effort) = profile.reasoning_effort {
            config.model_reasoning_effort = Some(reasoning_effort)
        }
        if profile.read_only {
            config
                .sandbox_policy
                .set(SandboxPolicy::new_read_only_policy())
                .map_err(|err| format!("sandbox_policy is invalid: {err}"))?;
        }
        Ok(())
    }

    /// Resolves a built-in role `config_file` path to embedded content.
    pub(super) fn config_file(path: &Path) -> Option<&'static str> {
        match path.to_str()? {
            "explorer.toml" => Some(BUILT_IN_EXPLORER_CONFIG),
            _ => None,
        }
    }
}

mod user_defined {
    use super::*;

    /// Loads and parses `agents_config.toml` from `codex_home`.
    pub(super) fn config(codex_home: &Path) -> Result<AgentsConfigToml, String> {
        let config_path = codex_home.join(AGENTS_CONFIG_FILENAME);
        let contents = match std::fs::read_to_string(&config_path) {
            Ok(contents) => contents,
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
                return Ok(AgentsConfigToml::default());
            }
            Err(err) => {
                return Err(format!("failed to read '{}': {err}", config_path.display()));
            }
        };

        let mut parsed = parse_agents_config(&contents, &config_path.display().to_string())?;
        let config_dir = config_path.parent().ok_or_else(|| {
            format!(
                "failed to resolve parent directory for '{}'",
                config_path.display()
            )
        })?;
        for role in parsed.agents.values_mut() {
            if let Some(config_file) = role.config_file.as_mut()
                && config_file.is_relative()
            {
                *config_file = config_dir.join(&*config_file);
            }
        }
        Ok(parsed)
    }
}

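In practice, the loader above means a user can declare custom roles in
`agents_config.toml` under the Codex home directory, with relative
`config_file` paths resolved against the directory containing that file. A
minimal sketch (paths and values are illustrative; the shape mirrors the tests
below):

```toml
version = 1

[agents.planner]
description = "Planning-focused role."
# Resolved relative to the directory holding agents_config.toml.
config_file = "agents/planner.toml"
```
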
fn parse_agents_config(contents: &str, source: &str) -> Result<AgentsConfigToml, String> {
|
||||
let parsed: AgentsConfigToml =
|
||||
toml::from_str(contents).map_err(|err| format!("failed to parse '{source}': {err}"))?;
|
||||
if let Some(version) = parsed.version
|
||||
&& version != AGENTS_CONFIG_SCHEMA_VERSION
|
||||
{
|
||||
return Err(format!(
|
||||
"'{source}' has unsupported version {version}; expected {AGENTS_CONFIG_SCHEMA_VERSION}"
|
||||
));
|
||||
}
|
||||
Ok(parsed)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::config::test_config;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use pretty_assertions::assert_eq;
|
||||
use tempfile::TempDir;
|
||||
|
||||
#[test]
|
||||
fn built_in_config() {
|
||||
// Validate the loading of the built-in configs without panics.
|
||||
let _ = built_in::configs();
|
||||
}
|
||||
|
||||
/// Writes `agents_config.toml` into the temporary directory.
|
||||
fn write_agents_config(dir: &TempDir, body: &str) {
|
||||
std::fs::write(dir.path().join(AGENTS_CONFIG_FILENAME), body).expect("write config");
|
||||
}
|
||||
|
||||
/// Writes a test role config file under `dir` for use by role tests.
|
||||
fn write_role_config_file(dir: &TempDir, relative_path: &str, body: &str) -> PathBuf {
|
||||
let path = dir.path().join(relative_path);
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent).expect("create role config parent");
|
||||
}
|
||||
std::fs::write(&path, body).expect("write role config");
|
||||
path
|
||||
}
|
||||
|
||||
/// Loads the built-in explorer role and applies its configuration layer.
|
||||
#[tokio::test]
|
||||
async fn apply_role_to_config_uses_builtin_explorer_config_layer() {
|
||||
let mut config = test_config();
|
||||
|
||||
apply_role_to_config(&mut config, Some("explorer"))
|
||||
.await
|
||||
.expect("apply explorer role");
|
||||
|
||||
assert_eq!(config.model, Some("gpt-5.1-codex-mini".to_string()));
|
||||
assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::Medium));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn apply_role_to_config_falls_back_to_builtins_when_user_config_is_invalid() {
|
||||
let dir = TempDir::new().expect("tempdir");
|
||||
write_agents_config(
|
||||
&dir,
|
||||
r#"
|
||||
[agents.explorer
|
||||
description = "broken"
|
||||
"#,
|
||||
);
|
||||
|
||||
let mut config = test_config();
|
||||
config.codex_home = dir.path().to_path_buf();
|
||||
|
||||
apply_role_to_config(&mut config, Some("explorer"))
|
||||
.await
|
||||
.expect("apply explorer role");
|
||||
|
||||
assert_eq!(config.model, Some("gpt-5.1-codex-mini".to_string()));
|
||||
assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::Medium));
|
||||
}
|
||||
|
||||
/// Applies a custom user role config loaded from disk.
|
||||
#[tokio::test]
|
||||
async fn apply_role_to_config_supports_custom_role_config_file() {
|
||||
let dir = TempDir::new().expect("tempdir");
|
||||
let planner_path = write_role_config_file(
|
||||
&dir,
|
||||
"agents/planner.toml",
|
||||
r#"
|
||||
model = "gpt-5.1-codex"
|
||||
sandbox_mode = "read-only"
|
||||
"#,
|
||||
);
|
||||
write_agents_config(
|
||||
&dir,
|
||||
&format!(
|
||||
"[agents.planner]\ndescription = \"Planning-focused role.\"\nconfig_file = {planner_path:?}\n"
|
||||
),
|
||||
);
|
||||
|
||||
let mut config = test_config();
|
||||
config.codex_home = dir.path().to_path_buf();
|
||||
apply_role_to_config(&mut config, Some("planner"))
|
||||
.await
|
||||
.expect("apply planner role");
|
||||
|
||||
assert_eq!(config.model, Some("gpt-5.1-codex".to_string()));
|
||||
assert_eq!(
|
||||
config.permissions.sandbox_policy.get(),
|
||||
&crate::protocol::SandboxPolicy::new_read_only_policy()
|
||||
);
|
||||
}
|
||||
|
||||
/// Resolves relative config_file paths from the agents_config.toml directory.
|
||||
#[tokio::test]
|
||||
async fn apply_role_to_config_supports_relative_custom_role_config_file() {
|
||||
let dir = TempDir::new().expect("tempdir");
|
||||
write_role_config_file(
|
||||
&dir,
|
||||
"agents/planner.toml",
|
||||
r#"
|
||||
model = "gpt-5.1-codex"
|
||||
sandbox_mode = "read-only"
|
||||
"#,
|
||||
);
|
||||
write_agents_config(
|
||||
&dir,
|
||||
r#"
|
||||
[agents.planner]
|
||||
description = "Planning-focused role."
|
||||
config_file = "agents/planner.toml"
|
||||
"#,
|
||||
);
|
||||
|
||||
let mut config = test_config();
|
||||
config.codex_home = dir.path().to_path_buf();
|
||||
apply_role_to_config(&mut config, Some("planner"))
|
||||
.await
|
||||
.expect("apply planner role");
|
||||
|
||||
assert_eq!(config.model, Some("gpt-5.1-codex".to_string()));
|
||||
assert_eq!(
|
||||
config.permissions.sandbox_policy.get(),
|
||||
&crate::protocol::SandboxPolicy::new_read_only_policy()
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn apply_role_to_config_reports_unknown_agent_type() {
|
||||
let mut config = test_config();
|
||||
|
||||
let err = apply_role_to_config(&mut config, Some("missing"))
|
||||
.await
|
||||
.expect_err("missing role should fail");
|
||||
|
||||
assert_eq!(err, "unknown agent_type 'missing'");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn apply_role_to_config_reports_unavailable_agent_type() {
|
||||
let dir = TempDir::new().expect("tempdir");
|
||||
write_agents_config(
|
||||
&dir,
|
||||
r#"
|
||||
[agents.planner]
|
||||
config_file = "agents/does-not-exist.toml"
|
||||
"#,
|
||||
);
|
||||
|
||||
let mut config = test_config();
|
||||
config.codex_home = dir.path().to_path_buf();
|
||||
let err = apply_role_to_config(&mut config, Some("planner"))
|
||||
.await
|
||||
.expect_err("missing config file should fail");
|
||||
|
||||
assert_eq!(err, AGENT_TYPE_UNAVAILABLE_ERROR);
|
||||
}
|
||||
|
||||
/// Lets a user config file override a built-in role config file.
|
||||
#[tokio::test]
|
||||
async fn apply_role_to_config_lets_user_override_builtin_config_file() {
|
||||
let dir = TempDir::new().expect("tempdir");
|
||||
let custom_explorer_path = write_role_config_file(
|
||||
&dir,
|
||||
"agents/custom_explorer.toml",
|
||||
r#"
|
||||
model = "gpt-5.1-codex"
|
||||
model_reasoning_effort = "high"
|
||||
"#,
|
||||
);
|
||||
write_agents_config(
|
||||
&dir,
|
||||
&format!("[agents.explorer]\nconfig_file = {custom_explorer_path:?}\n"),
|
||||
);
|
||||
|
||||
let mut config = test_config();
|
||||
config.codex_home = dir.path().to_path_buf();
|
||||
apply_role_to_config(&mut config, Some("explorer"))
|
||||
.await
|
||||
.expect("apply explorer role");
|
||||
|
||||
assert_eq!(config.model, Some("gpt-5.1-codex".to_string()));
|
||||
assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::High));
|
||||
}
|
||||
|
||||
/// Applies MCP server settings from a role config file.
|
||||
#[tokio::test]
|
||||
async fn apply_role_to_config_applies_mcp_servers_config_file_layer() {
|
||||
let dir = TempDir::new().expect("tempdir");
|
||||
let tester_path = write_role_config_file(
|
||||
&dir,
|
||||
"agents/tester.toml",
|
||||
r#"
|
||||
[mcp_servers.docs]
|
||||
command = "echo"
|
||||
enabled_tools = ["search"]
|
||||
"#,
|
||||
);
|
||||
write_agents_config(
|
||||
&dir,
|
||||
&format!("[agents.tester]\nconfig_file = {tester_path:?}\n"),
|
||||
);
|
||||
|
||||
let mut config = test_config();
|
||||
config.codex_home = dir.path().to_path_buf();
|
||||
apply_role_to_config(&mut config, Some("tester"))
|
||||
.await
|
||||
.expect("apply tester role");
|
||||
|
||||
let mcp_servers = config.mcp_servers.get();
|
||||
assert_eq!(
|
||||
mcp_servers
|
||||
.get("docs")
|
||||
.and_then(|server| server.enabled_tools.clone()),
|
||||
Some(vec!["search".to_string()])
|
||||
);
|
||||
}
|
||||
|
||||
/// Inserts a role SessionFlags layer in precedence order when legacy managed
|
||||
/// layers are already present.
|
||||
#[tokio::test]
|
||||
async fn apply_role_to_config_keeps_layer_ordering_with_legacy_managed_layers() {
|
||||
let mut config = test_config();
|
||||
let dir = TempDir::new().expect("tempdir");
|
||||
let managed_path = dir.path().join("managed_config.toml");
|
||||
std::fs::write(&managed_path, "").expect("write managed config");
|
||||
let managed_file = AbsolutePathBuf::try_from(managed_path).expect("managed file");
|
||||
config.config_layer_stack = ConfigLayerStack::new(
|
||||
vec![ConfigLayerEntry::new(
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file },
|
||||
TomlValue::Table(toml::map::Map::new()),
|
||||
)],
|
||||
config.config_layer_stack.requirements().clone(),
|
||||
config.config_layer_stack.requirements_toml().clone(),
|
||||
)
|
||||
.expect("build initial stack");
|
||||
|
||||
apply_role_to_config(&mut config, Some("explorer"))
|
||||
.await
|
||||
.expect("apply explorer role");
|
||||
|
||||
let layers = config
|
||||
.config_layer_stack
|
||||
.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, true);
|
||||
assert!(matches!(
|
||||
layers.first().map(|layer| &layer.name),
|
||||
Some(ConfigLayerSource::SessionFlags)
|
||||
));
|
||||
assert!(matches!(
|
||||
layers.last().map(|layer| &layer.name),
|
||||
Some(ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. })
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn spawn_tool_spec_build_dedups_and_prefers_user_defined_roles() {
|
||||
let built_in_roles = parse_agents_config(
|
||||
r#"
|
||||
[agents.default]
|
||||
description = "Built-in default."
|
||||
|
||||
[agents.explorer]
|
||||
description = "Built-in explorer."
|
||||
"#,
|
||||
"built-in test roles",
|
||||
)
|
||||
.expect("parse built-in roles");
|
||||
let user_defined_roles = parse_agents_config(
|
||||
r#"
|
||||
[agents.explorer]
|
||||
description = "User explorer."
|
||||
"#,
|
||||
"user-defined test roles",
|
||||
)
|
||||
.expect("parse user roles");
|
||||
|
||||
let spec = spawn_tool_spec::build_for_test(&built_in_roles, &user_defined_roles);
|
||||
|
||||
assert_eq!(spec.matches("explorer:").count(), 1);
|
||||
assert!(spec.contains("explorer: {\nUser explorer.\n}"));
|
||||
assert!(!spec.contains("Built-in explorer."));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn spawn_tool_spec_build_lists_user_defined_roles_first() {
|
||||
let built_in_roles = parse_agents_config(
|
||||
r#"
|
||||
[agents.default]
|
||||
description = "Built-in default."
|
||||
|
||||
[agents.worker]
|
||||
description = "Built-in worker."
|
||||
"#,
|
||||
"built-in test roles",
|
||||
)
|
||||
.expect("parse built-in roles");
|
||||
let user_defined_roles = parse_agents_config(
|
||||
r#"
|
||||
[agents.planner]
|
||||
description = "User planner."
|
||||
"#,
|
||||
"user-defined test roles",
|
||||
)
|
||||
.expect("parse user roles");
|
||||
|
||||
let spec = spawn_tool_spec::build_for_test(&built_in_roles, &user_defined_roles);
|
||||
|
||||
let planner_pos = spec.find("planner:").expect("planner role is present");
|
||||
let default_pos = spec.find("default:").expect("default role is present");
|
||||
assert!(planner_pos < default_pos);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn spawn_tool_spec_build_formats_missing_description() {
|
||||
let built_in_roles = parse_agents_config(
|
||||
r#"
|
||||
[agents.default]
|
||||
description = "Built-in default."
|
||||
"#,
|
||||
"built-in test roles",
|
||||
)
|
||||
.expect("parse built-in roles");
|
||||
let user_defined_roles = parse_agents_config(
|
||||
r#"
|
||||
[agents.planner]
|
||||
"#,
|
||||
"user-defined test roles",
|
||||
)
|
||||
.expect("parse user roles");
|
||||
|
||||
let spec = spawn_tool_spec::build_for_test(&built_in_roles, &user_defined_roles);
|
||||
|
||||
assert!(spec.contains("planner: no description"));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,11 +7,9 @@ use codex_protocol::protocol::SkillScope;
|
||||
use serde::Serialize;
|
||||
use sha1::Digest;
|
||||
use sha1::Sha1;
|
||||
use std::collections::HashSet;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::Mutex;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
@@ -19,18 +17,15 @@ use tokio::sync::mpsc;
|
||||
pub(crate) struct TrackEventsContext {
|
||||
pub(crate) model_slug: String,
|
||||
pub(crate) thread_id: String,
|
||||
pub(crate) turn_id: String,
|
||||
}
|
||||
|
||||
pub(crate) fn build_track_events_context(
|
||||
model_slug: String,
|
||||
thread_id: String,
|
||||
turn_id: String,
|
||||
) -> TrackEventsContext {
|
||||
TrackEventsContext {
|
||||
model_slug,
|
||||
thread_id,
|
||||
turn_id,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,16 +35,9 @@ pub(crate) struct SkillInvocation {
|
||||
pub(crate) skill_path: PathBuf,
|
||||
}
|
||||
|
||||
pub(crate) struct AppInvocation {
|
||||
pub(crate) connector_id: Option<String>,
|
||||
pub(crate) app_name: Option<String>,
|
||||
pub(crate) invoke_type: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct AnalyticsEventsQueue {
|
||||
sender: mpsc::Sender<TrackEventsJob>,
|
||||
app_used_emitted_keys: Arc<Mutex<HashSet<(String, String)>>>,
|
||||
}
|
||||
|
||||
pub(crate) struct AnalyticsEventsClient {
|
||||
@@ -62,45 +50,18 @@ impl AnalyticsEventsQueue {
|
||||
let (sender, mut receiver) = mpsc::channel(ANALYTICS_EVENTS_QUEUE_SIZE);
|
||||
tokio::spawn(async move {
|
||||
while let Some(job) = receiver.recv().await {
|
||||
match job {
|
||||
TrackEventsJob::SkillInvocations(job) => {
|
||||
send_track_skill_invocations(&auth_manager, job).await;
|
||||
}
|
||||
TrackEventsJob::AppMentioned(job) => {
|
||||
send_track_app_mentioned(&auth_manager, job).await;
|
||||
}
|
||||
TrackEventsJob::AppUsed(job) => {
|
||||
send_track_app_used(&auth_manager, job).await;
|
||||
}
|
||||
}
|
||||
send_track_skill_invocations(&auth_manager, job).await;
|
||||
}
|
||||
});
|
||||
Self {
|
||||
sender,
|
||||
app_used_emitted_keys: Arc::new(Mutex::new(HashSet::new())),
|
||||
}
|
||||
Self { sender }
|
||||
}
|
||||
|
||||
fn try_send(&self, job: TrackEventsJob) {
|
||||
if self.sender.try_send(job).is_err() {
|
||||
//TODO: add a metric for this
|
||||
tracing::warn!("dropping analytics events: queue is full");
|
||||
tracing::warn!("dropping skill analytics events: queue is full");
|
||||
}
|
||||
}
|
||||
|
||||
fn should_enqueue_app_used(&self, tracking: &TrackEventsContext, app: &AppInvocation) -> bool {
|
||||
let Some(connector_id) = app.connector_id.as_ref() else {
|
||||
return true;
|
||||
};
|
||||
let mut emitted = self
|
||||
.app_used_emitted_keys
|
||||
.lock()
|
||||
.unwrap_or_else(std::sync::PoisonError::into_inner);
|
||||
if emitted.len() >= ANALYTICS_APP_USED_DEDUPE_MAX_KEYS {
|
||||
emitted.clear();
|
||||
}
|
||||
emitted.insert((tracking.turn_id.clone(), connector_id.clone()))
|
||||
}
|
||||
}
|
||||
|
||||
impl AnalyticsEventsClient {
|
||||
@@ -123,76 +84,32 @@ impl AnalyticsEventsClient {
            invocations,
        );
    }

    pub(crate) fn track_app_mentioned(
        &self,
        tracking: TrackEventsContext,
        mentions: Vec<AppInvocation>,
    ) {
        track_app_mentioned(
            &self.queue,
            Arc::clone(&self.config),
            Some(tracking),
            mentions,
        );
    }

    pub(crate) fn track_app_used(&self, tracking: TrackEventsContext, app: AppInvocation) {
        track_app_used(&self.queue, Arc::clone(&self.config), Some(tracking), app);
    }
}

enum TrackEventsJob {
    SkillInvocations(TrackSkillInvocationsJob),
    AppMentioned(TrackAppMentionedJob),
    AppUsed(TrackAppUsedJob),
}

struct TrackSkillInvocationsJob {
struct TrackEventsJob {
    config: Arc<Config>,
    tracking: TrackEventsContext,
    invocations: Vec<SkillInvocation>,
}

struct TrackAppMentionedJob {
    config: Arc<Config>,
    tracking: TrackEventsContext,
    mentions: Vec<AppInvocation>,
}

struct TrackAppUsedJob {
    config: Arc<Config>,
    tracking: TrackEventsContext,
    app: AppInvocation,
}

const ANALYTICS_EVENTS_QUEUE_SIZE: usize = 256;
const ANALYTICS_EVENTS_TIMEOUT: Duration = Duration::from_secs(10);
const ANALYTICS_APP_USED_DEDUPE_MAX_KEYS: usize = 4096;

#[derive(Serialize)]
struct TrackEventsRequest {
    events: Vec<TrackEventRequest>,
    events: Vec<TrackEvent>,
}

#[derive(Serialize)]
#[serde(untagged)]
enum TrackEventRequest {
    SkillInvocation(SkillInvocationEventRequest),
    AppMentioned(CodexAppMentionedEventRequest),
    AppUsed(CodexAppUsedEventRequest),
}

#[derive(Serialize)]
struct SkillInvocationEventRequest {
struct TrackEvent {
    event_type: &'static str,
    skill_id: String,
    skill_name: String,
    event_params: SkillInvocationEventParams,
    event_params: TrackEventParams,
}

#[derive(Serialize)]
struct SkillInvocationEventParams {
struct TrackEventParams {
    product_client_id: Option<String>,
    skill_scope: Option<String>,
    repo_url: Option<String>,
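The `#[serde(untagged)]` attribute on the removed `TrackEventRequest` enum is what let heterogeneous event payloads share a single `events` array with no enum tag in the JSON. A hedged sketch of that serde behavior, using illustrative types rather than the crate's:

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(untagged)]
enum Event {
    Skill(SkillEvent),
    App(AppEvent),
}

#[derive(Serialize)]
struct SkillEvent {
    event_type: &'static str,
    skill_id: String,
}

#[derive(Serialize)]
struct AppEvent {
    event_type: &'static str,
    app_name: String,
}

fn main() {
    let events = vec![
        Event::Skill(SkillEvent { event_type: "skill_invocation", skill_id: "s1".into() }),
        Event::App(AppEvent { event_type: "codex_app_used", app_name: "Calendar".into() }),
    ];
    // Untagged variants serialize as their payload alone; no variant name appears.
    println!("{}", serde_json::to_string(&events).unwrap());
    // => [{"event_type":"skill_invocation","skill_id":"s1"},{"event_type":"codex_app_used","app_name":"Calendar"}]
}
```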
@@ -201,29 +118,6 @@ struct SkillInvocationEventParams {
    model_slug: Option<String>,
}

#[derive(Serialize)]
struct CodexAppMetadata {
    connector_id: Option<String>,
    thread_id: Option<String>,
    turn_id: Option<String>,
    app_name: Option<String>,
    product_client_id: Option<String>,
    invoke_type: Option<String>,
    model_slug: Option<String>,
}

#[derive(Serialize)]
struct CodexAppMentionedEventRequest {
    event_type: &'static str,
    event_params: CodexAppMetadata,
}

#[derive(Serialize)]
struct CodexAppUsedEventRequest {
    event_type: &'static str,
    event_params: CodexAppMetadata,
}

pub(crate) fn track_skill_invocations(
    queue: &AnalyticsEventsQueue,
    config: Arc<Config>,
@@ -239,66 +133,34 @@ pub(crate) fn track_skill_invocations(
    if invocations.is_empty() {
        return;
    }
    let job = TrackEventsJob::SkillInvocations(TrackSkillInvocationsJob {
    let job = TrackEventsJob {
        config,
        tracking,
        invocations,
    });
    queue.try_send(job);
}

pub(crate) fn track_app_mentioned(
    queue: &AnalyticsEventsQueue,
    config: Arc<Config>,
    tracking: Option<TrackEventsContext>,
    mentions: Vec<AppInvocation>,
) {
    if config.analytics_enabled == Some(false) {
        return;
    }
    let Some(tracking) = tracking else {
        return;
    };
    if mentions.is_empty() {
        return;
    }
    let job = TrackEventsJob::AppMentioned(TrackAppMentionedJob {
        config,
        tracking,
        mentions,
    });
    queue.try_send(job);
}

pub(crate) fn track_app_used(
    queue: &AnalyticsEventsQueue,
    config: Arc<Config>,
    tracking: Option<TrackEventsContext>,
    app: AppInvocation,
) {
    if config.analytics_enabled == Some(false) {
        return;
    }
    let Some(tracking) = tracking else {
        return;
    };
    if !queue.should_enqueue_app_used(&tracking, &app) {
        return;
    }
    let job = TrackEventsJob::AppUsed(TrackAppUsedJob {
        config,
        tracking,
        app,
    });
    queue.try_send(job);
}

async fn send_track_skill_invocations(auth_manager: &AuthManager, job: TrackSkillInvocationsJob) {
    let TrackSkillInvocationsJob {
async fn send_track_skill_invocations(auth_manager: &AuthManager, job: TrackEventsJob) {
    let TrackEventsJob {
        config,
        tracking,
        invocations,
    } = job;
    let Some(auth) = auth_manager.auth().await else {
        return;
    };
    if !auth.is_chatgpt_auth() {
        return;
    }
    let access_token = match auth.get_token() {
        Ok(token) => token,
        Err(_) => return,
    };
    let Some(account_id) = auth.get_account_id() else {
        return;
    };

    let mut events = Vec::with_capacity(invocations.len());
    for invocation in invocations {
        let skill_scope = match invocation.skill_scope {
@@ -321,95 +183,21 @@ async fn send_track_skill_invocations(auth_manager: &AuthManager, job: TrackSkillInvocationsJob) {
            invocation.skill_path.as_path(),
            invocation.skill_name.as_str(),
        );
        events.push(TrackEventRequest::SkillInvocation(
            SkillInvocationEventRequest {
                event_type: "skill_invocation",
                skill_id,
                skill_name: invocation.skill_name.clone(),
                event_params: SkillInvocationEventParams {
                    thread_id: Some(tracking.thread_id.clone()),
                    invoke_type: Some("explicit".to_string()),
                    model_slug: Some(tracking.model_slug.clone()),
                    product_client_id: Some(crate::default_client::originator().value),
                    repo_url,
                    skill_scope: Some(skill_scope.to_string()),
                },
        events.push(TrackEvent {
            event_type: "skill_invocation",
            skill_id,
            skill_name: invocation.skill_name.clone(),
            event_params: TrackEventParams {
                thread_id: Some(tracking.thread_id.clone()),
                invoke_type: Some("explicit".to_string()),
                model_slug: Some(tracking.model_slug.clone()),
                product_client_id: Some(crate::default_client::originator().value),
                repo_url,
                skill_scope: Some(skill_scope.to_string()),
            },
        ));
        });
    }

    send_track_events(auth_manager, config, events).await;
}

async fn send_track_app_mentioned(auth_manager: &AuthManager, job: TrackAppMentionedJob) {
    let TrackAppMentionedJob {
        config,
        tracking,
        mentions,
    } = job;
    let events = mentions
        .into_iter()
        .map(|mention| {
            let event_params = codex_app_metadata(&tracking, mention);
            TrackEventRequest::AppMentioned(CodexAppMentionedEventRequest {
                event_type: "codex_app_mentioned",
                event_params,
            })
        })
        .collect::<Vec<_>>();

    send_track_events(auth_manager, config, events).await;
}

async fn send_track_app_used(auth_manager: &AuthManager, job: TrackAppUsedJob) {
    let TrackAppUsedJob {
        config,
        tracking,
        app,
    } = job;
    let event_params = codex_app_metadata(&tracking, app);
    let events = vec![TrackEventRequest::AppUsed(CodexAppUsedEventRequest {
        event_type: "codex_app_used",
        event_params,
    })];

    send_track_events(auth_manager, config, events).await;
}

fn codex_app_metadata(tracking: &TrackEventsContext, app: AppInvocation) -> CodexAppMetadata {
    CodexAppMetadata {
        connector_id: app.connector_id,
        thread_id: Some(tracking.thread_id.clone()),
        turn_id: Some(tracking.turn_id.clone()),
        app_name: app.app_name,
        product_client_id: Some(crate::default_client::originator().value),
        invoke_type: app.invoke_type,
        model_slug: Some(tracking.model_slug.clone()),
    }
}

async fn send_track_events(
    auth_manager: &AuthManager,
    config: Arc<Config>,
    events: Vec<TrackEventRequest>,
) {
    if events.is_empty() {
        return;
    }
    let Some(auth) = auth_manager.auth().await else {
        return;
    };
    if !auth.is_chatgpt_auth() {
        return;
    }
    let access_token = match auth.get_token() {
        Ok(token) => token,
        Err(_) => return,
    };
    let Some(account_id) = auth.get_account_id() else {
        return;
    };

    let base_url = config.chatgpt_base_url.trim_end_matches('/');
    let url = format!("{base_url}/codex/analytics-events/events");
    let payload = TrackEventsRequest { events };
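The hunk cuts off before the actual network call, but the surrounding code shapes a bearer-authenticated POST of `TrackEventsRequest` to `{base_url}/codex/analytics-events/events`. A sketch of that step under stated assumptions: the `reqwest` client, the account-id header name, and the error handling are guesses for illustration, not taken from this file.

```rust
use std::time::Duration;

use serde::Serialize;

#[derive(Serialize)]
struct TrackEventsRequest {
    events: Vec<serde_json::Value>, // stand-in for the event types above
}

async fn post_events(
    base_url: &str,
    access_token: &str,
    account_id: &str,
    payload: &TrackEventsRequest,
) -> Result<(), reqwest::Error> {
    let base_url = base_url.trim_end_matches('/');
    let url = format!("{base_url}/codex/analytics-events/events");
    reqwest::Client::new()
        .post(url)
        .bearer_auth(access_token)
        // Header name is an assumption; the code above only shows that an
        // account id must be available before sending.
        .header("chatgpt-account-id", account_id)
        .timeout(Duration::from_secs(10)) // mirrors ANALYTICS_EVENTS_TIMEOUT
        .json(payload)
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```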
@@ -481,21 +269,9 @@ fn normalize_path_for_skill_id(

#[cfg(test)]
mod tests {
    use super::AnalyticsEventsQueue;
    use super::AppInvocation;
    use super::CodexAppMentionedEventRequest;
    use super::CodexAppUsedEventRequest;
    use super::TrackEventRequest;
    use super::TrackEventsContext;
    use super::codex_app_metadata;
    use super::normalize_path_for_skill_id;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use std::collections::HashSet;
    use std::path::PathBuf;
    use std::sync::Arc;
    use std::sync::Mutex;
    use tokio::sync::mpsc;

    fn expected_absolute_path(path: &PathBuf) -> String {
        std::fs::canonicalize(path)
@@ -552,109 +328,4 @@ mod tests {

        assert_eq!(path, expected);
    }

    #[test]
    fn app_mentioned_event_serializes_expected_shape() {
        let tracking = TrackEventsContext {
            model_slug: "gpt-5".to_string(),
            thread_id: "thread-1".to_string(),
            turn_id: "turn-1".to_string(),
        };
        let event = TrackEventRequest::AppMentioned(CodexAppMentionedEventRequest {
            event_type: "codex_app_mentioned",
            event_params: codex_app_metadata(
                &tracking,
                AppInvocation {
                    connector_id: Some("calendar".to_string()),
                    app_name: Some("Calendar".to_string()),
                    invoke_type: Some("explicit".to_string()),
                },
            ),
        });

        let payload = serde_json::to_value(&event).expect("serialize app mentioned event");

        assert_eq!(
            payload,
            json!({
                "event_type": "codex_app_mentioned",
                "event_params": {
                    "connector_id": "calendar",
                    "thread_id": "thread-1",
                    "turn_id": "turn-1",
                    "app_name": "Calendar",
                    "product_client_id": crate::default_client::originator().value,
                    "invoke_type": "explicit",
                    "model_slug": "gpt-5"
                }
            })
        );
    }

    #[test]
    fn app_used_event_serializes_expected_shape() {
        let tracking = TrackEventsContext {
            model_slug: "gpt-5".to_string(),
            thread_id: "thread-2".to_string(),
            turn_id: "turn-2".to_string(),
        };
        let event = TrackEventRequest::AppUsed(CodexAppUsedEventRequest {
            event_type: "codex_app_used",
            event_params: codex_app_metadata(
                &tracking,
                AppInvocation {
                    connector_id: Some("drive".to_string()),
                    app_name: Some("Google Drive".to_string()),
                    invoke_type: Some("implicit".to_string()),
                },
            ),
        });

        let payload = serde_json::to_value(&event).expect("serialize app used event");

        assert_eq!(
            payload,
            json!({
                "event_type": "codex_app_used",
                "event_params": {
                    "connector_id": "drive",
                    "thread_id": "thread-2",
                    "turn_id": "turn-2",
                    "app_name": "Google Drive",
                    "product_client_id": crate::default_client::originator().value,
                    "invoke_type": "implicit",
                    "model_slug": "gpt-5"
                }
            })
        );
    }

    #[test]
    fn app_used_dedupe_is_keyed_by_turn_and_connector() {
        let (sender, _receiver) = mpsc::channel(1);
        let queue = AnalyticsEventsQueue {
            sender,
            app_used_emitted_keys: Arc::new(Mutex::new(HashSet::new())),
        };
        let app = AppInvocation {
            connector_id: Some("calendar".to_string()),
            app_name: Some("Calendar".to_string()),
            invoke_type: Some("implicit".to_string()),
        };

        let turn_1 = TrackEventsContext {
            model_slug: "gpt-5".to_string(),
            thread_id: "thread-1".to_string(),
            turn_id: "turn-1".to_string(),
        };
        let turn_2 = TrackEventsContext {
            model_slug: "gpt-5".to_string(),
            thread_id: "thread-1".to_string(),
            turn_id: "turn-2".to_string(),
        };

        assert_eq!(queue.should_enqueue_app_used(&turn_1, &app), true);
        assert_eq!(queue.should_enqueue_app_used(&turn_1, &app), false);
        assert_eq!(queue.should_enqueue_app_used(&turn_2, &app), true);
    }
}
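Stepping back, the queue that survives this diff is a plain fire-and-forget pipeline: one bounded tokio mpsc channel drained by a single background task, with producers using `try_send` so a full queue drops events instead of blocking a turn. A runnable sketch of that shape with simplified types, not the crate's:

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // Small capacity for illustration; the real queue used 256.
    let (sender, mut receiver) = mpsc::channel::<String>(2);

    let worker = tokio::spawn(async move {
        while let Some(job) = receiver.recv().await {
            // The real worker awaits an HTTP send here.
            println!("sending {job}");
        }
    });

    for i in 0..5 {
        // try_send never blocks; on a full channel the job is simply dropped.
        if sender.try_send(format!("job-{i}")).is_err() {
            eprintln!("dropping job-{i}: queue is full");
        }
    }

    drop(sender); // close the channel so the worker loop ends
    worker.await.unwrap();
}
```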
Some files were not shown because too many files have changed in this diff.