Compare commits


2 Commits

Author      SHA1        Message                            Date
kevin zhao  34809c9880  timezones                          2025-10-30 18:17:52 -04:00
kevin zhao  325e35388c  adding system date to env context  2025-10-30 18:00:32 -04:00
193 changed files with 4595 additions and 13664 deletions
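
The two commits concern exposing the system date and timezone to the environment context. As a hedged illustration only (not the repository's actual implementation), a minimal chrono-based sketch of formatting a local timestamp with its UTC offset:

```rust
use chrono::Local; // assumes the `chrono` crate is available

fn main() {
    // Local date-time with its UTC offset, e.g. "2025-10-30 18:17:52 -04:00",
    // the same shape as the commit timestamps above.
    let now = Local::now();
    println!("{}", now.format("%Y-%m-%d %H:%M:%S %:z"));
}
```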

View File

@@ -46,7 +46,7 @@ jobs:
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
- name: Upload staged npm package artifact
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: codex-npm-staging
path: ${{ steps.stage_npm_package.outputs.pack_output }}

View File

@@ -16,7 +16,7 @@ jobs:
outputs:
codex_output: ${{ steps.codex.outputs.final-message }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Prepare Codex inputs
env:
@@ -87,7 +87,7 @@ jobs:
issues: write
steps:
- name: Comment on issue
uses: actions/github-script@v8
uses: actions/github-script@v7
env:
CODEX_OUTPUT: ${{ needs.gather-duplicates.outputs.codex_output }}
with:

View File

@@ -16,7 +16,7 @@ jobs:
outputs:
codex_output: ${{ steps.codex.outputs.final-message }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- id: codex
uses: openai/codex-action@main

View File

@@ -350,7 +350,7 @@ jobs:
fi
fi
- uses: actions/upload-artifact@v5
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target }}
# Upload the per-binary .zst files as well as the new .tar.gz

View File

@@ -33,7 +33,7 @@ Then simply run `codex` to get started:
codex
```
If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).
If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-update-codex-isnt-upgrading-me).
<details>
<summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
@@ -75,13 +75,11 @@ Codex CLI supports a rich set of configuration options, with preferences stored
- [**Getting started**](./docs/getting-started.md)
- [CLI usage](./docs/getting-started.md#cli-usage)
- [Slash Commands](./docs/slash_commands.md)
- [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
- [Example prompts](./docs/getting-started.md#example-prompts)
- [Custom prompts](./docs/prompts.md)
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [**Configuration**](./docs/config.md)
- [Example config](./docs/example-config.md)
- [Configuration](./docs/config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)
- [**Authentication**](./docs/authentication.md)
- [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)

View File

@@ -1,5 +0,0 @@
[target.'cfg(all(windows, target_env = "msvc"))']
rustflags = ["-C", "link-arg=/STACK:8388608"]
[target.'cfg(all(windows, target_env = "gnu"))']
rustflags = ["-C", "link-arg=-Wl,--stack,8388608"]

codex-rs/Cargo.lock (generated, 118 changed lines)
View File

@@ -172,9 +172,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.100"
version = "1.0.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"
checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
[[package]]
name = "app_test_support"
@@ -186,11 +186,9 @@ dependencies = [
"chrono",
"codex-app-server-protocol",
"codex-core",
"codex-protocol",
"serde",
"serde_json",
"tokio",
"uuid",
"wiremock",
]
@@ -873,7 +871,6 @@ dependencies = [
"anyhow",
"clap",
"codex-protocol",
"mcp-types",
"paste",
"pretty_assertions",
"schemars 0.8.22",
@@ -894,7 +891,7 @@ dependencies = [
"pretty_assertions",
"similar",
"tempfile",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tree-sitter",
"tree-sitter-bash",
]
@@ -986,7 +983,6 @@ dependencies = [
"codex-rmcp-client",
"codex-stdio-to-uds",
"codex-tui",
"codex-windows-sandbox",
"ctor 0.5.0",
"owo-colors",
"predicates",
@@ -995,7 +991,6 @@ dependencies = [
"supports-color",
"tempfile",
"tokio",
"toml",
]
[[package]]
@@ -1036,7 +1031,7 @@ dependencies = [
"diffy",
"serde",
"serde_json",
"thiserror 2.0.17",
"thiserror 2.0.16",
]
[[package]]
@@ -1077,7 +1072,6 @@ dependencies = [
"codex-utils-readiness",
"codex-utils-string",
"codex-utils-tokenizer",
"codex-windows-sandbox",
"core-foundation 0.9.4",
"core_test_support",
"dirs",
@@ -1088,7 +1082,7 @@ dependencies = [
"futures",
"http",
"image",
"indexmap 2.12.0",
"indexmap 2.10.0",
"keyring",
"landlock",
"libc",
@@ -1112,7 +1106,7 @@ dependencies = [
"strum_macros 0.27.2",
"tempfile",
"test-log",
"thiserror 2.0.17",
"thiserror 2.0.16",
"time",
"tokio",
"tokio-test",
@@ -1218,7 +1212,7 @@ dependencies = [
"schemars 0.8.22",
"serde",
"tempfile",
"thiserror 2.0.17",
"thiserror 2.0.16",
"ts-rs",
"walkdir",
]
@@ -1473,7 +1467,6 @@ dependencies = [
"regex-lite",
"serde",
"serde_json",
"serial_test",
"shlex",
"strum 0.27.2",
"strum_macros 0.27.2",
@@ -1511,7 +1504,7 @@ dependencies = [
"codex-utils-cache",
"image",
"tempfile",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tokio",
]
@@ -1539,7 +1532,7 @@ version = "0.0.0"
dependencies = [
"assert_matches",
"async-trait",
"thiserror 2.0.17",
"thiserror 2.0.16",
"time",
"tokio",
]
@@ -1554,22 +1547,10 @@ version = "0.0.0"
dependencies = [
"anyhow",
"pretty_assertions",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tiktoken-rs",
]
[[package]]
name = "codex-windows-sandbox"
version = "0.1.0"
dependencies = [
"anyhow",
"dirs-next",
"rand 0.8.5",
"serde",
"serde_json",
"windows-sys 0.52.0",
]
[[package]]
name = "color-eyre"
version = "0.6.5"
@@ -1755,7 +1736,8 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "crossterm"
version = "0.28.1"
source = "git+https://github.com/nornagon/crossterm?branch=nornagon%2Fcolor-query#87db8bfa6dc99427fd3b071681b07fc31c6ce995"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6"
dependencies = [
"bitflags 2.10.0",
"crossterm_winapi",
@@ -2726,7 +2708,7 @@ dependencies = [
"futures-core",
"futures-sink",
"http",
"indexmap 2.12.0",
"indexmap 2.10.0",
"slab",
"tokio",
"tokio-util",
@@ -2770,12 +2752,6 @@ dependencies = [
"foldhash",
]
[[package]]
name = "hashbrown"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
[[package]]
name = "heck"
version = "0.5.0"
@@ -3211,14 +3187,13 @@ dependencies = [
[[package]]
name = "indexmap"
version = "2.12.0"
version = "2.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661"
dependencies = [
"equivalent",
"hashbrown 0.16.0",
"hashbrown 0.15.4",
"serde",
"serde_core",
]
[[package]]
@@ -3506,7 +3481,7 @@ checksum = "b3d2ef408b88e913bfc6594f5e693d57676f6463ded7d8bf994175364320c706"
dependencies = [
"enumflags2",
"libc",
"thiserror 2.0.17",
"thiserror 2.0.16",
]
[[package]]
@@ -4186,7 +4161,7 @@ dependencies = [
"futures-sink",
"js-sys",
"pin-project-lite",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tracing",
]
@@ -4229,7 +4204,7 @@ dependencies = [
"prost",
"reqwest",
"serde_json",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tokio",
"tonic",
"tracing",
@@ -4269,7 +4244,7 @@ dependencies = [
"percent-encoding",
"rand 0.9.2",
"serde_json",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tokio",
"tokio-stream",
]
@@ -4380,7 +4355,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
dependencies = [
"fixedbitset",
"indexmap 2.12.0",
"indexmap 2.10.0",
]
[[package]]
@@ -4448,7 +4423,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3af6b589e163c5a788fab00ce0c0366f6efbb9959c2f9874b224936af7fce7e1"
dependencies = [
"base64",
"indexmap 2.12.0",
"indexmap 2.10.0",
"quick-xml",
"serde",
"time",
@@ -4614,7 +4589,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3ef4f2f0422f23a82ec9f628ea2acd12871c81a9362b02c43c1aa86acfc3ba1"
dependencies = [
"futures",
"indexmap 2.12.0",
"indexmap 2.10.0",
"nix 0.30.1",
"tokio",
"tracing",
@@ -4701,7 +4676,7 @@ dependencies = [
"rustc-hash 2.1.1",
"rustls",
"socket2 0.6.0",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tokio",
"tracing",
"web-time",
@@ -4722,7 +4697,7 @@ dependencies = [
"rustls",
"rustls-pki-types",
"slab",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tinyvec",
"tracing",
"web-time",
@@ -4883,7 +4858,7 @@ checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b"
dependencies = [
"getrandom 0.2.16",
"libredox",
"thiserror 2.0.17",
"thiserror 2.0.16",
]
[[package]]
@@ -5012,9 +4987,9 @@ dependencies = [
[[package]]
name = "rmcp"
version = "0.8.5"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5947688160b56fb6c827e3c20a72c90392a1d7e9dec74749197aa1780ac42ca"
checksum = "1fdad1258f7259fdc0f2dfc266939c82c3b5d1fd72bcde274d600cdc27e60243"
dependencies = [
"base64",
"bytes",
@@ -5034,7 +5009,7 @@ dependencies = [
"serde",
"serde_json",
"sse-stream",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tokio",
"tokio-stream",
"tokio-util",
@@ -5046,9 +5021,9 @@ dependencies = [
[[package]]
name = "rmcp-macros"
version = "0.8.5"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01263441d3f8635c628e33856c468b96ebbce1af2d3699ea712ca71432d4ee7a"
checksum = "ede0589a208cc7ce81d1be68aa7e74b917fcd03c81528408bab0457e187dcd9b"
dependencies = [
"darling 0.21.3",
"proc-macro2",
@@ -5559,7 +5534,7 @@ version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
dependencies = [
"indexmap 2.12.0",
"indexmap 2.10.0",
"itoa",
"memchr",
"ryu",
@@ -5620,7 +5595,7 @@ dependencies = [
"chrono",
"hex",
"indexmap 1.9.3",
"indexmap 2.12.0",
"indexmap 2.10.0",
"schemars 0.9.0",
"schemars 1.0.4",
"serde",
@@ -5689,12 +5664,6 @@ dependencies = [
"digest",
]
[[package]]
name = "sha1_smol"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d"
[[package]]
name = "sha2"
version = "0.10.9"
@@ -6203,11 +6172,11 @@ dependencies = [
[[package]]
name = "thiserror"
version = "2.0.17"
version = "2.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8"
checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0"
dependencies = [
"thiserror-impl 2.0.17",
"thiserror-impl 2.0.16",
]
[[package]]
@@ -6223,9 +6192,9 @@ dependencies = [
[[package]]
name = "thiserror-impl"
version = "2.0.17"
version = "2.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960"
dependencies = [
"proc-macro2",
"quote",
@@ -6444,7 +6413,7 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75129e1dc5000bfbaa9fee9d1b21f974f9fbad9daec557a521ee6e080825f6e8"
dependencies = [
"indexmap 2.12.0",
"indexmap 2.10.0",
"serde",
"serde_spanned",
"toml_datetime",
@@ -6468,7 +6437,7 @@ version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7211ff1b8f0d3adae1663b7da9ffe396eabe1ca25f0b0bee42b0da29a9ddce93"
dependencies = [
"indexmap 2.12.0",
"indexmap 2.10.0",
"toml_datetime",
"toml_parser",
"toml_writer",
@@ -6527,7 +6496,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
dependencies = [
"futures-core",
"futures-util",
"indexmap 2.12.0",
"indexmap 2.10.0",
"pin-project-lite",
"slab",
"sync_wrapper",
@@ -6705,7 +6674,7 @@ checksum = "adc5f880ad8d8f94e88cb81c3557024cf1a8b75e3b504c50481ed4f5a6006ff3"
dependencies = [
"regex",
"streaming-iterator",
"thiserror 2.0.17",
"thiserror 2.0.16",
"tree-sitter",
]
@@ -6728,7 +6697,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ef1b7a6d914a34127ed8e1fa927eb7088903787bcded4fa3eef8f85ee1568be"
dependencies = [
"serde_json",
"thiserror 2.0.17",
"thiserror 2.0.16",
"ts-rs-macros",
"uuid",
]
@@ -6882,7 +6851,6 @@ dependencies = [
"getrandom 0.3.3",
"js-sys",
"serde",
"sha1_smol",
"wasm-bindgen",
]

View File

@@ -87,7 +87,6 @@ codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-string = { path = "utils/string" }
codex-utils-tokenizer = { path = "utils/tokenizer" }
codex-windows-sandbox = { path = "windows-sandbox" }
core_test_support = { path = "core/tests/common" }
mcp-types = { path = "mcp-types" }
mcp_test_support = { path = "mcp-server/tests/common" }
@@ -128,7 +127,7 @@ icu_provider = { version = "2.1", features = ["sync"] }
icu_locale_core = "2.1"
ignore = "0.4.23"
image = { version = "^0.25.8", default-features = false }
indexmap = "2.12.0"
indexmap = "2.6.0"
insta = "1.43.2"
itertools = "0.14.0"
keyring = "3.6"
@@ -162,7 +161,7 @@ ratatui = "0.29.0"
ratatui-macros = "0.6.0"
regex-lite = "0.1.7"
reqwest = "0.12"
rmcp = { version = "0.8.5", default-features = false }
rmcp = { version = "0.8.3", default-features = false }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.34.0"
@@ -182,7 +181,7 @@ sys-locale = "0.3.2"
tempfile = "3.23.0"
test-log = "0.2.18"
textwrap = "0.16.2"
thiserror = "2.0.17"
thiserror = "2.0.16"
time = "0.3"
tiny_http = "0.12"
tokio = "1"
@@ -211,7 +210,6 @@ walkdir = "2.5.0"
webbrowser = "1.0"
which = "6"
wildmatch = "2.5.0"
wiremock = "0.6"
zeroize = "1.8.1"
@@ -256,12 +254,7 @@ unwrap_used = "deny"
# cargo-shear cannot see the platform-specific openssl-sys usage, so we
# silence the false positive here instead of deleting a real dependency.
[workspace.metadata.cargo-shear]
ignored = [
"icu_provider",
"openssl-sys",
"codex-utils-readiness",
"codex-utils-tokenizer",
]
ignored = ["icu_provider", "openssl-sys", "codex-utils-readiness", "codex-utils-tokenizer"]
[profile.release]
lto = "fat"
@@ -281,7 +274,6 @@ opt-level = 0
# Uncomment to debug local changes.
# ratatui = { path = "../../ratatui" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
# Uncomment to debug local changes.
# rmcp = { path = "../../rust-sdk/crates/rmcp" }
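
Several of the pins above move patch versions back (e.g. `thiserror = "2.0.16"`, `rmcp = "0.8.3"`). As a reminder of what the `thiserror` dependency is for, a minimal hedged sketch with a hypothetical error type; the derive behaves the same on either patch release:

```rust
use thiserror::Error; // assumes thiserror 2.x

// Hypothetical error type for illustration; not taken from the codebase.
#[derive(Debug, Error)]
enum ExportError {
    #[error("failed to write {path}")]
    Write {
        path: String,
        #[source]
        source: std::io::Error,
    },
    #[error("invalid schema: {0}")]
    InvalidSchema(String),
}

fn main() {
    let err = ExportError::InvalidSchema("missing definitions".into());
    println!("{err}"); // prints: invalid schema: missing definitions
}
```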

View File

@@ -63,9 +63,6 @@ codex sandbox macos [--full-auto] [COMMAND]...
# Linux
codex sandbox linux [--full-auto] [COMMAND]...
# Windows
codex sandbox windows [--full-auto] [COMMAND]...
# Legacy aliases
codex debug seatbelt [--full-auto] [COMMAND]...
codex debug landlock [--full-auto] [COMMAND]...
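
A hedged usage sketch of the synopsis above, driving the sandbox entry point programmatically; the exact argument handling of `codex sandbox` is assumed from the usage lines, not verified:

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    // Hypothetical invocation matching `codex sandbox linux [--full-auto] [COMMAND]...`.
    let status = Command::new("codex")
        .args(["sandbox", "linux", "--full-auto", "ls", "-la"])
        .status()?;
    println!("sandboxed command exited with: {status}");
    Ok(())
}
```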

View File

@@ -14,7 +14,6 @@ workspace = true
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-protocol = { workspace = true }
mcp-types = { workspace = true }
paste = { workspace = true }
schemars = { workspace = true }
serde = { workspace = true, features = ["derive"] }

View File

@@ -2,27 +2,20 @@ use crate::ClientNotification;
use crate::ClientRequest;
use crate::ServerNotification;
use crate::ServerRequest;
use crate::export_client_notification_schemas;
use crate::export_client_param_schemas;
use crate::export_client_response_schemas;
use crate::export_client_responses;
use crate::export_server_notification_schemas;
use crate::export_server_param_schemas;
use crate::export_server_response_schemas;
use crate::export_server_responses;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::FileChange;
use codex_protocol::protocol::SandboxPolicy;
use schemars::JsonSchema;
use schemars::schema::RootSchema;
use schemars::schema_for;
use serde::Serialize;
use serde_json::Map;
use serde_json::Value;
use std::collections::HashMap;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::ffi::OsStr;
use std::fs;
@@ -35,29 +28,83 @@ use ts_rs::TS;
const HEADER: &str = "// GENERATED CODE! DO NOT MODIFY BY HAND!\n\n";
#[derive(Clone)]
pub struct GeneratedSchema {
namespace: Option<String>,
logical_name: String,
value: Value,
in_v1_dir: bool,
macro_rules! for_each_schema_type {
($macro:ident) => {
$macro!(crate::RequestId);
$macro!(crate::JSONRPCMessage);
$macro!(crate::JSONRPCRequest);
$macro!(crate::JSONRPCNotification);
$macro!(crate::JSONRPCResponse);
$macro!(crate::JSONRPCError);
$macro!(crate::JSONRPCErrorError);
$macro!(crate::AddConversationListenerParams);
$macro!(crate::AddConversationSubscriptionResponse);
$macro!(crate::ApplyPatchApprovalParams);
$macro!(crate::ApplyPatchApprovalResponse);
$macro!(crate::ArchiveConversationParams);
$macro!(crate::ArchiveConversationResponse);
$macro!(crate::AuthMode);
$macro!(crate::AuthStatusChangeNotification);
$macro!(crate::CancelLoginChatGptParams);
$macro!(crate::CancelLoginChatGptResponse);
$macro!(crate::ClientInfo);
$macro!(crate::ClientNotification);
$macro!(crate::ClientRequest);
$macro!(crate::ConversationSummary);
$macro!(crate::ExecCommandApprovalParams);
$macro!(crate::ExecCommandApprovalResponse);
$macro!(crate::ExecOneOffCommandParams);
$macro!(crate::ExecOneOffCommandResponse);
$macro!(crate::FuzzyFileSearchParams);
$macro!(crate::FuzzyFileSearchResponse);
$macro!(crate::FuzzyFileSearchResult);
$macro!(crate::GetAuthStatusParams);
$macro!(crate::GetAuthStatusResponse);
$macro!(crate::GetUserAgentResponse);
$macro!(crate::GetUserSavedConfigResponse);
$macro!(crate::GitDiffToRemoteParams);
$macro!(crate::GitDiffToRemoteResponse);
$macro!(crate::GitSha);
$macro!(crate::InitializeParams);
$macro!(crate::InitializeResponse);
$macro!(crate::InputItem);
$macro!(crate::InterruptConversationParams);
$macro!(crate::InterruptConversationResponse);
$macro!(crate::ListConversationsParams);
$macro!(crate::ListConversationsResponse);
$macro!(crate::LoginApiKeyParams);
$macro!(crate::LoginApiKeyResponse);
$macro!(crate::LoginChatGptCompleteNotification);
$macro!(crate::LoginChatGptResponse);
$macro!(crate::LogoutChatGptParams);
$macro!(crate::LogoutChatGptResponse);
$macro!(crate::NewConversationParams);
$macro!(crate::NewConversationResponse);
$macro!(crate::Profile);
$macro!(crate::RemoveConversationListenerParams);
$macro!(crate::RemoveConversationSubscriptionResponse);
$macro!(crate::ResumeConversationParams);
$macro!(crate::ResumeConversationResponse);
$macro!(crate::SandboxSettings);
$macro!(crate::SendUserMessageParams);
$macro!(crate::SendUserMessageResponse);
$macro!(crate::SendUserTurnParams);
$macro!(crate::SendUserTurnResponse);
$macro!(crate::ServerNotification);
$macro!(crate::ServerRequest);
$macro!(crate::SessionConfiguredNotification);
$macro!(crate::SetDefaultModelParams);
$macro!(crate::SetDefaultModelResponse);
$macro!(crate::Tools);
$macro!(crate::UserInfoResponse);
$macro!(crate::UserSavedConfig);
$macro!(codex_protocol::protocol::EventMsg);
$macro!(codex_protocol::protocol::FileChange);
$macro!(codex_protocol::parse_command::ParsedCommand);
$macro!(codex_protocol::protocol::SandboxPolicy);
};
}
impl GeneratedSchema {
fn namespace(&self) -> Option<&str> {
self.namespace.as_deref()
}
fn logical_name(&self) -> &str {
&self.logical_name
}
fn value(&self) -> &Value {
&self.value
}
}
type JsonSchemaEmitter = fn(&Path) -> Result<GeneratedSchema>;
pub fn generate_types(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
generate_ts(out_dir, prettier)?;
generate_json(out_dir)?;
@@ -65,9 +112,7 @@ pub fn generate_types(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
}
pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
let v2_out_dir = out_dir.join("v2");
ensure_dir(out_dir)?;
ensure_dir(&v2_out_dir)?;
ClientRequest::export_all_to(out_dir)?;
export_client_responses(out_dir)?;
@@ -78,15 +123,12 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
ServerNotification::export_all_to(out_dir)?;
generate_index_ts(out_dir)?;
generate_index_ts(&v2_out_dir)?;
// Ensure our header is present on all TS files (root + subdirs like v2/).
let ts_files = ts_files_in_recursive(out_dir)?;
let ts_files = ts_files_in(out_dir)?;
for file in &ts_files {
prepend_header_if_missing(file)?;
}
// Optionally run Prettier on all generated TS files.
if let Some(prettier_bin) = prettier
&& !ts_files.is_empty()
{
@@ -105,47 +147,23 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
pub fn generate_json(out_dir: &Path) -> Result<()> {
ensure_dir(out_dir)?;
let envelope_emitters: &[JsonSchemaEmitter] = &[
|d| write_json_schema_with_return::<crate::RequestId>(d, "RequestId"),
|d| write_json_schema_with_return::<crate::JSONRPCMessage>(d, "JSONRPCMessage"),
|d| write_json_schema_with_return::<crate::JSONRPCRequest>(d, "JSONRPCRequest"),
|d| write_json_schema_with_return::<crate::JSONRPCNotification>(d, "JSONRPCNotification"),
|d| write_json_schema_with_return::<crate::JSONRPCResponse>(d, "JSONRPCResponse"),
|d| write_json_schema_with_return::<crate::JSONRPCError>(d, "JSONRPCError"),
|d| write_json_schema_with_return::<crate::JSONRPCErrorError>(d, "JSONRPCErrorError"),
|d| write_json_schema_with_return::<crate::ClientRequest>(d, "ClientRequest"),
|d| write_json_schema_with_return::<crate::ServerRequest>(d, "ServerRequest"),
|d| write_json_schema_with_return::<crate::ClientNotification>(d, "ClientNotification"),
|d| write_json_schema_with_return::<crate::ServerNotification>(d, "ServerNotification"),
|d| write_json_schema_with_return::<EventMsg>(d, "EventMsg"),
|d| write_json_schema_with_return::<FileChange>(d, "FileChange"),
|d| write_json_schema_with_return::<crate::protocol::v1::InputItem>(d, "InputItem"),
|d| write_json_schema_with_return::<ParsedCommand>(d, "ParsedCommand"),
|d| write_json_schema_with_return::<SandboxPolicy>(d, "SandboxPolicy"),
];
let mut bundle: BTreeMap<String, RootSchema> = BTreeMap::new();
let mut schemas: Vec<GeneratedSchema> = Vec::new();
for emit in envelope_emitters {
schemas.push(emit(out_dir)?);
macro_rules! add_schema {
($ty:path) => {{
let name = type_basename(stringify!($ty));
let schema = write_json_schema_with_return::<$ty>(out_dir, &name)?;
bundle.insert(name, schema);
}};
}
schemas.extend(export_client_param_schemas(out_dir)?);
schemas.extend(export_client_response_schemas(out_dir)?);
schemas.extend(export_server_param_schemas(out_dir)?);
schemas.extend(export_server_response_schemas(out_dir)?);
schemas.extend(export_client_notification_schemas(out_dir)?);
schemas.extend(export_server_notification_schemas(out_dir)?);
for_each_schema_type!(add_schema);
let bundle = build_schema_bundle(schemas)?;
write_pretty_json(
out_dir.join("codex_app_server_protocol.schemas.json"),
&bundle,
)?;
export_client_response_schemas(out_dir)?;
export_server_response_schemas(out_dir)?;
Ok(())
}
let mut definitions = Map::new();
fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
const SPECIAL_DEFINITIONS: &[&str] = &[
"ClientNotification",
"ClientRequest",
@@ -158,62 +176,22 @@ fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
"ServerRequest",
];
let namespaced_types = collect_namespaced_types(&schemas);
let mut definitions = Map::new();
for (name, schema) in bundle {
let mut schema_value = serde_json::to_value(schema)?;
annotate_schema(&mut schema_value, Some(name.as_str()));
for schema in schemas {
let GeneratedSchema {
namespace,
logical_name,
mut value,
in_v1_dir,
} = schema;
if let Some(ref ns) = namespace {
rewrite_refs_to_namespace(&mut value, ns);
}
let mut forced_namespace_refs: Vec<(String, String)> = Vec::new();
if let Value::Object(ref mut obj) = value
if let Value::Object(ref mut obj) = schema_value
&& let Some(defs) = obj.remove("definitions")
&& let Value::Object(defs_obj) = defs
{
for (def_name, mut def_schema) in defs_obj {
if SPECIAL_DEFINITIONS.contains(&def_name.as_str()) {
continue;
}
annotate_schema(&mut def_schema, Some(def_name.as_str()));
let target_namespace = match namespace {
Some(ref ns) => Some(ns.clone()),
None => namespace_for_definition(&def_name, &namespaced_types)
.cloned()
.filter(|_| !in_v1_dir),
};
if let Some(ref ns) = target_namespace {
if namespace.as_deref() == Some(ns.as_str()) {
rewrite_refs_to_namespace(&mut def_schema, ns);
insert_into_namespace(&mut definitions, ns, def_name.clone(), def_schema)?;
} else if !forced_namespace_refs
.iter()
.any(|(name, existing_ns)| name == &def_name && existing_ns == ns)
{
forced_namespace_refs.push((def_name.clone(), ns.clone()));
}
} else {
if !SPECIAL_DEFINITIONS.contains(&def_name.as_str()) {
annotate_schema(&mut def_schema, Some(def_name.as_str()));
definitions.insert(def_name, def_schema);
}
}
}
for (name, ns) in forced_namespace_refs {
rewrite_named_ref_to_namespace(&mut value, &ns, &name);
}
if let Some(ref ns) = namespace {
insert_into_namespace(&mut definitions, ns, logical_name.clone(), value)?;
} else {
definitions.insert(logical_name, value);
}
definitions.insert(name, schema_value);
}
let mut root = Map::new();
@@ -228,28 +206,15 @@ fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
root.insert("type".to_string(), Value::String("object".into()));
root.insert("definitions".to_string(), Value::Object(definitions));
Ok(Value::Object(root))
write_pretty_json(
out_dir.join("codex_app_server_protocol.schemas.json"),
&Value::Object(root),
)?;
Ok(())
}
fn insert_into_namespace(
definitions: &mut Map<String, Value>,
namespace: &str,
name: String,
schema: Value,
) -> Result<()> {
let entry = definitions
.entry(namespace.to_string())
.or_insert_with(|| Value::Object(Map::new()));
match entry {
Value::Object(map) => {
map.insert(name, schema);
Ok(())
}
_ => Err(anyhow!("expected namespace {namespace} to be an object")),
}
}
fn write_json_schema_with_return<T>(out_dir: &Path, name: &str) -> Result<GeneratedSchema>
fn write_json_schema_with_return<T>(out_dir: &Path, name: &str) -> Result<RootSchema>
where
T: JsonSchema,
{
@@ -257,37 +222,17 @@ where
let schema = schema_for!(T);
let mut schema_value = serde_json::to_value(schema)?;
annotate_schema(&mut schema_value, Some(file_stem));
// If the name looks like a namespaced path (e.g., "v2::Type"), mirror
// the TypeScript layout and write to out_dir/v2/Type.json. Otherwise
// write alongside the legacy files.
let (raw_namespace, logical_name) = split_namespace(file_stem);
let out_path = if let Some(ns) = raw_namespace {
let dir = out_dir.join(ns);
ensure_dir(&dir)?;
dir.join(format!("{logical_name}.json"))
} else {
out_dir.join(format!("{file_stem}.json"))
};
write_pretty_json(out_path, &schema_value)
write_pretty_json(out_dir.join(format!("{file_stem}.json")), &schema_value)
.with_context(|| format!("Failed to write JSON schema for {file_stem}"))?;
let namespace = match raw_namespace {
Some("v1") | None => None,
Some(ns) => Some(ns.to_string()),
};
Ok(GeneratedSchema {
in_v1_dir: raw_namespace == Some("v1"),
namespace,
logical_name: logical_name.to_string(),
value: schema_value,
})
let annotated_schema = serde_json::from_value(schema_value)?;
Ok(annotated_schema)
}
pub(crate) fn write_json_schema<T>(out_dir: &Path, name: &str) -> Result<GeneratedSchema>
pub(crate) fn write_json_schema<T>(out_dir: &Path, name: &str) -> Result<()>
where
T: JsonSchema,
{
write_json_schema_with_return::<T>(out_dir, name)
write_json_schema_with_return::<T>(out_dir, name).map(|_| ())
}
fn write_pretty_json(path: PathBuf, value: &impl Serialize) -> Result<()> {
@@ -296,73 +241,13 @@ fn write_pretty_json(path: PathBuf, value: &impl Serialize) -> Result<()> {
fs::write(&path, json).with_context(|| format!("Failed to write {}", path.display()))?;
Ok(())
}
/// Split a fully-qualified type name like "v2::Type" into its namespace and logical name.
fn split_namespace(name: &str) -> (Option<&str>, &str) {
name.split_once("::")
.map_or((None, name), |(ns, rest)| (Some(ns), rest))
}
/// Recursively rewrite $ref values that point at "#/definitions/..." so that
/// they point to a namespaced location under the bundle.
fn rewrite_refs_to_namespace(value: &mut Value, ns: &str) {
match value {
Value::Object(obj) => {
if let Some(Value::String(r)) = obj.get_mut("$ref")
&& let Some(suffix) = r.strip_prefix("#/definitions/")
{
let prefix = format!("{ns}/");
if !suffix.starts_with(&prefix) {
*r = format!("#/definitions/{ns}/{suffix}");
}
}
for v in obj.values_mut() {
rewrite_refs_to_namespace(v, ns);
}
}
Value::Array(items) => {
for v in items.iter_mut() {
rewrite_refs_to_namespace(v, ns);
}
}
_ => {}
}
}
fn collect_namespaced_types(schemas: &[GeneratedSchema]) -> HashMap<String, String> {
let mut types = HashMap::new();
for schema in schemas {
if let Some(ns) = schema.namespace() {
types
.entry(schema.logical_name().to_string())
.or_insert_with(|| ns.to_string());
if let Some(Value::Object(defs)) = schema.value().get("definitions") {
for key in defs.keys() {
types.entry(key.clone()).or_insert_with(|| ns.to_string());
}
}
if let Some(Value::Object(defs)) = schema.value().get("$defs") {
for key in defs.keys() {
types.entry(key.clone()).or_insert_with(|| ns.to_string());
}
}
}
}
types
}
fn namespace_for_definition<'a>(
name: &str,
types: &'a HashMap<String, String>,
) -> Option<&'a String> {
if let Some(ns) = types.get(name) {
return Some(ns);
}
let trimmed = name.trim_end_matches(|c: char| c.is_ascii_digit());
if trimmed != name {
return types.get(trimmed);
}
None
fn type_basename(type_path: &str) -> String {
type_path
.rsplit_once("::")
.map(|(_, name)| name)
.unwrap_or(type_path)
.trim()
.to_string()
}
fn variant_definition_name(base: &str, variant: &Value) -> Option<String> {
@@ -582,33 +467,6 @@ fn ensure_dir(dir: &Path) -> Result<()> {
.with_context(|| format!("Failed to create output directory {}", dir.display()))
}
fn rewrite_named_ref_to_namespace(value: &mut Value, ns: &str, name: &str) {
let direct = format!("#/definitions/{name}");
let prefixed = format!("{direct}/");
let replacement = format!("#/definitions/{ns}/{name}");
let replacement_prefixed = format!("{replacement}/");
match value {
Value::Object(obj) => {
if let Some(Value::String(reference)) = obj.get_mut("$ref") {
if reference == &direct {
*reference = replacement;
} else if let Some(rest) = reference.strip_prefix(&prefixed) {
*reference = format!("{replacement_prefixed}{rest}");
}
}
for child in obj.values_mut() {
rewrite_named_ref_to_namespace(child, ns, name);
}
}
Value::Array(items) => {
for child in items {
rewrite_named_ref_to_namespace(child, ns, name);
}
}
_ => {}
}
}
fn prepend_header_if_missing(path: &Path) -> Result<()> {
let mut content = String::new();
{
@@ -646,26 +504,6 @@ fn ts_files_in(dir: &Path) -> Result<Vec<PathBuf>> {
Ok(files)
}
fn ts_files_in_recursive(dir: &Path) -> Result<Vec<PathBuf>> {
let mut files = Vec::new();
let mut stack = vec![dir.to_path_buf()];
while let Some(d) = stack.pop() {
for entry in
fs::read_dir(&d).with_context(|| format!("Failed to read dir {}", d.display()))?
{
let entry = entry?;
let path = entry.path();
if path.is_dir() {
stack.push(path);
} else if path.is_file() && path.extension() == Some(OsStr::new("ts")) {
files.push(path);
}
}
}
files.sort();
Ok(files)
}
fn generate_index_ts(out_dir: &Path) -> Result<PathBuf> {
let mut entries: Vec<String> = Vec::new();
let mut stems: Vec<String> = ts_files_in(out_dir)?
@@ -682,14 +520,6 @@ fn generate_index_ts(out_dir: &Path) -> Result<PathBuf> {
entries.push(format!("export type {{ {name} }} from \"./{name}\";\n"));
}
// If this is the root out_dir and a ./v2 folder exists with TS files,
// expose it as a namespace to avoid symbol collisions at the root.
let v2_dir = out_dir.join("v2");
let has_v2_ts = ts_files_in(&v2_dir).map(|v| !v.is_empty()).unwrap_or(false);
if has_v2_ts {
entries.push("export * as v2 from \"./v2\";\n".to_string());
}
let mut content =
String::with_capacity(HEADER.len() + entries.iter().map(String::len).sum::<usize>());
content.push_str(HEADER);
@@ -716,7 +546,6 @@ mod tests {
#[test]
fn generated_ts_has_no_optional_nullable_fields() -> Result<()> {
// Assert that there are no types of the form "?: T | null" in the generated TS files.
let output_dir = std::env::temp_dir().join(format!("codex_ts_types_{}", Uuid::now_v7()));
fs::create_dir(&output_dir)?;

View File

@@ -6,6 +6,4 @@ pub use export::generate_json;
pub use export::generate_ts;
pub use export::generate_types;
pub use jsonrpc_lite::*;
pub use protocol::common::*;
pub use protocol::v1::*;
pub use protocol::v2::*;
pub use protocol::*;

File diff suppressed because it is too large.

View File

@@ -1,821 +0,0 @@
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use crate::JSONRPCNotification;
use crate::JSONRPCRequest;
use crate::RequestId;
use crate::export::GeneratedSchema;
use crate::export::write_json_schema;
use crate::protocol::v1;
use crate::protocol::v2;
use codex_protocol::ConversationId;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::FileChange;
use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::SandboxCommandAssessment;
use paste::paste;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display;
use ts_rs::TS;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema, TS)]
#[ts(type = "string")]
pub struct GitSha(pub String);
impl GitSha {
pub fn new(sha: &str) -> Self {
Self(sha.to_string())
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
pub enum AuthMode {
ApiKey,
ChatGPT,
}
/// Generates an `enum ClientRequest` where each variant is a request that the
/// client can send to the server. Each variant has associated `params` and
/// `response` types. Also generates a `export_client_responses()` function to
/// export all response types to TypeScript.
macro_rules! client_request_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident {
params: $(#[$params_meta:meta])* $params:ty,
response: $response:ty,
}
),* $(,)?
) => {
/// Request from the client to the server.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "method", rename_all = "camelCase")]
pub enum ClientRequest {
$(
$(#[$variant_meta])*
$variant {
#[serde(rename = "id")]
request_id: RequestId,
$(#[$params_meta])*
params: $params,
},
)*
}
pub fn export_client_responses(
out_dir: &::std::path::Path,
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
$(
<$response as ::ts_rs::TS>::export_all_to(out_dir)?;
)*
Ok(())
}
#[allow(clippy::vec_init_then_push)]
pub fn export_client_response_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(
schemas.push(write_json_schema::<$response>(out_dir, stringify!($response))?);
)*
Ok(schemas)
}
#[allow(clippy::vec_init_then_push)]
pub fn export_client_param_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(
schemas.push(write_json_schema::<$params>(out_dir, stringify!($params))?);
)*
Ok(schemas)
}
};
}
client_request_definitions! {
/// NEW APIs
// Thread lifecycle
#[serde(rename = "thread/start")]
#[ts(rename = "thread/start")]
ThreadStart {
params: v2::ThreadStartParams,
response: v2::ThreadStartResponse,
},
#[serde(rename = "thread/resume")]
#[ts(rename = "thread/resume")]
ThreadResume {
params: v2::ThreadResumeParams,
response: v2::ThreadResumeResponse,
},
#[serde(rename = "thread/archive")]
#[ts(rename = "thread/archive")]
ThreadArchive {
params: v2::ThreadArchiveParams,
response: v2::ThreadArchiveResponse,
},
#[serde(rename = "thread/list")]
#[ts(rename = "thread/list")]
ThreadList {
params: v2::ThreadListParams,
response: v2::ThreadListResponse,
},
#[serde(rename = "thread/compact")]
#[ts(rename = "thread/compact")]
ThreadCompact {
params: v2::ThreadCompactParams,
response: v2::ThreadCompactResponse,
},
#[serde(rename = "turn/start")]
#[ts(rename = "turn/start")]
TurnStart {
params: v2::TurnStartParams,
response: v2::TurnStartResponse,
},
#[serde(rename = "turn/interrupt")]
#[ts(rename = "turn/interrupt")]
TurnInterrupt {
params: v2::TurnInterruptParams,
response: v2::TurnInterruptResponse,
},
#[serde(rename = "model/list")]
#[ts(rename = "model/list")]
ModelList {
params: v2::ModelListParams,
response: v2::ModelListResponse,
},
#[serde(rename = "account/login/start")]
#[ts(rename = "account/login/start")]
LoginAccount {
params: v2::LoginAccountParams,
response: v2::LoginAccountResponse,
},
#[serde(rename = "account/login/cancel")]
#[ts(rename = "account/login/cancel")]
CancelLoginAccount {
params: v2::CancelLoginAccountParams,
response: v2::CancelLoginAccountResponse,
},
#[serde(rename = "account/logout")]
#[ts(rename = "account/logout")]
LogoutAccount {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::LogoutAccountResponse,
},
#[serde(rename = "account/rateLimits/read")]
#[ts(rename = "account/rateLimits/read")]
GetAccountRateLimits {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::GetAccountRateLimitsResponse,
},
#[serde(rename = "feedback/upload")]
#[ts(rename = "feedback/upload")]
FeedbackUpload {
params: v2::FeedbackUploadParams,
response: v2::FeedbackUploadResponse,
},
#[serde(rename = "account/read")]
#[ts(rename = "account/read")]
GetAccount {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::GetAccountResponse,
},
/// DEPRECATED APIs below
Initialize {
params: v1::InitializeParams,
response: v1::InitializeResponse,
},
NewConversation {
params: v1::NewConversationParams,
response: v1::NewConversationResponse,
},
GetConversationSummary {
params: v1::GetConversationSummaryParams,
response: v1::GetConversationSummaryResponse,
},
/// List recorded Codex conversations (rollouts) with optional pagination and search.
ListConversations {
params: v1::ListConversationsParams,
response: v1::ListConversationsResponse,
},
/// Resume a recorded Codex conversation from a rollout file.
ResumeConversation {
params: v1::ResumeConversationParams,
response: v1::ResumeConversationResponse,
},
ArchiveConversation {
params: v1::ArchiveConversationParams,
response: v1::ArchiveConversationResponse,
},
SendUserMessage {
params: v1::SendUserMessageParams,
response: v1::SendUserMessageResponse,
},
SendUserTurn {
params: v1::SendUserTurnParams,
response: v1::SendUserTurnResponse,
},
InterruptConversation {
params: v1::InterruptConversationParams,
response: v1::InterruptConversationResponse,
},
AddConversationListener {
params: v1::AddConversationListenerParams,
response: v1::AddConversationSubscriptionResponse,
},
RemoveConversationListener {
params: v1::RemoveConversationListenerParams,
response: v1::RemoveConversationSubscriptionResponse,
},
GitDiffToRemote {
params: v1::GitDiffToRemoteParams,
response: v1::GitDiffToRemoteResponse,
},
LoginApiKey {
params: v1::LoginApiKeyParams,
response: v1::LoginApiKeyResponse,
},
LoginChatGpt {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::LoginChatGptResponse,
},
// DEPRECATED in favor of CancelLoginAccount
CancelLoginChatGpt {
params: v1::CancelLoginChatGptParams,
response: v1::CancelLoginChatGptResponse,
},
LogoutChatGpt {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::LogoutChatGptResponse,
},
GetAuthStatus {
params: v1::GetAuthStatusParams,
response: v1::GetAuthStatusResponse,
},
GetUserSavedConfig {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::GetUserSavedConfigResponse,
},
SetDefaultModel {
params: v1::SetDefaultModelParams,
response: v1::SetDefaultModelResponse,
},
GetUserAgent {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::GetUserAgentResponse,
},
UserInfo {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::UserInfoResponse,
},
FuzzyFileSearch {
params: FuzzyFileSearchParams,
response: FuzzyFileSearchResponse,
},
/// Execute a command (argv vector) under the server's sandbox.
ExecOneOffCommand {
params: v1::ExecOneOffCommandParams,
response: v1::ExecOneOffCommandResponse,
},
}
/// Generates an `enum ServerRequest` where each variant is a request that the
/// server can send to the client along with the corresponding params and
/// response types. It also generates helper types used by the app/server
/// infrastructure (payload enum, request constructor, and export helpers).
macro_rules! server_request_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident
),* $(,)?
) => {
paste! {
/// Request initiated from the server and sent to the client.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "method", rename_all = "camelCase")]
pub enum ServerRequest {
$(
$(#[$variant_meta])*
$variant {
#[serde(rename = "id")]
request_id: RequestId,
params: [<$variant Params>],
},
)*
}
#[derive(Debug, Clone, PartialEq, JsonSchema)]
pub enum ServerRequestPayload {
$( $variant([<$variant Params>]), )*
}
impl ServerRequestPayload {
pub fn request_with_id(self, request_id: RequestId) -> ServerRequest {
match self {
$(Self::$variant(params) => ServerRequest::$variant { request_id, params },)*
}
}
}
}
pub fn export_server_responses(
out_dir: &::std::path::Path,
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
paste! {
$(<[<$variant Response>] as ::ts_rs::TS>::export_all_to(out_dir)?;)*
}
Ok(())
}
#[allow(clippy::vec_init_then_push)]
pub fn export_server_response_schemas(
out_dir: &Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
paste! {
$(schemas.push(crate::export::write_json_schema::<[<$variant Response>]>(out_dir, stringify!([<$variant Response>]))?);)*
}
Ok(schemas)
}
#[allow(clippy::vec_init_then_push)]
pub fn export_server_param_schemas(
out_dir: &Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
paste! {
$(schemas.push(crate::export::write_json_schema::<[<$variant Params>]>(out_dir, stringify!([<$variant Params>]))?);)*
}
Ok(schemas)
}
};
}
/// Generates `ServerNotification` enum and helpers, including a JSON Schema
/// exporter for each notification.
macro_rules! server_notification_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident $(=> $wire:literal)? ( $payload:ty )
),* $(,)?
) => {
/// Notification sent from the server to the client.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ServerNotification {
$(
$(#[$variant_meta])*
$(#[serde(rename = $wire)] #[ts(rename = $wire)] #[strum(serialize = $wire)])?
$variant($payload),
)*
}
impl ServerNotification {
pub fn to_params(self) -> Result<serde_json::Value, serde_json::Error> {
match self {
$(Self::$variant(params) => serde_json::to_value(params),)*
}
}
}
impl TryFrom<JSONRPCNotification> for ServerNotification {
type Error = serde_json::Error;
fn try_from(value: JSONRPCNotification) -> Result<Self, Self::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
#[allow(clippy::vec_init_then_push)]
pub fn export_server_notification_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(schemas.push(crate::export::write_json_schema::<$payload>(out_dir, stringify!($payload))?);)*
Ok(schemas)
}
};
}
/// Notifications sent from the client to the server.
macro_rules! client_notification_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident $( ( $payload:ty ) )?
),* $(,)?
) => {
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ClientNotification {
$(
$(#[$variant_meta])*
$variant $( ( $payload ) )?,
)*
}
pub fn export_client_notification_schemas(
_out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let schemas = Vec::new();
$( $(schemas.push(crate::export::write_json_schema::<$payload>(_out_dir, stringify!($payload))?);)? )*
Ok(schemas)
}
};
}
impl TryFrom<JSONRPCRequest> for ServerRequest {
type Error = serde_json::Error;
fn try_from(value: JSONRPCRequest) -> Result<Self, Self::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
server_request_definitions! {
/// Request to approve a patch.
ApplyPatchApproval,
/// Request to exec a command.
ExecCommandApproval,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ApplyPatchApprovalParams {
pub conversation_id: ConversationId,
/// Use to correlate this with [codex_core::protocol::PatchApplyBeginEvent]
/// and [codex_core::protocol::PatchApplyEndEvent].
pub call_id: String,
pub file_changes: HashMap<PathBuf, FileChange>,
/// Optional explanatory reason (e.g. request for extra write access).
pub reason: Option<String>,
/// When set, the agent is asking the user to allow writes under this root
/// for the remainder of the session (unclear if this is honored today).
pub grant_root: Option<PathBuf>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecCommandApprovalParams {
pub conversation_id: ConversationId,
/// Use to correlate this with [codex_core::protocol::ExecCommandBeginEvent]
/// and [codex_core::protocol::ExecCommandEndEvent].
pub call_id: String,
pub command: Vec<String>,
pub cwd: PathBuf,
pub reason: Option<String>,
pub risk: Option<SandboxCommandAssessment>,
pub parsed_cmd: Vec<ParsedCommand>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct ExecCommandApprovalResponse {
pub decision: ReviewDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct ApplyPatchApprovalResponse {
pub decision: ReviewDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
pub struct FuzzyFileSearchParams {
pub query: String,
pub roots: Vec<String>,
// if provided, will cancel any previous request that used the same value
pub cancellation_token: Option<String>,
}
/// Superset of [`codex_file_search::FileMatch`]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct FuzzyFileSearchResult {
pub root: String,
pub path: String,
pub file_name: String,
pub score: u32,
pub indices: Option<Vec<u32>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct FuzzyFileSearchResponse {
pub files: Vec<FuzzyFileSearchResult>,
}
server_notification_definitions! {
/// NEW NOTIFICATIONS
ThreadStarted => "thread/started" (v2::ThreadStartedNotification),
TurnStarted => "turn/started" (v2::TurnStartedNotification),
TurnCompleted => "turn/completed" (v2::TurnCompletedNotification),
ItemStarted => "item/started" (v2::ItemStartedNotification),
ItemCompleted => "item/completed" (v2::ItemCompletedNotification),
AgentMessageDelta => "item/agentMessage/delta" (v2::AgentMessageDeltaNotification),
CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
McpToolCallProgress => "item/mcpToolCall/progress" (v2::McpToolCallProgressNotification),
AccountUpdated => "account/updated" (v2::AccountUpdatedNotification),
AccountRateLimitsUpdated => "account/rateLimits/updated" (v2::AccountRateLimitsUpdatedNotification),
#[serde(rename = "account/login/completed")]
#[ts(rename = "account/login/completed")]
#[strum(serialize = "account/login/completed")]
AccountLoginCompleted(v2::AccountLoginCompletedNotification),
/// DEPRECATED NOTIFICATIONS below
AuthStatusChange(v1::AuthStatusChangeNotification),
/// Deprecated: use `account/login/completed` instead.
LoginChatGptComplete(v1::LoginChatGptCompleteNotification),
SessionConfigured(v1::SessionConfiguredNotification),
}
client_notification_definitions! {
Initialized,
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use codex_protocol::account::PlanType;
use codex_protocol::protocol::AskForApproval;
use pretty_assertions::assert_eq;
use serde_json::json;
#[test]
fn serialize_new_conversation() -> Result<()> {
let request = ClientRequest::NewConversation {
request_id: RequestId::Integer(42),
params: v1::NewConversationParams {
model: Some("gpt-5-codex".to_string()),
model_provider: None,
profile: None,
cwd: None,
approval_policy: Some(AskForApproval::OnRequest),
sandbox: None,
config: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
include_apply_patch_tool: None,
},
};
assert_eq!(
json!({
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5-codex",
"modelProvider": null,
"profile": null,
"cwd": null,
"approvalPolicy": "on-request",
"sandbox": null,
"config": null,
"baseInstructions": null,
"includeApplyPatchTool": null
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn conversation_id_serializes_as_plain_string() -> Result<()> {
let id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
assert_eq!(
json!("67e55044-10b1-426f-9247-bb680e5fe0c8"),
serde_json::to_value(id)?
);
Ok(())
}
#[test]
fn conversation_id_deserializes_from_plain_string() -> Result<()> {
let id: ConversationId =
serde_json::from_value(json!("67e55044-10b1-426f-9247-bb680e5fe0c8"))?;
assert_eq!(
ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?,
id,
);
Ok(())
}
#[test]
fn serialize_client_notification() -> Result<()> {
let notification = ClientNotification::Initialized;
// Note there is no "params" field for this notification.
assert_eq!(
json!({
"method": "initialized",
}),
serde_json::to_value(&notification)?,
);
Ok(())
}
#[test]
fn serialize_server_request() -> Result<()> {
let conversation_id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
let params = ExecCommandApprovalParams {
conversation_id,
call_id: "call-42".to_string(),
command: vec!["echo".to_string(), "hello".to_string()],
cwd: PathBuf::from("/tmp"),
reason: Some("because tests".to_string()),
risk: None,
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "echo hello".to_string(),
}],
};
let request = ServerRequest::ExecCommandApproval {
request_id: RequestId::Integer(7),
params: params.clone(),
};
assert_eq!(
json!({
"method": "execCommandApproval",
"id": 7,
"params": {
"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"callId": "call-42",
"command": ["echo", "hello"],
"cwd": "/tmp",
"reason": "because tests",
"risk": null,
"parsedCmd": [
{
"type": "unknown",
"cmd": "echo hello"
}
]
}
}),
serde_json::to_value(&request)?,
);
let payload = ServerRequestPayload::ExecCommandApproval(params);
assert_eq!(payload.request_with_id(RequestId::Integer(7)), request);
Ok(())
}
#[test]
fn serialize_get_account_rate_limits() -> Result<()> {
let request = ClientRequest::GetAccountRateLimits {
request_id: RequestId::Integer(1),
params: None,
};
assert_eq!(
json!({
"method": "account/rateLimits/read",
"id": 1,
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_login_api_key() -> Result<()> {
let request = ClientRequest::LoginAccount {
request_id: RequestId::Integer(2),
params: v2::LoginAccountParams::ApiKey {
api_key: "secret".to_string(),
},
};
assert_eq!(
json!({
"method": "account/login/start",
"id": 2,
"params": {
"type": "apiKey",
"apiKey": "secret"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_login_chatgpt() -> Result<()> {
let request = ClientRequest::LoginAccount {
request_id: RequestId::Integer(3),
params: v2::LoginAccountParams::Chatgpt,
};
assert_eq!(
json!({
"method": "account/login/start",
"id": 3,
"params": {
"type": "chatgpt"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_logout() -> Result<()> {
let request = ClientRequest::LogoutAccount {
request_id: RequestId::Integer(4),
params: None,
};
assert_eq!(
json!({
"method": "account/logout",
"id": 4,
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_get_account() -> Result<()> {
let request = ClientRequest::GetAccount {
request_id: RequestId::Integer(5),
params: None,
};
assert_eq!(
json!({
"method": "account/read",
"id": 5,
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn account_serializes_fields_in_camel_case() -> Result<()> {
let api_key = v2::Account::ApiKey {
api_key: "secret".to_string(),
};
assert_eq!(
json!({
"type": "apiKey",
"apiKey": "secret",
}),
serde_json::to_value(&api_key)?,
);
let chatgpt = v2::Account::Chatgpt {
email: Some("user@example.com".to_string()),
plan_type: PlanType::Plus,
};
assert_eq!(
json!({
"type": "chatgpt",
"email": "user@example.com",
"planType": "plus",
}),
serde_json::to_value(&chatgpt)?,
);
Ok(())
}
#[test]
fn serialize_list_models() -> Result<()> {
let request = ClientRequest::ModelList {
request_id: RequestId::Integer(6),
params: v2::ModelListParams::default(),
};
assert_eq!(
json!({
"method": "model/list",
"id": 6,
"params": {
"limit": null,
"cursor": null
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
}
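
The deleted `ClientRequest` above relies on serde's internal tagging (`#[serde(tag = "method", rename_all = "camelCase")]`) for its wire format. A hedged, standalone sketch with a toy variant showing the same mechanism:

```rust
use serde::Serialize; // assumes serde (with derive) and serde_json
use serde_json::json;

#[derive(Serialize)]
#[serde(tag = "method", rename_all = "camelCase")]
enum ToyRequest {
    // Hypothetical variant mirroring the shape of the deleted enum's variants.
    NewConversation {
        #[serde(rename = "id")]
        request_id: u64,
        params: String,
    },
}

fn main() {
    let req = ToyRequest::NewConversation {
        request_id: 42,
        params: "hello".to_string(),
    };
    // The variant name becomes the "method" tag, flattened into one object.
    assert_eq!(
        serde_json::to_value(&req).unwrap(),
        json!({ "method": "newConversation", "id": 42, "params": "hello" })
    );
}
```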

View File

@@ -1,6 +0,0 @@
// Module declarations for the app-server protocol namespace.
// Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`.
pub mod common;
pub mod v1;
pub mod v2;

View File

@@ -1,405 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::TurnAbortReason;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use ts_rs::TS;
use uuid::Uuid;
// Reuse shared types defined in `common.rs`.
use crate::protocol::common::AuthMode;
use crate::protocol::common::GitSha;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitializeParams {
pub client_info: ClientInfo,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ClientInfo {
pub name: String,
pub title: Option<String>,
pub version: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitializeResponse {
pub user_agent: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationParams {
pub model: Option<String>,
pub model_provider: Option<String>,
pub profile: Option<String>,
pub cwd: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox: Option<SandboxMode>,
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub developer_instructions: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub compact_prompt: Option<String>,
pub include_apply_patch_tool: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
pub reasoning_effort: Option<ReasoningEffort>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
pub initial_messages: Option<Vec<EventMsg>>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(untagged)]
pub enum GetConversationSummaryParams {
RolloutPath {
#[serde(rename = "rolloutPath")]
rollout_path: PathBuf,
},
ConversationId {
#[serde(rename = "conversationId")]
conversation_id: ConversationId,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetConversationSummaryResponse {
pub summary: ConversationSummary,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListConversationsParams {
pub page_size: Option<usize>,
pub cursor: Option<String>,
pub model_providers: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ConversationSummary {
pub conversation_id: ConversationId,
pub path: PathBuf,
pub preview: String,
pub timestamp: Option<String>,
pub model_provider: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListConversationsResponse {
pub items: Vec<ConversationSummary>,
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationParams {
pub path: Option<PathBuf>,
pub conversation_id: Option<ConversationId>,
pub history: Option<Vec<ResponseItem>>,
pub overrides: Option<NewConversationParams>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationSubscriptionResponse {
#[schemars(with = "String")]
pub subscription_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationParams {
pub conversation_id: ConversationId,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationSubscriptionResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyParams {
pub api_key: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginChatGptResponse {
#[schemars(with = "String")]
pub login_id: Uuid,
pub auth_url: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GitDiffToRemoteResponse {
pub sha: GitSha,
pub diff: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct CancelLoginChatGptParams {
#[schemars(with = "String")]
pub login_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GitDiffToRemoteParams {
pub cwd: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct CancelLoginChatGptResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutChatGptParams {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutChatGptResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAuthStatusParams {
pub include_token: Option<bool>,
pub refresh_token: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecOneOffCommandParams {
pub command: Vec<String>,
pub timeout_ms: Option<u64>,
pub cwd: Option<PathBuf>,
pub sandbox_policy: Option<SandboxPolicy>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecOneOffCommandResponse {
pub exit_code: i32,
pub stdout: String,
pub stderr: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAuthStatusResponse {
pub auth_method: Option<AuthMode>,
pub auth_token: Option<String>,
pub requires_openai_auth: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserAgentResponse {
pub user_agent: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserInfoResponse {
pub alleged_user_email: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserSavedConfigResponse {
pub config: UserSavedConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelParams {
pub model: Option<String>,
pub reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelResponse {}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserSavedConfig {
pub approval_policy: Option<AskForApproval>,
pub sandbox_mode: Option<SandboxMode>,
pub sandbox_settings: Option<SandboxSettings>,
pub forced_chatgpt_workspace_id: Option<String>,
pub forced_login_method: Option<ForcedLoginMethod>,
pub model: Option<String>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub tools: Option<Tools>,
pub profile: Option<String>,
pub profiles: HashMap<String, Profile>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Profile {
pub model: Option<String>,
pub model_provider: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub chatgpt_base_url: Option<String>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Tools {
pub web_search: Option<bool>,
pub view_image: Option<bool>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SandboxSettings {
#[serde(default)]
pub writable_roots: Vec<PathBuf>,
pub network_access: Option<bool>,
pub exclude_tmpdir_env_var: Option<bool>,
pub exclude_slash_tmp: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageParams {
pub conversation_id: ConversationId,
pub items: Vec<InputItem>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnParams {
pub conversation_id: ConversationId,
pub items: Vec<InputItem>,
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
pub model: String,
pub effort: Option<ReasoningEffort>,
pub summary: ReasoningSummary,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationParams {
pub conversation_id: ConversationId,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationResponse {
pub abort_reason: TurnAbortReason,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationListenerParams {
pub conversation_id: ConversationId,
#[serde(default)]
pub experimental_raw_events: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationListenerParams {
#[schemars(with = "String")]
pub subscription_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type", content = "data")]
pub enum InputItem {
Text { text: String },
Image { image_url: String },
LocalImage { path: PathBuf },
}
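// Sketch (illustrative test, assuming serde_json): the adjacent tagging above puts
// the camelCased variant name under "type" and the variant's fields under "data".
#[test]
fn input_item_serializes_with_adjacent_tag() {
    let item = InputItem::Text { text: "hi".to_string() };
    assert_eq!(
        serde_json::to_value(&item).expect("serialize InputItem"),
        serde_json::json!({ "type": "text", "data": { "text": "hi" } })
    );
}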
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
/// Deprecated in favor of AccountLoginCompletedNotification.
pub struct LoginChatGptCompleteNotification {
#[schemars(with = "String")]
pub login_id: Uuid,
pub success: bool,
pub error: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SessionConfiguredNotification {
pub session_id: ConversationId,
pub model: String,
pub reasoning_effort: Option<ReasoningEffort>,
pub history_log_id: u64,
#[ts(type = "number")]
pub history_entry_count: usize,
pub initial_messages: Option<Vec<EventMsg>>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
/// Deprecated notification. Use AccountUpdatedNotification instead.
pub struct AuthStatusChangeNotification {
pub auth_method: Option<AuthMode>,
}

View File

@@ -1,699 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use crate::protocol::common::AuthMode;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::user_input::UserInput as CoreUserInput;
use mcp_types::ContentBlock as McpContentBlock;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value as JsonValue;
use ts_rs::TS;
// Macro to declare a camelCase-serialized API v2 enum mirroring a core enum
// whose serde representation tends to use kebab-case.
macro_rules! v2_enum_from_core {
(
pub enum $Name:ident from $Src:path { $( $Variant:ident ),+ $(,)? }
) => {
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum $Name { $( $Variant ),+ }
impl $Name {
pub fn to_core(self) -> $Src {
match self { $( $Name::$Variant => <$Src>::$Variant ),+ }
}
}
impl From<$Src> for $Name {
fn from(value: $Src) -> Self {
match value { $( <$Src>::$Variant => $Name::$Variant ),+ }
}
}
};
}
v2_enum_from_core!(
pub enum AskForApproval from codex_protocol::protocol::AskForApproval {
UnlessTrusted, OnFailure, OnRequest, Never
}
);
v2_enum_from_core!(
pub enum SandboxMode from codex_protocol::config_types::SandboxMode {
ReadOnly, WorkspaceWrite, DangerFullAccess
}
);
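// Sketch of what the AskForApproval invocation above expands to (abbreviated):
//
// #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
// #[serde(rename_all = "camelCase")]
// #[ts(export_to = "v2/")]
// pub enum AskForApproval { UnlessTrusted, OnFailure, OnRequest, Never }
//
// impl AskForApproval {
//     pub fn to_core(self) -> codex_protocol::protocol::AskForApproval { /* 1:1 mapping */ }
// }
// impl From<codex_protocol::protocol::AskForApproval> for AskForApproval { /* inverse */ }
//
// so a v2 client sends "onRequest" on the wire while the core enum keeps its own
// (typically kebab-case) representation.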
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(tag = "mode", rename_all = "camelCase")]
#[ts(tag = "mode")]
#[ts(export_to = "v2/")]
pub enum SandboxPolicy {
DangerFullAccess,
ReadOnly,
WorkspaceWrite {
#[serde(default)]
writable_roots: Vec<PathBuf>,
#[serde(default)]
network_access: bool,
#[serde(default)]
exclude_tmpdir_env_var: bool,
#[serde(default)]
exclude_slash_tmp: bool,
},
}
impl SandboxPolicy {
pub fn to_core(&self) -> codex_protocol::protocol::SandboxPolicy {
match self {
SandboxPolicy::DangerFullAccess => {
codex_protocol::protocol::SandboxPolicy::DangerFullAccess
}
SandboxPolicy::ReadOnly => codex_protocol::protocol::SandboxPolicy::ReadOnly,
SandboxPolicy::WorkspaceWrite {
writable_roots,
network_access,
exclude_tmpdir_env_var,
exclude_slash_tmp,
} => codex_protocol::protocol::SandboxPolicy::WorkspaceWrite {
writable_roots: writable_roots.clone(),
network_access: *network_access,
exclude_tmpdir_env_var: *exclude_tmpdir_env_var,
exclude_slash_tmp: *exclude_slash_tmp,
},
}
}
}
impl From<codex_protocol::protocol::SandboxPolicy> for SandboxPolicy {
fn from(value: codex_protocol::protocol::SandboxPolicy) -> Self {
match value {
codex_protocol::protocol::SandboxPolicy::DangerFullAccess => {
SandboxPolicy::DangerFullAccess
}
codex_protocol::protocol::SandboxPolicy::ReadOnly => SandboxPolicy::ReadOnly,
codex_protocol::protocol::SandboxPolicy::WorkspaceWrite {
writable_roots,
network_access,
exclude_tmpdir_env_var,
exclude_slash_tmp,
} => SandboxPolicy::WorkspaceWrite {
writable_roots,
network_access,
exclude_tmpdir_env_var,
exclude_slash_tmp,
},
}
}
}
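// Sketch (illustrative test, assuming serde_json): with the inline "mode" tag the
// variant fields serialize flat; variant names are camelCased, field names stay snake_case.
#[test]
fn sandbox_policy_serializes_with_mode_tag() {
    let policy = SandboxPolicy::WorkspaceWrite {
        writable_roots: Vec::new(),
        network_access: true,
        exclude_tmpdir_env_var: false,
        exclude_slash_tmp: false,
    };
    assert_eq!(
        serde_json::to_value(&policy).expect("serialize SandboxPolicy"),
        serde_json::json!({
            "mode": "workspaceWrite",
            "writable_roots": [],
            "network_access": true,
            "exclude_tmpdir_env_var": false,
            "exclude_slash_tmp": false
        })
    );
}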
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum Account {
#[serde(rename = "apiKey", rename_all = "camelCase")]
#[ts(rename = "apiKey", rename_all = "camelCase")]
ApiKey { api_key: String },
#[serde(rename = "chatgpt", rename_all = "camelCase")]
#[ts(rename = "chatgpt", rename_all = "camelCase")]
Chatgpt {
email: Option<String>,
plan_type: PlanType,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum LoginAccountParams {
#[serde(rename = "apiKey", rename_all = "camelCase")]
#[ts(rename = "apiKey", rename_all = "camelCase")]
ApiKey {
#[serde(rename = "apiKey")]
#[ts(rename = "apiKey")]
api_key: String,
},
#[serde(rename = "chatgpt")]
#[ts(rename = "chatgpt")]
Chatgpt,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum LoginAccountResponse {
#[serde(rename = "apiKey", rename_all = "camelCase")]
#[ts(rename = "apiKey", rename_all = "camelCase")]
ApiKey {},
#[serde(rename = "chatgpt", rename_all = "camelCase")]
#[ts(rename = "chatgpt", rename_all = "camelCase")]
Chatgpt {
// Use plain String for identifiers to avoid TS/JSON Schema quirks around uuid-specific types.
// Convert to/from UUIDs at the application layer as needed.
login_id: String,
/// URL the client should open in a browser to initiate the OAuth flow.
auth_url: String,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CancelLoginAccountParams {
pub login_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CancelLoginAccountResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct LogoutAccountResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct GetAccountRateLimitsResponse {
pub rate_limits: RateLimitSnapshot,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct GetAccountResponse {
pub account: Account,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ModelListParams {
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct Model {
pub id: String,
pub model: String,
pub display_name: String,
pub description: String,
pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
pub default_reasoning_effort: ReasoningEffort,
// Only one model should be marked as default.
pub is_default: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ReasoningEffortOption {
pub reasoning_effort: ReasoningEffort,
pub description: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ModelListResponse {
pub data: Vec<Model>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If None, there are no more items to return.
pub next_cursor: Option<String>,
}
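// Sketch of the intended client-side pagination loop (RPC plumbing elided; `send`
// is a hypothetical helper that issues the request and decodes the result):
//
// let mut cursor: Option<String> = None;
// let mut models: Vec<Model> = Vec::new();
// loop {
//     let page: ModelListResponse =
//         send("model/list", ModelListParams { cursor: cursor.take(), limit: Some(50) })?;
//     models.extend(page.data);
//     match page.next_cursor {
//         Some(next) => cursor = Some(next),
//         None => break,
//     }
// }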
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FeedbackUploadParams {
pub classification: String,
pub reason: Option<String>,
pub conversation_id: Option<ConversationId>,
pub include_logs: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FeedbackUploadResponse {
pub thread_id: String,
}
// === Threads, Turns, and Items ===
// Thread APIs
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadStartParams {
pub model: Option<String>,
pub model_provider: Option<String>,
pub cwd: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox: Option<SandboxMode>,
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadStartResponse {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadResumeParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadResumeResponse {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadArchiveParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadArchiveResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadListParams {
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
/// Optional provider filter; when set, only sessions recorded under these
/// providers are returned. When present but empty, includes all providers.
pub model_providers: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadListResponse {
pub data: Vec<Thread>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If None, there are no more items to return.
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadCompactParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadCompactResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct Thread {
pub id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AccountUpdatedNotification {
pub auth_mode: Option<AuthMode>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct Turn {
pub id: String,
pub items: Vec<ThreadItem>,
pub status: TurnStatus,
pub error: Option<TurnError>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum TurnStatus {
Completed,
Interrupted,
Failed,
InProgress,
}
// Turn APIs
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartParams {
pub thread_id: String,
pub input: Vec<UserInput>,
/// Override the working directory for this turn and subsequent turns.
pub cwd: Option<PathBuf>,
/// Override the approval policy for this turn and subsequent turns.
pub approval_policy: Option<AskForApproval>,
/// Override the sandbox policy for this turn and subsequent turns.
pub sandbox_policy: Option<SandboxPolicy>,
/// Override the model for this turn and subsequent turns.
pub model: Option<String>,
/// Override the reasoning effort for this turn and subsequent turns.
pub effort: Option<ReasoningEffort>,
/// Override the reasoning summary for this turn and subsequent turns.
pub summary: Option<ReasoningSummary>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartResponse {
pub turn: Turn,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnInterruptParams {
pub thread_id: String,
pub turn_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnInterruptResponse {}
// User input types
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum UserInput {
Text { text: String },
Image { url: String },
LocalImage { path: PathBuf },
}
impl UserInput {
pub fn into_core(self) -> CoreUserInput {
match self {
UserInput::Text { text } => CoreUserInput::Text { text },
UserInput::Image { url } => CoreUserInput::Image { image_url: url },
UserInput::LocalImage { path } => CoreUserInput::LocalImage { path },
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum ThreadItem {
UserMessage {
id: String,
content: Vec<UserInput>,
},
AgentMessage {
id: String,
text: String,
},
Reasoning {
id: String,
text: String,
},
CommandExecution {
id: String,
command: String,
aggregated_output: String,
exit_code: Option<i32>,
status: CommandExecutionStatus,
duration_ms: Option<i64>,
},
FileChange {
id: String,
changes: Vec<FileUpdateChange>,
status: PatchApplyStatus,
},
McpToolCall {
id: String,
server: String,
tool: String,
status: McpToolCallStatus,
arguments: JsonValue,
result: Option<McpToolCallResult>,
error: Option<McpToolCallError>,
},
WebSearch {
id: String,
query: String,
},
TodoList {
id: String,
items: Vec<TodoItem>,
},
ImageView {
id: String,
path: String,
},
CodeReview {
id: String,
review: String,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum CommandExecutionStatus {
InProgress,
Completed,
Failed,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FileUpdateChange {
pub path: String,
pub kind: PatchChangeKind,
pub diff: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum PatchChangeKind {
Add,
Delete,
Update,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum PatchApplyStatus {
Completed,
Failed,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum McpToolCallStatus {
InProgress,
Completed,
Failed,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpToolCallResult {
pub content: Vec<McpContentBlock>,
pub structured_content: JsonValue,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpToolCallError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TodoItem {
pub id: String,
pub text: String,
pub completed: bool,
}
// === Server Notifications ===
// Thread/Turn lifecycle notifications and item progress events
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadStartedNotification {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartedNotification {
pub turn: Turn,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct Usage {
pub input_tokens: i32,
pub cached_input_tokens: i32,
pub output_tokens: i32,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnCompletedNotification {
pub turn: Turn,
// TODO: should usage be stored on the Turn object, and we return that instead?
pub usage: Usage,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ItemStartedNotification {
pub item: ThreadItem,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ItemCompletedNotification {
pub item: ThreadItem,
}
// Item-specific progress notifications
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AgentMessageDeltaNotification {
pub item_id: String,
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecutionOutputDeltaNotification {
pub item_id: String,
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpToolCallProgressNotification {
pub item_id: String,
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AccountRateLimitsUpdatedNotification {
pub rate_limits: RateLimitSnapshot,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct RateLimitSnapshot {
pub primary: Option<RateLimitWindow>,
pub secondary: Option<RateLimitWindow>,
}
impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
fn from(value: CoreRateLimitSnapshot) -> Self {
Self {
primary: value.primary.map(RateLimitWindow::from),
secondary: value.secondary.map(RateLimitWindow::from),
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct RateLimitWindow {
pub used_percent: i32,
pub window_duration_mins: Option<i64>,
pub resets_at: Option<i64>,
}
impl From<CoreRateLimitWindow> for RateLimitWindow {
fn from(value: CoreRateLimitWindow) -> Self {
Self {
used_percent: value.used_percent.round() as i32,
window_duration_mins: value.window_minutes,
resets_at: value.resets_at,
}
}
}
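// Sketch (illustrative test, assuming the three core fields used above are the only
// ones): the conversion rounds the core float percentage to the nearest integer and
// renames window_minutes to window_duration_mins.
#[test]
fn rate_limit_window_conversion_rounds_percent() {
    let core = CoreRateLimitWindow {
        used_percent: 41.6,
        window_minutes: Some(60),
        resets_at: None,
    };
    let v2 = RateLimitWindow::from(core);
    assert_eq!(v2.used_percent, 42);
    assert_eq!(v2.window_duration_mins, Some(60));
}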
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AccountLoginCompletedNotification {
// Use plain String for identifiers to avoid TS/JSON Schema quirks around uuid-specific types.
// Convert to/from UUIDs at the application layer as needed.
pub login_id: Option<String>,
pub success: bool,
pub error: Option<String>,
}

File diff suppressed because it is too large

View File

@@ -64,79 +64,64 @@ impl MessageProcessor {
pub(crate) async fn process_request(&mut self, request: JSONRPCRequest) {
let request_id = request.id.clone();
let request_json = match serde_json::to_value(&request) {
Ok(request_json) => request_json,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("Invalid request: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
if let Ok(request_json) = serde_json::to_value(request)
&& let Ok(codex_request) = serde_json::from_value::<ClientRequest>(request_json)
{
match codex_request {
// Handle Initialize internally so CodexMessageProcessor does not have to concern
// itself with the `initialized` bool.
ClientRequest::Initialize { request_id, params } => {
if self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Already initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
} else {
let ClientInfo {
name,
title: _title,
version,
} = params.client_info;
let user_agent_suffix = format!("{name}; {version}");
if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
*suffix = Some(user_agent_suffix);
}
let codex_request = match serde_json::from_value::<ClientRequest>(request_json) {
Ok(codex_request) => codex_request,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("Invalid request: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let user_agent = get_codex_user_agent();
let response = InitializeResponse { user_agent };
self.outgoing.send_response(request_id, response).await;
match codex_request {
// Handle Initialize internally so CodexMessageProcessor does not have to concern
// itself with the `initialized` bool.
ClientRequest::Initialize { request_id, params } => {
if self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Already initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
} else {
let ClientInfo {
name,
title: _title,
version,
} = params.client_info;
let user_agent_suffix = format!("{name}; {version}");
if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
*suffix = Some(user_agent_suffix);
self.initialized = true;
return;
}
}
_ => {
if !self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Not initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
let user_agent = get_codex_user_agent();
let response = InitializeResponse { user_agent };
self.outgoing.send_response(request_id, response).await;
self.initialized = true;
return;
}
}
_ => {
if !self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Not initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
}
self.codex_message_processor
.process_request(codex_request)
.await;
} else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Invalid request".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
self.codex_message_processor
.process_request(codex_request)
.await;
}
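// Sketch of the handshake this gating enforces, on the wire (the user-agent value
// is illustrative; field casing follows InitializeParams/InitializeResponse):
//
//   -> {"id": 1, "method": "initialize",
//       "params": {"clientInfo": {"name": "example-client", "title": null, "version": "1.0.0"}}}
//   <- {"id": 1, "result": {"userAgent": "..."}}
//
// Any other request sent before this handshake is rejected with a "Not initialized"
// error, and a second `initialize` is rejected with "Already initialized".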
pub(crate) async fn process_notification(&self, notification: JSONRPCNotification) {

View File

@@ -1,12 +1,11 @@
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
builtin_model_presets(auth_mode)
pub fn supported_models() -> Vec<Model> {
builtin_model_presets(None)
.into_iter()
.map(model_from_preset)
.collect()

View File

@@ -141,13 +141,9 @@ pub(crate) struct OutgoingError {
#[cfg(test)]
mod tests {
use codex_app_server_protocol::AccountLoginCompletedNotification;
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
use codex_app_server_protocol::AccountUpdatedNotification;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use pretty_assertions::assert_eq;
use serde_json::json;
use uuid::Uuid;
@@ -179,78 +175,28 @@ mod tests {
);
}
#[test]
fn verify_account_login_completed_notification_serialization() {
let notification =
ServerNotification::AccountLoginCompleted(AccountLoginCompletedNotification {
login_id: Some(Uuid::nil().to_string()),
success: true,
error: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/login/completed",
"params": {
"loginId": Uuid::nil().to_string(),
"success": true,
"error": null,
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the notification serializes correctly"),
"ensure the notification serializes correctly"
);
}
#[test]
fn verify_account_rate_limits_notification_serialization() {
let notification =
ServerNotification::AccountRateLimitsUpdated(AccountRateLimitsUpdatedNotification {
rate_limits: RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 25,
window_duration_mins: Some(15),
resets_at: Some(123),
}),
secondary: None,
},
});
let notification = ServerNotification::AccountRateLimitsUpdated(RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 25.0,
window_minutes: Some(15),
resets_at: Some(123),
}),
secondary: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/rateLimits/updated",
"params": {
"rateLimits": {
"primary": {
"usedPercent": 25,
"windowDurationMins": 15,
"resetsAt": 123
},
"secondary": null
}
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the notification serializes correctly"),
"ensure the notification serializes correctly"
);
}
#[test]
fn verify_account_updated_notification_serialization() {
let notification = ServerNotification::AccountUpdated(AccountUpdatedNotification {
auth_mode: Some(AuthMode::ApiKey),
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/updated",
"params": {
"authMode": "apikey"
"primary": {
"used_percent": 25.0,
"window_minutes": 15,
"resets_at": 123,
},
"secondary": null,
},
}),
serde_json::to_value(jsonrpc_notification)

View File

@@ -13,7 +13,6 @@ base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true }
codex-protocol = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [
@@ -22,5 +21,4 @@ tokio = { workspace = true, features = [
"process",
"rt-multi-thread",
] }
uuid = { workspace = true }
wiremock = { workspace = true }

View File

@@ -2,7 +2,6 @@ mod auth_fixtures;
mod mcp_process;
mod mock_model_server;
mod responses;
mod rollout;
pub use auth_fixtures::ChatGptAuthFixture;
pub use auth_fixtures::ChatGptIdTokenClaims;
@@ -11,11 +10,9 @@ pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use mcp_process::McpProcess;
pub use mock_model_server::create_mock_chat_completions_server;
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_final_assistant_message_sse_response;
pub use responses::create_shell_sse_response;
pub use rollout::create_fake_rollout;
use serde::de::DeserializeOwned;
pub fn to_response<T: DeserializeOwned>(response: JSONRPCResponse) -> anyhow::Result<T> {

View File

@@ -14,36 +14,30 @@ use anyhow::Context;
use assert_cmd::prelude::*;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientNotification;
use codex_app_server_protocol::FeedbackUploadParams;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::RemoveConversationListenerParams;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::UploadFeedbackParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use std::process::Command as StdCommand;
use tokio::process::Command;
@@ -250,9 +244,9 @@ impl McpProcess {
}
/// Send a `feedback/upload` JSON-RPC request.
pub async fn send_feedback_upload_request(
pub async fn send_upload_feedback_request(
&mut self,
params: FeedbackUploadParams,
params: UploadFeedbackParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("feedback/upload", params).await
@@ -281,46 +275,10 @@ impl McpProcess {
self.send_request("listConversations", params).await
}
/// Send a `thread/start` JSON-RPC request.
pub async fn send_thread_start_request(
&mut self,
params: ThreadStartParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/start", params).await
}
/// Send a `thread/resume` JSON-RPC request.
pub async fn send_thread_resume_request(
&mut self,
params: ThreadResumeParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/resume", params).await
}
/// Send a `thread/archive` JSON-RPC request.
pub async fn send_thread_archive_request(
&mut self,
params: ThreadArchiveParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/archive", params).await
}
/// Send a `thread/list` JSON-RPC request.
pub async fn send_thread_list_request(
&mut self,
params: ThreadListParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/list", params).await
}
/// Send a `model/list` JSON-RPC request.
pub async fn send_list_models_request(
&mut self,
params: ModelListParams,
params: ListModelsParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("model/list", params).await
@@ -349,24 +307,6 @@ impl McpProcess {
self.send_request("loginChatGpt", None).await
}
/// Send a `turn/start` JSON-RPC request (v2).
pub async fn send_turn_start_request(
&mut self,
params: TurnStartParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/start", params).await
}
/// Send a `turn/interrupt` JSON-RPC request (v2).
pub async fn send_turn_interrupt_request(
&mut self,
params: TurnInterruptParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/interrupt", params).await
}
/// Send a `cancelLoginChatGpt` JSON-RPC request.
pub async fn send_cancel_login_chat_gpt_request(
&mut self,
@@ -381,40 +321,6 @@ impl McpProcess {
self.send_request("logoutChatGpt", None).await
}
/// Send an `account/logout` JSON-RPC request.
pub async fn send_logout_account_request(&mut self) -> anyhow::Result<i64> {
self.send_request("account/logout", None).await
}
/// Send an `account/login/start` JSON-RPC request for API key login.
pub async fn send_login_account_api_key_request(
&mut self,
api_key: &str,
) -> anyhow::Result<i64> {
let params = serde_json::json!({
"type": "apiKey",
"apiKey": api_key,
});
self.send_request("account/login/start", Some(params)).await
}
/// Send an `account/login/start` JSON-RPC request for ChatGPT login.
pub async fn send_login_account_chatgpt_request(&mut self) -> anyhow::Result<i64> {
let params = serde_json::json!({
"type": "chatgpt"
});
self.send_request("account/login/start", Some(params)).await
}
/// Send an `account/login/cancel` JSON-RPC request.
pub async fn send_cancel_login_account_request(
&mut self,
params: CancelLoginAccountParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("account/login/cancel", params).await
}
/// Send a `fuzzyFileSearch` JSON-RPC request.
pub async fn send_fuzzy_file_search_request(
&mut self,

View File

@@ -29,25 +29,6 @@ pub async fn create_mock_chat_completions_server(responses: Vec<String>) -> Mock
server
}
/// Same as `create_mock_chat_completions_server` but does not enforce an
/// expectation on the number of calls.
pub async fn create_mock_chat_completions_server_unchecked(responses: Vec<String>) -> MockServer {
let server = MockServer::start().await;
let seq_responder = SeqResponder {
num_calls: AtomicUsize::new(0),
responses,
};
Mock::given(method("POST"))
.and(path("/v1/chat/completions"))
.respond_with(seq_responder)
.mount(&server)
.await;
server
}
struct SeqResponder {
num_calls: AtomicUsize,
responses: Vec<String>,

View File

@@ -1,82 +0,0 @@
use anyhow::Result;
use codex_protocol::ConversationId;
use codex_protocol::protocol::SessionMeta;
use codex_protocol::protocol::SessionSource;
use serde_json::json;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use uuid::Uuid;
/// Create a minimal rollout file under `CODEX_HOME/sessions/YYYY/MM/DD/`.
///
/// - `filename_ts` is the filename timestamp component in `YYYY-MM-DDThh-mm-ss` format.
/// - `meta_rfc3339` is the envelope timestamp used in JSON lines.
/// - `preview` is the user message preview text.
/// - `model_provider` optionally sets the provider in the session meta payload.
///
/// Returns the generated conversation/session UUID as a string.
pub fn create_fake_rollout(
codex_home: &Path,
filename_ts: &str,
meta_rfc3339: &str,
preview: &str,
model_provider: Option<&str>,
) -> Result<String> {
let uuid = Uuid::new_v4();
let uuid_str = uuid.to_string();
let conversation_id = ConversationId::from_string(&uuid_str)?;
// sessions/YYYY/MM/DD derived from filename_ts (YYYY-MM-DDThh-mm-ss)
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
let dir = codex_home.join("sessions").join(year).join(month).join(day);
fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
// Build JSONL lines
let payload = serde_json::to_value(SessionMeta {
id: conversation_id,
timestamp: meta_rfc3339.to_string(),
cwd: PathBuf::from("/"),
originator: "codex".to_string(),
cli_version: "0.0.0".to_string(),
instructions: None,
source: SessionSource::Cli,
model_provider: model_provider.map(str::to_string),
})?;
let lines = [
json!({
"timestamp": meta_rfc3339,
"type": "session_meta",
"payload": payload
})
.to_string(),
json!({
"timestamp": meta_rfc3339,
"type":"response_item",
"payload": {
"type":"message",
"role":"user",
"content":[{"type":"input_text","text": preview}]
}
})
.to_string(),
json!({
"timestamp": meta_rfc3339,
"type":"event_msg",
"payload": {
"type":"user_message",
"message": preview,
"kind": "plain"
}
})
.to_string(),
];
fs::write(file_path, lines.join("\n") + "\n")?;
Ok(uuid_str)
}

View File

@@ -12,7 +12,7 @@ use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(20);
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<()> {

View File

@@ -146,7 +146,7 @@ fn create_config_toml(codex_home: &Path, server_uri: String) -> std::io::Result<
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"

View File

@@ -1,6 +1,5 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
@@ -16,8 +15,12 @@ use codex_core::protocol::EventMsg;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::fs;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
use uuid::Uuid;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -354,3 +357,70 @@ async fn test_list_and_resume_conversations() -> Result<()> {
Ok(())
}
fn create_fake_rollout(
codex_home: &Path,
filename_ts: &str,
meta_rfc3339: &str,
preview: &str,
model_provider: Option<&str>,
) -> Result<()> {
let uuid = Uuid::new_v4();
// sessions/YYYY/MM/DD/ derived from filename_ts (YYYY-MM-DDThh-mm-ss)
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
let dir = codex_home.join("sessions").join(year).join(month).join(day);
fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
let mut lines = Vec::new();
// Meta line with timestamp (flattened meta in payload for new schema)
let mut payload = json!({
"id": uuid,
"timestamp": meta_rfc3339,
"cwd": "/",
"originator": "codex",
"cli_version": "0.0.0",
"instructions": null,
});
if let Some(provider) = model_provider {
payload["model_provider"] = json!(provider);
}
lines.push(
json!({
"timestamp": meta_rfc3339,
"type": "session_meta",
"payload": payload
})
.to_string(),
);
// Minimal user message entry as a persisted response item (with envelope timestamp)
lines.push(
json!({
"timestamp": meta_rfc3339,
"type":"response_item",
"payload": {
"type":"message",
"role":"user",
"content":[{"type":"input_text","text": preview}]
}
})
.to_string(),
);
// Add a matching user message event line to satisfy filters
lines.push(
json!({
"timestamp": meta_rfc3339,
"type":"event_msg",
"payload": {
"type":"user_message",
"message": preview,
"kind": "plain"
}
})
.to_string(),
);
fs::write(file_path, lines.join("\n") + "\n")?;
Ok(())
}
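// Sketch of how a test can seed a session with the helper above (values illustrative;
// note the filename timestamp uses YYYY-MM-DDThh-mm-ss while the envelope is RFC 3339):
//
// let codex_home = TempDir::new()?;
// create_fake_rollout(
//     codex_home.path(),
//     "2025-01-02T03-04-05",
//     "2025-01-02T03:04:05Z",
//     "hello world",
//     Some("mock_provider"),
// )?;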

View File

@@ -13,4 +13,3 @@ mod send_message;
mod set_default_model;
mod user_agent;
mod user_info;
mod v2;

View File

@@ -6,9 +6,9 @@ use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::ListModelsResponse;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_app_server_protocol::RequestId;
use codex_protocol::config_types::ReasoningEffort;
@@ -27,8 +27,8 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: Some(100),
.send_list_models_request(ListModelsParams {
page_size: Some(100),
cursor: None,
})
.await?;
@@ -39,17 +39,14 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
)
.await??;
let ModelListResponse {
data: items,
next_cursor,
} = to_response::<ModelListResponse>(response)?;
let ListModelsResponse { items, next_cursor } = to_response::<ListModelsResponse>(response)?;
let expected_models = vec![
Model {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for codex.".to_string(),
description: "Optimized for coding tasks with many tools.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
@@ -114,8 +111,8 @@ async fn list_models_pagination_works() -> Result<()> {
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
.send_list_models_request(ListModelsParams {
page_size: Some(1),
cursor: None,
})
.await?;
@@ -126,18 +123,18 @@ async fn list_models_pagination_works() -> Result<()> {
)
.await??;
let ModelListResponse {
data: first_items,
let ListModelsResponse {
items: first_items,
next_cursor: first_cursor,
} = to_response::<ModelListResponse>(first_response)?;
} = to_response::<ListModelsResponse>(first_response)?;
assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "gpt-5-codex");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
let second_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
.send_list_models_request(ListModelsParams {
page_size: Some(1),
cursor: Some(next_cursor.clone()),
})
.await?;
@@ -148,10 +145,10 @@ async fn list_models_pagination_works() -> Result<()> {
)
.await??;
let ModelListResponse {
data: second_items,
let ListModelsResponse {
items: second_items,
next_cursor: second_cursor,
} = to_response::<ModelListResponse>(second_response)?;
} = to_response::<ListModelsResponse>(second_response)?;
assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, "gpt-5");
@@ -167,8 +164,8 @@ async fn list_models_rejects_invalid_cursor() -> Result<()> {
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: None,
.send_list_models_request(ListModelsParams {
page_size: None,
cursor: Some("invalid".to_string()),
})
.await?;

View File

@@ -7,10 +7,10 @@ use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
@@ -143,13 +143,13 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
let expected = GetAccountRateLimitsResponse {
rate_limits: RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 42,
window_duration_mins: Some(60),
used_percent: 42.0,
window_minutes: Some(60),
resets_at: Some(primary_reset_timestamp),
}),
secondary: Some(RateLimitWindow {
used_percent: 5,
window_duration_mins: Some(1440),
used_percent: 5.0,
window_minutes: Some(1440),
resets_at: Some(secondary_reset_timestamp),
}),
},

View File

@@ -313,11 +313,10 @@ fn assert_instructions_message(item: &ResponseItem) {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "user");
let texts = content_texts(content);
let is_instructions = texts
.iter()
.any(|text| text.starts_with("# AGENTS.md instructions for "));
assert!(
is_instructions,
texts
.iter()
.any(|text| text.contains("<user_instructions>")),
"expected instructions message, got {texts:?}"
);
}

View File

@@ -1,309 +0,0 @@
use anyhow::Result;
use anyhow::bail;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginAccountResponse;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::GetAuthStatusResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginAccountResponse;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use pretty_assertions::assert_eq;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
// Helper to create a minimal config.toml for the app server
fn create_config_toml(
codex_home: &Path,
forced_method: Option<&str>,
forced_workspace_id: Option<&str>,
) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let forced_line = if let Some(method) = forced_method {
format!("forced_login_method = \"{method}\"\n")
} else {
String::new()
};
let forced_workspace_line = if let Some(ws) = forced_workspace_id {
format!("forced_chatgpt_workspace_id = \"{ws}\"\n")
} else {
String::new()
};
let contents = format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
{forced_line}
{forced_workspace_line}
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "http://127.0.0.1:0/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
);
std::fs::write(config_toml, contents)
}
#[tokio::test]
async fn logout_account_removes_auth_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), None, None)?;
login_with_api_key(
codex_home.path(),
"sk-test-key",
AuthCredentialsStoreMode::File,
)?;
assert!(codex_home.path().join("auth.json").exists());
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let id = mcp.send_logout_account_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(id)),
)
.await??;
let _ok: LogoutAccountResponse = to_response(resp)?;
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
assert!(
payload.auth_mode.is_none(),
"auth_method should be None after logout"
);
assert!(
!codex_home.path().join("auth.json").exists(),
"auth.json should be deleted"
);
let status_id = mcp
.send_get_auth_status_request(GetAuthStatusParams {
include_token: Some(true),
refresh_token: Some(false),
})
.await?;
let status_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(status_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(status_resp)?;
assert_eq!(status.auth_method, None);
assert_eq!(status.auth_token, None);
Ok(())
}
#[tokio::test]
async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), None, None)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let req_id = mcp
.send_login_account_api_key_request("sk-test-key")
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
assert_eq!(login, LoginAccountResponse::ApiKey {});
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/login/completed"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountLoginCompleted(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
pretty_assertions::assert_eq!(payload.login_id, None);
pretty_assertions::assert_eq!(payload.success, true);
pretty_assertions::assert_eq!(payload.error, None);
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
pretty_assertions::assert_eq!(payload.auth_mode, Some(AuthMode::ApiKey));
assert!(codex_home.path().join("auth.json").exists());
Ok(())
}
#[tokio::test]
async fn login_account_api_key_rejected_when_forced_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), Some("chatgpt"), None)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_login_account_api_key_request("sk-test-key")
.await?;
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(
err.error.message,
"API key login is disabled. Use ChatGPT login instead."
);
Ok(())
}
#[tokio::test]
async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), Some("api"), None)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_account_chatgpt_request().await?;
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(
err.error.message,
"ChatGPT login is disabled. Use API key login instead."
);
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_account_chatgpt_start() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), None, None)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_account_chatgpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
let LoginAccountResponse::Chatgpt { login_id, auth_url } = login else {
bail!("unexpected login response: {login:?}");
};
assert!(
auth_url.contains("redirect_uri=http%3A%2F%2Flocalhost"),
"auth_url should contain a redirect_uri to localhost"
);
let cancel_id = mcp
.send_cancel_login_account_request(CancelLoginAccountParams {
login_id: login_id.clone(),
})
.await?;
let cancel_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
)
.await??;
let _ok: CancelLoginAccountResponse = to_response(cancel_resp)?;
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/login/completed"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountLoginCompleted(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
pretty_assertions::assert_eq!(payload.login_id, Some(login_id));
pretty_assertions::assert_eq!(payload.success, false);
assert!(
payload.error.is_some(),
"expected a non-empty error on cancel"
);
let maybe_updated = timeout(
Duration::from_millis(500),
mcp.read_stream_until_notification_message("account/updated"),
)
.await;
assert!(
maybe_updated.is_err(),
"account/updated should not be emitted when login is cancelled"
);
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_account_chatgpt_includes_forced_workspace_query_param() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), None, Some("ws-forced"))?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_account_chatgpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
let LoginAccountResponse::Chatgpt { auth_url, .. } = login else {
bail!("unexpected login response: {login:?}");
};
assert!(
auth_url.contains("allowed_workspace_id=ws-forced"),
"auth URL should include forced workspace"
);
Ok(())
}
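Every test in this file repeats one round trip: send a JSON-RPC request, then read the matching response under DEFAULT_READ_TIMEOUT and deserialize its payload. A minimal sketch of that shared shape, reusing the McpProcess and to_response helpers from app_test_support (the generic round_trip wrapper itself is hypothetical):

// Sketch: the request/response round trip every test above inlines with
// its concrete send_*_request method.
async fn round_trip<T: serde::de::DeserializeOwned>(
    mcp: &mut McpProcess,
    request_id: i64,
) -> anyhow::Result<T> {
    // Bound the read so a missing response fails the test instead of hanging.
    let resp: JSONRPCResponse = tokio::time::timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    Ok(to_response(resp)?)
}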

View File

@@ -1,7 +0,0 @@
mod account;
mod thread_archive;
mod thread_list;
mod thread_resume;
mod thread_start;
mod turn_interrupt;
mod turn_start;

View File

@@ -1,93 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadArchiveResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_core::find_conversation_path_by_id_str;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a thread.
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
assert!(!thread.id.is_empty());
// Locate the rollout path recorded for this thread id.
let rollout_path = find_conversation_path_by_id_str(codex_home.path(), &thread.id)
.await?
.expect("expected rollout path for thread id to exist");
assert!(
rollout_path.exists(),
"expected {} to exist",
rollout_path.display()
);
// Archive the thread.
let archive_id = mcp
.send_thread_archive_request(ThreadArchiveParams {
thread_id: thread.id.clone(),
})
.await?;
let archive_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(archive_id)),
)
.await??;
let _: ThreadArchiveResponse = to_response::<ThreadArchiveResponse>(archive_resp)?;
// Verify file moved.
let archived_directory = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
// The archived file keeps the original filename (rollout-...-<id>.jsonl).
let archived_rollout_path =
archived_directory.join(rollout_path.file_name().expect("rollout file name"));
assert!(
!rollout_path.exists(),
"expected rollout path {} to be moved",
rollout_path.display()
);
assert!(
archived_rollout_path.exists(),
"expected archived rollout path {} to exist",
archived_rollout_path.display()
);
Ok(())
}
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(config_toml, config_contents())
}
fn config_contents() -> &'static str {
r#"model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
"#
}
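The assertion pair above pins down the archive contract: the rollout file leaves its original sessions location and reappears under ARCHIVED_SESSIONS_SUBDIR with its filename unchanged. A small sketch of the destination computation under that contract, reusing the ARCHIVED_SESSIONS_SUBDIR constant imported above (the helper name is hypothetical; the server-side implementation is not part of this diff):

use std::path::Path;
use std::path::PathBuf;

// Sketch: where an archived rollout is expected to land, per the test above.
fn archived_destination(codex_home: &Path, rollout_path: &Path) -> Option<PathBuf> {
    // The original filename (rollout-<timestamp>-<id>.jsonl) is preserved.
    let file_name = rollout_path.file_name()?;
    Some(codex_home.join(ARCHIVED_SESSIONS_SUBDIR).join(file_name))
}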

View File

@@ -1,205 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadListResponse;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::timeout;
use uuid::Uuid;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// List threads in an empty CODEX_HOME; should return an empty page with nextCursor: null.
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: None,
})
.await?;
let list_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(list_resp)?;
assert!(data.is_empty());
assert_eq!(next_cursor, None);
Ok(())
}
// Minimal config.toml for listing.
fn create_minimal_config(codex_home: &std::path::Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
r#"
model = "mock-model"
approval_policy = "never"
"#,
)
}
#[tokio::test]
async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
// Create three rollouts so we can paginate with limit=2.
let _a = create_fake_rollout(
codex_home.path(),
"2025-01-02T12-00-00",
"2025-01-02T12:00:00Z",
"Hello",
Some("mock_provider"),
)?;
let _b = create_fake_rollout(
codex_home.path(),
"2025-01-01T13-00-00",
"2025-01-01T13:00:00Z",
"Hello",
Some("mock_provider"),
)?;
let _c = create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello",
Some("mock_provider"),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Page 1: limit 2 → expect next_cursor Some.
let page1_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(2),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let page1_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(page1_id)),
)
.await??;
let ThreadListResponse {
data: data1,
next_cursor: cursor1,
} = to_response::<ThreadListResponse>(page1_resp)?;
assert_eq!(data1.len(), 2);
let cursor1 = cursor1.expect("expected nextCursor on first page");
// Page 2: with cursor → expect next_cursor None when no more results.
let page2_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: Some(cursor1),
limit: Some(2),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let page2_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(page2_id)),
)
.await??;
let ThreadListResponse {
data: data2,
next_cursor: cursor2,
} = to_response::<ThreadListResponse>(page2_resp)?;
assert!(data2.len() <= 2);
assert_eq!(cursor2, None, "expected nextCursor to be null on last page");
Ok(())
}
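// The cursor contract exercised here: pass cursor: None for the first page,
// then feed each non-null nextCursor back until it returns as None. A sketch
// of draining every page under that contract (fetch_page is a hypothetical
// stand-in for the request/response plumbing; items assumed to be Thread
// values):
//
//     let mut cursor: Option<String> = None;
//     let mut all: Vec<Thread> = Vec::new();
//     loop {
//         let ThreadListResponse { data, next_cursor } = fetch_page(cursor.take()).await?;
//         all.extend(data);
//         match next_cursor {
//             Some(next) => cursor = Some(next), // more pages remain
//             None => break,                     // null nextCursor marks the last page
//         }
//     }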
#[tokio::test]
async fn thread_list_respects_provider_filter() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
// Create rollouts under two providers.
let _a = create_fake_rollout(
codex_home.path(),
"2025-01-02T10-00-00",
"2025-01-02T10:00:00Z",
"X",
Some("mock_provider"),
)?; // mock_provider
// one with a different provider
let uuid = Uuid::new_v4();
let dir = codex_home
.path()
.join("sessions")
.join("2025")
.join("01")
.join("02");
std::fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-2025-01-02T11-00-00-{uuid}.jsonl"));
let lines = [
json!({
"timestamp": "2025-01-02T11:00:00Z",
"type": "session_meta",
"payload": {
"id": uuid,
"timestamp": "2025-01-02T11:00:00Z",
"cwd": "/",
"originator": "codex",
"cli_version": "0.0.0",
"instructions": null,
"source": "vscode",
"model_provider": "other_provider"
}
})
.to_string(),
json!({
"timestamp": "2025-01-02T11:00:00Z",
"type":"response_item",
"payload": {"type":"message","role":"user","content":[{"type":"input_text","text":"X"}]}
})
.to_string(),
json!({
"timestamp": "2025-01-02T11:00:00Z",
"type":"event_msg",
"payload": {"type":"user_message","message":"X","kind":"plain"}
})
.to_string(),
];
std::fs::write(file_path, lines.join("\n") + "\n")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Filter to only other_provider; expect 1 item, nextCursor None.
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: Some(vec!["other_provider".to_string()]),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(resp)?;
assert_eq!(data.len(), 1);
assert_eq!(next_cursor, None);
Ok(())
}

View File

@@ -1,79 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_resume_returns_existing_thread() -> Result<()> {
let server = create_mock_chat_completions_server(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a thread.
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5-codex".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
// Resume it via v2 API.
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: thread.id.clone(),
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let ThreadResumeResponse { thread: resumed } =
to_response::<ThreadResumeResponse>(resume_resp)?;
assert_eq!(resumed.id, thread.id);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,81 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
// Provide a mock server and config so model wiring is valid.
let server = create_mock_chat_completions_server(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
// Start server and initialize.
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a v2 thread with an explicit model override.
let req_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5".to_string()),
..Default::default()
})
.await?;
// Expect a proper JSON-RPC response with a thread id.
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(resp)?;
assert!(!thread.id.is_empty(), "thread id should not be empty");
// A corresponding thread/started notification should arrive.
let notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("thread/started"),
)
.await??;
let started: ThreadStartedNotification =
serde_json::from_value(notif.params.expect("params must be present"))?;
assert_eq!(started.thread.id, thread.id);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,128 +0,0 @@
#![cfg(unix)]
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnInterruptResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn turn_interrupt_aborts_running_turn() -> Result<()> {
// Use a portable sleep command to keep the turn running.
#[cfg(target_os = "windows")]
let shell_command = vec![
"powershell".to_string(),
"-Command".to_string(),
"Start-Sleep -Seconds 10".to_string(),
];
#[cfg(not(target_os = "windows"))]
let shell_command = vec!["sleep".to_string(), "10".to_string()];
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
let working_directory = tmp.path().join("workdir");
std::fs::create_dir(&working_directory)?;
// Mock server: long-running shell command then (after abort) nothing else needed.
let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
shell_command.clone(),
Some(&working_directory),
Some(10_000),
"call_sleep",
)?])
.await;
create_config_toml(&codex_home, &server.uri())?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a v2 thread and capture its id.
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn that triggers a long-running command.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run sleep".to_string(),
}],
cwd: Some(working_directory.clone()),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
// Give the command a brief moment to start.
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
// Interrupt the in-progress turn by id (v2 API).
let interrupt_id = mcp
.send_turn_interrupt_request(TurnInterruptParams {
thread_id: thread.id,
turn_id: turn.id,
})
.await?;
let interrupt_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(interrupt_id)),
)
.await??;
let _resp: TurnInterruptResponse = to_response::<TurnInterruptResponse>(interrupt_resp)?;
// No fields to assert on; successful deserialization confirms proper response shape.
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "workspace-write"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,486 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_mock_chat_completions_server_unchecked;
use app_test_support::create_shell_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<()> {
// Provide a mock server and config so model wiring is valid.
// Three Codex turns hit the mock model (session start + two turn/start calls).
let responses = vec![
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
];
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a thread (v2) and capture its id.
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn with only input and thread_id set (no overrides).
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
assert!(!turn.id.is_empty());
// Expect a turn/started notification.
let notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/started"),
)
.await??;
let started: TurnStartedNotification =
serde_json::from_value(notif.params.expect("params must be present"))?;
assert_eq!(
started.turn.status,
codex_app_server_protocol::TurnStatus::InProgress
);
// Send a second turn that exercises the overrides path: change the model.
let turn_req2 = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Second".to_string(),
}],
model: Some("mock-model-override".to_string()),
..Default::default()
})
.await?;
let turn_resp2: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req2)),
)
.await??;
let TurnStartResponse { turn: turn2 } = to_response::<TurnStartResponse>(turn_resp2)?;
assert!(!turn2.id.is_empty());
// Ensure the second turn has a different id than the first.
assert_ne!(turn.id, turn2.id);
// Expect a second turn/started notification as well.
let _notif2: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/started"),
)
.await??;
// And we should ultimately get a task_complete without having to add a
// legacy conversation listener explicitly (auto-attached by thread/start).
let _task_complete: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_local_image_input() -> Result<()> {
// Two Codex turns hit the mock model (session start + turn/start).
let responses = vec![
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
];
// Use the unchecked variant because the request payload includes a LocalImage
// which the strict matcher does not currently cover.
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
let image_path = codex_home.path().join("image.png");
// No need to actually write the file; we just exercise the input path.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::LocalImage { path: image_path }],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
assert!(!turn.id.is_empty());
// This test only validates that turn/start responds and returns a turn.
Ok(())
}
#[tokio::test]
async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let tmp = TempDir::new()?;
let codex_home = tmp.path().to_path_buf();
// Mock server: first turn requests a shell call (elicitation), then completes.
// Second turn same, but we'll set approval_policy=never to avoid elicitation.
let responses = vec![
create_shell_sse_response(
vec![
"python3".to_string(),
"-c".to_string(),
"print(42)".to_string(),
],
None,
Some(5000),
"call1",
)?,
create_final_assistant_message_sse_response("done 1")?,
create_shell_sse_response(
vec![
"python3".to_string(),
"-c".to_string(),
"print(42)".to_string(),
],
None,
Some(5000),
"call2",
)?,
create_final_assistant_message_sse_response("done 2")?,
];
let server = create_mock_chat_completions_server(responses).await;
// Default approval is untrusted to force elicitation on first turn.
create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// thread/start
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
// turn/start — expect ExecCommandApproval request from server
let first_turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run python".to_string(),
}],
..Default::default()
})
.await?;
// Acknowledge RPC
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_turn_id)),
)
.await??;
// Receive elicitation
let server_req = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ExecCommandApproval { request_id, params } = server_req else {
panic!("expected ExecCommandApproval request");
};
assert_eq!(params.call_id, "call1");
assert_eq!(
params.parsed_cmd,
vec![ParsedCommand::Unknown {
cmd: "python3 -c 'print(42)'".to_string()
}]
);
// Approve and wait for task completion
mcp.send_response(
request_id,
serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
)
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
// Second turn with approval_policy=never should not elicit approval
let second_turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run python again".to_string(),
}],
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess),
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_turn_id)),
)
.await??;
// Ensure we do NOT receive an ExecCommandApproval request before task completes
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
}
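// The handshake above is the crux: the server suspends the turn, emits an
// ExecCommandApproval request, and resumes only once the client answers with
// a ReviewDecision. Client-side shape, trimmed from the test (a rejecting
// decision variant would abort the command instead; its exact name is not
// shown in this diff):
//
//     let server_req = timeout(DEFAULT_READ_TIMEOUT, mcp.read_stream_until_request_message()).await??;
//     if let ServerRequest::ExecCommandApproval { request_id, .. } = server_req {
//         mcp.send_response(
//             request_id,
//             serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
//         )
//         .await?;
//     }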
#[tokio::test]
async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
// When returning Result from a test, pass an Ok(()) to the skip macro
// so the early return type matches. The no-arg form returns unit.
skip_if_no_network!(Ok(()));
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
let workspace_root = tmp.path().join("workspace");
std::fs::create_dir(&workspace_root)?;
let first_cwd = workspace_root.join("turn1");
let second_cwd = workspace_root.join("turn2");
std::fs::create_dir(&first_cwd)?;
std::fs::create_dir(&second_cwd)?;
let responses = vec![
create_shell_sse_response(
vec![
"bash".to_string(),
"-lc".to_string(),
"echo first turn".to_string(),
],
None,
Some(5000),
"call-first",
)?,
create_final_assistant_message_sse_response("done first")?,
create_shell_sse_response(
vec![
"bash".to_string(),
"-lc".to_string(),
"echo second turn".to_string(),
],
None,
Some(5000),
"call-second",
)?,
create_final_assistant_message_sse_response("done second")?,
];
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// thread/start
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
// first turn with workspace-write sandbox and first_cwd
let first_turn = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "first turn".to_string(),
}],
cwd: Some(first_cwd.clone()),
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::WorkspaceWrite {
writable_roots: vec![first_cwd.clone()],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
}),
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_turn)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
// second turn with workspace-write and second_cwd, ensure exec begins in second_cwd
let second_turn = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "second turn".to_string(),
}],
cwd: Some(second_cwd.clone()),
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess),
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_turn)),
)
.await??;
let exec_begin_notification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/exec_command_begin"),
)
.await??;
let params = exec_begin_notification
.params
.clone()
.expect("exec_command_begin params");
let event: Event = serde_json::from_value(params).expect("deserialize exec begin event");
let exec_begin = match event.msg {
EventMsg::ExecCommandBegin(exec_begin) => exec_begin,
other => panic!("expected ExecCommandBegin event, got {other:?}"),
};
assert_eq!(exec_begin.cwd, second_cwd);
assert_eq!(
exec_begin.command,
vec![
"bash".to_string(),
"-lc".to_string(),
"echo second turn".to_string()
]
);
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(
codex_home: &Path,
server_uri: &str,
approval_policy: &str,
) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "{approval_policy}"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -39,7 +39,6 @@ ctor = { workspace = true }
owo-colors = { workspace = true }
serde_json = { workspace = true }
supports-color = { workspace = true }
toml = { workspace = true }
tokio = { workspace = true, features = [
"io-std",
"macros",
@@ -48,9 +47,6 @@ tokio = { workspace = true, features = [
"signal",
] }
[target.'cfg(target_os = "windows")'.dependencies]
codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }
[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }

View File

@@ -11,7 +11,6 @@ use codex_protocol::config_types::SandboxMode;
use crate::LandlockCommand;
use crate::SeatbeltCommand;
use crate::WindowsCommand;
use crate::exit_status::handle_exit_status;
pub async fn run_command_under_seatbelt(
@@ -52,29 +51,9 @@ pub async fn run_command_under_landlock(
.await
}
pub async fn run_command_under_windows(
command: WindowsCommand,
codex_linux_sandbox_exe: Option<PathBuf>,
) -> anyhow::Result<()> {
let WindowsCommand {
full_auto,
config_overrides,
command,
} = command;
run_command_under_sandbox(
full_auto,
command,
config_overrides,
codex_linux_sandbox_exe,
SandboxType::Windows,
)
.await
}
enum SandboxType {
Seatbelt,
Landlock,
Windows,
}
async fn run_command_under_sandbox(
@@ -108,65 +87,6 @@ async fn run_command_under_sandbox(
let stdio_policy = StdioPolicy::Inherit;
let env = create_env(&config.shell_environment_policy);
// Special-case Windows sandbox: execute and exit the process to emulate inherited stdio.
if let SandboxType::Windows = sandbox_type {
#[cfg(target_os = "windows")]
{
use codex_windows_sandbox::run_windows_sandbox_capture;
let policy_str = match &config.sandbox_policy {
codex_core::protocol::SandboxPolicy::DangerFullAccess => "workspace-write",
codex_core::protocol::SandboxPolicy::ReadOnly => "read-only",
codex_core::protocol::SandboxPolicy::WorkspaceWrite { .. } => "workspace-write",
};
let sandbox_cwd = sandbox_policy_cwd.clone();
let cwd_clone = cwd.clone();
let env_map = env.clone();
let command_vec = command.clone();
let base_dir = config.codex_home.clone();
let res = tokio::task::spawn_blocking(move || {
run_windows_sandbox_capture(
policy_str,
&sandbox_cwd,
command_vec,
&cwd_clone,
env_map,
None,
Some(base_dir.as_path()),
)
})
.await;
let capture = match res {
Ok(Ok(v)) => v,
Ok(Err(err)) => {
eprintln!("windows sandbox failed: {err}");
std::process::exit(1);
}
Err(join_err) => {
eprintln!("windows sandbox join error: {join_err}");
std::process::exit(1);
}
};
if !capture.stdout.is_empty() {
use std::io::Write;
let _ = std::io::stdout().write_all(&capture.stdout);
}
if !capture.stderr.is_empty() {
use std::io::Write;
let _ = std::io::stderr().write_all(&capture.stderr);
}
std::process::exit(capture.exit_code);
}
#[cfg(not(target_os = "windows"))]
{
anyhow::bail!("Windows sandbox is only available on Windows");
}
}
let mut child = match sandbox_type {
SandboxType::Seatbelt => {
spawn_command_under_seatbelt(
@@ -195,9 +115,6 @@ async fn run_command_under_sandbox(
)
.await?
}
SandboxType::Windows => {
unreachable!("Windows sandbox should have been handled above");
}
};
let status = child.wait().await?;

View File

@@ -32,17 +32,3 @@ pub struct LandlockCommand {
#[arg(trailing_var_arg = true)]
pub command: Vec<String>,
}
#[derive(Debug, Parser)]
pub struct WindowsCommand {
/// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR)
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
#[clap(skip)]
pub config_overrides: CliConfigOverrides,
/// Full command args to run under Windows restricted token sandbox.
#[arg(trailing_var_arg = true)]
pub command: Vec<String>,
}
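For context on the struct being removed: trailing_var_arg = true tells clap to stop parsing at the first positional argument and collect the remaining arguments verbatim into command. A standalone sketch of that pattern (the Demo parser is hypothetical, not part of this diff):

use clap::Parser;

// Sketch of the trailing_var_arg pattern used by the removed WindowsCommand.
#[derive(Debug, Parser)]
struct Demo {
    #[arg(long = "full-auto", default_value_t = false)]
    full_auto: bool,
    // Everything after the recognized flags is captured here unparsed.
    #[arg(trailing_var_arg = true)]
    command: Vec<String>,
}

fn main() {
    let demo = Demo::parse_from(["demo", "--full-auto", "echo", "hi"]);
    assert!(demo.full_auto);
    assert_eq!(demo.command, vec!["echo".to_string(), "hi".to_string()]);
}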

View File

@@ -7,7 +7,6 @@ use codex_chatgpt::apply_command::ApplyCommand;
use codex_chatgpt::apply_command::run_apply_command;
use codex_cli::LandlockCommand;
use codex_cli::SeatbeltCommand;
use codex_cli::WindowsCommand;
use codex_cli::login::read_api_key_from_stdin;
use codex_cli::login::run_login_status;
use codex_cli::login::run_login_with_api_key;
@@ -152,9 +151,6 @@ enum SandboxCommand {
/// Run a command under Landlock+seccomp (Linux only).
#[clap(visible_alias = "landlock")]
Linux(LandlockCommand),
/// Run a command under Windows restricted token (Windows only).
Windows(WindowsCommand),
}
#[derive(Debug, Parser)]
@@ -476,17 +472,6 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
)
.await?;
}
SandboxCommand::Windows(mut windows_cli) => {
prepend_config_flags(
&mut windows_cli.config_overrides,
root_config_overrides.clone(),
);
codex_cli::debug_sandbox::run_command_under_windows(
windows_cli,
codex_linux_sandbox_exe,
)
.await?;
}
},
Some(Subcommand::Apply(mut apply_cli)) => {
prepend_config_flags(
@@ -510,21 +495,15 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
Some(Subcommand::Features(FeaturesCli { sub })) => match sub {
FeaturesSubcommand::List => {
// Respect root-level `-c` overrides plus top-level flags like `--profile`.
let mut cli_kv_overrides = root_config_overrides
let cli_kv_overrides = root_config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
// Honor `--search` via the new feature toggle.
if interactive.web_search {
cli_kv_overrides.push((
"features.web_search_request".to_string(),
toml::Value::Boolean(true),
));
}
.map_err(|e| anyhow::anyhow!(e))?;
// Thread through relevant top-level flags (at minimum, `--profile`).
// Also honor `--search` since it maps to a feature toggle.
let overrides = ConfigOverrides {
config_profile: interactive.config_profile.clone(),
tools_web_search_request: interactive.web_search.then_some(true),
..Default::default()
};

View File

@@ -196,9 +196,7 @@ impl McpCli {
async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> {
// Validate any provided overrides even though they are not currently applied.
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;
@@ -312,9 +310,7 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
}
async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> {
config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let RemoveArgs { name } = remove_args;
@@ -345,17 +341,13 @@ async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveAr
}
async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs) -> Result<()> {
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;
if !config.features.enabled(Feature::RmcpClient) {
bail!(
"OAuth login is only supported when [features].rmcp_client is true in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details."
);
bail!("OAuth login is only supported when [feature].rmcp_client is true in config.toml.");
}
let LoginArgs { name, scopes } = login_args;
@@ -388,9 +380,7 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
}
async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutArgs) -> Result<()> {
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;
@@ -417,9 +407,7 @@ async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutAr
}
async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> {
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;
@@ -674,9 +662,7 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) ->
}
async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> {
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

View File

@@ -22,6 +22,6 @@ chrono = { version = "0.4", features = ["serde"] }
diffy = "0.4.2"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
thiserror = "2.0.17"
thiserror = "2.0.12"
codex-backend-client = { path = "../backend-client", optional = true }
codex-git = { workspace = true }

View File

@@ -1044,7 +1044,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
// Close task modal/pending apply if present before opening env modal
app.diff_overlay = None;
app.env_modal = Some(app::EnvModalState { query: String::new(), selected: 0 });
// Cache environments while the modal is open to avoid repeated fetches.
// Cache environments until user explicitly refreshes with 'r' inside the modal.
let should_fetch = app.environments.is_empty();
if should_fetch {
app.env_loading = true;
@@ -1115,7 +1115,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
let _ = tx.send(evt);
});
} else {
app.status = "No environment selected".to_string();
app.status = "No environment selected (press 'e' to choose)".to_string();
}
}
needs_redraw = true;
@@ -1313,6 +1313,18 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
// Environment modal key handling
match key.code {
KeyCode::Esc => { app.env_modal = None; needs_redraw = true; }
KeyCode::Char('r') | KeyCode::Char('R') => {
// Trigger refresh of environments
app.env_loading = true; app.env_error = None; needs_redraw = true;
let _ = frame_tx.send(Instant::now() + Duration::from_millis(100));
let tx = tx.clone();
tokio::spawn(async move {
let base_url = crate::util::normalize_base_url(&std::env::var("CODEX_CLOUD_TASKS_BASE_URL").unwrap_or_else(|_| "https://chatgpt.com/backend-api".to_string()));
let headers = crate::util::build_chatgpt_headers().await;
let res = crate::env_detect::list_environments(&base_url, &headers).await;
let _ = tx.send(app::AppEvent::EnvironmentsLoaded(res));
});
}
KeyCode::Char(ch) if !key.modifiers.contains(KeyModifiers::CONTROL) && !key.modifiers.contains(KeyModifiers::ALT) => {
if let Some(m) = app.env_modal.as_mut() { m.query.push(ch); }
needs_redraw = true;
@@ -1419,7 +1431,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
}
KeyCode::Char('o') | KeyCode::Char('O') => {
app.env_modal = Some(app::EnvModalState { query: String::new(), selected: 0 });
// Cache environments while the modal is open to avoid repeated fetches.
// Cache environments until user explicitly refreshes with 'r' inside the modal.
let should_fetch = app.environments.is_empty();
if should_fetch { app.env_loading = true; app.env_error = None; }
needs_redraw = true;

View File

@@ -945,7 +945,9 @@ pub fn draw_env_modal(frame: &mut Frame, area: Rect, app: &mut App) {
// Subheader with usage hints (dim cyan)
let subheader = Paragraph::new(Line::from(
"Type to search, Enter select, Esc cancel".cyan().dim(),
"Type to search, Enter select, Esc cancel; r refresh"
.cyan()
.dim(),
))
.wrap(Wrap { trim: true });
frame.render_widget(subheader, rows[0]);

View File

@@ -34,7 +34,7 @@ const PRESETS: &[ModelPreset] = &[
id: "gpt-5-codex",
model: "gpt-5-codex",
display_name: "gpt-5-codex",
description: "Optimized for codex.",
description: "Optimized for coding tasks with many tools.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
@@ -52,24 +52,6 @@ const PRESETS: &[ModelPreset] = &[
],
is_default: true,
},
ModelPreset {
id: "gpt-5-codex-mini",
model: "gpt-5-codex-mini",
display_name: "gpt-5-codex-mini",
description: "Optimized for codex. Cheaper, faster, but less capable.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
},
ModelPreset {
id: "gpt-5",
model: "gpt-5",
@@ -98,13 +80,8 @@ const PRESETS: &[ModelPreset] = &[
},
];
pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
let allow_codex_mini = matches!(auth_mode, Some(AuthMode::ChatGPT));
PRESETS
.iter()
.filter(|preset| allow_codex_mini || preset.id != "gpt-5-codex-mini")
.copied()
.collect()
pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS.to_vec()
}
#[cfg(test)]

View File

@@ -80,10 +80,9 @@ toml_edit = { workspace = true }
tracing = { workspace = true, features = ["log"] }
tree-sitter = { workspace = true }
tree-sitter-bash = { workspace = true }
uuid = { workspace = true, features = ["serde", "v4", "v5"] }
uuid = { workspace = true, features = ["serde", "v4"] }
which = { workspace = true }
wildmatch = { workspace = true }
codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }
[target.'cfg(target_os = "linux")'.dependencies]

View File

@@ -82,6 +82,6 @@ OUTPUT FORMAT:
* **Do not** wrap the JSON in markdown fences or extra prose.
* The code_location field is required and must include absolute_file_path and line_range.
* Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5-10 lines; pick the most suitable subrange).
*Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5-10 lines; pick the most suitable subrange).
* The code_location should overlap with the diff.
* Do not generate a PR fix.
* Do not generate a PR fix.

View File

@@ -1,14 +1,12 @@
mod storage;
use chrono::Utc;
use reqwest::StatusCode;
use serde::Deserialize;
use serde::Serialize;
#[cfg(test)]
use serial_test::serial;
use std::env;
use std::fmt::Debug;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
@@ -24,14 +22,10 @@ use crate::auth::storage::AuthStorageBackend;
use crate::auth::storage::create_auth_storage;
use crate::config::Config;
use crate::default_client::CodexHttpClient;
use crate::error::RefreshTokenFailedError;
use crate::error::RefreshTokenFailedReason;
use crate::token_data::PlanType;
use crate::token_data::TokenData;
use crate::token_data::parse_id_token;
use crate::util::try_parse_error_message;
use serde_json::Value;
use thiserror::Error;
#[derive(Debug, Clone)]
pub struct CodexAuth {
@@ -52,54 +46,18 @@ impl PartialEq for CodexAuth {
// TODO(pakrym): use token exp field to check for expiration instead
const TOKEN_REFRESH_INTERVAL: i64 = 8;
const REFRESH_TOKEN_EXPIRED_MESSAGE: &str = "Your access token could not be refreshed because your refresh token has expired. Please log out and sign in again.";
const REFRESH_TOKEN_REUSED_MESSAGE: &str = "Your access token could not be refreshed because your refresh token was already used. Please log out and sign in again.";
const REFRESH_TOKEN_INVALIDATED_MESSAGE: &str = "Your access token could not be refreshed because your refresh token was revoked. Please log out and sign in again.";
const REFRESH_TOKEN_UNKNOWN_MESSAGE: &str =
"Your access token could not be refreshed. Please log out and sign in again.";
const REFRESH_TOKEN_URL: &str = "https://auth.openai.com/oauth/token";
pub const REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR: &str = "CODEX_REFRESH_TOKEN_URL_OVERRIDE";
#[derive(Debug, Error)]
pub enum RefreshTokenError {
#[error("{0}")]
Permanent(#[from] RefreshTokenFailedError),
#[error(transparent)]
Transient(#[from] std::io::Error),
}
impl RefreshTokenError {
pub fn failed_reason(&self) -> Option<RefreshTokenFailedReason> {
match self {
Self::Permanent(error) => Some(error.reason),
Self::Transient(_) => None,
}
}
fn other_with_message(message: impl Into<String>) -> Self {
Self::Transient(std::io::Error::other(message.into()))
}
}
impl From<RefreshTokenError> for std::io::Error {
fn from(err: RefreshTokenError) -> Self {
match err {
RefreshTokenError::Permanent(failed) => std::io::Error::other(failed),
RefreshTokenError::Transient(inner) => inner,
}
}
}
impl CodexAuth {
pub async fn refresh_token(&self) -> Result<String, RefreshTokenError> {
pub async fn refresh_token(&self) -> Result<String, std::io::Error> {
tracing::info!("Refreshing token");
let token_data = self.get_current_token_data().ok_or_else(|| {
RefreshTokenError::Transient(std::io::Error::other("Token data is not available."))
})?;
let token_data = self
.get_current_token_data()
.ok_or(std::io::Error::other("Token data is not available."))?;
let token = token_data.refresh_token;
let refresh_response = try_refresh_token(token, &self.client).await?;
let refresh_response = try_refresh_token(token, &self.client)
.await
.map_err(std::io::Error::other)?;
let updated = update_tokens(
&self.storage,
@@ -107,8 +65,7 @@ impl CodexAuth {
refresh_response.access_token,
refresh_response.refresh_token,
)
.await
.map_err(RefreshTokenError::from)?;
.await?;
if let Ok(mut auth_lock) = self.auth_dot_json.lock() {
*auth_lock = Some(updated.clone());
@@ -117,7 +74,7 @@ impl CodexAuth {
let access = match updated.tokens {
Some(t) => t.access_token,
None => {
return Err(RefreshTokenError::other_with_message(
return Err(std::io::Error::other(
"Token data is not available after refresh.",
));
}
@@ -142,21 +99,15 @@ impl CodexAuth {
..
}) => {
if last_refresh < Utc::now() - chrono::Duration::days(TOKEN_REFRESH_INTERVAL) {
let refresh_result = tokio::time::timeout(
let refresh_response = tokio::time::timeout(
Duration::from_secs(60),
try_refresh_token(tokens.refresh_token.clone(), &self.client),
)
.await;
let refresh_response = match refresh_result {
Ok(Ok(response)) => response,
Ok(Err(err)) => return Err(err.into()),
Err(_) => {
return Err(std::io::Error::new(
ErrorKind::TimedOut,
"timed out while refreshing OpenAI API key",
));
}
};
.await
.map_err(|_| {
std::io::Error::other("timed out while refreshing OpenAI API key")
})?
.map_err(std::io::Error::other)?;
let updated_auth_dot_json = update_tokens(
&self.storage,
@@ -474,7 +425,7 @@ async fn update_tokens(
async fn try_refresh_token(
refresh_token: String,
client: &CodexHttpClient,
) -> Result<RefreshResponse, RefreshTokenError> {
) -> std::io::Result<RefreshResponse> {
let refresh_request = RefreshRequest {
client_id: CLIENT_ID,
grant_type: "refresh_token",
@@ -482,93 +433,30 @@ async fn try_refresh_token(
scope: "openid profile email",
};
let endpoint = refresh_token_endpoint();
// Use shared client factory to include standard headers
let response = client
.post(endpoint.as_str())
.post("https://auth.openai.com/oauth/token")
.header("Content-Type", "application/json")
.json(&refresh_request)
.send()
.await
.map_err(|err| RefreshTokenError::Transient(std::io::Error::other(err)))?;
.map_err(std::io::Error::other)?;
let status = response.status();
if status.is_success() {
if response.status().is_success() {
let refresh_response = response
.json::<RefreshResponse>()
.await
.map_err(|err| RefreshTokenError::Transient(std::io::Error::other(err)))?;
.map_err(std::io::Error::other)?;
Ok(refresh_response)
} else {
let body = response.text().await.unwrap_or_default();
if status == StatusCode::UNAUTHORIZED {
let failed = classify_refresh_token_failure(&body);
Err(RefreshTokenError::Permanent(failed))
} else {
let message = try_parse_error_message(&body);
Err(RefreshTokenError::Transient(std::io::Error::other(
format!("Failed to refresh token: {status}: {message}"),
)))
}
Err(std::io::Error::other(format!(
"Failed to refresh token: {}: {}",
response.status(),
try_parse_error_message(&response.text().await.unwrap_or_default()),
)))
}
}
fn classify_refresh_token_failure(body: &str) -> RefreshTokenFailedError {
let code = extract_refresh_token_error_code(body);
let normalized_code = code.as_deref().map(str::to_ascii_lowercase);
let reason = match normalized_code.as_deref() {
Some("refresh_token_expired") => RefreshTokenFailedReason::Expired,
Some("refresh_token_reused") => RefreshTokenFailedReason::Exhausted,
Some("refresh_token_invalidated") => RefreshTokenFailedReason::Revoked,
_ => RefreshTokenFailedReason::Other,
};
if reason == RefreshTokenFailedReason::Other {
tracing::warn!(
backend_code = normalized_code.as_deref(),
backend_body = body,
"Encountered unknown 401 response while refreshing token"
);
}
let message = match reason {
RefreshTokenFailedReason::Expired => REFRESH_TOKEN_EXPIRED_MESSAGE.to_string(),
RefreshTokenFailedReason::Exhausted => REFRESH_TOKEN_REUSED_MESSAGE.to_string(),
RefreshTokenFailedReason::Revoked => REFRESH_TOKEN_INVALIDATED_MESSAGE.to_string(),
RefreshTokenFailedReason::Other => REFRESH_TOKEN_UNKNOWN_MESSAGE.to_string(),
};
RefreshTokenFailedError::new(reason, message)
}
fn extract_refresh_token_error_code(body: &str) -> Option<String> {
if body.trim().is_empty() {
return None;
}
let Value::Object(map) = serde_json::from_str::<Value>(body).ok()? else {
return None;
};
if let Some(error_value) = map.get("error") {
match error_value {
Value::Object(obj) => {
if let Some(code) = obj.get("code").and_then(Value::as_str) {
return Some(code.to_string());
}
}
Value::String(code) => {
return Some(code.to_string());
}
_ => {}
}
}
map.get("code").and_then(Value::as_str).map(str::to_string)
}
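To make the accepted shapes concrete, a small test sketch (not in the diff) for the extractor above:
#[test]
fn extracts_code_from_common_error_shapes() {
    // Nested object form: {"error": {"code": "..."}}
    assert_eq!(
        extract_refresh_token_error_code(r#"{"error":{"code":"refresh_token_expired"}}"#),
        Some("refresh_token_expired".to_string())
    );
    // Bare string form: {"error": "..."}
    assert_eq!(
        extract_refresh_token_error_code(r#"{"error":"refresh_token_reused"}"#),
        Some("refresh_token_reused".to_string())
    );
    // Top-level fallback: {"code": "..."}
    assert_eq!(
        extract_refresh_token_error_code(r#"{"code":"refresh_token_invalidated"}"#),
        Some("refresh_token_invalidated".to_string())
    );
}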
#[derive(Serialize)]
struct RefreshRequest {
client_id: &'static str,
@@ -587,11 +475,6 @@ struct RefreshResponse {
// Shared constant for token refresh (client ID used for the OAuth token refresh flow)
pub const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann";
fn refresh_token_endpoint() -> String {
std::env::var(REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR)
.unwrap_or_else(|_| REFRESH_TOKEN_URL.to_string())
}
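Usage note (an assumption, not shown in the diff): the override env var lets tests route refreshes to a mock server.
// e.g. with CODEX_REFRESH_TOKEN_URL_OVERRIDE set to
// "http://127.0.0.1:8080/oauth/token", refresh_token_endpoint() resolves to
// that URL instead of the production REFRESH_TOKEN_URL.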
use std::sync::RwLock;
/// Internal cached auth state.
@@ -1082,9 +965,7 @@ impl AuthManager {
/// Attempt to refresh the current auth token (if any). On success, reload
/// the auth state from disk so other components observe the refreshed token.
/// If the token refresh fails in a permanent (non-transient) way, logs out
/// to clear the invalid auth state.
pub async fn refresh_token(&self) -> Result<Option<String>, RefreshTokenError> {
pub async fn refresh_token(&self) -> std::io::Result<Option<String>> {
let auth = match self.auth() {
Some(a) => a,
None => return Ok(None),

View File

@@ -31,7 +31,6 @@ use tracing::warn;
use crate::AuthManager;
use crate::auth::CodexAuth;
use crate::auth::RefreshTokenError;
use crate::chat_completions::AggregateStreamExt;
use crate::chat_completions::stream_chat_completions;
use crate::client_common::Prompt;
@@ -217,12 +216,10 @@ impl ModelClient {
let verbosity = if self.config.model_family.support_verbosity {
self.config.model_verbosity
} else {
if self.config.model_verbosity.is_some() {
warn!(
"model_verbosity is set but ignored as the model does not support verbosity: {}",
self.config.model_family.family
);
}
warn!(
"model_verbosity is set but ignored as the model does not support verbosity: {}",
self.config.model_family.family
);
None
};
@@ -390,17 +387,12 @@ impl ModelClient {
&& let Some(manager) = auth_manager.as_ref()
&& let Some(auth) = auth.as_ref()
&& auth.mode == AuthMode::ChatGPT
&& let Err(err) = manager.refresh_token().await
{
let stream_error = match err {
RefreshTokenError::Permanent(failed) => {
StreamAttemptError::Fatal(CodexErr::RefreshTokenFailed(failed))
}
RefreshTokenError::Transient(other) => {
StreamAttemptError::RetryableTransportError(CodexErr::Io(other))
}
};
return Err(stream_error);
manager.refresh_token().await.map_err(|err| {
StreamAttemptError::Fatal(CodexErr::Fatal(format!(
"Failed to refresh ChatGPT credentials: {err}"
)))
})?;
}
// The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx
@@ -680,33 +672,6 @@ fn parse_header_str<'a>(headers: &'a HeaderMap, name: &str) -> Option<&'a str> {
headers.get(name)?.to_str().ok()
}
async fn emit_completed(
tx_event: &mpsc::Sender<Result<ResponseEvent>>,
otel_event_manager: &OtelEventManager,
completed: ResponseCompleted,
) {
if let Some(token_usage) = &completed.usage {
otel_event_manager.sse_event_completed(
token_usage.input_tokens,
token_usage.output_tokens,
token_usage
.input_tokens_details
.as_ref()
.map(|d| d.cached_tokens),
token_usage
.output_tokens_details
.as_ref()
.map(|d| d.reasoning_tokens),
token_usage.total_tokens,
);
}
let event = ResponseEvent::Completed {
response_id: completed.id.clone(),
token_usage: completed.usage.map(Into::into),
};
let _ = tx_event.send(Ok(event)).await;
}
async fn process_sse<S>(
stream: S,
tx_event: mpsc::Sender<Result<ResponseEvent>>,
@@ -719,7 +684,7 @@ async fn process_sse<S>(
// If the stream stays completely silent for an extended period treat it as disconnected.
// The response id returned from the "complete" message.
let response_completed: Option<ResponseCompleted> = None;
let mut response_completed: Option<ResponseCompleted> = None;
let mut response_error: Option<CodexErr> = None;
loop {
@@ -738,8 +703,30 @@ async fn process_sse<S>(
}
Ok(None) => {
match response_completed {
Some(completed) => {
emit_completed(&tx_event, &otel_event_manager, completed).await
Some(ResponseCompleted {
id: response_id,
usage,
}) => {
if let Some(token_usage) = &usage {
otel_event_manager.sse_event_completed(
token_usage.input_tokens,
token_usage.output_tokens,
token_usage
.input_tokens_details
.as_ref()
.map(|d| d.cached_tokens),
token_usage
.output_tokens_details
.as_ref()
.map(|d| d.reasoning_tokens),
token_usage.total_tokens,
);
}
let event = ResponseEvent::Completed {
response_id,
token_usage: usage.map(Into::into),
};
let _ = tx_event.send(Ok(event)).await;
}
None => {
let error = response_error.unwrap_or(CodexErr::Stream(
@@ -869,8 +856,7 @@ async fn process_sse<S>(
if let Some(resp_val) = event.response {
match serde_json::from_value::<ResponseCompleted>(resp_val) {
Ok(r) => {
emit_completed(&tx_event, &otel_event_manager, r).await;
return;
response_completed = Some(r);
}
Err(e) => {
let error = format!("failed to parse ResponseCompleted: {e}");
@@ -943,10 +929,8 @@ async fn stream_from_fixture(
fn rate_limit_regex() -> &'static Regex {
static RE: OnceLock<Regex> = OnceLock::new();
// Match both OpenAI-style messages like "Please try again in 1.898s"
// and Azure OpenAI-style messages like "Try again in 35 seconds".
#[expect(clippy::unwrap_used)]
RE.get_or_init(|| Regex::new(r"(?i)try again in\s*(\d+(?:\.\d+)?)\s*(s|ms|seconds?)").unwrap())
RE.get_or_init(|| Regex::new(r"Please try again in (\d+(?:\.\d+)?)(s|ms)").unwrap())
}
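For concreteness, a few strings the broadened pattern accepts (the narrower variant matched only the first; see the tests below):
// "Please try again in 1.898s"                    -> 1.898 s
// "Rate limit exceeded. Try again in 35 seconds." -> 35 s
// "try again in 250ms"                            -> 250 ms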
fn try_parse_retry_after(err: &Error) -> Option<Duration> {
@@ -954,8 +938,7 @@ fn try_parse_retry_after(err: &Error) -> Option<Duration> {
return None;
}
// parse retry hints like "try again in 1.898s" or
// "Try again in 35 seconds" using regex
// parse the Please try again in 1.898s format using regex
let re = rate_limit_regex();
if let Some(message) = &err.message
&& let Some(captures) = re.captures(message)
@@ -965,9 +948,9 @@ fn try_parse_retry_after(err: &Error) -> Option<Duration> {
if let (Some(value), Some(unit)) = (seconds, unit) {
let value = value.as_str().parse::<f64>().ok()?;
let unit = unit.as_str().to_ascii_lowercase();
let unit = unit.as_str();
if unit == "s" || unit.starts_with("second") {
if unit == "s" {
return Some(Duration::from_secs_f64(value));
} else if unit == "ms" {
return Some(Duration::from_millis(value as u64));
@@ -1442,19 +1425,6 @@ mod tests {
assert_eq!(delay, Some(Duration::from_secs_f64(1.898)));
}
#[test]
fn test_try_parse_retry_after_azure() {
let err = Error {
r#type: None,
message: Some("Rate limit exceeded. Try again in 35 seconds.".to_string()),
code: Some("rate_limit_exceeded".to_string()),
plan_type: None,
resets_at: None,
};
let delay = try_parse_retry_after(&err);
assert_eq!(delay, Some(Duration::from_secs(35)));
}
#[test]
fn error_response_deserializes_schema_known_plan_type_and_serializes_back() {
use crate::token_data::KnownPlan;

View File

@@ -58,7 +58,7 @@ use crate::client_common::ResponseEvent;
use crate::config::Config;
use crate::config::types::McpServerTransportConfig;
use crate::config::types::ShellEnvironmentPolicy;
use crate::context_manager::ContextManager;
use crate::conversation_history::ConversationHistory;
use crate::environment_context::EnvironmentContext;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
@@ -116,6 +116,7 @@ use crate::user_instructions::DeveloperInstructions;
use crate::user_instructions::UserInstructions;
use crate::user_notification::UserNotification;
use crate::util::backoff;
use chrono::Local;
use codex_async_utils::OrCancelExt;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
@@ -267,6 +268,7 @@ pub(crate) struct TurnContext {
/// the model as well as sandbox policies are resolved against this path
/// instead of `std::env::current_dir()`.
pub(crate) cwd: PathBuf,
pub(crate) local_date_with_timezone: Option<String>,
pub(crate) developer_instructions: Option<String>,
pub(crate) base_instructions: Option<String>,
pub(crate) compact_prompt: Option<String>,
@@ -423,6 +425,7 @@ impl Session {
sub_id,
client,
cwd: session_configuration.cwd.clone(),
local_date_with_timezone: Some(Local::now().format("%Y-%m-%d %:z").to_string()),
developer_instructions: session_configuration.developer_instructions.clone(),
base_instructions: session_configuration.base_instructions.clone(),
compact_prompt: session_configuration.compact_prompt.clone(),
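A note on the new `local_date_with_timezone` value above (my reading of the chrono format string; not stated in the diff):
// `Local::now().format("%Y-%m-%d %:z")` renders the local date plus a
// colon-separated UTC offset, e.g. "2025-10-30 -04:00" for US Eastern
// daylight time.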
@@ -553,7 +556,7 @@ impl Session {
None
} else {
Some(format!(
"Enable it with `--enable {canonical}` or `[features].{canonical}` in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details."
"You can either enable it using the CLI with `--enable {canonical}` or through the config.toml file with `[features].{canonical}`"
))
};
post_session_configured_events.push(Event {
@@ -945,7 +948,7 @@ impl Session {
turn_context: &TurnContext,
rollout_items: &[RolloutItem],
) -> Vec<ResponseItem> {
let mut history = ContextManager::new();
let mut history = ConversationHistory::new();
for item in rollout_items {
match item {
RolloutItem::ResponseItem(response_item) => {
@@ -1003,16 +1006,11 @@ impl Session {
items.push(DeveloperInstructions::new(developer_instructions.to_string()).into());
}
if let Some(user_instructions) = turn_context.user_instructions.as_deref() {
items.push(
UserInstructions {
text: user_instructions.to_string(),
directory: turn_context.cwd.to_string_lossy().into_owned(),
}
.into(),
);
items.push(UserInstructions::new(user_instructions.to_string()).into());
}
items.push(ResponseItem::from(EnvironmentContext::new(
Some(turn_context.cwd.clone()),
turn_context.local_date_with_timezone.clone(),
Some(turn_context.approval_policy),
Some(turn_context.sandbox_policy.clone()),
Some(self.user_shell().clone()),
@@ -1032,7 +1030,7 @@ impl Session {
}
}
pub(crate) async fn clone_history(&self) -> ContextManager {
pub(crate) async fn clone_history(&self) -> ConversationHistory {
let state = self.state.lock().await;
state.clone_history()
}
@@ -1698,6 +1696,7 @@ async fn spawn_review_thread(
sandbox_policy: parent_turn_context.sandbox_policy.clone(),
shell_environment_policy: parent_turn_context.shell_environment_policy.clone(),
cwd: parent_turn_context.cwd.clone(),
local_date_with_timezone: parent_turn_context.local_date_with_timezone.clone(),
final_output_json_schema: None,
codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(),
tool_call_gate: Arc::new(ReadinessFlag::new()),
@@ -1773,14 +1772,19 @@ pub(crate) async fn run_task(
sess.clone_history().await.get_history_for_prompt()
};
let turn_input_messages = turn_input
let turn_input_messages: Vec<String> = turn_input
.iter()
.filter_map(|item| match parse_turn_item(item) {
Some(TurnItem::UserMessage(user_message)) => Some(user_message),
.filter_map(|item| match item {
ResponseItem::Message { content, .. } => Some(content),
_ => None,
})
.map(|user_message| user_message.message())
.collect::<Vec<String>>();
.flat_map(|content| {
content.iter().filter_map(|item| match item {
ContentItem::OutputText { text } => Some(text.clone()),
_ => None,
})
})
.collect();
match run_turn(
Arc::clone(&sess),
Arc::clone(&turn_context),
@@ -1928,7 +1932,6 @@ async fn run_turn(
return Err(CodexErr::UsageLimitReached(e));
}
Err(CodexErr::UsageNotIncluded) => return Err(CodexErr::UsageNotIncluded),
Err(e @ CodexErr::RefreshTokenFailed(_)) => return Err(e),
Err(e) => {
// Use the configured provider-specific stream retry budget.
let max_retries = turn_context.client.get_provider().stream_max_retries();
@@ -1947,7 +1950,7 @@ async fn run_turn(
// at a seemingly frozen screen.
sess.notify_stream_error(
&turn_context,
format!("Reconnecting... {retries}/{max_retries}"),
format!("Re-connecting... {retries}/{max_retries}"),
)
.await;
@@ -2835,7 +2838,7 @@ mod tests {
turn_context: &TurnContext,
) -> (Vec<RolloutItem>, Vec<ResponseItem>) {
let mut rollout_items = Vec::new();
let mut live_history = ContextManager::new();
let mut live_history = ConversationHistory::new();
let initial_context = session.build_initial_context(turn_context);
for item in &initial_context {

View File

@@ -13,9 +13,9 @@ use crate::protocol::ErrorEvent;
use crate::protocol::EventMsg;
use crate::protocol::TaskStartedEvent;
use crate::protocol::TurnContextItem;
use crate::protocol::WarningEvent;
use crate::truncate::truncate_middle;
use crate::util::backoff;
use askama::Template;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseInputItem;
@@ -28,6 +28,13 @@ use tracing::error;
pub const SUMMARIZATION_PROMPT: &str = include_str!("../../templates/compact/prompt.md");
const COMPACT_USER_MESSAGE_MAX_TOKENS: usize = 20_000;
#[derive(Template)]
#[template(path = "compact/history_bridge.md", escape = "none")]
struct HistoryBridgeTemplate<'a> {
user_messages_text: &'a str,
summary_text: &'a str,
}
pub(crate) async fn run_inline_auto_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
@@ -142,7 +149,6 @@ async fn run_compact_task_inner(
let history_snapshot = sess.clone_history().await.get_history();
let summary_text = get_last_assistant_message_from_turn(&history_snapshot).unwrap_or_default();
let user_messages = collect_user_messages(&history_snapshot);
let initial_context = sess.build_initial_context(turn_context.as_ref());
let mut new_history = build_compacted_history(initial_context, &user_messages, &summary_text);
let ghost_snapshots: Vec<ResponseItem> = history_snapshot
@@ -162,11 +168,6 @@ async fn run_compact_task_inner(
message: "Compact task completed".to_string(),
});
sess.send_event(&turn_context, event).await;
let warning = EventMsg::Warning(WarningEvent {
message: "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start new a new conversation when possible to keep conversations small and targeted.".to_string(),
});
sess.send_event(&turn_context, warning).await;
}
pub fn content_items_to_text(content: &[ContentItem]) -> Option<String> {
@@ -217,47 +218,33 @@ fn build_compacted_history_with_limit(
summary_text: &str,
max_bytes: usize,
) -> Vec<ResponseItem> {
let mut selected_messages: Vec<String> = Vec::new();
if max_bytes > 0 {
let mut remaining = max_bytes;
for message in user_messages.iter().rev() {
if remaining == 0 {
break;
}
if message.len() <= remaining {
selected_messages.push(message.clone());
remaining = remaining.saturating_sub(message.len());
} else {
let (truncated, _) = truncate_middle(message, remaining);
selected_messages.push(truncated);
break;
}
}
selected_messages.reverse();
let mut user_messages_text = if user_messages.is_empty() {
"(none)".to_string()
} else {
user_messages.join("\n\n")
};
// Truncate the concatenated prior user messages so the bridge message
// stays well under the context window (approx. 4 bytes/token).
if user_messages_text.len() > max_bytes {
user_messages_text = truncate_middle(&user_messages_text, max_bytes).0;
}
for message in &selected_messages {
history.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: message.clone(),
}],
});
}
let summary_text = if summary_text.is_empty() {
"(no summary available)".to_string()
} else {
summary_text.to_string()
};
let Ok(bridge) = HistoryBridgeTemplate {
user_messages_text: &user_messages_text,
summary_text: &summary_text,
}
.render() else {
return vec![];
};
history.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: summary_text }],
content: vec![ContentItem::InputText { text: bridge }],
});
history
}
@@ -360,8 +347,7 @@ mod tests {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "# AGENTS.md instructions for project\n\n<INSTRUCTIONS>\ndo things\n</INSTRUCTIONS>"
.to_string(),
text: "<user_instructions>do things</user_instructions>".to_string(),
}],
},
ResponseItem::Message {
@@ -397,55 +383,30 @@ mod tests {
"SUMMARY",
max_bytes,
);
assert_eq!(history.len(), 2);
let truncated_message = &history[0];
let summary_message = &history[1];
// Expect exactly one bridge message added to history (plus any initial context we provided, which is none).
assert_eq!(history.len(), 1);
let truncated_text = match truncated_message {
// Extract the text content of the bridge message.
let bridge_text = match &history[0] {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content).unwrap_or_default()
}
other => panic!("unexpected item in history: {other:?}"),
};
// The bridge should contain the truncation marker and not the full original payload.
assert!(
truncated_text.contains("tokens truncated"),
"expected truncation marker in truncated user message"
bridge_text.contains("tokens truncated"),
"expected truncation marker in bridge message"
);
assert!(
!truncated_text.contains(&big),
"truncated user message should not include the full oversized user text"
!bridge_text.contains(&big),
"bridge should not include the full oversized user text"
);
let summary_text = match summary_message {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content).unwrap_or_default()
}
other => panic!("unexpected item in history: {other:?}"),
};
assert_eq!(summary_text, "SUMMARY");
}
#[test]
fn build_compacted_history_appends_summary_message() {
let initial_context: Vec<ResponseItem> = Vec::new();
let user_messages = vec!["first user message".to_string()];
let summary_text = "summary text";
let history = build_compacted_history(initial_context, &user_messages, summary_text);
assert!(
!history.is_empty(),
"expected compacted history to include summary"
bridge_text.contains("SUMMARY"),
"bridge should include the provided summary text"
);
let last = history.last().expect("history should have a summary entry");
let summary = match last {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content).unwrap_or_default()
}
other => panic!("expected summary message, found {other:?}"),
};
assert_eq!(summary, summary_text);
}
}

View File

@@ -158,11 +158,6 @@ async fn forward_events(
) {
while let Ok(event) = codex.next_event().await {
match event {
// ignore all legacy delta events
Event {
id: _,
msg: EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_),
} => continue,
Event {
id: _,
msg: EventMsg::SessionConfigured(_),

View File

@@ -769,8 +769,6 @@ impl ConfigToml {
let mut forced_auto_mode_downgraded_on_windows = false;
if cfg!(target_os = "windows")
&& matches!(resolved_sandbox_mode, SandboxMode::WorkspaceWrite)
// If the experimental Windows sandbox is enabled, do not force a downgrade.
&& crate::safety::get_platform_sandbox().is_none()
{
sandbox_policy = SandboxPolicy::new_read_only_policy();
forced_auto_mode_downgraded_on_windows = true;
@@ -902,10 +900,6 @@ impl Config {
};
let features = Features::from_config(&cfg, &config_profile, feature_overrides);
#[cfg(target_os = "windows")]
{
crate::safety::set_windows_sandbox_enabled(features.enabled(Feature::WindowsSandbox));
}
let resolved_cwd = {
use std::env;

View File

@@ -1,174 +0,0 @@
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::TokenUsage;
use codex_protocol::protocol::TokenUsageInfo;
use std::ops::Deref;
use crate::context_manager::normalize;
use crate::context_manager::truncate::format_output_for_model_body;
use crate::context_manager::truncate::globally_truncate_function_output_items;
/// Transcript of conversation history
#[derive(Debug, Clone, Default)]
pub(crate) struct ContextManager {
/// The oldest items are at the beginning of the vector.
items: Vec<ResponseItem>,
token_info: Option<TokenUsageInfo>,
}
impl ContextManager {
pub(crate) fn new() -> Self {
Self {
items: Vec::new(),
token_info: TokenUsageInfo::new_or_append(&None, &None, None),
}
}
pub(crate) fn token_info(&self) -> Option<TokenUsageInfo> {
self.token_info.clone()
}
pub(crate) fn set_token_usage_full(&mut self, context_window: i64) {
match &mut self.token_info {
Some(info) => info.fill_to_context_window(context_window),
None => {
self.token_info = Some(TokenUsageInfo::full_context_window(context_window));
}
}
}
/// `items` is ordered from oldest to newest.
pub(crate) fn record_items<I>(&mut self, items: I)
where
I: IntoIterator,
I::Item: std::ops::Deref<Target = ResponseItem>,
{
for item in items {
let item_ref = item.deref();
let is_ghost_snapshot = matches!(item_ref, ResponseItem::GhostSnapshot { .. });
if !is_api_message(item_ref) && !is_ghost_snapshot {
continue;
}
let processed = Self::process_item(&item);
self.items.push(processed);
}
}
pub(crate) fn get_history(&mut self) -> Vec<ResponseItem> {
self.normalize_history();
self.contents()
}
// Returns the history prepared for sending to the model, with extra
// response items filtered out and ghost snapshots removed.
pub(crate) fn get_history_for_prompt(&mut self) -> Vec<ResponseItem> {
let mut history = self.get_history();
Self::remove_ghost_snapshots(&mut history);
history
}
pub(crate) fn remove_first_item(&mut self) {
if !self.items.is_empty() {
// Remove the oldest item (front of the list). Items are ordered from
// oldest → newest, so index 0 is the first entry recorded.
let removed = self.items.remove(0);
// If the removed item participates in a call/output pair, also remove
// its corresponding counterpart to keep the invariants intact without
// running a full normalization pass.
normalize::remove_corresponding_for(&mut self.items, &removed);
}
}
pub(crate) fn replace(&mut self, items: Vec<ResponseItem>) {
self.items = items;
}
pub(crate) fn update_token_info(
&mut self,
usage: &TokenUsage,
model_context_window: Option<i64>,
) {
self.token_info = TokenUsageInfo::new_or_append(
&self.token_info,
&Some(usage.clone()),
model_context_window,
);
}
/// This function enforces a couple of invariants on the in-memory history:
/// 1. every call (function/custom) has a corresponding output entry
/// 2. every output has a corresponding call entry
fn normalize_history(&mut self) {
// all function/tool calls must have a corresponding output
normalize::ensure_call_outputs_present(&mut self.items);
// all outputs must have a corresponding function/tool call
normalize::remove_orphan_outputs(&mut self.items);
}
/// Returns a clone of the contents in the transcript.
fn contents(&self) -> Vec<ResponseItem> {
self.items.clone()
}
fn remove_ghost_snapshots(items: &mut Vec<ResponseItem>) {
items.retain(|item| !matches!(item, ResponseItem::GhostSnapshot { .. }));
}
fn process_item(item: &ResponseItem) -> ResponseItem {
match item {
ResponseItem::FunctionCallOutput { call_id, output } => {
let truncated = format_output_for_model_body(output.content.as_str());
let truncated_items = output
.content_items
.as_ref()
.map(|items| globally_truncate_function_output_items(items));
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
content: truncated,
content_items: truncated_items,
success: output.success,
},
}
}
ResponseItem::CustomToolCallOutput { call_id, output } => {
let truncated = format_output_for_model_body(output);
ResponseItem::CustomToolCallOutput {
call_id: call_id.clone(),
output: truncated,
}
}
ResponseItem::Message { .. }
| ResponseItem::Reasoning { .. }
| ResponseItem::LocalShellCall { .. }
| ResponseItem::FunctionCall { .. }
| ResponseItem::WebSearchCall { .. }
| ResponseItem::CustomToolCall { .. }
| ResponseItem::GhostSnapshot { .. }
| ResponseItem::Other => item.clone(),
}
}
}
/// API messages include every non-system item (user/assistant messages, reasoning,
/// tool calls, tool outputs, shell calls, and web-search calls).
fn is_api_message(message: &ResponseItem) -> bool {
match message {
ResponseItem::Message { role, .. } => role.as_str() != "system",
ResponseItem::FunctionCallOutput { .. }
| ResponseItem::FunctionCall { .. }
| ResponseItem::CustomToolCall { .. }
| ResponseItem::CustomToolCallOutput { .. }
| ResponseItem::LocalShellCall { .. }
| ResponseItem::Reasoning { .. }
| ResponseItem::WebSearchCall { .. } => true,
ResponseItem::GhostSnapshot { .. } => false,
ResponseItem::Other => false,
}
}
#[cfg(test)]
#[path = "history_tests.rs"]
mod tests;

View File

@@ -1,841 +0,0 @@
use super::*;
use crate::context_manager::truncate;
use codex_git::GhostCommit;
use codex_protocol::models::ContentItem;
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::LocalShellAction;
use codex_protocol::models::LocalShellExecAction;
use codex_protocol::models::LocalShellStatus;
use codex_protocol::models::ReasoningItemContent;
use codex_protocol::models::ReasoningItemReasoningSummary;
use pretty_assertions::assert_eq;
use regex_lite::Regex;
fn assistant_msg(text: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: text.to_string(),
}],
}
}
fn create_history_with_items(items: Vec<ResponseItem>) -> ContextManager {
let mut h = ContextManager::new();
h.record_items(items.iter());
h
}
fn user_msg(text: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::OutputText {
text: text.to_string(),
}],
}
}
fn reasoning_msg(text: &str) -> ResponseItem {
ResponseItem::Reasoning {
id: String::new(),
summary: vec![ReasoningItemReasoningSummary::SummaryText {
text: "summary".to_string(),
}],
content: Some(vec![ReasoningItemContent::ReasoningText {
text: text.to_string(),
}]),
encrypted_content: None,
}
}
#[test]
fn filters_non_api_messages() {
let mut h = ContextManager::default();
// System messages are not API messages; Other is ignored.
let system = ResponseItem::Message {
id: None,
role: "system".to_string(),
content: vec![ContentItem::OutputText {
text: "ignored".to_string(),
}],
};
let reasoning = reasoning_msg("thinking...");
h.record_items([&system, &reasoning, &ResponseItem::Other]);
// User and assistant should be retained.
let u = user_msg("hi");
let a = assistant_msg("hello");
h.record_items([&u, &a]);
let items = h.contents();
assert_eq!(
items,
vec![
ResponseItem::Reasoning {
id: String::new(),
summary: vec![ReasoningItemReasoningSummary::SummaryText {
text: "summary".to_string(),
}],
content: Some(vec![ReasoningItemContent::ReasoningText {
text: "thinking...".to_string(),
}]),
encrypted_content: None,
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::OutputText {
text: "hi".to_string()
}]
},
ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: "hello".to_string()
}]
}
]
);
}
#[test]
fn get_history_for_prompt_drops_ghost_commits() {
let items = vec![ResponseItem::GhostSnapshot {
ghost_commit: GhostCommit::new("ghost-1".to_string(), None, Vec::new(), Vec::new()),
}];
let mut history = create_history_with_items(items);
let filtered = history.get_history_for_prompt();
assert_eq!(filtered, vec![]);
}
#[test]
fn remove_first_item_removes_matching_output_for_function_call() {
let items = vec![
ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-1".to_string(),
},
ResponseItem::FunctionCallOutput {
call_id: "call-1".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
];
let mut h = create_history_with_items(items);
h.remove_first_item();
assert_eq!(h.contents(), vec![]);
}
#[test]
fn remove_first_item_removes_matching_call_for_output() {
let items = vec![
ResponseItem::FunctionCallOutput {
call_id: "call-2".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-2".to_string(),
},
];
let mut h = create_history_with_items(items);
h.remove_first_item();
assert_eq!(h.contents(), vec![]);
}
#[test]
fn remove_first_item_handles_local_shell_pair() {
let items = vec![
ResponseItem::LocalShellCall {
id: None,
call_id: Some("call-3".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
ResponseItem::FunctionCallOutput {
call_id: "call-3".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
];
let mut h = create_history_with_items(items);
h.remove_first_item();
assert_eq!(h.contents(), vec![]);
}
#[test]
fn remove_first_item_handles_custom_tool_pair() {
let items = vec![
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "tool-1".to_string(),
name: "my_tool".to_string(),
input: "{}".to_string(),
},
ResponseItem::CustomToolCallOutput {
call_id: "tool-1".to_string(),
output: "ok".to_string(),
},
];
let mut h = create_history_with_items(items);
h.remove_first_item();
assert_eq!(h.contents(), vec![]);
}
#[test]
fn normalization_retains_local_shell_outputs() {
let items = vec![
ResponseItem::LocalShellCall {
id: None,
call_id: Some("shell-1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
ResponseItem::FunctionCallOutput {
call_id: "shell-1".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
];
let mut history = create_history_with_items(items.clone());
let normalized = history.get_history();
assert_eq!(normalized, items);
}
#[test]
fn record_items_truncates_function_call_output_content() {
let mut history = ContextManager::new();
let long_line = "a very long line to trigger truncation\n";
let long_output = long_line.repeat(2_500);
let item = ResponseItem::FunctionCallOutput {
call_id: "call-100".to_string(),
output: FunctionCallOutputPayload {
content: long_output.clone(),
success: Some(true),
..Default::default()
},
};
history.record_items([&item]);
assert_eq!(history.items.len(), 1);
match &history.items[0] {
ResponseItem::FunctionCallOutput { output, .. } => {
assert_ne!(output.content, long_output);
assert!(
output.content.starts_with("Total output lines:"),
"expected truncated summary, got {}",
output.content
);
}
other => panic!("unexpected history item: {other:?}"),
}
}
#[test]
fn record_items_truncates_custom_tool_call_output_content() {
let mut history = ContextManager::new();
let line = "custom output that is very long\n";
let long_output = line.repeat(2_500);
let item = ResponseItem::CustomToolCallOutput {
call_id: "tool-200".to_string(),
output: long_output.clone(),
};
history.record_items([&item]);
assert_eq!(history.items.len(), 1);
match &history.items[0] {
ResponseItem::CustomToolCallOutput { output, .. } => {
assert_ne!(output, &long_output);
assert!(
output.starts_with("Total output lines:"),
"expected truncated summary, got {output}"
);
}
other => panic!("unexpected history item: {other:?}"),
}
}
fn assert_truncated_message_matches(message: &str, line: &str, total_lines: usize) {
let pattern = truncated_message_pattern(line, total_lines);
let regex = Regex::new(&pattern).unwrap_or_else(|err| {
panic!("failed to compile regex {pattern}: {err}");
});
let captures = regex
.captures(message)
.unwrap_or_else(|| panic!("message failed to match pattern {pattern}: {message}"));
let body = captures
.name("body")
.expect("missing body capture")
.as_str();
assert!(
body.len() <= truncate::MODEL_FORMAT_MAX_BYTES,
"body exceeds byte limit: {} bytes",
body.len()
);
}
fn truncated_message_pattern(line: &str, total_lines: usize) -> String {
let head_take = truncate::MODEL_FORMAT_HEAD_LINES.min(total_lines);
let tail_take = truncate::MODEL_FORMAT_TAIL_LINES.min(total_lines.saturating_sub(head_take));
let omitted = total_lines.saturating_sub(head_take + tail_take);
let escaped_line = regex_lite::escape(line);
if omitted == 0 {
return format!(
r"(?s)^Total output lines: {total_lines}\n\n(?P<body>{escaped_line}.*\n\[\.{{3}} output truncated to fit {max_bytes} bytes \.{{3}}]\n\n.*)$",
max_bytes = truncate::MODEL_FORMAT_MAX_BYTES,
);
}
format!(
r"(?s)^Total output lines: {total_lines}\n\n(?P<body>{escaped_line}.*\n\[\.{{3}} omitted {omitted} of {total_lines} lines \.{{3}}]\n\n.*)$",
)
}
#[test]
fn format_exec_output_truncates_large_error() {
let line = "very long execution error line that should trigger truncation\n";
let large_error = line.repeat(2_500); // way beyond both byte and line limits
let truncated = truncate::format_output_for_model_body(&large_error);
let total_lines = large_error.lines().count();
assert_truncated_message_matches(&truncated, line, total_lines);
assert_ne!(truncated, large_error);
}
#[test]
fn format_exec_output_marks_byte_truncation_without_omitted_lines() {
let long_line = "a".repeat(truncate::MODEL_FORMAT_MAX_BYTES + 50);
let truncated = truncate::format_output_for_model_body(&long_line);
assert_ne!(truncated, long_line);
let marker_line = format!(
"[... output truncated to fit {} bytes ...]",
truncate::MODEL_FORMAT_MAX_BYTES
);
assert!(
truncated.contains(&marker_line),
"missing byte truncation marker: {truncated}"
);
assert!(
!truncated.contains("omitted"),
"line omission marker should not appear when no lines were dropped: {truncated}"
);
}
#[test]
fn format_exec_output_returns_original_when_within_limits() {
let content = "example output\n".repeat(10);
assert_eq!(truncate::format_output_for_model_body(&content), content);
}
#[test]
fn format_exec_output_reports_omitted_lines_and_keeps_head_and_tail() {
let total_lines = truncate::MODEL_FORMAT_MAX_LINES + 100;
let content: String = (0..total_lines)
.map(|idx| format!("line-{idx}\n"))
.collect();
let truncated = truncate::format_output_for_model_body(&content);
let omitted = total_lines - truncate::MODEL_FORMAT_MAX_LINES;
let expected_marker = format!("[... omitted {omitted} of {total_lines} lines ...]");
assert!(
truncated.contains(&expected_marker),
"missing omitted marker: {truncated}"
);
assert!(
truncated.contains("line-0\n"),
"expected head line to remain: {truncated}"
);
let last_line = format!("line-{}\n", total_lines - 1);
assert!(
truncated.contains(&last_line),
"expected tail line to remain: {truncated}"
);
}
#[test]
fn format_exec_output_prefers_line_marker_when_both_limits_exceeded() {
let total_lines = truncate::MODEL_FORMAT_MAX_LINES + 42;
let long_line = "x".repeat(256);
let content: String = (0..total_lines)
.map(|idx| format!("line-{idx}-{long_line}\n"))
.collect();
let truncated = truncate::format_output_for_model_body(&content);
assert!(
truncated.contains("[... omitted 42 of 298 lines ...]"),
"expected omitted marker when line count exceeds limit: {truncated}"
);
assert!(
!truncated.contains("output truncated to fit"),
"line omission marker should take precedence over byte marker: {truncated}"
);
}
#[test]
fn truncates_across_multiple_under_limit_texts_and_reports_omitted() {
// Arrange: several text items, none exceeding per-item limit, but total exceeds budget.
let budget = truncate::MODEL_FORMAT_MAX_BYTES;
let t1_len = (budget / 2).saturating_sub(10);
let t2_len = (budget / 2).saturating_sub(10);
let remaining_after_t1_t2 = budget.saturating_sub(t1_len + t2_len);
let t3_len = 50; // gets truncated to remaining_after_t1_t2
let t4_len = 5; // omitted
let t5_len = 7; // omitted
let t1 = "a".repeat(t1_len);
let t2 = "b".repeat(t2_len);
let t3 = "c".repeat(t3_len);
let t4 = "d".repeat(t4_len);
let t5 = "e".repeat(t5_len);
let item = ResponseItem::FunctionCallOutput {
call_id: "call-omit".to_string(),
output: FunctionCallOutputPayload {
content: "irrelevant".to_string(),
content_items: Some(vec![
FunctionCallOutputContentItem::InputText { text: t1 },
FunctionCallOutputContentItem::InputText { text: t2 },
FunctionCallOutputContentItem::InputImage {
image_url: "img:mid".to_string(),
},
FunctionCallOutputContentItem::InputText { text: t3 },
FunctionCallOutputContentItem::InputText { text: t4 },
FunctionCallOutputContentItem::InputText { text: t5 },
]),
success: Some(true),
},
};
let mut history = ContextManager::new();
history.record_items([&item]);
assert_eq!(history.items.len(), 1);
let json = serde_json::to_value(&history.items[0]).expect("serialize to json");
let output = json
.get("output")
.expect("output field")
.as_array()
.expect("array output");
// Expect: t1 (full), t2 (full), image, t3 (truncated), summary mentioning 2 omitted.
assert_eq!(output.len(), 5);
let first = output[0].as_object().expect("first obj");
assert_eq!(first.get("type").unwrap(), "input_text");
let first_text = first.get("text").unwrap().as_str().unwrap();
assert_eq!(first_text.len(), t1_len);
let second = output[1].as_object().expect("second obj");
assert_eq!(second.get("type").unwrap(), "input_text");
let second_text = second.get("text").unwrap().as_str().unwrap();
assert_eq!(second_text.len(), t2_len);
assert_eq!(
output[2],
serde_json::json!({"type": "input_image", "image_url": "img:mid"})
);
let fourth = output[3].as_object().expect("fourth obj");
assert_eq!(fourth.get("type").unwrap(), "input_text");
let fourth_text = fourth.get("text").unwrap().as_str().unwrap();
assert_eq!(fourth_text.len(), remaining_after_t1_t2);
let summary = output[4].as_object().expect("summary obj");
assert_eq!(summary.get("type").unwrap(), "input_text");
let summary_text = summary.get("text").unwrap().as_str().unwrap();
assert!(summary_text.contains("omitted 2 text items"));
}
// TODO(aibrahim): run CI in release mode.
#[cfg(not(debug_assertions))]
#[test]
fn normalize_adds_missing_output_for_function_call() {
let items = vec![ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-x".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(
h.contents(),
vec![
ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-x".to_string(),
},
ResponseItem::FunctionCallOutput {
call_id: "call-x".to_string(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
]
);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_adds_missing_output_for_custom_tool_call() {
let items = vec![ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "tool-x".to_string(),
name: "custom".to_string(),
input: "{}".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(
h.contents(),
vec![
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "tool-x".to_string(),
name: "custom".to_string(),
input: "{}".to_string(),
},
ResponseItem::CustomToolCallOutput {
call_id: "tool-x".to_string(),
output: "aborted".to_string(),
},
]
);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_adds_missing_output_for_local_shell_call_with_id() {
let items = vec![ResponseItem::LocalShellCall {
id: None,
call_id: Some("shell-1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(
h.contents(),
vec![
ResponseItem::LocalShellCall {
id: None,
call_id: Some("shell-1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
ResponseItem::FunctionCallOutput {
call_id: "shell-1".to_string(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
]
);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_removes_orphan_function_call_output() {
let items = vec![ResponseItem::FunctionCallOutput {
call_id: "orphan-1".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(h.contents(), vec![]);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_removes_orphan_custom_tool_call_output() {
let items = vec![ResponseItem::CustomToolCallOutput {
call_id: "orphan-2".to_string(),
output: "ok".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(h.contents(), vec![]);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_mixed_inserts_and_removals() {
let items = vec![
// Will get an inserted output
ResponseItem::FunctionCall {
id: None,
name: "f1".to_string(),
arguments: "{}".to_string(),
call_id: "c1".to_string(),
},
// Orphan output that should be removed
ResponseItem::FunctionCallOutput {
call_id: "c2".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
// Will get an inserted custom tool output
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "t1".to_string(),
name: "tool".to_string(),
input: "{}".to_string(),
},
// Local shell call also gets an inserted function call output
ResponseItem::LocalShellCall {
id: None,
call_id: Some("s1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(
h.contents(),
vec![
ResponseItem::FunctionCall {
id: None,
name: "f1".to_string(),
arguments: "{}".to_string(),
call_id: "c1".to_string(),
},
ResponseItem::FunctionCallOutput {
call_id: "c1".to_string(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "t1".to_string(),
name: "tool".to_string(),
input: "{}".to_string(),
},
ResponseItem::CustomToolCallOutput {
call_id: "t1".to_string(),
output: "aborted".to_string(),
},
ResponseItem::LocalShellCall {
id: None,
call_id: Some("s1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
ResponseItem::FunctionCallOutput {
call_id: "s1".to_string(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
]
);
}
// In debug builds we panic on normalization errors instead of silently fixing them.
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_adds_missing_output_for_function_call_panics_in_debug() {
let items = vec![ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-x".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_adds_missing_output_for_custom_tool_call_panics_in_debug() {
let items = vec![ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "tool-x".to_string(),
name: "custom".to_string(),
input: "{}".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_adds_missing_output_for_local_shell_call_with_id_panics_in_debug() {
let items = vec![ResponseItem::LocalShellCall {
id: None,
call_id: Some("shell-1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_removes_orphan_function_call_output_panics_in_debug() {
let items = vec![ResponseItem::FunctionCallOutput {
call_id: "orphan-1".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_removes_orphan_custom_tool_call_output_panics_in_debug() {
let items = vec![ResponseItem::CustomToolCallOutput {
call_id: "orphan-2".to_string(),
output: "ok".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_mixed_inserts_and_removals_panics_in_debug() {
let items = vec![
ResponseItem::FunctionCall {
id: None,
name: "f1".to_string(),
arguments: "{}".to_string(),
call_id: "c1".to_string(),
},
ResponseItem::FunctionCallOutput {
call_id: "c2".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "t1".to_string(),
name: "tool".to_string(),
input: "{}".to_string(),
},
ResponseItem::LocalShellCall {
id: None,
call_id: Some("s1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
];
let mut h = create_history_with_items(items);
h.normalize_history();
}

View File

@@ -1,6 +0,0 @@
mod history;
mod normalize;
mod truncate;
pub(crate) use history::ContextManager;
pub(crate) use truncate::format_output_for_model_body;

View File

@@ -1,213 +0,0 @@
use std::collections::HashSet;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseItem;
use crate::util::error_or_panic;
pub(crate) fn ensure_call_outputs_present(items: &mut Vec<ResponseItem>) {
// Collect synthetic outputs to insert immediately after their calls.
// Store the insertion position (index of call) alongside the item so
// we can insert in reverse order and avoid index shifting.
let mut missing_outputs_to_insert: Vec<(usize, ResponseItem)> = Vec::new();
for (idx, item) in items.iter().enumerate() {
match item {
ResponseItem::FunctionCall { call_id, .. } => {
let has_output = items.iter().any(|i| match i {
ResponseItem::FunctionCallOutput {
call_id: existing, ..
} => existing == call_id,
_ => false,
});
if !has_output {
error_or_panic(format!(
"Function call output is missing for call id: {call_id}"
));
missing_outputs_to_insert.push((
idx,
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
));
}
}
ResponseItem::CustomToolCall { call_id, .. } => {
let has_output = items.iter().any(|i| match i {
ResponseItem::CustomToolCallOutput {
call_id: existing, ..
} => existing == call_id,
_ => false,
});
if !has_output {
error_or_panic(format!(
"Custom tool call output is missing for call id: {call_id}"
));
missing_outputs_to_insert.push((
idx,
ResponseItem::CustomToolCallOutput {
call_id: call_id.clone(),
output: "aborted".to_string(),
},
));
}
}
// LocalShellCall is represented in upstream streams by a FunctionCallOutput
ResponseItem::LocalShellCall { call_id, .. } => {
if let Some(call_id) = call_id.as_ref() {
let has_output = items.iter().any(|i| match i {
ResponseItem::FunctionCallOutput {
call_id: existing, ..
} => existing == call_id,
_ => false,
});
if !has_output {
error_or_panic(format!(
"Local shell call output is missing for call id: {call_id}"
));
missing_outputs_to_insert.push((
idx,
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
));
}
}
}
_ => {}
}
}
// Insert synthetic outputs in reverse index order to avoid re-indexing.
for (idx, output_item) in missing_outputs_to_insert.into_iter().rev() {
items.insert(idx + 1, output_item);
}
}
pub(crate) fn remove_orphan_outputs(items: &mut Vec<ResponseItem>) {
let function_call_ids: HashSet<String> = items
.iter()
.filter_map(|i| match i {
ResponseItem::FunctionCall { call_id, .. } => Some(call_id.clone()),
_ => None,
})
.collect();
let local_shell_call_ids: HashSet<String> = items
.iter()
.filter_map(|i| match i {
ResponseItem::LocalShellCall {
call_id: Some(call_id),
..
} => Some(call_id.clone()),
_ => None,
})
.collect();
let custom_tool_call_ids: HashSet<String> = items
.iter()
.filter_map(|i| match i {
ResponseItem::CustomToolCall { call_id, .. } => Some(call_id.clone()),
_ => None,
})
.collect();
items.retain(|item| match item {
ResponseItem::FunctionCallOutput { call_id, .. } => {
let has_match =
function_call_ids.contains(call_id) || local_shell_call_ids.contains(call_id);
if !has_match {
error_or_panic(format!(
"Orphan function call output for call id: {call_id}"
));
}
has_match
}
ResponseItem::CustomToolCallOutput { call_id, .. } => {
let has_match = custom_tool_call_ids.contains(call_id);
if !has_match {
error_or_panic(format!(
"Orphan custom tool call output for call id: {call_id}"
));
}
has_match
}
_ => true,
});
}
pub(crate) fn remove_corresponding_for(items: &mut Vec<ResponseItem>, item: &ResponseItem) {
match item {
ResponseItem::FunctionCall { call_id, .. } => {
remove_first_matching(items, |i| {
matches!(
i,
ResponseItem::FunctionCallOutput {
call_id: existing, ..
} if existing == call_id
)
});
}
ResponseItem::FunctionCallOutput { call_id, .. } => {
if let Some(pos) = items.iter().position(|i| {
matches!(i, ResponseItem::FunctionCall { call_id: existing, .. } if existing == call_id)
}) {
items.remove(pos);
} else if let Some(pos) = items.iter().position(|i| {
matches!(i, ResponseItem::LocalShellCall { call_id: Some(existing), .. } if existing == call_id)
}) {
items.remove(pos);
}
}
ResponseItem::CustomToolCall { call_id, .. } => {
remove_first_matching(items, |i| {
matches!(
i,
ResponseItem::CustomToolCallOutput {
call_id: existing, ..
} if existing == call_id
)
});
}
ResponseItem::CustomToolCallOutput { call_id, .. } => {
remove_first_matching(
items,
|i| matches!(i, ResponseItem::CustomToolCall { call_id: existing, .. } if existing == call_id),
);
}
ResponseItem::LocalShellCall {
call_id: Some(call_id),
..
} => {
remove_first_matching(items, |i| {
matches!(
i,
ResponseItem::FunctionCallOutput {
call_id: existing, ..
} if existing == call_id
)
});
}
_ => {}
}
}
fn remove_first_matching<F>(items: &mut Vec<ResponseItem>, predicate: F)
where
F: Fn(&ResponseItem) -> bool,
{
if let Some(pos) = items.iter().position(predicate) {
items.remove(pos);
}
}

View File

@@ -1,128 +0,0 @@
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_utils_string::take_bytes_at_char_boundary;
use codex_utils_string::take_last_bytes_at_char_boundary;
// Model-formatting limits: clients get full streams; only content sent to the model is truncated.
pub(crate) const MODEL_FORMAT_MAX_BYTES: usize = 10 * 1024; // 10 KiB
pub(crate) const MODEL_FORMAT_MAX_LINES: usize = 256; // lines
pub(crate) const MODEL_FORMAT_HEAD_LINES: usize = MODEL_FORMAT_MAX_LINES / 2;
pub(crate) const MODEL_FORMAT_TAIL_LINES: usize = MODEL_FORMAT_MAX_LINES - MODEL_FORMAT_HEAD_LINES; // 128
pub(crate) const MODEL_FORMAT_HEAD_BYTES: usize = MODEL_FORMAT_MAX_BYTES / 2;
pub(crate) fn globally_truncate_function_output_items(
items: &[FunctionCallOutputContentItem],
) -> Vec<FunctionCallOutputContentItem> {
let mut out: Vec<FunctionCallOutputContentItem> = Vec::with_capacity(items.len());
let mut remaining = MODEL_FORMAT_MAX_BYTES;
let mut omitted_text_items = 0usize;
for it in items {
match it {
FunctionCallOutputContentItem::InputText { text } => {
if remaining == 0 {
omitted_text_items += 1;
continue;
}
let len = text.len();
if len <= remaining {
out.push(FunctionCallOutputContentItem::InputText { text: text.clone() });
remaining -= len;
} else {
let slice = take_bytes_at_char_boundary(text, remaining);
if !slice.is_empty() {
out.push(FunctionCallOutputContentItem::InputText {
text: slice.to_string(),
});
}
remaining = 0;
}
}
// TODO(aibrahim): handle input images; resize
FunctionCallOutputContentItem::InputImage { image_url } => {
out.push(FunctionCallOutputContentItem::InputImage {
image_url: image_url.clone(),
});
}
}
}
if omitted_text_items > 0 {
out.push(FunctionCallOutputContentItem::InputText {
text: format!("[omitted {omitted_text_items} text items ...]"),
});
}
out
}
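A compact illustration of the shared byte budget (a sketch, consistent with the tests elsewhere in this diff):
// Three text items against the 10 KiB budget: the first fits whole, the
// second is clipped to the remaining space, the third is dropped and
// reported via a trailing summary item.
let items = vec![
    FunctionCallOutputContentItem::InputText { text: "a".repeat(6 * 1024) },
    FunctionCallOutputContentItem::InputText { text: "b".repeat(6 * 1024) },
    FunctionCallOutputContentItem::InputText { text: "c".to_string() },
];
let out = globally_truncate_function_output_items(&items);
assert_eq!(out.len(), 3); // 6 KiB item, 4 KiB clipped item, omission marker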
pub(crate) fn format_output_for_model_body(content: &str) -> String {
// Head+tail truncation for the model: show the beginning and end with an elision.
// Clients still receive full streams; only this formatted summary is capped.
let total_lines = content.lines().count();
if content.len() <= MODEL_FORMAT_MAX_BYTES && total_lines <= MODEL_FORMAT_MAX_LINES {
return content.to_string();
}
let output = truncate_formatted_exec_output(content, total_lines);
format!("Total output lines: {total_lines}\n\n{output}")
}
fn truncate_formatted_exec_output(content: &str, total_lines: usize) -> String {
let segments: Vec<&str> = content.split_inclusive('\n').collect();
let head_take = MODEL_FORMAT_HEAD_LINES.min(segments.len());
let tail_take = MODEL_FORMAT_TAIL_LINES.min(segments.len().saturating_sub(head_take));
let omitted = segments.len().saturating_sub(head_take + tail_take);
let head_slice_end: usize = segments
.iter()
.take(head_take)
.map(|segment| segment.len())
.sum();
let tail_slice_start: usize = if tail_take == 0 {
content.len()
} else {
content.len()
- segments
.iter()
.rev()
.take(tail_take)
.map(|segment| segment.len())
.sum::<usize>()
};
let head_slice = &content[..head_slice_end];
let tail_slice = &content[tail_slice_start..];
let truncated_by_bytes = content.len() > MODEL_FORMAT_MAX_BYTES;
// Slightly inaccurate: this counts metadata lines as well as shell output lines.
let marker = if omitted > 0 {
Some(format!(
"\n[... omitted {omitted} of {total_lines} lines ...]\n\n"
))
} else if truncated_by_bytes {
Some(format!(
"\n[... output truncated to fit {MODEL_FORMAT_MAX_BYTES} bytes ...]\n\n"
))
} else {
None
};
let marker_len = marker.as_ref().map_or(0, String::len);
let base_head_budget = MODEL_FORMAT_HEAD_BYTES.min(MODEL_FORMAT_MAX_BYTES);
let head_budget = base_head_budget.min(MODEL_FORMAT_MAX_BYTES.saturating_sub(marker_len));
let head_part = take_bytes_at_char_boundary(head_slice, head_budget);
let mut result = String::with_capacity(MODEL_FORMAT_MAX_BYTES.min(content.len()));
result.push_str(head_part);
if let Some(marker_text) = marker.as_ref() {
result.push_str(marker_text);
}
let remaining = MODEL_FORMAT_MAX_BYTES.saturating_sub(result.len());
if remaining == 0 {
return result;
}
let tail_part = take_last_bytes_at_char_boundary(tail_slice, remaining);
result.push_str(tail_part);
result
}
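
To make the line budgeting above concrete, a minimal standalone sketch (the constants mirror the ones at the top of this file; the output shows what the elision marker would report for a 1,000-line stream):

fn main() {
    const MAX_LINES: usize = 256;
    const HEAD: usize = MAX_LINES / 2; // 128
    const TAIL: usize = MAX_LINES - HEAD; // 128

    let total_lines = 1_000usize;
    let omitted = total_lines.saturating_sub(HEAD + TAIL);
    // Prints: [... omitted 744 of 1000 lines ...]
    println!("[... omitted {omitted} of {total_lines} lines ...]");
}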

File diff suppressed because it is too large

View File

@@ -32,11 +32,12 @@ pub async fn discover_prompts_in_excluding(
while let Ok(Some(entry)) = entries.next_entry().await {
let path = entry.path();
let is_file_like = fs::metadata(&path)
let is_file = entry
.file_type()
.await
.map(|m| m.is_file())
.map(|ft| ft.is_file())
.unwrap_or(false);
if !is_file_like {
if !is_file {
continue;
}
// Only include Markdown files with a .md extension.
@@ -196,25 +197,6 @@ mod tests {
assert_eq!(names, vec!["good"]);
}
#[tokio::test]
#[cfg(unix)]
async fn discovers_symlinked_md_files() {
let tmp = tempdir().expect("create TempDir");
let dir = tmp.path();
// Create a real file
fs::write(dir.join("real.md"), b"real content").unwrap();
// Create a symlink to the real file
std::os::unix::fs::symlink(dir.join("real.md"), dir.join("link.md")).unwrap();
let found = discover_prompts_in(dir).await;
let names: Vec<String> = found.into_iter().map(|e| e.name).collect();
// Both real and link should be discovered, sorted alphabetically
assert_eq!(names, vec!["link", "real"]);
}
#[tokio::test]
async fn parses_frontmatter_and_strips_from_body() {
let tmp = tempdir().expect("create TempDir");
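
Context for this change and the deleted symlink test: `fs::metadata` follows symlinks while `DirEntry::file_type` does not, so a symlinked `.md` file no longer counts as a file. A minimal Unix-only sketch of the distinction using std (tokio's `DirEntry::file_type` behaves the same way):

#[cfg(unix)]
fn main() -> std::io::Result<()> {
    use std::fs;

    let dir = std::env::temp_dir().join("prompt-symlink-demo");
    fs::create_dir_all(&dir)?;
    fs::write(dir.join("real.md"), b"real content")?;
    let link = dir.join("link.md");
    let _ = fs::remove_file(&link);
    std::os::unix::fs::symlink(dir.join("real.md"), &link)?;

    // Follows the link: the target is a regular file.
    assert!(fs::metadata(&link)?.is_file());
    // Does not follow the link: reports a symlink, like DirEntry::file_type.
    assert!(!fs::symlink_metadata(&link)?.file_type().is_file());
    Ok(())
}

#[cfg(not(unix))]
fn main() {}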

View File

@@ -24,6 +24,7 @@ pub enum NetworkAccess {
#[serde(rename = "environment_context", rename_all = "snake_case")]
pub(crate) struct EnvironmentContext {
pub cwd: Option<PathBuf>,
pub local_date: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox_mode: Option<SandboxMode>,
pub network_access: Option<NetworkAccess>,
@@ -34,12 +35,14 @@ pub(crate) struct EnvironmentContext {
impl EnvironmentContext {
pub fn new(
cwd: Option<PathBuf>,
local_date: Option<String>,
approval_policy: Option<AskForApproval>,
sandbox_policy: Option<SandboxPolicy>,
shell: Option<Shell>,
) -> Self {
Self {
cwd,
local_date,
approval_policy,
sandbox_mode: match sandbox_policy {
Some(SandboxPolicy::DangerFullAccess) => Some(SandboxMode::DangerFullAccess),
@@ -79,6 +82,7 @@ impl EnvironmentContext {
pub fn equals_except_shell(&self, other: &EnvironmentContext) -> bool {
let EnvironmentContext {
cwd,
local_date,
approval_policy,
sandbox_mode,
network_access,
@@ -88,6 +92,7 @@ impl EnvironmentContext {
} = other;
self.cwd == *cwd
&& self.local_date == *local_date
&& self.approval_policy == *approval_policy
&& self.sandbox_mode == *sandbox_mode
&& self.network_access == *network_access
@@ -100,6 +105,11 @@ impl EnvironmentContext {
} else {
None
};
let local_date = if before.local_date_with_timezone != after.local_date_with_timezone {
after.local_date_with_timezone.clone()
} else {
None
};
let approval_policy = if before.approval_policy != after.approval_policy {
Some(after.approval_policy)
} else {
@@ -110,7 +120,7 @@ impl EnvironmentContext {
} else {
None
};
EnvironmentContext::new(cwd, approval_policy, sandbox_policy, None)
EnvironmentContext::new(cwd, local_date, approval_policy, sandbox_policy, None)
}
}
@@ -118,6 +128,7 @@ impl From<&TurnContext> for EnvironmentContext {
fn from(turn_context: &TurnContext) -> Self {
Self::new(
Some(turn_context.cwd.clone()),
turn_context.local_date_with_timezone.clone(),
Some(turn_context.approval_policy),
Some(turn_context.sandbox_policy.clone()),
// Shell is not configurable from turn to turn
@@ -134,6 +145,7 @@ impl EnvironmentContext {
/// ```xml
/// <environment_context>
/// <cwd>...</cwd>
/// <local_date>...</local_date>
/// <approval_policy>...</approval_policy>
/// <sandbox_mode>...</sandbox_mode>
/// <writable_roots>...</writable_roots>
@@ -146,6 +158,9 @@ impl EnvironmentContext {
if let Some(cwd) = self.cwd {
lines.push(format!(" <cwd>{}</cwd>", cwd.to_string_lossy()));
}
if let Some(local_date) = self.local_date {
lines.push(format!(" <local_date>{local_date}</local_date>"));
}
if let Some(approval_policy) = self.approval_policy {
lines.push(format!(
" <approval_policy>{approval_policy}</approval_policy>"
@@ -212,6 +227,7 @@ mod tests {
fn serialize_workspace_write_environment_context() {
let context = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo", "/tmp"], false)),
None,
@@ -219,6 +235,7 @@ mod tests {
let expected = r#"<environment_context>
<cwd>/repo</cwd>
<local_date>2025-01-01 +00:00</local_date>
<approval_policy>on-request</approval_policy>
<sandbox_mode>workspace-write</sandbox_mode>
<network_access>restricted</network_access>
@@ -235,12 +252,14 @@ mod tests {
fn serialize_read_only_environment_context() {
let context = EnvironmentContext::new(
None,
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::Never),
Some(SandboxPolicy::ReadOnly),
None,
);
let expected = r#"<environment_context>
<local_date>2025-01-01 +00:00</local_date>
<approval_policy>never</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
@@ -253,12 +272,14 @@ mod tests {
fn serialize_full_access_environment_context() {
let context = EnvironmentContext::new(
None,
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::OnFailure),
Some(SandboxPolicy::DangerFullAccess),
None,
);
let expected = r#"<environment_context>
<local_date>2025-01-01 +00:00</local_date>
<approval_policy>on-failure</approval_policy>
<sandbox_mode>danger-full-access</sandbox_mode>
<network_access>enabled</network_access>
@@ -272,12 +293,14 @@ mod tests {
// Approval policy
let context1 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo"], false)),
None,
);
let context2 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::Never),
Some(workspace_write_policy(vec!["/repo"], true)),
None,
@@ -289,12 +312,14 @@ mod tests {
fn equals_except_shell_compares_sandbox_policy() {
let context1 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::new_read_only_policy()),
None,
);
let context2 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::new_workspace_write_policy()),
None,
@@ -307,12 +332,14 @@ mod tests {
fn equals_except_shell_compares_workspace_write_policy() {
let context1 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo", "/tmp", "/var"], false)),
None,
);
let context2 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo", "/tmp"], true)),
None,
@@ -325,6 +352,7 @@ mod tests {
fn equals_except_shell_ignores_shell() {
let context1 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo"], false)),
Some(Shell::Bash(BashShell {
@@ -334,6 +362,7 @@ mod tests {
);
let context2 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some("2025-01-01 +00:00".to_string()),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo"], false)),
Some(Shell::Zsh(ZshShell {
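
The `local_date` strings above take the shape `YYYY-MM-DD +HH:MM`. One way to produce that with `chrono` (already a dependency here); the exact helper that populates `local_date_with_timezone` is not shown in this diff, so treat this as an assumption:

use chrono::Local;

fn main() {
    // e.g. "2025-10-30 -04:00": local date plus UTC offset.
    let local_date = Local::now().format("%Y-%m-%d %:z").to_string();
    println!("<local_date>{local_date}</local_date>");
}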

View File

@@ -4,8 +4,6 @@ use crate::token_data::KnownPlan;
use crate::token_data::PlanType;
use crate::truncate::truncate_middle;
use chrono::DateTime;
use chrono::Datelike;
use chrono::Local;
use chrono::Utc;
use codex_async_utils::CancelErr;
use codex_protocol::ConversationId;
@@ -135,9 +133,6 @@ pub enum CodexErr {
#[error("unsupported operation: {0}")]
UnsupportedOperation(String),
#[error("{0}")]
RefreshTokenFailed(RefreshTokenFailedError),
#[error("Fatal error: {0}")]
Fatal(String),
@@ -204,30 +199,6 @@ impl std::fmt::Display for ResponseStreamFailed {
}
}
#[derive(Debug, Clone, PartialEq, Eq, Error)]
#[error("{message}")]
pub struct RefreshTokenFailedError {
pub reason: RefreshTokenFailedReason,
pub message: String,
}
impl RefreshTokenFailedError {
pub fn new(reason: RefreshTokenFailedReason, message: impl Into<String>) -> Self {
Self {
reason,
message: message.into(),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RefreshTokenFailedReason {
Expired,
Exhausted,
Revoked,
Other,
}
#[derive(Debug)]
pub struct UnexpectedResponseError {
pub status: StatusCode,
@@ -282,7 +253,7 @@ impl std::fmt::Display for UsageLimitReachedError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let message = match self.plan_type.as_ref() {
Some(PlanType::Known(KnownPlan::Plus)) => format!(
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit https://chatgpt.com/codex/settings/usage to purchase more credits{}",
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits{}",
retry_suffix_after_or(self.resets_at.as_ref())
),
Some(PlanType::Known(KnownPlan::Team)) | Some(PlanType::Known(KnownPlan::Business)) => {
@@ -296,7 +267,7 @@ impl std::fmt::Display for UsageLimitReachedError {
.to_string()
}
Some(PlanType::Known(KnownPlan::Pro)) => format!(
"You've hit your usage limit. Visit https://chatgpt.com/codex/settings/usage to purchase more credits{}",
"You've hit your usage limit. Visit chatgpt.com/codex/settings/usage to purchase more credits{}",
retry_suffix_after_or(self.resets_at.as_ref())
),
Some(PlanType::Known(KnownPlan::Enterprise))
@@ -315,46 +286,28 @@ impl std::fmt::Display for UsageLimitReachedError {
}
fn retry_suffix(resets_at: Option<&DateTime<Utc>>) -> String {
if let Some(resets_at) = resets_at {
let formatted = format_retry_timestamp(resets_at);
format!(" Try again at {formatted}.")
if let Some(secs) = remaining_seconds(resets_at) {
let reset_duration = format_reset_duration(secs);
format!(" Try again in {reset_duration}.")
} else {
" Try again later.".to_string()
}
}
fn retry_suffix_after_or(resets_at: Option<&DateTime<Utc>>) -> String {
if let Some(resets_at) = resets_at {
let formatted = format_retry_timestamp(resets_at);
format!(" or try again at {formatted}.")
if let Some(secs) = remaining_seconds(resets_at) {
let reset_duration = format_reset_duration(secs);
format!(" or try again in {reset_duration}.")
} else {
" or try again later.".to_string()
}
}
fn format_retry_timestamp(resets_at: &DateTime<Utc>) -> String {
let local_reset = resets_at.with_timezone(&Local);
let local_now = now_for_retry().with_timezone(&Local);
if local_reset.date_naive() == local_now.date_naive() {
local_reset.format("%-I:%M %p").to_string()
} else {
let suffix = day_suffix(local_reset.day());
local_reset
.format(&format!("%b %-d{suffix}, %Y %-I:%M %p"))
.to_string()
}
}
fn day_suffix(day: u32) -> &'static str {
match day {
11..=13 => "th",
_ => match day % 10 {
1 => "st",
2 => "nd", // codespell:ignore
3 => "rd",
_ => "th",
},
}
fn remaining_seconds(resets_at: Option<&DateTime<Utc>>) -> Option<u64> {
let resets_at = resets_at.cloned()?;
let now = now_for_retry();
let secs = resets_at.signed_duration_since(now).num_seconds();
Some(if secs <= 0 { 0 } else { secs as u64 })
}
#[cfg(test)]
@@ -373,6 +326,36 @@ fn now_for_retry() -> DateTime<Utc> {
Utc::now()
}
fn format_reset_duration(total_secs: u64) -> String {
let days = total_secs / 86_400;
let hours = (total_secs % 86_400) / 3_600;
let minutes = (total_secs % 3_600) / 60;
let mut parts: Vec<String> = Vec::new();
if days > 0 {
let unit = if days == 1 { "day" } else { "days" };
parts.push(format!("{days} {unit}"));
}
if hours > 0 {
let unit = if hours == 1 { "hour" } else { "hours" };
parts.push(format!("{hours} {unit}"));
}
if minutes > 0 {
let unit = if minutes == 1 { "minute" } else { "minutes" };
parts.push(format!("{minutes} {unit}"));
}
if parts.is_empty() {
return "less than a minute".to_string();
}
match parts.len() {
1 => parts[0].clone(),
2 => format!("{} {}", parts[0], parts[1]),
_ => format!("{} {} {}", parts[0], parts[1], parts[2]),
}
}
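
Expected outputs for the new formatter, restated as a test-style sketch (assumes it sits in the same module, since `format_reset_duration` is private; the cases mirror the tests below):

#[test]
fn format_reset_duration_examples() {
    assert_eq!(format_reset_duration(30), "less than a minute");
    assert_eq!(format_reset_duration(5 * 60), "5 minutes");
    assert_eq!(format_reset_duration(3_600), "1 hour");
    assert_eq!(format_reset_duration(3 * 3_600 + 32 * 60), "3 hours 32 minutes");
    assert_eq!(
        format_reset_duration(2 * 86_400 + 3 * 3_600 + 5 * 60),
        "2 days 3 hours 5 minutes"
    );
}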
#[derive(Debug)]
pub struct EnvVarError {
/// Name of the environment variable that is missing.
@@ -487,7 +470,7 @@ mod tests {
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit https://chatgpt.com/codex/settings/usage to purchase more credits or try again later."
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits or try again later."
);
}
@@ -589,16 +572,15 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::hours(1);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Team)),
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!(
"You've hit your usage limit. To get more access now, send a request to your admin or try again at {expected_time}."
assert_eq!(
err.to_string(),
"You've hit your usage limit. To get more access now, send a request to your admin or try again in 1 hour."
);
assert_eq!(err.to_string(), expected);
});
}
@@ -633,16 +615,15 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::hours(1);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Pro)),
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!(
"You've hit your usage limit. Visit https://chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
assert_eq!(
err.to_string(),
"You've hit your usage limit. Visit chatgpt.com/codex/settings/usage to purchase more credits or try again in 1 hour."
);
assert_eq!(err.to_string(), expected);
});
}
@@ -651,14 +632,15 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::minutes(5);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: None,
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
assert_eq!(err.to_string(), expected);
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in 5 minutes."
);
});
}
@@ -667,16 +649,15 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::hours(3) + ChronoDuration::minutes(32);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Plus)),
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!(
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit https://chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits or try again in 3 hours 32 minutes."
);
assert_eq!(err.to_string(), expected);
});
}
@@ -686,14 +667,15 @@ mod tests {
let resets_at =
base + ChronoDuration::days(2) + ChronoDuration::hours(3) + ChronoDuration::minutes(5);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: None,
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
assert_eq!(err.to_string(), expected);
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in 2 days 3 hours 5 minutes."
);
});
}
@@ -702,14 +684,15 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::seconds(30);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: None,
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
assert_eq!(err.to_string(), expected);
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in less than a minute."
);
});
}
}

View File

@@ -13,19 +13,13 @@ use codex_protocol::user_input::UserInput;
use tracing::warn;
use uuid::Uuid;
use crate::user_instructions::UserInstructions;
fn is_session_prefix(text: &str) -> bool {
let trimmed = text.trim_start();
let lowered = trimmed.to_ascii_lowercase();
lowered.starts_with("<environment_context>")
lowered.starts_with("<environment_context>") || lowered.starts_with("<user_instructions>")
}
fn parse_user_message(message: &[ContentItem]) -> Option<UserMessageItem> {
if UserInstructions::is_user_instructions(message) {
return None;
}
let mut content: Vec<UserInput> = Vec::new();
for content_item in message.iter() {
@@ -173,38 +167,6 @@ mod tests {
}
}
#[test]
fn skips_user_instructions_and_env() {
let items = vec![
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "<user_instructions>test_text</user_instructions>".to_string(),
}],
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "<environment_context>test_text</environment_context>".to_string(),
}],
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "# AGENTS.md instructions for test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>".to_string(),
}],
},
];
for item in items {
let turn_item = parse_turn_item(&item);
assert!(turn_item.is_none(), "expected none, got {turn_item:?}");
}
}
#[test]
fn parses_agent_message() {
let item = ResponseItem::Message {

View File

@@ -72,9 +72,6 @@ pub enum SandboxType {
/// Only available on Linux.
LinuxSeccomp,
/// Only available on Windows.
WindowsRestrictedToken,
}
#[derive(Clone)]
@@ -161,89 +158,11 @@ pub(crate) async fn execute_exec_env(
};
let start = Instant::now();
let raw_output_result = exec(params, sandbox, sandbox_policy, stdout_stream).await;
let raw_output_result = exec(params, sandbox_policy, stdout_stream).await;
let duration = start.elapsed();
finalize_exec_result(raw_output_result, sandbox, duration)
}
#[cfg(target_os = "windows")]
async fn exec_windows_sandbox(
params: ExecParams,
sandbox_policy: &SandboxPolicy,
) -> Result<RawExecToolCallOutput> {
use crate::config::find_codex_home;
use codex_windows_sandbox::run_windows_sandbox_capture;
let ExecParams {
command,
cwd,
env,
timeout_ms,
..
} = params;
let policy_str = match sandbox_policy {
SandboxPolicy::DangerFullAccess => "workspace-write",
SandboxPolicy::ReadOnly => "read-only",
SandboxPolicy::WorkspaceWrite { .. } => "workspace-write",
};
let sandbox_cwd = cwd.clone();
let logs_base_dir = find_codex_home().ok();
let spawn_res = tokio::task::spawn_blocking(move || {
run_windows_sandbox_capture(
policy_str,
&sandbox_cwd,
command,
&cwd,
env,
timeout_ms,
logs_base_dir.as_deref(),
)
})
.await;
let capture = match spawn_res {
Ok(Ok(v)) => v,
Ok(Err(err)) => {
return Err(CodexErr::Io(io::Error::other(format!(
"windows sandbox: {err}"
))));
}
Err(join_err) => {
return Err(CodexErr::Io(io::Error::other(format!(
"windows sandbox join error: {join_err}"
))));
}
};
let exit_status = synthetic_exit_status(capture.exit_code);
let stdout = StreamOutput {
text: capture.stdout,
truncated_after_lines: None,
};
let stderr = StreamOutput {
text: capture.stderr,
truncated_after_lines: None,
};
// Best-effort aggregate: stdout then stderr
let mut aggregated = Vec::with_capacity(stdout.text.len() + stderr.text.len());
append_all(&mut aggregated, &stdout.text);
append_all(&mut aggregated, &stderr.text);
let aggregated_output = StreamOutput {
text: aggregated,
truncated_after_lines: None,
};
Ok(RawExecToolCallOutput {
exit_status,
stdout,
stderr,
aggregated_output,
timed_out: capture.timed_out,
})
}
fn finalize_exec_result(
raw_output_result: std::result::Result<RawExecToolCallOutput, CodexErr>,
sandbox_type: SandboxType,
@@ -428,17 +347,11 @@ pub struct ExecToolCallOutput {
pub timed_out: bool,
}
#[cfg_attr(not(target_os = "windows"), allow(unused_variables))]
async fn exec(
params: ExecParams,
sandbox: SandboxType,
sandbox_policy: &SandboxPolicy,
stdout_stream: Option<StdoutStream>,
) -> Result<RawExecToolCallOutput> {
#[cfg(target_os = "windows")]
if sandbox == SandboxType::WindowsRestrictedToken {
return exec_windows_sandbox(params, sandbox_policy).await;
}
let timeout = params.timeout_duration();
let ExecParams {
command,
@@ -612,9 +525,8 @@ fn synthetic_exit_status(code: i32) -> ExitStatus {
#[cfg(windows)]
fn synthetic_exit_status(code: i32) -> ExitStatus {
use std::os::windows::process::ExitStatusExt;
// On Windows the raw status is a u32. Use a direct cast to avoid
// panicking on negative i32 values produced by prior narrowing casts.
std::process::ExitStatus::from_raw(code as u32)
#[expect(clippy::unwrap_used)]
std::process::ExitStatus::from_raw(code.try_into().unwrap())
}
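
The two variants above differ only for negative exit codes: the `as u32` cast wraps, while `try_into().unwrap()` panics. A quick std-only sketch of the difference:

fn main() {
    let code: i32 = -1;
    // Wrapping cast: -1 becomes u32::MAX (4294967295).
    assert_eq!(code as u32, u32::MAX);
    // Checked conversion: fails for negative values, so unwrap() would panic.
    let checked: Result<u32, _> = u32::try_from(code);
    assert!(checked.is_err());
}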
#[cfg(test)]

View File

@@ -43,8 +43,6 @@ pub enum Feature {
SandboxCommandAssessment,
/// Create a ghost commit at each turn.
GhostCommit,
/// Enable Windows sandbox (restricted token) on Windows.
WindowsSandbox,
}
impl Feature {
@@ -294,10 +292,4 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::Experimental,
default_enabled: false,
},
FeatureSpec {
id: Feature::WindowsSandbox,
key: "enable_experimental_windows_sandbox",
stage: Stage::Experimental,
default_enabled: false,
},
];

View File

@@ -123,7 +123,7 @@ fn set_if_some(
if let Some(enabled) = maybe_value {
set_feature(features, feature, enabled);
log_alias(alias_key, feature);
features.record_legacy_usage(alias_key, feature);
features.record_legacy_usage_force(alias_key, feature);
}
}

View File

@@ -18,7 +18,7 @@ mod codex_delegate;
mod command_safety;
pub mod config;
pub mod config_loader;
mod context_manager;
mod conversation_history;
pub mod custom_prompts;
mod environment_context;
pub mod error;
@@ -75,7 +75,6 @@ pub use rollout::find_conversation_path_by_id_str;
pub use rollout::list::ConversationItem;
pub use rollout::list::ConversationsPage;
pub use rollout::list::Cursor;
pub use rollout::list::parse_cursor;
pub use rollout::list::read_head_for_summary;
mod function_tool;
mod state;
@@ -86,7 +85,6 @@ pub mod util;
pub use apply_patch::CODEX_APPLY_PATCH_ARG1;
pub use command_safety::is_safe_command;
pub use safety::get_platform_sandbox;
pub use safety::set_windows_sandbox_enabled;
// Re-export the protocol types from the standalone `codex-protocol` crate so existing
// `codex_core::protocol::...` references continue to work across the workspace.
pub use codex_protocol::protocol;

View File

@@ -160,7 +160,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
support_verbosity: false,
support_verbosity: true,
)
} else if slug.starts_with("gpt-5") {
model_family!(

View File

@@ -273,7 +273,7 @@ async fn traverse_directories_for_paths(
/// Pagination cursor token format: "<file_ts>|<uuid>" where `file_ts` matches the
/// filename timestamp portion (YYYY-MM-DDThh-mm-ss) used in rollout filenames.
/// The cursor orders files by timestamp desc, then UUID desc.
pub fn parse_cursor(token: &str) -> Option<Cursor> {
fn parse_cursor(token: &str) -> Option<Cursor> {
let (file_ts, uuid_str) = token.split_once('|')?;
let Ok(uuid) = Uuid::parse_str(uuid_str) else {
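
For reference, a cursor token with the documented shape splits like this (std-only sketch; the UUID value is illustrative):

fn main() {
    // "<file_ts>|<uuid>" per the doc comment above.
    let token = "2025-01-01T12-00-00|67e55044-10b1-426f-9247-bb680e5fe0c8";
    let (file_ts, uuid_str) = token.split_once('|').expect("cursor contains '|'");
    assert_eq!(file_ts, "2025-01-01T12-00-00");
    assert_eq!(uuid_str.len(), 36);
}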

View File

@@ -46,7 +46,6 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
| EventMsg::UndoCompleted(_)
| EventMsg::TurnAborted(_) => true,
EventMsg::Error(_)
| EventMsg::Warning(_)
| EventMsg::TaskStarted(_)
| EventMsg::TaskComplete(_)
| EventMsg::AgentMessageDelta(_)

View File

@@ -10,23 +10,6 @@ use crate::exec::SandboxType;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
#[cfg(target_os = "windows")]
use std::sync::atomic::AtomicBool;
#[cfg(target_os = "windows")]
use std::sync::atomic::Ordering;
#[cfg(target_os = "windows")]
static WINDOWS_SANDBOX_ENABLED: AtomicBool = AtomicBool::new(false);
#[cfg(target_os = "windows")]
pub fn set_windows_sandbox_enabled(enabled: bool) {
WINDOWS_SANDBOX_ENABLED.store(enabled, Ordering::Relaxed);
}
#[cfg(not(target_os = "windows"))]
#[allow(dead_code)]
pub fn set_windows_sandbox_enabled(_enabled: bool) {}
#[derive(Debug, PartialEq)]
pub enum SafetyCheck {
AutoApprove {
@@ -101,14 +84,6 @@ pub fn get_platform_sandbox() -> Option<SandboxType> {
Some(SandboxType::MacosSeatbelt)
} else if cfg!(target_os = "linux") {
Some(SandboxType::LinuxSeccomp)
} else if cfg!(target_os = "windows") {
#[cfg(target_os = "windows")]
{
if WINDOWS_SANDBOX_ENABLED.load(Ordering::Relaxed) {
return Some(SandboxType::WindowsRestrictedToken);
}
}
None
} else {
None
}

View File

@@ -25,6 +25,16 @@ use tracing::warn;
const SANDBOX_ASSESSMENT_TIMEOUT: Duration = Duration::from_secs(5);
const SANDBOX_RISK_CATEGORY_VALUES: &[&str] = &[
"data_deletion",
"data_exfiltration",
"privilege_escalation",
"system_modification",
"network_access",
"resource_exhaustion",
"compliance",
];
#[derive(Template)]
#[template(path = "sandboxing/assessment_prompt.md", escape = "none")]
struct SandboxAssessmentPromptTemplate<'a> {
@@ -166,26 +176,27 @@ pub(crate) async fn assess_command(
call_id,
"success",
Some(assessment.risk_level),
&assessment.risk_categories,
duration,
);
return Some(assessment);
}
Err(err) => {
warn!("failed to parse sandbox assessment JSON: {err}");
parent_otel.sandbox_assessment(call_id, "parse_error", None, duration);
parent_otel.sandbox_assessment(call_id, "parse_error", None, &[], duration);
}
},
Ok(Ok(None)) => {
warn!("sandbox assessment response did not include any message");
parent_otel.sandbox_assessment(call_id, "no_output", None, duration);
parent_otel.sandbox_assessment(call_id, "no_output", None, &[], duration);
}
Ok(Err(err)) => {
warn!("sandbox assessment failed: {err}");
parent_otel.sandbox_assessment(call_id, "model_error", None, duration);
parent_otel.sandbox_assessment(call_id, "model_error", None, &[], duration);
}
Err(_) => {
warn!("sandbox assessment timed out");
parent_otel.sandbox_assessment(call_id, "timeout", None, duration);
parent_otel.sandbox_assessment(call_id, "timeout", None, &[], duration);
}
}
@@ -218,7 +229,7 @@ fn sandbox_roots_for_prompt(policy: &SandboxPolicy, cwd: &Path) -> Vec<PathBuf>
fn sandbox_assessment_schema() -> serde_json::Value {
json!({
"type": "object",
"required": ["description", "risk_level"],
"required": ["description", "risk_level", "risk_categories"],
"properties": {
"description": {
"type": "string",
@@ -229,6 +240,13 @@ fn sandbox_assessment_schema() -> serde_json::Value {
"type": "string",
"enum": ["low", "medium", "high"]
},
"risk_categories": {
"type": "array",
"items": {
"type": "string",
"enum": SANDBOX_RISK_CATEGORY_VALUES
}
}
},
"additionalProperties": false
})
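
A response conforming to the updated schema might look like the following (a sketch using `serde_json`; the description and categories are illustrative):

use serde_json::json;

fn main() {
    let assessment = json!({
        "description": "Recursively deletes the project build directory.",
        "risk_level": "medium",
        "risk_categories": ["data_deletion", "system_modification"]
    });
    println!("{assessment}");
}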

View File

@@ -74,13 +74,25 @@ impl SandboxManager {
match pref {
SandboxablePreference::Forbid => SandboxType::None,
SandboxablePreference::Require => {
// Require a platform sandbox when available; on Windows this
// respects the enable_experimental_windows_sandbox feature.
crate::safety::get_platform_sandbox().unwrap_or(SandboxType::None)
#[cfg(target_os = "macos")]
{
return SandboxType::MacosSeatbelt;
}
#[cfg(target_os = "linux")]
{
return SandboxType::LinuxSeccomp;
}
#[allow(unreachable_code)]
SandboxType::None
}
SandboxablePreference::Auto => match policy {
SandboxPolicy::DangerFullAccess => SandboxType::None,
_ => crate::safety::get_platform_sandbox().unwrap_or(SandboxType::None),
#[cfg(target_os = "macos")]
_ => SandboxType::MacosSeatbelt,
#[cfg(target_os = "linux")]
_ => SandboxType::LinuxSeccomp,
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
_ => SandboxType::None,
},
}
}
@@ -131,14 +143,6 @@ impl SandboxManager {
Some("codex-linux-sandbox".to_string()),
)
}
// On Windows, the restricted token sandbox executes in-process via the
// codex-windows-sandbox crate. We leave the command unchanged here and
// branch during execution based on the sandbox type.
#[cfg(target_os = "windows")]
SandboxType::WindowsRestrictedToken => (command, HashMap::new(), None),
// When building for non-Windows targets, this variant is never constructed.
#[cfg(not(target_os = "windows"))]
SandboxType::WindowsRestrictedToken => (command, HashMap::new(), None),
};
env.extend(sandbox_env);

View File

@@ -67,8 +67,7 @@ pub(crate) async fn spawn_child_async(
// This relies on prctl(2), so it only works on Linux.
#[cfg(target_os = "linux")]
unsafe {
let parent_pid = libc::getpid();
cmd.pre_exec(move || {
cmd.pre_exec(|| {
// This prctl call effectively requests, "deliver SIGTERM when my
// current parent dies."
if libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) == -1 {
@@ -77,10 +76,9 @@ pub(crate) async fn spawn_child_async(
// Though if there was a race condition and this pre_exec() block is
// run _after_ the parent (i.e., the Codex process) has already
// exited, then parent will be the closest configured "subreaper"
// ancestor process, or PID 1 (init). If the Codex process has exited
// already, so should the child process.
if libc::getppid() != parent_pid {
// exited, then the parent is the _init_ process (which will never
// die), so we should just terminate the child process now.
if libc::getppid() == 1 {
libc::raise(libc::SIGTERM);
}
Ok(())

View File

@@ -3,7 +3,7 @@
use codex_protocol::models::ResponseItem;
use crate::codex::SessionConfiguration;
use crate::context_manager::ContextManager;
use crate::conversation_history::ConversationHistory;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::TokenUsage;
use crate::protocol::TokenUsageInfo;
@@ -11,7 +11,7 @@ use crate::protocol::TokenUsageInfo;
/// Persistent, session-scoped state previously stored directly on `Session`.
pub(crate) struct SessionState {
pub(crate) session_configuration: SessionConfiguration,
pub(crate) history: ContextManager,
pub(crate) history: ConversationHistory,
pub(crate) latest_rate_limits: Option<RateLimitSnapshot>,
}
@@ -20,7 +20,7 @@ impl SessionState {
pub(crate) fn new(session_configuration: SessionConfiguration) -> Self {
Self {
session_configuration,
history: ContextManager::new(),
history: ConversationHistory::new(),
latest_rate_limits: None,
}
}
@@ -34,7 +34,7 @@ impl SessionState {
self.history.record_items(items)
}
pub(crate) fn clone_history(&self) -> ContextManager {
pub(crate) fn clone_history(&self) -> ConversationHistory {
self.history.clone()
}

View File

@@ -9,7 +9,7 @@ pub mod runtimes;
pub mod sandboxing;
pub mod spec;
use crate::context_manager::format_output_for_model_body;
use crate::conversation_history::format_output_for_model_body;
use crate::exec::ExecToolCallOutput;
pub use router::ToolRouter;
use serde::Serialize;

View File

@@ -54,21 +54,12 @@ impl ToolOrchestrator {
let mut already_approved = false;
if needs_initial_approval {
let mut risk = None;
if let Some(metadata) = req.sandbox_retry_data() {
risk = tool_ctx
.session
.assess_sandbox_command(turn_ctx, &tool_ctx.call_id, &metadata.command, None)
.await;
}
let approval_ctx = ApprovalCtx {
session: tool_ctx.session,
turn: turn_ctx,
call_id: &tool_ctx.call_id,
retry_reason: None,
risk,
risk: None,
};
let decision = tool.start_approval_async(req, approval_ctx).await;
@@ -92,8 +83,6 @@ impl ToolOrchestrator {
if tool.wants_escalated_first_attempt(req) {
initial_sandbox = crate::exec::SandboxType::None;
}
// Platform-specific flag gating is handled by SandboxManager::select_initial
// via crate::safety::get_platform_sandbox().
let initial_attempt = SandboxAttempt {
sandbox: initial_sandbox,
policy: &turn_ctx.sandbox_policy,

View File

@@ -1,5 +1,4 @@
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
use tokio_util::either::Either;
@@ -54,16 +53,13 @@ impl ToolCallRuntime {
let turn = Arc::clone(&self.turn_context);
let tracker = Arc::clone(&self.tracker);
let lock = Arc::clone(&self.parallel_execution);
let started = Instant::now();
let aborted_response = Self::aborted_response(&call);
let readiness = self.turn_context.tool_call_gate.clone();
let handle: AbortOnDropHandle<Result<ResponseInputItem, FunctionCallError>> =
AbortOnDropHandle::new(tokio::spawn(async move {
tokio::select! {
_ = cancellation_token.cancelled() => {
let secs = started.elapsed().as_secs_f32().max(0.1);
Ok(Self::aborted_response(&call, secs))
},
_ = cancellation_token.cancelled() => Ok(aborted_response),
res = async {
tracing::info!("waiting for tool gate");
readiness.wait_ready().await;
@@ -75,7 +71,7 @@ impl ToolCallRuntime {
};
router
.dispatch_tool_call(session, turn, tracker, call.clone())
.dispatch_tool_call(session, turn, tracker, call)
.await
} => res,
}
@@ -95,32 +91,23 @@ impl ToolCallRuntime {
}
impl ToolCallRuntime {
fn aborted_response(call: &ToolCall, secs: f32) -> ResponseInputItem {
fn aborted_response(call: &ToolCall) -> ResponseInputItem {
match &call.payload {
ToolPayload::Custom { .. } => ResponseInputItem::CustomToolCallOutput {
call_id: call.call_id.clone(),
output: Self::abort_message(call, secs),
output: "aborted".to_string(),
},
ToolPayload::Mcp { .. } => ResponseInputItem::McpToolCallOutput {
call_id: call.call_id.clone(),
result: Err(Self::abort_message(call, secs)),
result: Err("aborted".to_string()),
},
_ => ResponseInputItem::FunctionCallOutput {
call_id: call.call_id.clone(),
output: FunctionCallOutputPayload {
content: Self::abort_message(call, secs),
content: "aborted".to_string(),
..Default::default()
},
},
}
}
fn abort_message(call: &ToolCall, secs: f32) -> String {
match call.tool_name.as_str() {
"shell" | "container.exec" | "local_shell" | "unified_exec" => {
format!("Wall time: {secs:.1} seconds\naborted by user")
}
_ => format!("aborted by user after {secs:.1}s"),
}
}
}

View File

@@ -3,25 +3,29 @@ use serde::Serialize;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::USER_INSTRUCTIONS_CLOSE_TAG;
use codex_protocol::protocol::USER_INSTRUCTIONS_OPEN_TAG;
pub const USER_INSTRUCTIONS_OPEN_TAG_LEGACY: &str = "<user_instructions>";
pub const USER_INSTRUCTIONS_PREFIX: &str = "# AGENTS.md instructions for ";
/// Wraps user instructions in a tag so the model can classify them easily.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename = "user_instructions", rename_all = "snake_case")]
pub(crate) struct UserInstructions {
pub directory: String,
pub text: String,
text: String,
}
impl UserInstructions {
pub fn is_user_instructions(message: &[ContentItem]) -> bool {
if let [ContentItem::InputText { text }] = message {
text.starts_with(USER_INSTRUCTIONS_PREFIX)
|| text.starts_with(USER_INSTRUCTIONS_OPEN_TAG_LEGACY)
} else {
false
}
pub fn new<T: Into<String>>(text: T) -> Self {
Self { text: text.into() }
}
/// Serializes the user instructions to an XML-like tagged block that starts
/// with <user_instructions> so clients can classify it.
pub fn serialize_to_xml(self) -> String {
format!(
"{USER_INSTRUCTIONS_OPEN_TAG}\n\n{}\n\n{USER_INSTRUCTIONS_CLOSE_TAG}",
self.text
)
}
}
@@ -31,11 +35,7 @@ impl From<UserInstructions> for ResponseItem {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: format!(
"{USER_INSTRUCTIONS_PREFIX}{directory}\n\n<INSTRUCTIONS>\n{contents}\n</INSTRUCTIONS>",
directory = ui.directory,
contents = ui.text
),
text: ui.serialize_to_xml(),
}],
}
}
@@ -68,51 +68,3 @@ impl From<DeveloperInstructions> for ResponseItem {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_user_instructions() {
let user_instructions = UserInstructions {
directory: "test_directory".to_string(),
text: "test_text".to_string(),
};
let response_item: ResponseItem = user_instructions.into();
let ResponseItem::Message { role, content, .. } = response_item else {
panic!("expected ResponseItem::Message");
};
assert_eq!(role, "user");
let [ContentItem::InputText { text }] = content.as_slice() else {
panic!("expected one InputText content item");
};
assert_eq!(
text,
"# AGENTS.md instructions for test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>",
);
}
#[test]
fn test_is_user_instructions() {
assert!(UserInstructions::is_user_instructions(
&[ContentItem::InputText {
text: "# AGENTS.md instructions for test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>".to_string(),
}]
));
assert!(UserInstructions::is_user_instructions(&[
ContentItem::InputText {
text: "<user_instructions>test_text</user_instructions>".to_string(),
}
]));
assert!(!UserInstructions::is_user_instructions(&[
ContentItem::InputText {
text: "test_text".to_string(),
}
]));
}
}
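
The new serialization produces a block of the following shape; a sketch with the tag constants inlined (their exact values live in `codex_protocol` and are assumed here to be the literal tags, which the request tests below also rely on):

fn main() {
    let text = "be nice";
    let serialized = format!("<user_instructions>\n\n{text}\n\n</user_instructions>");
    assert!(serialized.starts_with("<user_instructions>"));
    assert!(serialized.ends_with("</user_instructions>"));
}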

View File

@@ -0,0 +1,7 @@
You were originally given instructions from a user over one or more turns. Here were the user messages:
{{ user_messages_text }}
Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:
{{ summary_text }}

View File

@@ -1,9 +1,5 @@
You are performing a CONTEXT CHECKPOINT COMPACTION. Create a handoff summary for another LLM that will resume the task.
Include:
- Current progress and key decisions made
- Important context, constraints, or user preferences
- What remains to be done (clear next steps)
- Any critical data, examples, or references needed to continue
Be concise, structured, and focused on helping the next LLM seamlessly continue the work.
You have exceeded the maximum number of tokens, please stop coding and instead write a short memento message for the next agent. Your note should:
- Summarize what you finished and what still needs work. If there was a recent update_plan call, repeat its steps verbatim.
- List outstanding TODOs with file paths / line numbers so they're easy to find.
- Flag code that needs more tests (edge cases, performance, integration, etc.).
- Record any open bugs, quirks, or setup steps that will make it easier for the next agent to pick up where you left off.

View File

@@ -1,10 +1,13 @@
You are a security analyst evaluating shell commands that were blocked by a sandbox. Given the provided metadata, summarize the command's likely intent and assess the risk to help the user decide whether to approve command execution. Return strictly valid JSON with the keys:
- description (concise summary of command intent and potential effects, no more than one sentence, use present tense)
You are a security analyst evaluating shell commands that were blocked by a sandbox. Given the provided metadata, summarize the command's likely intent and assess the risk. Return strictly valid JSON with the keys:
- description (concise summary, at most two sentences)
- risk_level ("low", "medium", or "high")
- risk_categories (optional array of zero or more category strings)
Risk level examples:
- low: read-only inspections, listing files, printing configuration, fetching artifacts from trusted sources
- medium: modifying project files, installing dependencies
- low: read-only inspections, listing files, printing configuration
- medium: modifying project files, installing dependencies, fetching artifacts from trusted sources
- high: deleting or overwriting data, exfiltrating secrets, escalating privileges, or disabling security controls
Recognized risk_categories: data_deletion, data_exfiltration, privilege_escalation, system_modification, network_access, resource_exhaustion, compliance.
Use multiple categories when appropriate.
If information is insufficient, choose the most cautious risk level supported by the evidence.
Respond with JSON only, without markdown code fences or extra commentary.

View File

@@ -244,7 +244,7 @@ pub mod fs_wait {
if path.exists() {
Ok(path)
} else {
Err(anyhow!("timed out waiting for {path:?}"))
Err(anyhow!("timed out waiting for {:?}", path))
}
}
@@ -284,7 +284,7 @@ pub mod fs_wait {
if let Some(found) = scan_for_match(&root, predicate) {
Ok(found)
} else {
Err(anyhow!("timed out waiting for matching file in {root:?}"))
Err(anyhow!("timed out waiting for matching file in {:?}", root))
}
}

View File

@@ -479,7 +479,6 @@ pub async fn mount_sse_sequence(server: &MockServer, bodies: Vec<String>) -> Res
let (mock, response_mock) = base_mock();
mock.respond_with(responder)
.up_to_n_times(num_calls as u64)
.expect(num_calls as u64)
.mount(server)
.await;

View File

@@ -1,4 +1,3 @@
use assert_matches::assert_matches;
use std::sync::Arc;
use std::time::Duration;
@@ -14,7 +13,6 @@ use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event_with_timeout;
use regex_lite::Regex;
use serde_json::json;
/// Integration test: spawn a longrunning shell tool via a mocked Responses SSE
@@ -125,7 +123,6 @@ async fn interrupt_tool_records_history_entries() {
)
.await;
tokio::time::sleep(Duration::from_secs_f32(0.1)).await;
codex.submit(Op::Interrupt).await.unwrap();
wait_for_event_with_timeout(
@@ -162,26 +159,9 @@ async fn interrupt_tool_records_history_entries() {
response_mock.saw_function_call(call_id),
"function call not recorded in responses payload"
);
let output = response_mock
.function_call_output_text(call_id)
.expect("missing function_call_output text");
let re = Regex::new(r"^Wall time: ([0-9]+(?:\.[0-9])?) seconds\naborted by user$")
.expect("compile regex");
let captures = re.captures(&output);
assert_matches!(
captures.as_ref(),
Some(caps) if caps.get(1).is_some(),
"aborted message with elapsed seconds"
);
let secs: f32 = captures
.expect("aborted message with elapsed seconds")
.get(1)
.unwrap()
.as_str()
.parse()
.unwrap();
assert!(
secs >= 0.1,
"expected at least one tenth of a second of elapsed time, got {secs}"
assert_eq!(
response_mock.function_call_output_text(call_id).as_deref(),
Some("aborted"),
"aborted function call output not recorded in responses payload"
);
}

View File

@@ -1,272 +0,0 @@
use anyhow::Context;
use anyhow::Result;
use base64::Engine;
use chrono::Duration;
use chrono::Utc;
use codex_core::CodexAuth;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::AuthDotJson;
use codex_core::auth::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR;
use codex_core::auth::RefreshTokenError;
use codex_core::auth::load_auth_dot_json;
use codex_core::auth::save_auth;
use codex_core::error::RefreshTokenFailedReason;
use codex_core::token_data::IdTokenInfo;
use codex_core::token_data::TokenData;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use serde::Serialize;
use serde_json::json;
use std::ffi::OsString;
use tempfile::TempDir;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
const INITIAL_ACCESS_TOKEN: &str = "initial-access-token";
const INITIAL_REFRESH_TOKEN: &str = "initial-refresh-token";
#[serial_test::serial(auth_refresh)]
#[tokio::test]
async fn refresh_token_succeeds_updates_storage() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/oauth/token"))
.respond_with(ResponseTemplate::new(200).set_body_json(json!({
"access_token": "new-access-token",
"refresh_token": "new-refresh-token"
})))
.expect(1)
.mount(&server)
.await;
let ctx = RefreshTokenTestContext::new(&server)?;
let auth = ctx.auth.clone();
let access = auth
.refresh_token()
.await
.context("refresh should succeed")?;
assert_eq!(access, "new-access-token");
let stored = ctx.load_auth()?;
let tokens = stored.tokens.as_ref().context("tokens should exist")?;
assert_eq!(tokens.access_token, "new-access-token");
assert_eq!(tokens.refresh_token, "new-refresh-token");
let refreshed_at = stored
.last_refresh
.as_ref()
.context("last_refresh should be recorded")?;
assert!(
*refreshed_at >= ctx.initial_last_refresh,
"last_refresh should advance"
);
let cached = auth
.get_token_data()
.await
.context("token data should be cached")?;
assert_eq!(cached.access_token, "new-access-token");
server.verify().await;
Ok(())
}
#[serial_test::serial(auth_refresh)]
#[tokio::test]
async fn refresh_token_returns_permanent_error_for_expired_refresh_token() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/oauth/token"))
.respond_with(ResponseTemplate::new(401).set_body_json(json!({
"error": {
"code": "refresh_token_expired"
}
})))
.expect(1)
.mount(&server)
.await;
let ctx = RefreshTokenTestContext::new(&server)?;
let auth = ctx.auth.clone();
let err = auth
.refresh_token()
.await
.err()
.context("refresh should fail")?;
assert_eq!(err.failed_reason(), Some(RefreshTokenFailedReason::Expired));
let stored = ctx.load_auth()?;
let tokens = stored.tokens.as_ref().context("tokens should remain")?;
assert_eq!(tokens.access_token, INITIAL_ACCESS_TOKEN);
assert_eq!(tokens.refresh_token, INITIAL_REFRESH_TOKEN);
assert_eq!(
*stored
.last_refresh
.as_ref()
.context("last_refresh should remain unchanged")?,
ctx.initial_last_refresh,
);
server.verify().await;
Ok(())
}
#[serial_test::serial(auth_refresh)]
#[tokio::test]
async fn refresh_token_returns_transient_error_on_server_failure() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/oauth/token"))
.respond_with(ResponseTemplate::new(500).set_body_json(json!({
"error": "temporary-failure"
})))
.expect(1)
.mount(&server)
.await;
let ctx = RefreshTokenTestContext::new(&server)?;
let auth = ctx.auth.clone();
let err = auth
.refresh_token()
.await
.err()
.context("refresh should fail")?;
assert!(matches!(err, RefreshTokenError::Transient(_)));
assert_eq!(err.failed_reason(), None);
let stored = ctx.load_auth()?;
let tokens = stored.tokens.as_ref().context("tokens should remain")?;
assert_eq!(tokens.access_token, INITIAL_ACCESS_TOKEN);
assert_eq!(tokens.refresh_token, INITIAL_REFRESH_TOKEN);
assert_eq!(
*stored
.last_refresh
.as_ref()
.context("last_refresh should remain unchanged")?,
ctx.initial_last_refresh,
);
server.verify().await;
Ok(())
}
struct RefreshTokenTestContext {
codex_home: TempDir,
auth: CodexAuth,
initial_last_refresh: chrono::DateTime<Utc>,
_env_guard: EnvGuard,
}
impl RefreshTokenTestContext {
fn new(server: &MockServer) -> Result<Self> {
let codex_home = TempDir::new()?;
let initial_last_refresh = Utc::now() - Duration::days(1);
let mut id_token = IdTokenInfo::default();
id_token.raw_jwt = minimal_jwt();
let tokens = TokenData {
id_token,
access_token: INITIAL_ACCESS_TOKEN.to_string(),
refresh_token: INITIAL_REFRESH_TOKEN.to_string(),
account_id: Some("account-id".to_string()),
};
let auth_dot_json = AuthDotJson {
openai_api_key: None,
tokens: Some(tokens),
last_refresh: Some(initial_last_refresh),
};
save_auth(
codex_home.path(),
&auth_dot_json,
AuthCredentialsStoreMode::File,
)?;
let endpoint = format!("{}/oauth/token", server.uri());
let env_guard = EnvGuard::set(REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR, endpoint);
let auth = CodexAuth::from_auth_storage(codex_home.path(), AuthCredentialsStoreMode::File)?
.context("auth should load from storage")?;
Ok(Self {
codex_home,
auth,
initial_last_refresh,
_env_guard: env_guard,
})
}
fn load_auth(&self) -> Result<AuthDotJson> {
load_auth_dot_json(self.codex_home.path(), AuthCredentialsStoreMode::File)
.context("load auth.json")?
.context("auth.json should exist")
}
}
struct EnvGuard {
key: &'static str,
original: Option<OsString>,
}
impl EnvGuard {
fn set(key: &'static str, value: String) -> Self {
let original = std::env::var_os(key);
// SAFETY: these tests execute serially, so updating the process environment is safe.
unsafe {
std::env::set_var(key, &value);
}
Self { key, original }
}
}
impl Drop for EnvGuard {
fn drop(&mut self) {
// SAFETY: the guard restores the original environment value before other tests run.
unsafe {
match &self.original {
Some(value) => std::env::set_var(self.key, value),
None => std::env::remove_var(self.key),
}
}
}
}
fn minimal_jwt() -> String {
#[derive(Serialize)]
struct Header {
alg: &'static str,
typ: &'static str,
}
let header = Header {
alg: "none",
typ: "JWT",
};
let payload = json!({ "sub": "user-123" });
fn b64(data: &[u8]) -> String {
base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(data)
}
let header_bytes = match serde_json::to_vec(&header) {
Ok(bytes) => bytes,
Err(err) => panic!("serialize header: {err}"),
};
let payload_bytes = match serde_json::to_vec(&payload) {
Ok(bytes) => bytes,
Err(err) => panic!("serialize payload: {err}"),
};
let header_b64 = b64(&header_bytes);
let payload_b64 = b64(&payload_bytes);
let signature_b64 = b64(b"sig");
format!("{header_b64}.{payload_b64}.{signature_b64}")
}

View File

@@ -613,13 +613,8 @@ async fn includes_user_instructions_message_in_request() {
.contains("be nice")
);
assert_message_role(&request_body["input"][0], "user");
assert_message_starts_with(&request_body["input"][0], "# AGENTS.md instructions for ");
assert_message_ends_with(&request_body["input"][0], "</INSTRUCTIONS>");
let ui_text = request_body["input"][0]["content"][0]["text"]
.as_str()
.expect("invalid message content");
assert!(ui_text.contains("<INSTRUCTIONS>"));
assert!(ui_text.contains("be nice"));
assert_message_starts_with(&request_body["input"][0], "<user_instructions>");
assert_message_ends_with(&request_body["input"][0], "</user_instructions>");
assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "<environment_context>");
assert_message_ends_with(&request_body["input"][1], "</environment_context>");
@@ -676,13 +671,8 @@ async fn includes_developer_instructions_message_in_request() {
assert_message_role(&request_body["input"][0], "developer");
assert_message_equals(&request_body["input"][0], "be useful");
assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "# AGENTS.md instructions for ");
assert_message_ends_with(&request_body["input"][1], "</INSTRUCTIONS>");
let ui_text = request_body["input"][1]["content"][0]["text"]
.as_str()
.expect("invalid message content");
assert!(ui_text.contains("<INSTRUCTIONS>"));
assert!(ui_text.contains("be nice"));
assert_message_starts_with(&request_body["input"][1], "<user_instructions>");
assert_message_ends_with(&request_body["input"][1], "</user_instructions>");
assert_message_role(&request_body["input"][2], "user");
assert_message_starts_with(&request_body["input"][2], "<environment_context>");
assert_message_ends_with(&request_body["input"][2], "</environment_context>");

View File

@@ -8,8 +8,6 @@ use core_test_support::responses::ev_apply_patch_function_call;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::ev_reasoning_item_added;
use core_test_support::responses::ev_reasoning_summary_text_delta;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_sequence;
use core_test_support::responses::sse;
@@ -17,7 +15,6 @@ use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
/// Delegate should surface ExecApprovalRequest from sub-agent and proceed
/// after parent submits an approval decision.
@@ -174,52 +171,3 @@ async fn codex_delegate_forwards_patch_approval_and_proceeds_on_decision() {
.await;
wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn codex_delegate_ignores_legacy_deltas() {
skip_if_no_network!();
// Single response with reasoning summary deltas.
let sse_stream = sse(vec![
ev_response_created("resp-1"),
ev_reasoning_item_added("reason-1", &["initial"]),
ev_reasoning_summary_text_delta("think-1"),
ev_completed("resp-1"),
]);
let server = start_mock_server().await;
mount_sse_sequence(&server, vec![sse_stream]).await;
let mut builder = test_codex();
let test = builder.build(&server).await.expect("build test codex");
// Kick off review (delegated).
test.codex
.submit(Op::Review {
review_request: ReviewRequest {
prompt: "Please review".to_string(),
user_facing_hint: "review".to_string(),
},
})
.await
.expect("submit review");
let mut reasoning_delta_count = 0;
let mut legacy_reasoning_delta_count = 0;
loop {
let ev = wait_for_event(&test.codex, |_| true).await;
match ev {
EventMsg::ReasoningContentDelta(_) => reasoning_delta_count += 1,
EventMsg::AgentReasoningDelta(_) => legacy_reasoning_delta_count += 1,
EventMsg::TaskComplete(_) => break,
_ => {}
}
}
assert_eq!(reasoning_delta_count, 1, "expected one new reasoning delta");
assert_eq!(
legacy_reasoning_delta_count, 1,
"expected one legacy reasoning delta"
);
}

View File

@@ -3,20 +3,18 @@ use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::protocol::ErrorEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol::RolloutItem;
use codex_core::protocol::RolloutLine;
use codex_core::protocol::WarningEvent;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::skip_if_no_network;
use core_test_support::wait_for_event;
use std::collections::VecDeque;
use tempfile::TempDir;
use codex_core::codex::compact::SUMMARIZATION_PROMPT;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_completed_with_tokens;
@@ -28,7 +26,6 @@ use core_test_support::responses::sse;
use core_test_support::responses::sse_failed;
use core_test_support::responses::start_mock_server;
use pretty_assertions::assert_eq;
use serde_json::json;
// --- Test helpers -----------------------------------------------------------
pub(super) const FIRST_REPLY: &str = "FIRST_REPLY";
@@ -48,38 +45,6 @@ const CONTEXT_LIMIT_MESSAGE: &str =
const DUMMY_FUNCTION_NAME: &str = "unsupported_tool";
const DUMMY_CALL_ID: &str = "call-multi-auto";
const FUNCTION_CALL_LIMIT_MSG: &str = "function call limit push";
const POST_AUTO_USER_MSG: &str = "post auto follow-up";
const COMPACT_PROMPT_MARKER: &str =
"You are performing a CONTEXT CHECKPOINT COMPACTION for a tool.";
pub(super) const TEST_COMPACT_PROMPT: &str =
"You are performing a CONTEXT CHECKPOINT COMPACTION for a tool.\nTest-only compact prompt.";
pub(super) const COMPACT_WARNING_MESSAGE: &str = "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start a new conversation when possible to keep conversations small and targeted.";
fn auto_summary(summary: &str) -> String {
summary.to_string()
}
fn drop_call_id(value: &mut serde_json::Value) {
match value {
serde_json::Value::Object(obj) => {
obj.retain(|k, _| k != "call_id");
for v in obj.values_mut() {
drop_call_id(v);
}
}
serde_json::Value::Array(arr) => {
for v in arr {
drop_call_id(v);
}
}
_ => {}
}
}
fn set_test_compact_prompt(config: &mut Config) {
config.compact_prompt = Some(TEST_COMPACT_PROMPT.to_string());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn summarize_context_three_requests_and_instructions() {
@@ -106,21 +71,22 @@ async fn summarize_context_three_requests_and_instructions() {
// Mount three expectations, one per request, matched by body content.
let first_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("\"text\":\"hello world\"") && !body.contains(COMPACT_PROMPT_MARKER)
body.contains("\"text\":\"hello world\"")
&& !body.contains("You have exceeded the maximum number of tokens")
};
let first_request_mock = mount_sse_once_match(&server, first_matcher, sse1).await;
mount_sse_once_match(&server, first_matcher, sse1).await;
let second_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(COMPACT_PROMPT_MARKER)
body.contains("You have exceeded the maximum number of tokens")
};
let second_request_mock = mount_sse_once_match(&server, second_matcher, sse2).await;
mount_sse_once_match(&server, second_matcher, sse2).await;
let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(&format!("\"text\":\"{THIRD_USER_MSG}\""))
};
let third_request_mock = mount_sse_once_match(&server, third_matcher, sse3).await;
mount_sse_once_match(&server, third_matcher, sse3).await;
// Build config pointing to the mock server and spawn Codex.
let model_provider = ModelProviderInfo {
@@ -130,7 +96,6 @@ async fn summarize_context_three_requests_and_instructions() {
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
set_test_compact_prompt(&mut config);
config.model_auto_compact_token_limit = Some(200_000);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let NewConversation {
@@ -153,11 +118,6 @@ async fn summarize_context_three_requests_and_instructions() {
// 2) Summarize: the second hit should include the summarization prompt.
codex.submit(Op::Compact).await.unwrap();
let warning_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Warning(_))).await;
let EventMsg::Warning(WarningEvent { message }) = warning_event else {
panic!("expected warning event after compact");
};
assert_eq!(message, COMPACT_WARNING_MESSAGE);
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// 3) Next user input: the third hit's history should include only the summary.
@@ -172,13 +132,16 @@ async fn summarize_context_three_requests_and_instructions() {
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Inspect the three captured requests.
let req1 = first_request_mock.single_request();
let req2 = second_request_mock.single_request();
let req3 = third_request_mock.single_request();
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 3, "expected exactly three requests");
let body1 = req1.body_json();
let body2 = req2.body_json();
let body3 = req3.body_json();
let req1 = &requests[0];
let req2 = &requests[1];
let req3 = &requests[2];
let body1 = req1.body_json::<serde_json::Value>().unwrap();
let body2 = req2.body_json::<serde_json::Value>().unwrap();
let body3 = req3.body_json::<serde_json::Value>().unwrap();
// Manual compact should keep the baseline developer instructions.
let instr1 = body1.get("instructions").and_then(|v| v.as_str()).unwrap();
@@ -196,11 +159,11 @@ async fn summarize_context_three_requests_and_instructions() {
assert_eq!(last2.get("role").unwrap().as_str().unwrap(), "user");
let text2 = last2["content"][0]["text"].as_str().unwrap();
assert_eq!(
text2, TEST_COMPACT_PROMPT,
text2, SUMMARIZATION_PROMPT,
"expected summarize trigger, got `{text2}`"
);
// Third request must contain the refreshed instructions, compacted user history, and new user message.
// Third request must contain the refreshed instructions, the bridge summary message, and the new user message.
let input3 = body3.get("input").and_then(|v| v.as_array()).unwrap();
assert!(
@@ -208,21 +171,13 @@ async fn summarize_context_three_requests_and_instructions() {
"expected refreshed context and new user message in third request"
);
// Collect all (role, text) message tuples.
let mut messages: Vec<(String, String)> = Vec::new();
for item in input3 {
if let Some("message") = item.get("type").and_then(|v| v.as_str()) {
let role = item
.get("role")
.and_then(|v| v.as_str())
.unwrap_or_default()
.to_string();
let text = item
.get("content")
.and_then(|v| v.as_array())
.and_then(|arr| arr.first())
.and_then(|entry| entry.get("text"))
.and_then(|v| v.as_str())
if item["type"].as_str() == Some("message") {
let role = item["role"].as_str().unwrap_or_default().to_string();
let text = item["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
messages.push((role, text));
@@ -238,22 +193,26 @@ async fn summarize_context_three_requests_and_instructions() {
.any(|(r, t)| r == "user" && t == THIRD_USER_MSG),
"third request should include the new user message"
);
let Some((_, bridge_text)) = messages.iter().find(|(role, text)| {
role == "user"
&& (text.contains("Here were the user messages")
|| text.contains("Here are all the user messages"))
&& text.contains(SUMMARY_TEXT)
}) else {
panic!("expected a bridge message containing the summary");
};
assert!(
messages
.iter()
.any(|(r, t)| r == "user" && t == "hello world"),
"third request should include the original user message"
bridge_text.contains("hello world"),
"bridge should capture earlier user messages"
);
assert!(
messages
.iter()
.any(|(r, t)| r == "user" && t == SUMMARY_TEXT),
"third request should include the summary message"
!bridge_text.contains(SUMMARIZATION_PROMPT),
"bridge text should not echo the summarize trigger"
);
assert!(
!messages
.iter()
.any(|(_, text)| text.contains(TEST_COMPACT_PROMPT)),
.any(|(_, text)| text.contains(SUMMARIZATION_PROMPT)),
"third request should not include the summarize trigger"
);
@@ -329,11 +288,6 @@ async fn manual_compact_uses_custom_prompt() {
.conversation;
codex.submit(Op::Compact).await.expect("trigger compact");
let warning_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Warning(_))).await;
let EventMsg::Warning(WarningEvent { message }) = warning_event else {
panic!("expected warning event after compact");
};
assert_eq!(message, COMPACT_WARNING_MESSAGE);
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.expect("collect requests");
@@ -357,7 +311,7 @@ async fn manual_compact_uses_custom_prompt() {
if text == custom_prompt {
found_custom_prompt = true;
}
if text == TEST_COMPACT_PROMPT {
if text == SUMMARIZATION_PROMPT {
found_default_prompt = true;
}
}
@@ -388,17 +342,12 @@ async fn auto_compact_runs_after_token_limit_hit() {
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
ev_completed_with_tokens("r3", 200),
]);
let sse_resume = sse(vec![ev_completed("r3-resume")]);
let sse4 = sse(vec![
ev_assistant_message("m4", FINAL_REPLY),
ev_completed_with_tokens("r4", 120),
]);
let first_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(FIRST_AUTO_MSG)
&& !body.contains(SECOND_AUTO_MSG)
&& !body.contains(COMPACT_PROMPT_MARKER)
&& !body.contains("You have exceeded the maximum number of tokens")
};
mount_sse_once_match(&server, first_matcher, sse1).await;
@@ -406,30 +355,16 @@ async fn auto_compact_runs_after_token_limit_hit() {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(SECOND_AUTO_MSG)
&& body.contains(FIRST_AUTO_MSG)
&& !body.contains(COMPACT_PROMPT_MARKER)
&& !body.contains("You have exceeded the maximum number of tokens")
};
mount_sse_once_match(&server, second_matcher, sse2).await;
let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(COMPACT_PROMPT_MARKER)
body.contains("You have exceeded the maximum number of tokens")
};
mount_sse_once_match(&server, third_matcher, sse3).await;
let resume_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(AUTO_SUMMARY_TEXT)
&& !body.contains(COMPACT_PROMPT_MARKER)
&& !body.contains(POST_AUTO_USER_MSG)
};
mount_sse_once_match(&server, resume_matcher, sse_resume).await;
let fourth_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(POST_AUTO_USER_MSG) && !body.contains(COMPACT_PROMPT_MARKER)
};
mount_sse_once_match(&server, fourth_matcher, sse4).await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
@@ -438,7 +373,6 @@ async fn auto_compact_runs_after_token_limit_hit() {
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
set_test_compact_prompt(&mut config);
config.model_auto_compact_token_limit = Some(200_000);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
@@ -468,29 +402,18 @@ async fn auto_compact_runs_after_token_limit_hit() {
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
.submit(Op::UserInput {
items: vec![UserInput::Text {
text: POST_AUTO_USER_MSG.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.unwrap();
assert_eq!(
requests.len(),
5,
"expected user turns, a compaction request, a resumed turn, and the follow-up turn; got {}",
assert!(
requests.len() >= 3,
"auto compact should add at least a third request, got {}",
requests.len()
);
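// Classify a request as the auto-compact turn by the marker text embedded in the summarization prompt.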
let is_auto_compact = |req: &wiremock::Request| {
std::str::from_utf8(&req.body)
.unwrap_or("")
.contains(COMPACT_PROMPT_MARKER)
.contains("You have exceeded the maximum number of tokens")
};
let auto_compact_count = requests.iter().filter(|req| is_auto_compact(req)).count();
assert_eq!(
@@ -507,41 +430,11 @@ async fn auto_compact_runs_after_token_limit_hit() {
"auto compact should add a third request"
);
let resume_index = requests
.iter()
.enumerate()
.find_map(|(idx, req)| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
(body.contains(AUTO_SUMMARY_TEXT)
&& !body.contains(COMPACT_PROMPT_MARKER)
&& !body.contains(POST_AUTO_USER_MSG))
.then_some(idx)
})
.expect("resume request missing after compaction");
let follow_up_index = requests
.iter()
.enumerate()
.rev()
.find_map(|(idx, req)| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
(body.contains(POST_AUTO_USER_MSG) && !body.contains(COMPACT_PROMPT_MARKER))
.then_some(idx)
})
.expect("follow-up request missing");
assert_eq!(follow_up_index, 4, "follow-up request should be last");
let body_first = requests[0].body_json::<serde_json::Value>().unwrap();
let body_auto = requests[auto_compact_index]
let body3 = requests[auto_compact_index]
.body_json::<serde_json::Value>()
.unwrap();
let body_resume = requests[resume_index]
.body_json::<serde_json::Value>()
.unwrap();
let body_follow_up = requests[follow_up_index]
.body_json::<serde_json::Value>()
.unwrap();
let instructions = body_auto
let instructions = body3
.get("instructions")
.and_then(|v| v.as_str())
.unwrap_or_default();
@@ -555,16 +448,13 @@ async fn auto_compact_runs_after_token_limit_hit() {
"auto compact should keep the standard developer instructions",
);
let input_auto = body_auto.get("input").and_then(|v| v.as_array()).unwrap();
let last_auto = input_auto
let input3 = body3.get("input").and_then(|v| v.as_array()).unwrap();
let last3 = input3
.last()
.expect("auto compact request should append a user message");
assert_eq!(
last_auto.get("type").and_then(|v| v.as_str()),
Some("message")
);
assert_eq!(last_auto.get("role").and_then(|v| v.as_str()), Some("user"));
let last_text = last_auto
assert_eq!(last3.get("type").and_then(|v| v.as_str()), Some("message"));
assert_eq!(last3.get("role").and_then(|v| v.as_str()), Some("user"));
let last_text = last3
.get("content")
.and_then(|v| v.as_array())
.and_then(|items| items.first())
@@ -572,59 +462,9 @@ async fn auto_compact_runs_after_token_limit_hit() {
.and_then(|text| text.as_str())
.unwrap_or_default();
assert_eq!(
last_text, TEST_COMPACT_PROMPT,
last_text, SUMMARIZATION_PROMPT,
"auto compact should send the summarization prompt as a user message",
);
let input_resume = body_resume.get("input").and_then(|v| v.as_array()).unwrap();
assert!(
input_resume.iter().any(|item| {
item.get("type").and_then(|v| v.as_str()) == Some("message")
&& item.get("role").and_then(|v| v.as_str()) == Some("user")
&& item
.get("content")
.and_then(|v| v.as_array())
.and_then(|arr| arr.first())
.and_then(|entry| entry.get("text"))
.and_then(|v| v.as_str())
== Some(AUTO_SUMMARY_TEXT)
}),
"resume request should include compacted history"
);
let input_follow_up = body_follow_up
.get("input")
.and_then(|v| v.as_array())
.unwrap();
let user_texts: Vec<String> = input_follow_up
.iter()
.filter(|item| item.get("type").and_then(|v| v.as_str()) == Some("message"))
.filter(|item| item.get("role").and_then(|v| v.as_str()) == Some("user"))
.filter_map(|item| {
item.get("content")
.and_then(|v| v.as_array())
.and_then(|arr| arr.first())
.and_then(|entry| entry.get("text"))
.and_then(|v| v.as_str())
.map(std::string::ToString::to_string)
})
.collect();
assert!(
user_texts.iter().any(|text| text == FIRST_AUTO_MSG),
"auto compact follow-up request should include the first user message"
);
assert!(
user_texts.iter().any(|text| text == SECOND_AUTO_MSG),
"auto compact follow-up request should include the second user message"
);
assert!(
user_texts.iter().any(|text| text == POST_AUTO_USER_MSG),
"auto compact follow-up request should include the new user message"
);
assert!(
user_texts.iter().any(|text| text == AUTO_SUMMARY_TEXT),
"auto compact follow-up request should include the summary message"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -643,9 +483,8 @@ async fn auto_compact_persists_rollout_entries() {
ev_completed_with_tokens("r2", 330_000),
]);
let auto_summary_payload = auto_summary(AUTO_SUMMARY_TEXT);
let sse3 = sse(vec![
ev_assistant_message("m3", &auto_summary_payload),
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
ev_completed_with_tokens("r3", 200),
]);
@@ -653,7 +492,7 @@ async fn auto_compact_persists_rollout_entries() {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(FIRST_AUTO_MSG)
&& !body.contains(SECOND_AUTO_MSG)
&& !body.contains(COMPACT_PROMPT_MARKER)
&& !body.contains("You have exceeded the maximum number of tokens")
};
mount_sse_once_match(&server, first_matcher, sse1).await;
@@ -661,13 +500,13 @@ async fn auto_compact_persists_rollout_entries() {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(SECOND_AUTO_MSG)
&& body.contains(FIRST_AUTO_MSG)
&& !body.contains(COMPACT_PROMPT_MARKER)
&& !body.contains("You have exceeded the maximum number of tokens")
};
mount_sse_once_match(&server, second_matcher, sse2).await;
let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(COMPACT_PROMPT_MARKER)
body.contains("You have exceeded the maximum number of tokens")
};
mount_sse_once_match(&server, third_matcher, sse3).await;
@@ -679,7 +518,6 @@ async fn auto_compact_persists_rollout_entries() {
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
set_test_compact_prompt(&mut config);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let NewConversation {
conversation: codex,
@@ -753,9 +591,8 @@ async fn auto_compact_stops_after_failed_attempt() {
ev_completed_with_tokens("r1", 500),
]);
let summary_payload = auto_summary(SUMMARY_TEXT);
let sse2 = sse(vec![
ev_assistant_message("m2", &summary_payload),
ev_assistant_message("m2", SUMMARY_TEXT),
ev_completed_with_tokens("r2", 50),
]);
@@ -766,19 +603,21 @@ async fn auto_compact_stops_after_failed_attempt() {
let first_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(FIRST_AUTO_MSG) && !body.contains(COMPACT_PROMPT_MARKER)
body.contains(FIRST_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
mount_sse_once_match(&server, first_matcher, sse1.clone()).await;
let second_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(COMPACT_PROMPT_MARKER)
body.contains("You have exceeded the maximum number of tokens")
};
mount_sse_once_match(&server, second_matcher, sse2.clone()).await;
let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
!body.contains(COMPACT_PROMPT_MARKER) && body.contains(SUMMARY_TEXT)
!body.contains("You have exceeded the maximum number of tokens")
&& body.contains(SUMMARY_TEXT)
};
mount_sse_once_match(&server, third_matcher, sse3.clone()).await;
@@ -790,7 +629,6 @@ async fn auto_compact_stops_after_failed_attempt() {
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
set_test_compact_prompt(&mut config);
config.model_auto_compact_token_limit = Some(200);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
@@ -839,7 +677,7 @@ async fn auto_compact_stops_after_failed_attempt() {
.and_then(|items| items.first())
.and_then(|entry| entry.get("text"))
.and_then(|text| text.as_str())
.map(|text| text == TEST_COMPACT_PROMPT)
.map(|text| text == SUMMARIZATION_PROMPT)
.unwrap_or(false)
});
assert!(
@@ -886,7 +724,6 @@ async fn manual_compact_retries_after_context_window_error() {
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
set_test_compact_prompt(&mut config);
config.model_auto_compact_token_limit = Some(200_000);
let codex = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"))
.new_conversation(config)
@@ -905,6 +742,7 @@ async fn manual_compact_retries_after_context_window_error() {
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex.submit(Op::Compact).await.unwrap();
let EventMsg::BackgroundEvent(event) =
wait_for_event(&codex, |ev| matches!(ev, EventMsg::BackgroundEvent(_))).await
else {
@@ -915,11 +753,6 @@ async fn manual_compact_retries_after_context_window_error() {
"background event should mention trimmed item count: {}",
event.message
);
let warning_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Warning(_))).await;
let EventMsg::Warning(WarningEvent { message }) = warning_event else {
panic!("expected warning event after compact retry");
};
assert_eq!(message, COMPACT_WARNING_MESSAGE);
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = request_log.requests();
@@ -946,7 +779,7 @@ async fn manual_compact_retries_after_context_window_error() {
.and_then(|items| items.first())
.and_then(|entry| entry.get("text"))
.and_then(|text| text.as_str()),
Some(TEST_COMPACT_PROMPT),
Some(SUMMARIZATION_PROMPT),
"compact attempt should include summarization prompt"
);
assert_eq!(
@@ -957,7 +790,7 @@ async fn manual_compact_retries_after_context_window_error() {
.and_then(|items| items.first())
.and_then(|entry| entry.get("text"))
.and_then(|text| text.as_str()),
Some(TEST_COMPACT_PROMPT),
Some(SUMMARIZATION_PROMPT),
"retry attempt should include summarization prompt"
);
assert_eq!(
@@ -977,228 +810,6 @@ async fn manual_compact_retries_after_context_window_error() {
}
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn manual_compact_twice_preserves_latest_user_messages() {
skip_if_no_network!();
let first_user_message = "first manual turn";
let second_user_message = "second manual turn";
let final_user_message = "post compact follow-up";
let first_summary = "FIRST_MANUAL_SUMMARY";
let second_summary = "SECOND_MANUAL_SUMMARY";
let server = start_mock_server().await;
let first_turn = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed("r1"),
]);
let first_compact_summary = auto_summary(first_summary);
let first_compact = sse(vec![
ev_assistant_message("m2", &first_compact_summary),
ev_completed("r2"),
]);
let second_turn = sse(vec![
ev_assistant_message("m3", SECOND_LARGE_REPLY),
ev_completed("r3"),
]);
let second_compact_summary = auto_summary(second_summary);
let second_compact = sse(vec![
ev_assistant_message("m4", &second_compact_summary),
ev_completed("r4"),
]);
let final_turn = sse(vec![
ev_assistant_message("m5", FINAL_REPLY),
ev_completed("r5"),
]);
let responses_mock = mount_sse_sequence(
&server,
vec![
first_turn,
first_compact,
second_turn,
second_compact,
final_turn,
],
)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
set_test_compact_prompt(&mut config);
let codex = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"))
.new_conversation(config)
.await
.unwrap()
.conversation;
codex
.submit(Op::UserInput {
items: vec![UserInput::Text {
text: first_user_message.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex.submit(Op::Compact).await.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
.submit(Op::UserInput {
items: vec![UserInput::Text {
text: second_user_message.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex.submit(Op::Compact).await.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
.submit(Op::UserInput {
items: vec![UserInput::Text {
text: final_user_message.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = responses_mock.requests();
assert_eq!(
requests.len(),
5,
"expected exactly 5 requests (user turn, compact, user turn, compact, final turn)"
);
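// True when any user message in `input` carries a content entry whose text equals `expected`.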
let contains_user_text = |input: &[serde_json::Value], expected: &str| -> bool {
input.iter().any(|item| {
item.get("type").and_then(|v| v.as_str()) == Some("message")
&& item.get("role").and_then(|v| v.as_str()) == Some("user")
&& item
.get("content")
.and_then(|v| v.as_array())
.map(|arr| {
arr.iter().any(|entry| {
entry.get("text").and_then(|v| v.as_str()) == Some(expected)
})
})
.unwrap_or(false)
})
};
let first_turn_input = requests[0].input();
assert!(
contains_user_text(&first_turn_input, first_user_message),
"first turn request missing first user message"
);
assert!(
!contains_user_text(&first_turn_input, TEST_COMPACT_PROMPT),
"first turn request should not include summarization prompt"
);
let first_compact_input = requests[1].input();
assert!(
contains_user_text(&first_compact_input, TEST_COMPACT_PROMPT),
"first compact request should include summarization prompt"
);
assert!(
contains_user_text(&first_compact_input, first_user_message),
"first compact request should include history before compaction"
);
let second_turn_input = requests[2].input();
assert!(
contains_user_text(&second_turn_input, second_user_message),
"second turn request missing second user message"
);
assert!(
contains_user_text(&second_turn_input, first_user_message),
"second turn request should include the compacted user history"
);
let second_compact_input = requests[3].input();
assert!(
contains_user_text(&second_compact_input, TEST_COMPACT_PROMPT),
"second compact request should include summarization prompt"
);
assert!(
contains_user_text(&second_compact_input, second_user_message),
"second compact request should include latest history"
);
let mut final_output = requests
.last()
.unwrap_or_else(|| panic!("final turn request missing for {final_user_message}"))
.input()
.into_iter()
.collect::<VecDeque<_>>();
// System prompt
final_output.pop_front();
// Developer instructions
final_output.pop_front();
let _ = final_output
.iter_mut()
.map(drop_call_id)
.collect::<Vec<_>>();
let expected = vec![
json!({
"content": vec![json!({
"text": first_user_message,
"type": "input_text",
})],
"role": "user",
"type": "message",
}),
json!({
"content": vec![json!({
"text": first_summary,
"type": "input_text",
})],
"role": "user",
"type": "message",
}),
json!({
"content": vec![json!({
"text": second_user_message,
"type": "input_text",
})],
"role": "user",
"type": "message",
}),
json!({
"content": vec![json!({
"text": second_summary,
"type": "input_text",
})],
"role": "user",
"type": "message",
}),
json!({
"content": vec![json!({
"text": final_user_message,
"type": "input_text",
})],
"role": "user",
"type": "message",
}),
];
assert_eq!(final_output, expected);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_events() {
skip_if_no_network!();
@@ -1209,9 +820,8 @@ async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 500),
]);
let first_summary_payload = auto_summary(FIRST_AUTO_SUMMARY);
let sse2 = sse(vec![
ev_assistant_message("m2", &first_summary_payload),
ev_assistant_message("m2", FIRST_AUTO_SUMMARY),
ev_completed_with_tokens("r2", 50),
]);
let sse3 = sse(vec![
@@ -1222,9 +832,8 @@ async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_
ev_assistant_message("m4", SECOND_LARGE_REPLY),
ev_completed_with_tokens("r4", 450),
]);
let second_summary_payload = auto_summary(SECOND_AUTO_SUMMARY);
let sse5 = sse(vec![
ev_assistant_message("m5", &second_summary_payload),
ev_assistant_message("m5", SECOND_AUTO_SUMMARY),
ev_completed_with_tokens("r5", 60),
]);
let sse6 = sse(vec![
@@ -1242,7 +851,6 @@ async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
set_test_compact_prompt(&mut config);
config.model_auto_compact_token_limit = Some(200);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
@@ -1301,7 +909,7 @@ async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_
"first request should contain the user input"
);
assert!(
request_bodies[1].contains(COMPACT_PROMPT_MARKER),
request_bodies[1].contains("You have exceeded the maximum number of tokens"),
"first auto compact request should include the summarization prompt"
);
assert!(
@@ -1309,7 +917,7 @@ async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_
"function call output should be sent before the second auto compact"
);
assert!(
request_bodies[4].contains(COMPACT_PROMPT_MARKER),
request_bodies[4].contains("You have exceeded the maximum number of tokens"),
"second auto compact request should include the summarization prompt"
);
}
@@ -1332,9 +940,8 @@ async fn auto_compact_triggers_after_function_call_over_95_percent_usage() {
ev_assistant_message("m2", FINAL_REPLY),
ev_completed_with_tokens("r2", over_limit_tokens),
]);
let auto_summary_payload = auto_summary(AUTO_SUMMARY_TEXT);
let auto_compact_turn = sse(vec![
ev_assistant_message("m3", &auto_summary_payload),
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
ev_completed_with_tokens("r3", 10),
]);
let post_auto_compact_turn = sse(vec![ev_completed_with_tokens("r4", 10)]);
@@ -1354,7 +961,6 @@ async fn auto_compact_triggers_after_function_call_over_95_percent_usage() {
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
set_test_compact_prompt(&mut config);
config.model_context_window = Some(context_window);
config.model_auto_compact_token_limit = Some(limit);
@@ -1405,7 +1011,7 @@ async fn auto_compact_triggers_after_function_call_over_95_percent_usage() {
let auto_compact_body = auto_compact_mock.single_request().body_json().to_string();
assert!(
auto_compact_body.contains(COMPACT_PROMPT_MARKER),
auto_compact_body.contains("You have exceeded the maximum number of tokens"),
"auto compact request should include the summarization prompt after exceeding 95% (limit {limit})"
);
}

View File

@@ -7,21 +7,19 @@
//! request payload that Codex would send to the model and assert that the
//! model-visible history matches the expected sequence of messages.
use super::compact::COMPACT_WARNING_MESSAGE;
use super::compact::FIRST_REPLY;
use super::compact::SUMMARY_TEXT;
use super::compact::TEST_COMPACT_PROMPT;
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::codex::compact::SUMMARIZATION_PROMPT;
use codex_core::config::Config;
use codex_core::config::OPENAI_DEFAULT_MODEL;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol::WarningEvent;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
@@ -38,8 +36,6 @@ use tempfile::TempDir;
use wiremock::MockServer;
const AFTER_SECOND_RESUME: &str = "AFTER_SECOND_RESUME";
const COMPACT_PROMPT_MARKER: &str =
"You are performing a CONTEXT CHECKPOINT COMPACTION for a tool.";
fn network_disabled() -> bool {
std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok()
@@ -68,27 +64,6 @@ fn is_ghost_snapshot_message(item: &Value) -> bool {
.is_some_and(|text| text.trim_start().starts_with("<ghost_snapshot>"))
}
fn extract_summary_message(request: &Value, summary_text: &str) -> Value {
request
.get("input")
.and_then(Value::as_array)
.and_then(|items| {
items.iter().find(|item| {
item.get("type").and_then(Value::as_str) == Some("message")
&& item.get("role").and_then(Value::as_str) == Some("user")
&& item
.get("content")
.and_then(Value::as_array)
.and_then(|arr| arr.first())
.and_then(|entry| entry.get("text"))
.and_then(Value::as_str)
== Some(summary_text)
})
})
.cloned()
.unwrap_or_else(|| panic!("expected summary message {summary_text}"))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
/// Scenario: compact an initial conversation, resume it, fork one turn back, and
/// ensure the model-visible history matches expectations at each request.
@@ -180,9 +155,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
.unwrap_or_default()
.to_string();
let expected_model = OPENAI_DEFAULT_MODEL;
let summary_after_compact = extract_summary_message(&requests[2], SUMMARY_TEXT);
let summary_after_resume = extract_summary_message(&requests[3], SUMMARY_TEXT);
let summary_after_fork = extract_summary_message(&requests[4], SUMMARY_TEXT);
let user_turn_1 = json!(
{
"model": expected_model,
@@ -283,7 +255,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"content": [
{
"type": "input_text",
"text": TEST_COMPACT_PROMPT
"text": SUMMARIZATION_PROMPT
}
]
}
@@ -332,11 +304,16 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"content": [
{
"type": "input_text",
"text": "hello world"
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:
hello world
Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:
SUMMARY_ONLY_CONTEXT"
}
]
},
summary_after_compact,
{
"type": "message",
"role": "user",
@@ -392,11 +369,16 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"content": [
{
"type": "input_text",
"text": "hello world"
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:
hello world
Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:
SUMMARY_ONLY_CONTEXT"
}
]
},
summary_after_resume,
{
"type": "message",
"role": "user",
@@ -472,11 +454,16 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"content": [
{
"type": "input_text",
"text": "hello world"
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:
hello world
Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:
SUMMARY_ONLY_CONTEXT"
}
]
},
summary_after_fork,
{
"type": "message",
"role": "user",
@@ -616,11 +603,6 @@ async fn compact_resume_after_second_compaction_preserves_history() {
.unwrap_or_default()
.to_string();
// Build expected final request input: initial context + forked user message +
// compacted summary + post-compact user message + resumed user message.
let summary_after_second_compact =
extract_summary_message(&requests[requests.len() - 3], SUMMARY_TEXT);
let mut expected = json!([
{
"instructions": prompt,
@@ -651,11 +633,10 @@ async fn compact_resume_after_second_compaction_preserves_history() {
"content": [
{
"type": "input_text",
"text": "AFTER_FORK"
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:\n\nAFTER_FORK\n\nAnother language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:\n\nSUMMARY_ONLY_CONTEXT"
}
]
},
summary_after_second_compact,
{
"type": "message",
"role": "user",
@@ -741,7 +722,7 @@ async fn mount_initial_flow(server: &MockServer) {
let match_first = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("\"text\":\"hello world\"")
&& !body.contains(COMPACT_PROMPT_MARKER)
&& !body.contains("You have exceeded the maximum number of tokens")
&& !body.contains(&format!("\"text\":\"{SUMMARY_TEXT}\""))
&& !body.contains("\"text\":\"AFTER_COMPACT\"")
&& !body.contains("\"text\":\"AFTER_RESUME\"")
@@ -751,7 +732,7 @@ async fn mount_initial_flow(server: &MockServer) {
let match_compact = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(COMPACT_PROMPT_MARKER)
body.contains("You have exceeded the maximum number of tokens")
};
mount_sse_once_match(server, match_compact, sse2).await;
@@ -785,7 +766,8 @@ async fn mount_second_compact_flow(server: &MockServer) {
let match_second_compact = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(COMPACT_PROMPT_MARKER) && body.contains("AFTER_FORK")
body.contains("You have exceeded the maximum number of tokens")
&& body.contains("AFTER_FORK")
};
mount_sse_once_match(server, match_second_compact, sse6).await;
@@ -806,7 +788,6 @@ async fn start_test_conversation(
let home = TempDir::new().expect("create temp dir");
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.compact_prompt = Some(TEST_COMPACT_PROMPT.to_string());
let manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let NewConversation { conversation, .. } = manager
@@ -832,11 +813,6 @@ async fn compact_conversation(conversation: &Arc<CodexConversation>) {
.submit(Op::Compact)
.await
.expect("compact conversation");
let warning_event = wait_for_event(conversation, |ev| matches!(ev, EventMsg::Warning(_))).await;
let EventMsg::Warning(WarningEvent { message }) = warning_event else {
panic!("expected warning event after compact");
};
assert_eq!(message, COMPACT_WARNING_MESSAGE);
wait_for_event(conversation, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}

View File

@@ -43,7 +43,7 @@ async fn emits_deprecation_notice_for_legacy_feature_flag() -> anyhow::Result<()
assert_eq!(
details.as_deref(),
Some(
"Enable it with `--enable streamable_shell` or `[features].streamable_shell` in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details."
"You can either enable it using the CLI with `--enable streamable_shell` or through the config.toml file with `[features].streamable_shell`"
),
);

View File

@@ -8,7 +8,6 @@ mod apply_patch_cli;
mod apply_patch_freeform;
#[cfg(not(target_os = "windows"))]
mod approvals;
mod auth_refresh;
mod cli_stream;
mod client;
mod codex_delegate;
@@ -39,7 +38,6 @@ mod tool_harness;
mod tool_parallelism;
mod tools;
mod truncation;
mod undo;
mod unified_exec;
mod user_notification;
mod user_shell_cmd;

View File

@@ -1,5 +1,6 @@
#![allow(clippy::unwrap_used)]
use chrono::Local;
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
@@ -18,7 +19,10 @@ use codex_core::shell::default_user_shell;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
use core_test_support::responses;
use core_test_support::responses::mount_sse_once;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use std::collections::HashMap;
use tempfile::TempDir;
@@ -37,9 +41,11 @@ fn text_user_input(text: String) -> serde_json::Value {
}
fn default_env_context_str(cwd: &str, shell: &Shell) -> String {
let local_date = Local::now().format("%Y-%m-%d %:z").to_string();
format!(
r#"<environment_context>
<cwd>{}</cwd>
<local_date>{local_date}</local_date>
<approval_policy>on-request</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
@@ -341,23 +347,25 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
let shell = default_user_shell().await;
let expected_env_text = format!(
r#"<environment_context>
let expected_env_text = {
let local_date = Local::now().format("%Y-%m-%d %:z").to_string();
format!(
r#"<environment_context>
<cwd>{}</cwd>
<local_date>{local_date}</local_date>
<approval_policy>on-request</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
{}</environment_context>"#,
cwd.path().to_string_lossy(),
match shell.name() {
Some(name) => format!(" <shell>{name}</shell>\n"),
None => String::new(),
}
);
let expected_ui_text = format!(
"# AGENTS.md instructions for {}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>",
cwd.path().to_string_lossy()
);
cwd.path().to_string_lossy(),
match shell.name() {
Some(name) => format!(" <shell>{name}</shell>\n"),
None => String::new(),
}
)
};
let expected_ui_text =
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
let expected_env_msg = serde_json::json!({
"type": "message",
@@ -736,11 +744,9 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
let shell = default_user_shell().await;
let expected_ui_text = format!(
"# AGENTS.md instructions for {}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>",
default_cwd.to_string_lossy()
);
let expected_ui_msg = text_user_input(expected_ui_text);
let expected_ui_text =
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
let expected_ui_msg = text_user_input(expected_ui_text.to_string());
let expected_env_msg_1 = text_user_input(default_env_context_str(
&cwd.path().to_string_lossy(),
@@ -852,10 +858,8 @@ async fn send_user_turn_with_changes_sends_environment_context() {
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
let shell = default_user_shell().await;
let expected_ui_text = format!(
"# AGENTS.md instructions for {}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>",
default_cwd.to_string_lossy()
);
let expected_ui_text =
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
let expected_ui_msg = serde_json::json!({
"type": "message",
"role": "user",
@@ -889,3 +893,68 @@ async fn send_user_turn_with_changes_sends_environment_context() {
]);
assert_eq!(body2["input"], expected_input_2);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn cached_prompt_filters_reasoning_items_from_previous_turns() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let call_id = "shell-call";
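// The mocked first response has the model call the `shell` tool to run `/bin/echo tool output`.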
let shell_args = serde_json::json!({
"command": ["/bin/echo", "tool output"],
"timeout_ms": 1_000,
});
let initial_response = responses::sse(vec![
responses::ev_response_created("resp-first"),
responses::ev_reasoning_item("reason-1", &["Planning shell command"], &[]),
responses::ev_function_call(
call_id,
"shell",
&serde_json::to_string(&shell_args).expect("serialize shell args"),
),
responses::ev_completed("resp-first"),
]);
let follow_up_response = responses::sse(vec![
responses::ev_response_created("resp-follow-up"),
responses::ev_reasoning_item(
"reason-2",
&["Shell execution completed"],
&["stdout: tool output"],
),
responses::ev_assistant_message("assistant-1", "First turn reply"),
responses::ev_completed("resp-follow-up"),
]);
let second_turn_response = responses::sse(vec![
responses::ev_response_created("resp-second"),
responses::ev_assistant_message("assistant-2", "Second turn reply"),
responses::ev_completed("resp-second"),
]);
mount_sse_once(&server, initial_response).await;
let second_request = mount_sse_once(&server, follow_up_response).await;
let third_request = mount_sse_once(&server, second_turn_response).await;
let mut builder = test_codex();
let test = builder.build(&server).await?;
test.submit_turn("hello 1").await?;
test.submit_turn("hello 2").await?;
let second_request_input = second_request.single_request();
let reasoning_items = second_request_input.inputs_of_type("reasoning");
assert_eq!(
reasoning_items.len(),
1,
"expected first turn follow-up to include reasoning item"
);
let third_request_input = third_request.single_request();
let cached_reasoning = third_request_input.inputs_of_type("reasoning");
assert_eq!(
cached_reasoning.len(),
0,
"expected cached prompt to filter out prior reasoning items"
);
Ok(())
}

View File

@@ -30,18 +30,6 @@ use serde_json::Value;
use serde_json::json;
use std::fs;
const FIXTURE_JSON: &str = r#"{
"description": "This is an example JSON file.",
"foo": "bar",
"isTest": true,
"testNumber": 123,
"testArray": [1, 2, 3],
"testObject": {
"foo": "bar"
}
}
"#;
async fn submit_turn(test: &TestCodex, prompt: &str, sandbox_policy: SandboxPolicy) -> Result<()> {
let session_model = test.session_configured.model.clone();
@@ -237,154 +225,6 @@ freeform shell
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn shell_output_preserves_fixture_json_without_serialization() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let mut builder = test_codex().with_config(|config| {
config.features.disable(Feature::ApplyPatchFreeform);
config.model = "gpt-5".to_string();
config.model_family = find_family_for_model("gpt-5").expect("gpt-5 is a model family");
});
let test = builder.build(&server).await?;
let fixture_path = test.cwd.path().join("fixture.json");
fs::write(&fixture_path, FIXTURE_JSON)?;
let fixture_path_str = fixture_path.to_string_lossy().to_string();
let call_id = "shell-json-fixture";
let args = json!({
"command": ["/usr/bin/sed", "-n", "p", fixture_path_str],
"timeout_ms": 1_000,
});
let responses = vec![
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
sse(vec![
ev_assistant_message("msg-1", "done"),
ev_completed("resp-2"),
]),
];
mount_sse_sequence(&server, responses).await;
submit_turn(
&test,
"read the fixture JSON with sed",
SandboxPolicy::DangerFullAccess,
)
.await?;
let requests = server
.received_requests()
.await
.expect("recorded requests present");
let bodies = request_bodies(&requests)?;
let output_item = find_function_call_output(&bodies, call_id).expect("shell output present");
let output = output_item
.get("output")
.and_then(Value::as_str)
.expect("shell output string");
let mut parsed: Value = serde_json::from_str(output)?;
if let Some(metadata) = parsed.get_mut("metadata").and_then(Value::as_object_mut) {
let _ = metadata.remove("duration_seconds");
}
assert_eq!(
parsed
.get("metadata")
.and_then(|metadata| metadata.get("exit_code"))
.and_then(Value::as_i64),
Some(0),
"expected zero exit code when serialization is disabled",
);
let stdout = parsed
.get("output")
.and_then(Value::as_str)
.unwrap_or_default()
.to_string();
assert_eq!(
stdout, FIXTURE_JSON,
"expected shell output to match the fixture contents"
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn shell_output_structures_fixture_with_serialization() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let mut builder = test_codex().with_config(|config| {
config.features.enable(Feature::ApplyPatchFreeform);
});
let test = builder.build(&server).await?;
let fixture_path = test.cwd.path().join("fixture.json");
fs::write(&fixture_path, FIXTURE_JSON)?;
let fixture_path_str = fixture_path.to_string_lossy().to_string();
let call_id = "shell-structured-fixture";
let args = json!({
"command": ["/usr/bin/sed", "-n", "p", fixture_path_str],
"timeout_ms": 1_000,
});
let responses = vec![
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
sse(vec![
ev_assistant_message("msg-1", "done"),
ev_completed("resp-2"),
]),
];
mount_sse_sequence(&server, responses).await;
submit_turn(
&test,
"read the fixture JSON with structured output",
SandboxPolicy::DangerFullAccess,
)
.await?;
let requests = server
.received_requests()
.await
.expect("recorded requests present");
let bodies = request_bodies(&requests)?;
let output_item =
find_function_call_output(&bodies, call_id).expect("structured output present");
let output = output_item
.get("output")
.and_then(Value::as_str)
.expect("structured output string");
assert!(
serde_json::from_str::<Value>(output).is_err(),
"expected structured output to be plain text"
);
let (header, body) = output
.split_once("Output:\n")
.expect("structured output contains an Output section");
assert_regex_match(
r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds$",
header.trim_end(),
);
assert_eq!(
body, FIXTURE_JSON,
"expected Output section to include the fixture contents"
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn shell_output_for_freeform_tool_records_duration() -> Result<()> {
skip_if_no_network!(Ok(()));

View File

@@ -3,16 +3,9 @@
use anyhow::Context;
use anyhow::Result;
use codex_core::config::types::McpServerConfig;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::features::Feature;
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::user_input::UserInput;
use core_test_support::assert_regex_match;
use core_test_support::responses;
use core_test_support::responses::ev_assistant_message;
@@ -25,13 +18,10 @@ use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use escargot::CargoBuild;
use regex_lite::Regex;
use serde_json::Value;
use serde_json::json;
use std::collections::HashMap;
use std::time::Duration;
use wiremock::matchers::any;
// Verifies byte-truncation formatting for function error output (RespondToModel errors)
@@ -278,105 +268,3 @@ async fn mcp_tool_call_output_exceeds_limit_truncated_for_model() -> Result<()>
Ok(())
}
// Verifies that an MCP image tool output is serialized as content_items array with
// the image preserved and no truncation summary appended (since there are no text items).
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let call_id = "rmcp-image-no-trunc";
let server_name = "rmcp";
let tool_name = format!("mcp__{server_name}__image");
mount_sse_once_match(
&server,
any(),
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, &tool_name, "{}"),
ev_completed("resp-1"),
]),
)
.await;
let final_mock = mount_sse_once_match(
&server,
any(),
sse(vec![
ev_assistant_message("msg-1", "done"),
ev_completed("resp-2"),
]),
)
.await;
// Build the stdio rmcp server and pass a tiny PNG via data URL so it can construct ImageContent.
let rmcp_test_server_bin = CargoBuild::new()
.package("codex-rmcp-client")
.bin("test_stdio_server")
.run()?
.path()
.to_string_lossy()
.into_owned();
// 1x1 PNG data URL
let openai_png = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMB/ee9bQAAAABJRU5ErkJggg==";
let mut builder = test_codex().with_config(move |config| {
config.features.enable(Feature::RmcpClient);
config.mcp_servers.insert(
server_name.to_string(),
McpServerConfig {
transport: McpServerTransportConfig::Stdio {
command: rmcp_test_server_bin,
args: Vec::new(),
env: Some(HashMap::from([(
"MCP_TEST_IMAGE_DATA_URL".to_string(),
openai_png.to_string(),
)])),
env_vars: Vec::new(),
cwd: None,
},
enabled: true,
startup_timeout_sec: Some(Duration::from_secs(10)),
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
},
);
});
let fixture = builder.build(&server).await?;
let session_model = fixture.session_configured.model.clone();
fixture
.codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
text: "call the rmcp image tool".into(),
}],
final_output_json_schema: None,
cwd: fixture.cwd.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
})
.await?;
// Wait for completion to ensure the outbound request is captured.
wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let output_item = final_mock.single_request().function_call_output(call_id);
// Expect exactly one array element: the image item; and no trailing summary text.
let output = output_item.get("output").expect("output");
assert!(output.is_array(), "expected array output");
let arr = output.as_array().unwrap();
assert_eq!(arr.len(), 1, "no truncation summary should be appended");
assert_eq!(
arr[0],
json!({"type": "input_image", "image_url": openai_png})
);
Ok(())
}

Some files were not shown because too many files have changed in this diff.