Compare commits


1 Commit

Author SHA1 Message Date
kevin zhao 8832459f7f draft impl of execpolicy in posix 2025-12-01 13:04:56 -05:00
252 changed files with 2944 additions and 10533 deletions

View File

@@ -95,6 +95,14 @@ function detectPackageManager() {
return "bun";
}
if (
process.env.BUN_INSTALL ||
process.env.BUN_INSTALL_GLOBAL_DIR ||
process.env.BUN_INSTALL_BIN_DIR
) {
return "bun";
}
return userAgent ? "npm" : null;
}

codex-rs/Cargo.lock generated
View File

@@ -858,7 +858,6 @@ dependencies = [
"http",
"pretty_assertions",
"regex-lite",
"reqwest",
"serde",
"serde_json",
"thiserror 2.0.17",
@@ -866,7 +865,6 @@ dependencies = [
"tokio-test",
"tokio-util",
"tracing",
"wiremock",
]
[[package]]
@@ -1070,7 +1068,6 @@ dependencies = [
"serde_json",
"thiserror 2.0.17",
"tokio",
"tracing",
]
[[package]]
@@ -1119,10 +1116,12 @@ name = "codex-common"
version = "0.0.0"
dependencies = [
"clap",
"codex-app-server-protocol",
"codex-core",
"codex-lmstudio",
"codex-ollama",
"codex-protocol",
"once_cell",
"serde",
"toml",
]
@@ -1187,7 +1186,6 @@ dependencies = [
"seccompiler",
"serde",
"serde_json",
"serde_yaml",
"serial_test",
"sha1",
"sha2",
@@ -1266,6 +1264,7 @@ dependencies = [
"shlex",
"socket2 0.6.0",
"tempfile",
"thiserror 2.0.17",
"tokio",
"tokio-util",
"tracing",
@@ -1284,7 +1283,6 @@ dependencies = [
"serde_json",
"shlex",
"starlark",
"tempfile",
"thiserror 2.0.17",
]
@@ -1616,7 +1614,6 @@ dependencies = [
"textwrap 0.16.2",
"tokio",
"tokio-stream",
"tokio-util",
"toml",
"tracing",
"tracing-appender",
@@ -1626,7 +1623,6 @@ dependencies = [
"unicode-segmentation",
"unicode-width 0.2.1",
"url",
"uuid",
"vt100",
]
@@ -4470,12 +4466,6 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "pastey"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57d6c094ee800037dff99e02cab0eaf3142826586742a270ab3d7a62656bd27a"
[[package]]
name = "path-absolutize"
version = "3.1.1"
@@ -5154,9 +5144,8 @@ dependencies = [
[[package]]
name = "rmcp"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38b18323edc657390a6ed4d7a9110b0dec2dc3ed128eb2a123edfbafabdbddc5"
version = "0.9.0"
source = "git+https://github.com/bolinfest/rust-sdk?branch=pr556#4d9cc16f4c76c84486344f542ed9a3e9364019ba"
dependencies = [
"async-trait",
"base64",
@@ -5167,7 +5156,7 @@ dependencies = [
"http-body",
"http-body-util",
"oauth2",
"pastey",
"paste",
"pin-project-lite",
"process-wrap",
"rand 0.9.2",
@@ -5189,9 +5178,8 @@ dependencies = [
[[package]]
name = "rmcp-macros"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c75d0a62676bf8c8003c4e3c348e2ceb6a7b3e48323681aaf177fdccdac2ce50"
version = "0.9.0"
source = "git+https://github.com/bolinfest/rust-sdk?branch=pr556#4d9cc16f4c76c84486344f542ed9a3e9364019ba"
dependencies = [
"darling 0.21.3",
"proc-macro2",
@@ -5779,19 +5767,6 @@ dependencies = [
"syn 2.0.104",
]
[[package]]
name = "serde_yaml"
version = "0.9.34+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
dependencies = [
"indexmap 2.12.0",
"itoa",
"ryu",
"serde",
"unsafe-libyaml",
]
[[package]]
name = "serial2"
version = "0.2.31"
@@ -6603,7 +6578,6 @@ dependencies = [
"futures-sink",
"futures-util",
"pin-project-lite",
"slab",
"tokio",
]
@@ -7007,12 +6981,6 @@ version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
[[package]]
name = "unsafe-libyaml"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
[[package]]
name = "untrusted"
version = "0.9.0"

View File

@@ -59,15 +59,15 @@ license = "Apache-2.0"
# Internal
app_test_support = { path = "app-server/tests/common" }
codex-ansi-escape = { path = "ansi-escape" }
codex-api = { path = "codex-api" }
codex-app-server = { path = "app-server" }
codex-app-server-protocol = { path = "app-server-protocol" }
codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-chatgpt = { path = "chatgpt" }
codex-api = { path = "codex-api" }
codex-client = { path = "codex-client" }
codex-chatgpt = { path = "chatgpt" }
codex-common = { path = "common" }
codex-core = { path = "core" }
codex-exec = { path = "exec" }
@@ -169,16 +169,15 @@ pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
ratatui-macros = "0.6.0"
regex = "1.12.2"
regex-lite = "0.1.7"
regex = "1.12.2"
reqwest = "0.12"
rmcp = { version = "0.10.0", default-features = false }
rmcp = { version = "0.9.0", default-features = false }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.34.0"
serde = "1"
serde_json = "1"
serde_yaml = "0.9"
serde_with = "3.16"
serial_test = "3.2.0"
sha1 = "0.10.6"
@@ -289,6 +288,7 @@ opt-level = 0
# ratatui = { path = "../../ratatui" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
rmcp = { git = "https://github.com/bolinfest/rust-sdk", branch = "pr556" }
# Uncomment to debug local changes.
# rmcp = { path = "../../rust-sdk/crates/rmcp" }

View File

@@ -139,11 +139,6 @@ client_request_definitions! {
response: v2::ModelListResponse,
},
McpServersList => "mcpServers/list" {
params: v2::ListMcpServersParams,
response: v2::ListMcpServersResponse,
},
LoginAccount => "account/login/start" {
params: v2::LoginAccountParams,
response: v2::LoginAccountResponse,
@@ -169,12 +164,6 @@ client_request_definitions! {
response: v2::FeedbackUploadResponse,
},
/// Execute a command (argv vector) under the server's sandbox.
OneOffCommandExec => "command/exec" {
params: v2::CommandExecParams,
response: v2::CommandExecResponse,
},
ConfigRead => "config/read" {
params: v2::ConfigReadParams,
response: v2::ConfigReadResponse,
@@ -522,7 +511,6 @@ server_notification_definitions! {
ItemCompleted => "item/completed" (v2::ItemCompletedNotification),
AgentMessageDelta => "item/agentMessage/delta" (v2::AgentMessageDeltaNotification),
CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
FileChangeOutputDelta => "item/fileChange/outputDelta" (v2::FileChangeOutputDeltaNotification),
McpToolCallProgress => "item/mcpToolCall/progress" (v2::McpToolCallProgressNotification),
AccountUpdated => "account/updated" (v2::AccountUpdatedNotification),
AccountRateLimitsUpdated => "account/rateLimits/updated" (v2::AccountRateLimitsUpdatedNotification),

View File

@@ -1,15 +0,0 @@
use crate::protocol::v1;
use crate::protocol::v2;
impl From<v1::ExecOneOffCommandParams> for v2::CommandExecParams {
fn from(value: v1::ExecOneOffCommandParams) -> Self {
Self {
command: value.command,
timeout_ms: value
.timeout_ms
.map(|timeout| i64::try_from(timeout).unwrap_or(60_000)),
cwd: value.cwd,
sandbox_policy: value.sandbox_policy.map(std::convert::Into::into),
}
}
}

View File

@@ -2,7 +2,6 @@
// Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`.
pub mod common;
mod mappers;
pub mod thread_history;
pub mod v1;
pub mod v2;

View File

@@ -1,6 +1,5 @@
use crate::protocol::v2::ThreadItem;
use crate::protocol::v2::Turn;
use crate::protocol::v2::TurnError;
use crate::protocol::v2::TurnStatus;
use crate::protocol::v2::UserInput;
use codex_protocol::protocol::AgentReasoningEvent;
@@ -143,7 +142,6 @@ impl ThreadHistoryBuilder {
PendingTurn {
id: self.next_turn_id(),
items: Vec::new(),
error: None,
status: TurnStatus::Completed,
}
}
@@ -192,7 +190,6 @@ impl ThreadHistoryBuilder {
struct PendingTurn {
id: String,
items: Vec<ThreadItem>,
error: Option<TurnError>,
status: TurnStatus,
}
@@ -201,7 +198,6 @@ impl From<PendingTurn> for Turn {
Self {
id: value.id,
items: value.items,
error: value.error,
status: value.status,
}
}

View File

@@ -3,11 +3,11 @@ use std::path::PathBuf;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;

View File

@@ -2,13 +2,14 @@ use std::collections::HashMap;
use std::path::PathBuf;
use crate::protocol::common::AuthMode;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::approvals::SandboxCommandAssessment as CoreSandboxCommandAssessment;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
use codex_protocol::items::TurnItem as CoreTurnItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand as CoreParsedCommand;
use codex_protocol::plan_tool::PlanItemArg as CorePlanItemArg;
use codex_protocol::plan_tool::StepStatus as CorePlanStepStatus;
@@ -21,9 +22,6 @@ use codex_protocol::protocol::TokenUsage as CoreTokenUsage;
use codex_protocol::protocol::TokenUsageInfo as CoreTokenUsageInfo;
use codex_protocol::user_input::UserInput as CoreUserInput;
use mcp_types::ContentBlock as McpContentBlock;
use mcp_types::Resource as McpResource;
use mcp_types::ResourceTemplate as McpResourceTemplate;
use mcp_types::Tool as McpTool;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
@@ -140,15 +138,6 @@ v2_enum_from_core!(
}
);
v2_enum_from_core!(
pub enum McpAuthStatus from codex_protocol::protocol::McpAuthStatus {
Unsupported,
NotLoggedIn,
BearerToken,
OAuth
}
);
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -209,8 +198,6 @@ pub struct OverriddenMetadata {
pub struct ConfigWriteResponse {
pub status: WriteStatus,
pub version: String,
/// Canonical path to the config file that was written.
pub file_path: String,
pub overridden_metadata: Option<OverriddenMetadata>,
}
@@ -247,11 +234,10 @@ pub struct ConfigReadResponse {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigValueWriteParams {
pub file_path: String,
pub key_path: String,
pub value: JsonValue,
pub merge_strategy: MergeStrategy,
/// Path to the config file to write; defaults to the user's `config.toml` when omitted.
pub file_path: Option<String>,
pub expected_version: Option<String>,
}
@@ -259,9 +245,8 @@ pub struct ConfigValueWriteParams {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigBatchWriteParams {
pub file_path: String,
pub edits: Vec<ConfigEdit>,
/// Path to the config file to write; defaults to the user's `config.toml` when omitted.
pub file_path: Option<String>,
pub expected_version: Option<String>,
}
@@ -630,44 +615,13 @@ pub struct ModelListResponse {
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ListMcpServersParams {
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to a server-defined value.
pub limit: Option<u32>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServer {
pub name: String,
pub tools: std::collections::HashMap<String, McpTool>,
pub resources: Vec<McpResource>,
pub resource_templates: Vec<McpResourceTemplate>,
pub auth_status: McpAuthStatus,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ListMcpServersResponse {
pub data: Vec<McpServer>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If None, there are no more items to return.
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FeedbackUploadParams {
pub classification: String,
pub reason: Option<String>,
pub thread_id: Option<String>,
pub conversation_id: Option<ConversationId>,
pub include_logs: bool,
}
@@ -678,26 +632,6 @@ pub struct FeedbackUploadResponse {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecParams {
pub command: Vec<String>,
#[ts(type = "number | null")]
pub timeout_ms: Option<i64>,
pub cwd: Option<PathBuf>,
pub sandbox_policy: Option<SandboxPolicy>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecResponse {
pub exit_code: i32,
pub stdout: String,
pub stderr: String,
}
// === Threads, Turns, and Items ===
// Thread APIs
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
@@ -832,7 +766,6 @@ pub struct Thread {
/// Model provider used for this thread (for example, 'openai').
pub model_provider: String,
/// Unix timestamp (in seconds) when the thread was created.
#[ts(type = "number")]
pub created_at: i64,
/// [UNSTABLE] Path to the thread on disk.
pub path: PathBuf,
@@ -923,9 +856,8 @@ pub struct Turn {
/// For all other responses and notifications returning a Turn,
/// the items field will be an empty list.
pub items: Vec<ThreadItem>,
#[serde(flatten)]
pub status: TurnStatus,
/// Only populated when the Turn's status is failed.
pub error: Option<TurnError>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS, Error)]
@@ -942,20 +874,17 @@ pub struct TurnError {
#[ts(export_to = "v2/")]
pub struct ErrorNotification {
pub error: TurnError,
// Set to true if the error is transient and the app-server process will automatically retry.
// If true, this will not interrupt a turn.
pub will_retry: bool,
pub thread_id: String,
pub turn_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
#[serde(tag = "status", rename_all = "camelCase")]
#[ts(tag = "status", export_to = "v2/")]
pub enum TurnStatus {
Completed,
Interrupted,
Failed,
Failed { error: TurnError },
InProgress,
}
@@ -1124,7 +1053,6 @@ pub enum ThreadItem {
/// The command's exit code.
exit_code: Option<i32>,
/// The duration of the command execution in milliseconds.
#[ts(type = "number | null")]
duration_ms: Option<i64>,
},
#[serde(rename_all = "camelCase")]
@@ -1144,15 +1072,15 @@ pub enum ThreadItem {
arguments: JsonValue,
result: Option<McpToolCallResult>,
error: Option<McpToolCallError>,
/// The duration of the MCP tool call in milliseconds.
#[ts(type = "number | null")]
duration_ms: Option<i64>,
},
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
WebSearch { id: String, query: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
TodoList { id: String, items: Vec<TodoItem> },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
ImageView { id: String, path: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
@@ -1255,6 +1183,15 @@ pub struct McpToolCallError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TodoItem {
pub id: String,
pub text: String,
pub completed: bool,
}
// === Server Notifications ===
// Thread/Turn lifecycle notifications and item progress events
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1304,7 +1241,6 @@ pub struct TurnDiffUpdatedNotification {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnPlanUpdatedNotification {
pub thread_id: String,
pub turn_id: String,
pub explanation: Option<String>,
pub plan: Vec<TurnPlanStep>,
@@ -1383,7 +1319,6 @@ pub struct ReasoningSummaryTextDeltaNotification {
pub turn_id: String,
pub item_id: String,
pub delta: String,
#[ts(type = "number")]
pub summary_index: i64,
}
@@ -1394,7 +1329,6 @@ pub struct ReasoningSummaryPartAddedNotification {
pub thread_id: String,
pub turn_id: String,
pub item_id: String,
#[ts(type = "number")]
pub summary_index: i64,
}
@@ -1406,7 +1340,6 @@ pub struct ReasoningTextDeltaNotification {
pub turn_id: String,
pub item_id: String,
pub delta: String,
#[ts(type = "number")]
pub content_index: i64,
}
@@ -1420,16 +1353,6 @@ pub struct CommandExecutionOutputDeltaNotification {
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FileChangeOutputDeltaNotification {
pub thread_id: String,
pub turn_id: String,
pub item_id: String,
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1524,7 +1447,6 @@ pub struct RateLimitSnapshot {
pub primary: Option<RateLimitWindow>,
pub secondary: Option<RateLimitWindow>,
pub credits: Option<CreditsSnapshot>,
pub plan_type: Option<PlanType>,
}
impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
@@ -1533,7 +1455,6 @@ impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
primary: value.primary.map(RateLimitWindow::from),
secondary: value.secondary.map(RateLimitWindow::from),
credits: value.credits.map(CreditsSnapshot::from),
plan_type: value.plan_type,
}
}
}
@@ -1543,9 +1464,7 @@ impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
#[ts(export_to = "v2/")]
pub struct RateLimitWindow {
pub used_percent: i32,
#[ts(type = "number | null")]
pub window_duration_mins: Option<i64>,
#[ts(type = "number | null")]
pub resets_at: Option<i64>,
}

View File

@@ -563,9 +563,7 @@ impl CodexClient {
ServerNotification::TurnCompleted(payload) => {
if payload.turn.id == turn_id {
println!("\n< turn/completed notification: {:?}", payload.turn.status);
if payload.turn.status == TurnStatus::Failed
&& let Some(error) = payload.turn.error
{
if let TurnStatus::Failed { error } = &payload.turn.status {
println!("[turn error] {}", error.message);
}
break;

View File

@@ -31,7 +31,6 @@ chrono = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }
mcp-types = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
tokio = { workspace = true, features = [

View File

@@ -1,15 +1,15 @@
# codex-app-server
`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt).
`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). The message schema is currently unstable, but those who wish to build experimental UIs on top of Codex may find it valuable.
## Table of Contents
- [Protocol](#protocol)
- [Message Schema](#message-schema)
- [Core Primitives](#core-primitives)
- [Lifecycle Overview](#lifecycle-overview)
- [Initialization](#initialization)
- [API Overview](#api-overview)
- [Events](#events)
- [Core primitives](#core-primitives)
- [Thread & turn endpoints](#thread--turn-endpoints)
- [Events (work-in-progress)](#events-work-in-progress)
- [Auth endpoints](#auth-endpoints)
## Protocol
@@ -25,15 +25,6 @@ codex app-server generate-ts --out DIR
codex app-server generate-json-schema --out DIR
```
## Core Primitives
The API exposes three top level primitives representing an interaction between a user and Codex:
- **Thread**: A conversation between a user and the Codex agent. Each thread contains multiple turns.
- **Turn**: One turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
- **Item**: Represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations. Example items include user message, agent reasoning, agent message, shell command, file edit, etc.
Use the thread APIs to create, list, or archive conversations. Drive a conversation with turn APIs and stream progress via turn notifications.
## Lifecycle Overview
- Initialize once: Immediately after launching the codex app-server process, send an `initialize` request with your client metadata, then emit an `initialized` notification. Any other request before this handshake gets rejected.
@@ -46,16 +37,28 @@ Use the thread APIs to create, list, or archive conversations. Drive a conversat
Clients must send a single `initialize` request before invoking any other method, then acknowledge with an `initialized` notification. The server returns the user agent string it will present to upstream services; subsequent requests issued before initialization receive a `"Not initialized"` error, and repeated `initialize` calls receive an `"Already initialized"` error.
Applications building on top of `codex app-server` should identify themselves via the `clientInfo` parameter.
Example:
Example (from OpenAI's official VSCode extension):
```json
{ "method": "initialize", "id": 0, "params": {
"clientInfo": { "name": "codex-vscode", "title": "Codex VS Code Extension", "version": "0.1.0" }
} }
{ "id": 0, "result": { "userAgent": "codex-app-server/0.1.0 codex-vscode/0.1.0" } }
{ "method": "initialized" }
```
## API Overview
## Core primitives
We have 3 top level primitives:
- Thread - a conversation between the Codex agent and a user. Each thread contains multiple turns.
- Turn - one turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
- Item - represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations.
## Thread & turn endpoints
The JSON-RPC API exposes dedicated methods for managing Codex conversations. Threads store long-lived conversation metadata, and turns store the per-message exchange (input → Codex output, including streamed items). Use the thread APIs to create, list, or archive sessions, then drive the conversation with turn APIs and notifications.
### Quick reference
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
@@ -63,15 +66,8 @@ Example (from OpenAI's official VSCode extension):
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
- `review/start` — kick off Codex's automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `model/list` — list available models (with reasoning effort options).
- `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `config/read` — fetch the effective config on disk after resolving config layering.
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.
### Example: Start or resume a thread
### 1) Start or resume a thread
Start a fresh thread when you need a new Codex conversation.
@@ -102,7 +98,7 @@ To continue a stored session, call `thread/resume` with the `thread.id` you prev
{ "id": 11, "result": { "thread": { "id": "thr_123", } } }
```
### Example: List threads (with pagination & filters)
### 2) List threads (pagination & filters)
`thread/list` lets you render a history UI. Pass any combination of:
- `cursor` — opaque string from a prior response; omit for the first page.
@@ -127,7 +123,7 @@ Example:
When `nextCursor` is `null`, you've reached the final page.
### Example: Archive a thread
### 3) Archive a thread
Use `thread/archive` to move the persisted rollout (stored as a JSONL file on disk) into the archived sessions directory.
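A minimal sketch (the empty result shape is assumed):

```json
{ "method": "thread/archive", "id": 14, "params": { "threadId": "thr_123" } }
{ "id": 14, "result": {} }
```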
@@ -138,7 +134,7 @@ Use `thread/archive` to move the persisted rollout (stored as a JSONL file on di
An archived thread will not appear in future calls to `thread/list`.
### Example: Start a turn (send user input)
### 4) Start a turn (send user input)
Turns attach user input (text or images) to a thread and trigger Codex generation. The `input` field is a list of discriminated unions:
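As a sketch, assuming the unions use a `type` tag matching the `userMessage` content kinds (`text`, `image`, `localImage`) described under thread items below:

```json
{ "method": "turn/start", "id": 16, "params": {
  "threadId": "thr_123",
  "input": [
    { "type": "text", "text": "Fix the failing unit test" },
    { "type": "localImage", "path": "/tmp/screenshot.png" }
  ]
} }
```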
@@ -172,7 +168,7 @@ You can optionally specify config overrides on the new turn. If specified, these
} } }
```
### Example: Interrupt an active turn
### 5) Interrupt an active turn
You can cancel a running Turn with `turn/interrupt`.
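A minimal request/response sketch (parameter names assumed from the protocol's camelCase convention; success is an empty result):

```json
{ "method": "turn/interrupt", "id": 20, "params": { "threadId": "thr_123", "turnId": "turn_456" } }
{ "id": 20, "result": {} }
```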
@@ -186,7 +182,7 @@ You can cancel a running Turn with `turn/interrupt`.
The server requests cancellations for running subprocesses, then emits a `turn/completed` event with `status: "interrupted"`. Rely on the `turn/completed` event to know when Codex-side cleanup is done.
### Example: Request a code review
### 6) Request a code review
Use `review/start` to run Codex's reviewer on the currently checked-out project. The request takes the thread id plus a `target` describing what should be reviewed:
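For example, a sketch that reviews against a base branch (the `type` tag and field casing are assumed from the `ReviewTarget` variants: `uncommittedChanges`, `baseBranch`, `commit`, `custom`):

```json
{ "method": "review/start", "id": 30, "params": {
  "threadId": "thr_123",
  "target": { "type": "baseBranch", "branch": "main" }
} }
```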
@@ -245,26 +241,7 @@ containing an `exitedReviewMode` item with the final review text:
The `review` string is plain text that already bundles the overall explanation plus a bullet list for each structured finding (matching `ThreadItem::ExitedReviewMode` in the generated schema). Use this notification to render the reviewer output in your client.
### Example: One-off command execution
Run a standalone command (argv vector) in the server's sandbox without creating a thread or turn:
```json
{ "method": "command/exec", "id": 32, "params": {
"command": ["ls", "-la"],
"cwd": "/Users/me/project", // optional; defaults to server cwd
"sandboxPolicy": { "type": "workspaceWrite" }, // optional; defaults to user config
"timeoutMs": 10000 // optional; ms timeout; defaults to server timeout
} }
{ "id": 32, "result": { "exitCode": 0, "stdout": "...", "stderr": "" } }
```
Notes:
- Empty `command` arrays are rejected.
- `sandboxPolicy` accepts the same shape used by `turn/start` (e.g., `dangerFullAccess`, `readOnly`, `workspaceWrite` with flags).
- When omitted, `timeoutMs` falls back to the server default.
## Events
## Events (work-in-progress)
Event notifications are the server-initiated event stream for thread lifecycles, turn lifecycles, and the items within them. After you start or resume a thread, keep reading stdout for `thread/started`, `turn/*`, and `item/*` notifications.
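Notifications are JSON-RPC messages without an `id`; for example (payload shape illustrative):

```json
{ "method": "thread/started", "params": { "thread": { "id": "thr_123" } } }
```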
@@ -274,12 +251,11 @@ The app-server streams JSON-RPC notifications while a turn is running. Each turn
- `turn/started``{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
- `turn/completed``{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
- `turn/diff/updated``{ threadId, turnId, diff }` represents the up-to-date snapshot of the turn-level unified diff, emitted after every FileChange item. `diff` is the latest aggregated unified diff across every file change in the turn. UIs can render this to show the full "what changed" view without stitching individual `fileChange` items.
- `turn/plan/updated``{ turnId, explanation?, plan }` whenever the agent shares or changes its plan; each `plan` entry is `{ step, status }` with `status` in `pending`, `inProgress`, or `completed`.
Today both notifications carry an empty `items` array even when item events were streamed; rely on `item/*` notifications for the canonical item list until this is fixed.
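With the `TurnStatus` shape in this commit (`status` as a flattened tag, carrying `error` inline for failures), a failed completion would look roughly like this (the `codexErrorInfo` value is illustrative):

```json
{ "method": "turn/completed", "params": {
  "threadId": "thr_123",
  "turn": { "id": "turn_456", "items": [], "status": "failed", "error": { "message": "stream disconnected", "codexErrorInfo": "other" } }
} }
```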
#### Items
#### Thread items
`ThreadItem` is the tagged union carried in turn responses and `item/*` notifications. Currently we support events for the following items:
- `userMessage``{id, content}` where `content` is a list of user inputs (`text`, `image`, or `localImage`).
@@ -289,9 +265,6 @@ Today both notifications carry an empty `items` array even when item events were
- `fileChange``{id, changes, status}` describing proposed edits; `changes` list `{path, kind, diff}` and `status` is `inProgress`, `completed`, `failed`, or `declined`.
- `mcpToolCall``{id, server, tool, status, arguments, result?, error?}` describing MCP calls; `status` is `inProgress`, `completed`, or `failed`.
- `webSearch``{id, query}` for a web search request issued by the agent.
- `imageView``{id, path}` emitted when the agent invokes the image viewer tool.
- `enteredReviewMode``{id, review}` sent when the reviewer starts; `review` is a short user-facing label such as `"current changes"` or the requested target description.
- `exitedReviewMode``{id, review}` emitted when the reviewer finishes; `review` is the full plain-text review (usually, overall notes plus bullet point findings).
- `compacted` — `{threadId, turnId}` emitted when Codex compacts the conversation history. This can happen automatically.
All items emit two shared lifecycle events:
@@ -309,7 +282,7 @@ There are additional item-specific events:
- `item/commandExecution/outputDelta` — streams stdout/stderr for the command; append deltas in order to render live output alongside `aggregatedOutput` in the final item.
Final `commandExecution` items include parsed `commandActions`, `status`, `exitCode`, and `durationMs` so the UI can summarize what ran and whether it succeeded.
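A sketch of a final `item/completed` payload for a command (field set abbreviated and values illustrative):

```json
{ "method": "item/completed", "params": {
  "threadId": "thr_123",
  "turnId": "turn_456",
  "item": { "type": "commandExecution", "id": "call_1", "command": "cargo test", "status": "completed", "exitCode": 0, "durationMs": 1250 }
} }
```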
#### fileChange
- `item/fileChange/outputDelta` — contains the tool call response of the underlying `apply_patch` tool call.
`fileChange` items contain a `changes` list with `{path, kind, diff}` entries (`kind` is `add`, `delete`, or `update` with an optional `movePath`). The `status` tracks whether apply succeeded (`completed`), failed, or was `declined`.
### Errors
The `error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). It carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
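A sketch of the notification (field names per the `ErrorNotification` struct in this commit; the message is illustrative):

```json
{ "method": "error", "params": {
  "threadId": "thr_123",
  "turnId": "turn_456",
  "error": { "message": "Rate limit exceeded", "codexErrorInfo": null }
} }
```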
@@ -358,7 +331,7 @@ UI guidance for IDEs: surface an approval dialog as soon as the request arrives.
The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.
### API Overview
### Quick reference
- `account/read` — fetch current account info; optionally refresh tokens.
- `account/login/start` — begin login (`apiKey` or `chatgpt`).
- `account/login/completed` (notify) — emitted when a login attempt finishes (success or error).
@@ -443,3 +416,9 @@ Field notes:
- `usedPercent` is current usage within the OpenAI quota window.
- `windowDurationMins` is the quota window length.
- `resetsAt` is a Unix timestamp (seconds) for the next reset.
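Putting those together, a sketch of the rate-limits notification (the wrapper field name is assumed):

```json
{ "method": "account/rateLimits/updated", "params": {
  "rateLimits": {
    "primary": { "usedPercent": 42, "windowDurationMins": 300, "resetsAt": 1764633600 },
    "secondary": null,
    "credits": null
  }
} }
```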
### Dev notes
- `codex app-server generate-ts --out <dir>` emits v2 types under `v2/`.
- `codex app-server generate-json-schema --out <dir>` outputs `codex_app_server_protocol.schemas.json`.
- See [“Authentication and authorization” in the config docs](../../docs/config.md#authentication-and-authorization) for configuration knobs.

View File

@@ -18,7 +18,6 @@ use codex_app_server_protocol::ContextCompactedNotification;
use codex_app_server_protocol::ErrorNotification;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::ExecCommandApprovalResponse;
use codex_app_server_protocol::FileChangeOutputDeltaNotification;
use codex_app_server_protocol::FileChangeRequestApprovalParams;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::FileUpdateChange;
@@ -62,7 +61,6 @@ use codex_core::protocol::ReviewDecision;
use codex_core::protocol::TokenCountEvent;
use codex_core::protocol::TurnDiffEvent;
use codex_core::review_format::format_review_findings_block;
use codex_core::review_prompts;
use codex_protocol::ConversationId;
use codex_protocol::plan_tool::UpdatePlanArgs;
use codex_protocol::protocol::ReviewOutputEvent;
@@ -179,7 +177,6 @@ pub(crate) async fn apply_bespoke_event_handling(
cwd,
reason,
risk,
proposed_execpolicy_amendment: _,
parsed_cmd,
}) => match api_version {
ApiVersion::V1 => {
@@ -333,7 +330,6 @@ pub(crate) async fn apply_bespoke_event_handling(
outgoing
.send_server_notification(ServerNotification::Error(ErrorNotification {
error: turn_error,
will_retry: false,
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
}))
@@ -349,38 +345,13 @@ pub(crate) async fn apply_bespoke_event_handling(
outgoing
.send_server_notification(ServerNotification::Error(ErrorNotification {
error: turn_error,
will_retry: true,
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
}))
.await;
}
EventMsg::ViewImageToolCall(view_image_event) => {
let item = ThreadItem::ImageView {
id: view_image_event.call_id.clone(),
path: view_image_event.path.to_string_lossy().into_owned(),
};
let started = ItemStartedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item: item.clone(),
};
outgoing
.send_server_notification(ServerNotification::ItemStarted(started))
.await;
let completed = ItemCompletedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemCompleted(completed))
.await;
}
EventMsg::EnteredReviewMode(review_request) => {
let review = review_request
.user_facing_hint
.unwrap_or_else(|| review_prompts::user_facing_hint(&review_request.target));
let review = review_request.user_facing_hint;
let item = ThreadItem::EnteredReviewMode {
id: event_turn_id.clone(),
review,
@@ -530,44 +501,17 @@ pub(crate) async fn apply_bespoke_event_handling(
.await;
}
EventMsg::ExecCommandOutputDelta(exec_command_output_delta_event) => {
let item_id = exec_command_output_delta_event.call_id.clone();
let delta = String::from_utf8_lossy(&exec_command_output_delta_event.chunk).to_string();
// The underlying EventMsg::ExecCommandOutputDelta is used for shell, unified_exec,
// and apply_patch tool calls. We represent apply_patch with the FileChange item, and
// everything else with the CommandExecution item.
//
// We need to detect which item type it is so we can emit the right notification.
// We already have state tracking FileChange items on item/started, so let's use that.
let is_file_change = {
let map = turn_summary_store.lock().await;
map.get(&conversation_id)
.is_some_and(|summary| summary.file_change_started.contains(&item_id))
let notification = CommandExecutionOutputDeltaNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item_id: exec_command_output_delta_event.call_id.clone(),
delta: String::from_utf8_lossy(&exec_command_output_delta_event.chunk).to_string(),
};
if is_file_change {
let notification = FileChangeOutputDeltaNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item_id,
delta,
};
outgoing
.send_server_notification(ServerNotification::FileChangeOutputDelta(
notification,
))
.await;
} else {
let notification = CommandExecutionOutputDeltaNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item_id,
delta,
};
outgoing
.send_server_notification(ServerNotification::CommandExecutionOutputDelta(
notification,
))
.await;
}
outgoing
.send_server_notification(ServerNotification::CommandExecutionOutputDelta(
notification,
))
.await;
}
EventMsg::ExecCommandEnd(exec_command_end_event) => {
let ExecCommandEndEvent {
@@ -664,7 +608,6 @@ pub(crate) async fn apply_bespoke_event_handling(
}
EventMsg::PlanUpdate(plan_update_event) => {
handle_turn_plan_update(
conversation_id,
&event_turn_id,
plan_update_event,
api_version,
@@ -697,7 +640,6 @@ async fn handle_turn_diff(
}
async fn handle_turn_plan_update(
conversation_id: ConversationId,
event_turn_id: &str,
plan_update_event: UpdatePlanArgs,
api_version: ApiVersion,
@@ -705,7 +647,6 @@ async fn handle_turn_plan_update(
) {
if let ApiVersion::V2 = api_version {
let notification = TurnPlanUpdatedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.to_string(),
explanation: plan_update_event.explanation,
plan: plan_update_event
@@ -724,7 +665,6 @@ async fn emit_turn_completed_with_status(
conversation_id: ConversationId,
event_turn_id: String,
status: TurnStatus,
error: Option<TurnError>,
outgoing: &OutgoingMessageSender,
) {
let notification = TurnCompletedNotification {
@@ -732,7 +672,6 @@ async fn emit_turn_completed_with_status(
turn: Turn {
id: event_turn_id,
items: vec![],
error,
status,
},
};
@@ -821,12 +760,13 @@ async fn handle_turn_complete(
) {
let turn_summary = find_and_remove_turn_summary(conversation_id, turn_summary_store).await;
let (status, error) = match turn_summary.last_error {
Some(error) => (TurnStatus::Failed, Some(error)),
None => (TurnStatus::Completed, None),
let status = if let Some(error) = turn_summary.last_error {
TurnStatus::Failed { error }
} else {
TurnStatus::Completed
};
emit_turn_completed_with_status(conversation_id, event_turn_id, status, error, outgoing).await;
emit_turn_completed_with_status(conversation_id, event_turn_id, status, outgoing).await;
}
async fn handle_turn_interrupted(
@@ -841,7 +781,6 @@ async fn handle_turn_interrupted(
conversation_id,
event_turn_id,
TurnStatus::Interrupted,
None,
outgoing,
)
.await;
@@ -1180,7 +1119,6 @@ async fn construct_mcp_tool_call_notification(
arguments: begin_event.invocation.arguments.unwrap_or(JsonValue::Null),
result: None,
error: None,
duration_ms: None,
};
ItemStartedNotification {
thread_id,
@@ -1200,7 +1138,6 @@ async fn construct_mcp_tool_call_end_notification(
} else {
McpToolCallStatus::Failed
};
let duration_ms = i64::try_from(end_event.duration.as_millis()).ok();
let (result, error) = match &end_event.result {
Ok(value) => (
@@ -1226,7 +1163,6 @@ async fn construct_mcp_tool_call_end_notification(
arguments: end_event.invocation.arguments.unwrap_or(JsonValue::Null),
result,
error,
duration_ms,
};
ItemCompletedNotification {
thread_id,
@@ -1317,7 +1253,6 @@ mod tests {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, event_turn_id);
assert_eq!(n.turn.status, TurnStatus::Completed);
assert_eq!(n.turn.error, None);
}
other => bail!("unexpected message: {other:?}"),
}
@@ -1358,7 +1293,6 @@ mod tests {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, event_turn_id);
assert_eq!(n.turn.status, TurnStatus::Interrupted);
assert_eq!(n.turn.error, None);
}
other => bail!("unexpected message: {other:?}"),
}
@@ -1398,13 +1332,14 @@ mod tests {
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, event_turn_id);
assert_eq!(n.turn.status, TurnStatus::Failed);
assert_eq!(
n.turn.error,
Some(TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
})
n.turn.status,
TurnStatus::Failed {
error: TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
}
}
);
}
other => bail!("unexpected message: {other:?}"),
@@ -1431,16 +1366,7 @@ mod tests {
],
};
let conversation_id = ConversationId::new();
handle_turn_plan_update(
conversation_id,
"turn-123",
update,
ApiVersion::V2,
&outgoing,
)
.await;
handle_turn_plan_update("turn-123", update, ApiVersion::V2, &outgoing).await;
let msg = rx
.recv()
@@ -1448,7 +1374,6 @@ mod tests {
.ok_or_else(|| anyhow!("should send one notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnPlanUpdated(n)) => {
assert_eq!(n.thread_id, conversation_id.to_string());
assert_eq!(n.turn_id, "turn-123");
assert_eq!(n.explanation.as_deref(), Some("need plan"));
assert_eq!(n.plan.len(), 2);
@@ -1499,7 +1424,6 @@ mod tests {
unlimited: false,
balance: Some("5".to_string()),
}),
plan_type: None,
};
handle_token_count_event(
@@ -1604,7 +1528,6 @@ mod tests {
arguments: serde_json::json!({"server": ""}),
result: None,
error: None,
duration_ms: None,
},
};
@@ -1677,13 +1600,14 @@ mod tests {
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, a_turn1);
assert_eq!(n.turn.status, TurnStatus::Failed);
assert_eq!(
n.turn.error,
Some(TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
})
n.turn.status,
TurnStatus::Failed {
error: TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
}
}
);
}
other => bail!("unexpected message: {other:?}"),
@@ -1697,13 +1621,14 @@ mod tests {
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, b_turn1);
assert_eq!(n.turn.status, TurnStatus::Failed);
assert_eq!(
n.turn.error,
Some(TurnError {
message: "b1".to_string(),
codex_error_info: None,
})
n.turn.status,
TurnStatus::Failed {
error: TurnError {
message: "b1".to_string(),
codex_error_info: None,
}
}
);
}
other => bail!("unexpected message: {other:?}"),
@@ -1718,7 +1643,6 @@ mod tests {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, a_turn2);
assert_eq!(n.turn.status, TurnStatus::Completed);
assert_eq!(n.turn.error, None);
}
other => bail!("unexpected message: {other:?}"),
}
@@ -1758,7 +1682,6 @@ mod tests {
arguments: JsonValue::Null,
result: None,
error: None,
duration_ms: None,
},
};
@@ -1812,7 +1735,6 @@ mod tests {
structured_content: None,
}),
error: None,
duration_ms: Some(0),
},
};
@@ -1854,7 +1776,6 @@ mod tests {
error: Some(McpToolCallError {
message: "boom".to_string(),
}),
duration_ms: Some(1),
},
};

View File

@@ -21,9 +21,9 @@ use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginAccountResponse;
use codex_app_server_protocol::CancelLoginChatGptResponse;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::CommandExecParams;
use codex_app_server_protocol::ConversationGitInfo;
use codex_app_server_protocol::ConversationSummary;
use codex_app_server_protocol::ExecOneOffCommandParams;
use codex_app_server_protocol::ExecOneOffCommandResponse;
use codex_app_server_protocol::FeedbackUploadParams;
use codex_app_server_protocol::FeedbackUploadResponse;
@@ -45,8 +45,6 @@ use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::ListConversationsResponse;
use codex_app_server_protocol::ListMcpServersParams;
use codex_app_server_protocol::ListMcpServersResponse;
use codex_app_server_protocol::LoginAccountParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::LoginApiKeyResponse;
@@ -54,7 +52,6 @@ use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::LogoutChatGptResponse;
use codex_app_server_protocol::McpServer;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::NewConversationParams;
@@ -67,7 +64,7 @@ use codex_app_server_protocol::ResumeConversationResponse;
use codex_app_server_protocol::ReviewDelivery as ApiReviewDelivery;
use codex_app_server_protocol::ReviewStartParams;
use codex_app_server_protocol::ReviewStartResponse;
use codex_app_server_protocol::ReviewTarget as ApiReviewTarget;
use codex_app_server_protocol::ReviewTarget;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
@@ -122,14 +119,11 @@ use codex_core::exec_env::create_env;
use codex_core::features::Feature;
use codex_core::find_conversation_path_by_id_str;
use codex_core::git_info::git_diff_to_remote;
use codex_core::mcp::collect_mcp_snapshot;
use codex_core::mcp::group_tools_by_server;
use codex_core::parse_cursor;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol::ReviewDelivery as CoreReviewDelivery;
use codex_core::protocol::ReviewRequest;
use codex_core::protocol::ReviewTarget as CoreReviewTarget;
use codex_core::protocol::SessionConfiguredEvent;
use codex_core::read_head_for_summary;
use codex_feedback::CodexFeedback;
@@ -141,7 +135,6 @@ use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
use codex_protocol::protocol::McpAuthStatus as CoreMcpAuthStatus;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionMetaLine;
@@ -262,7 +255,7 @@ impl CodexMessageProcessor {
}
fn review_request_from_target(
target: ApiReviewTarget,
target: ReviewTarget,
) -> Result<(ReviewRequest, String), JSONRPCErrorError> {
fn invalid_request(message: String) -> JSONRPCErrorError {
JSONRPCErrorError {
@@ -272,52 +265,73 @@ impl CodexMessageProcessor {
}
}
let cleaned_target = match target {
ApiReviewTarget::UncommittedChanges => ApiReviewTarget::UncommittedChanges,
ApiReviewTarget::BaseBranch { branch } => {
match target {
// TODO(jif) those messages will be extracted in a follow-up PR.
ReviewTarget::UncommittedChanges => Ok((
ReviewRequest {
prompt: "Review the current code changes (staged, unstaged, and untracked files) and provide prioritized findings.".to_string(),
user_facing_hint: "current changes".to_string(),
},
"Review uncommitted changes".to_string(),
)),
ReviewTarget::BaseBranch { branch } => {
let branch = branch.trim().to_string();
if branch.is_empty() {
return Err(invalid_request("branch must not be empty".to_string()));
}
ApiReviewTarget::BaseBranch { branch }
let prompt = format!("Review the code changes against the base branch '{branch}'. Start by finding the merge diff between the current branch and {branch}'s upstream e.g. (`git merge-base HEAD \"$(git rev-parse --abbrev-ref \"{branch}@{{upstream}}\")\"`), then run `git diff` against that SHA to see what changes we would merge into the {branch} branch. Provide prioritized, actionable findings.");
let hint = format!("changes against '{branch}'");
let display = format!("Review changes against base branch '{branch}'");
Ok((
ReviewRequest {
prompt,
user_facing_hint: hint,
},
display,
))
}
ApiReviewTarget::Commit { sha, title } => {
ReviewTarget::Commit { sha, title } => {
let sha = sha.trim().to_string();
if sha.is_empty() {
return Err(invalid_request("sha must not be empty".to_string()));
}
let title = title
let brief_title = title
.map(|t| t.trim().to_string())
.filter(|t| !t.is_empty());
ApiReviewTarget::Commit { sha, title }
let prompt = if let Some(title) = brief_title.clone() {
format!("Review the code changes introduced by commit {sha} (\"{title}\"). Provide prioritized, actionable findings.")
} else {
format!("Review the code changes introduced by commit {sha}. Provide prioritized, actionable findings.")
};
let short_sha = sha.chars().take(7).collect::<String>();
let hint = format!("commit {short_sha}");
let display = if let Some(title) = brief_title {
format!("Review commit {short_sha}: {title}")
} else {
format!("Review commit {short_sha}")
};
Ok((
ReviewRequest {
prompt,
user_facing_hint: hint,
},
display,
))
}
ApiReviewTarget::Custom { instructions } => {
ReviewTarget::Custom { instructions } => {
let trimmed = instructions.trim().to_string();
if trimmed.is_empty() {
return Err(invalid_request(
"instructions must not be empty".to_string(),
));
}
ApiReviewTarget::Custom {
instructions: trimmed,
return Err(invalid_request("instructions must not be empty".to_string()));
}
Ok((
ReviewRequest {
prompt: trimmed.clone(),
user_facing_hint: trimmed.clone(),
},
trimmed,
))
}
};
let core_target = match cleaned_target {
ApiReviewTarget::UncommittedChanges => CoreReviewTarget::UncommittedChanges,
ApiReviewTarget::BaseBranch { branch } => CoreReviewTarget::BaseBranch { branch },
ApiReviewTarget::Commit { sha, title } => CoreReviewTarget::Commit { sha, title },
ApiReviewTarget::Custom { instructions } => CoreReviewTarget::Custom { instructions },
};
let hint = codex_core::review_prompts::user_facing_hint(&core_target);
let review_request = ReviewRequest {
target: core_target,
user_facing_hint: Some(hint.clone()),
};
Ok((review_request, hint))
}
}
pub async fn process_request(&mut self, request: ClientRequest) {
@@ -369,9 +383,6 @@ impl CodexMessageProcessor {
ClientRequest::ModelList { request_id, params } => {
self.list_models(request_id, params).await;
}
ClientRequest::McpServersList { request_id, params } => {
self.list_mcp_servers(request_id, params).await;
}
ClientRequest::LoginAccount { request_id, params } => {
self.login_v2(request_id, params).await;
}
@@ -456,11 +467,8 @@ impl CodexMessageProcessor {
ClientRequest::FuzzyFileSearch { request_id, params } => {
self.fuzzy_file_search(request_id, params).await;
}
ClientRequest::OneOffCommandExec { request_id, params } => {
self.exec_one_off_command(request_id, params).await;
}
ClientRequest::ExecOneOffCommand { request_id, params } => {
self.exec_one_off_command(request_id, params.into()).await;
self.exec_one_off_command(request_id, params).await;
}
ClientRequest::ConfigRead { .. }
| ClientRequest::ConfigValueWrite { .. }
@@ -1146,7 +1154,7 @@ impl CodexMessageProcessor {
}
}
async fn exec_one_off_command(&self, request_id: RequestId, params: CommandExecParams) {
async fn exec_one_off_command(&self, request_id: RequestId, params: ExecOneOffCommandParams) {
tracing::debug!("ExecOneOffCommand params: {params:?}");
if params.command.is_empty() {
@@ -1161,9 +1169,7 @@ impl CodexMessageProcessor {
let cwd = params.cwd.unwrap_or_else(|| self.config.cwd.clone());
let env = create_env(&self.config.shell_environment_policy);
let timeout_ms = params
.timeout_ms
.and_then(|timeout_ms| u64::try_from(timeout_ms).ok());
let timeout_ms = params.timeout_ms;
let exec_params = ExecParams {
command: params.command,
cwd,
@@ -1176,7 +1182,6 @@ impl CodexMessageProcessor {
let effective_policy = params
.sandbox_policy
.map(|policy| policy.to_core())
.unwrap_or_else(|| self.config.sandbox_policy.clone());
let codex_linux_sandbox_exe = self.config.codex_linux_sandbox_exe.clone();
@@ -1862,7 +1867,8 @@ impl CodexMessageProcessor {
async fn list_models(&self, request_id: RequestId, params: ModelListParams) {
let ModelListParams { limit, cursor } = params;
let models = supported_models(self.conversation_manager.clone()).await;
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
let models = supported_models(auth_mode);
let total = models.len();
if total == 0 {
@@ -1916,85 +1922,6 @@ impl CodexMessageProcessor {
self.outgoing.send_response(request_id, response).await;
}
async fn list_mcp_servers(&self, request_id: RequestId, params: ListMcpServersParams) {
let snapshot = collect_mcp_snapshot(self.config.as_ref()).await;
let tools_by_server = group_tools_by_server(&snapshot.tools);
let mut server_names: Vec<String> = self
.config
.mcp_servers
.keys()
.cloned()
.chain(snapshot.auth_statuses.keys().cloned())
.chain(snapshot.resources.keys().cloned())
.chain(snapshot.resource_templates.keys().cloned())
.collect();
server_names.sort();
server_names.dedup();
let total = server_names.len();
let limit = params.limit.unwrap_or(total as u32).max(1) as usize;
let effective_limit = limit.min(total);
let start = match params.cursor {
Some(cursor) => match cursor.parse::<usize>() {
Ok(idx) => idx,
Err(_) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid cursor: {cursor}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
},
None => 0,
};
if start > total {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("cursor {start} exceeds total MCP servers {total}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
let end = start.saturating_add(effective_limit).min(total);
let data: Vec<McpServer> = server_names[start..end]
.iter()
.map(|name| McpServer {
name: name.clone(),
tools: tools_by_server.get(name).cloned().unwrap_or_default(),
resources: snapshot.resources.get(name).cloned().unwrap_or_default(),
resource_templates: snapshot
.resource_templates
.get(name)
.cloned()
.unwrap_or_default(),
auth_status: snapshot
.auth_statuses
.get(name)
.cloned()
.unwrap_or(CoreMcpAuthStatus::Unsupported)
.into(),
})
.collect();
let next_cursor = if end < total {
Some(end.to_string())
} else {
None
};
let response = ListMcpServersResponse { data, next_cursor };
self.outgoing.send_response(request_id, response).await;
}
async fn handle_resume_conversation(
&self,
request_id: RequestId,
@@ -2542,7 +2469,6 @@ impl CodexMessageProcessor {
let turn = Turn {
id: turn_id.clone(),
items: vec![],
error: None,
status: TurnStatus::InProgress,
};
@@ -2584,7 +2510,6 @@ impl CodexMessageProcessor {
Turn {
id: turn_id,
items,
error: None,
status: TurnStatus::InProgress,
}
}
@@ -3020,26 +2945,10 @@ impl CodexMessageProcessor {
let FeedbackUploadParams {
classification,
reason,
thread_id,
conversation_id,
include_logs,
} = params;
let conversation_id = match thread_id.as_deref() {
Some(thread_id) => match ConversationId::from_string(thread_id) {
Ok(conversation_id) => Some(conversation_id),
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid thread id: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
},
None => None,
};
let snapshot = self.feedback.snapshot(conversation_id);
let thread_id = snapshot.thread_id.clone();

View File

@@ -109,17 +109,12 @@ impl ConfigApi {
async fn apply_edits(
&self,
file_path: Option<String>,
file_path: String,
expected_version: Option<String>,
edits: Vec<(String, JsonValue, MergeStrategy)>,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
let allowed_path = self.codex_home.join(CONFIG_FILE_NAME);
let provided_path = file_path
.as_ref()
.map(PathBuf::from)
.unwrap_or_else(|| allowed_path.clone());
if !paths_match(&allowed_path, &provided_path) {
if !paths_match(&allowed_path, &file_path) {
return Err(config_write_error(
ConfigWriteErrorCode::ConfigLayerReadonly,
"Only writes to the user config are allowed",
@@ -195,16 +190,9 @@ impl ConfigApi {
.map(|_| WriteStatus::OkOverridden)
.unwrap_or(WriteStatus::Ok);
let file_path = provided_path
.canonicalize()
.unwrap_or(provided_path.clone())
.display()
.to_string();
Ok(ConfigWriteResponse {
status,
version: updated_layers.user.version.clone(),
file_path,
overridden_metadata: overridden,
})
}
@@ -599,14 +587,15 @@ fn canonical_json(value: &JsonValue) -> JsonValue {
}
}
fn paths_match(expected: &Path, provided: &Path) -> bool {
fn paths_match(expected: &Path, provided: &str) -> bool {
let provided_path = PathBuf::from(provided);
if let (Ok(expanded_expected), Ok(expanded_provided)) =
(expected.canonicalize(), provided.canonicalize())
(expected.canonicalize(), provided_path.canonicalize())
{
return expanded_expected == expanded_provided;
}
expected == provided
expected == provided_path
}
fn value_at_path<'a>(root: &'a TomlValue, segments: &[String]) -> Option<&'a TomlValue> {
@@ -806,7 +795,7 @@ mod tests {
let result = api
.write_value(ConfigValueWriteParams {
file_path: Some(tmp.path().join(CONFIG_FILE_NAME).display().to_string()),
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("never"),
merge_strategy: MergeStrategy::Replace,
@@ -843,7 +832,7 @@ mod tests {
let api = ConfigApi::new(tmp.path().to_path_buf(), vec![]);
let error = api
.write_value(ConfigValueWriteParams {
file_path: Some(tmp.path().join(CONFIG_FILE_NAME).display().to_string()),
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-5"),
merge_strategy: MergeStrategy::Replace,
@@ -863,30 +852,6 @@ mod tests {
);
}
#[tokio::test]
async fn write_value_defaults_to_user_config_path() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "").unwrap();
let api = ConfigApi::new(tmp.path().to_path_buf(), vec![]);
api.write_value(ConfigValueWriteParams {
file_path: None,
key_path: "model".to_string(),
value: json!("gpt-new"),
merge_strategy: MergeStrategy::Replace,
expected_version: None,
})
.await
.expect("write succeeds");
let contents =
std::fs::read_to_string(tmp.path().join(CONFIG_FILE_NAME)).expect("read config");
assert!(
contents.contains("model = \"gpt-new\""),
"config.toml should be updated even when file_path is omitted"
);
}
#[tokio::test]
async fn invalid_user_value_rejected_even_if_overridden_by_managed() {
let tmp = tempdir().expect("tempdir");
@@ -907,7 +872,7 @@ mod tests {
let error = api
.write_value(ConfigValueWriteParams {
file_path: Some(tmp.path().join(CONFIG_FILE_NAME).display().to_string()),
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("bogus"),
merge_strategy: MergeStrategy::Replace,
@@ -992,7 +957,7 @@ mod tests {
let result = api
.write_value(ConfigValueWriteParams {
file_path: Some(tmp.path().join(CONFIG_FILE_NAME).display().to_string()),
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("on-request"),
merge_strategy: MergeStrategy::Replace,

View File

@@ -1,15 +1,12 @@
use std::sync::Arc;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_core::ConversationManager;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub async fn supported_models(conversation_manager: Arc<ConversationManager>) -> Vec<Model> {
conversation_manager
.list_models()
.await
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
builtin_model_presets(auth_mode)
.into_iter()
.map(model_from_preset)
.collect()
@@ -30,7 +27,7 @@ fn model_from_preset(preset: ModelPreset) -> Model {
}
fn reasoning_efforts_from_preset(
efforts: Vec<ReasoningEffortPreset>,
efforts: &'static [ReasoningEffortPreset],
) -> Vec<ReasoningEffortOption> {
efforts
.iter()

View File

@@ -16,9 +16,6 @@ use tracing::warn;
use crate::error_code::INTERNAL_ERROR_CODE;
#[cfg(test)]
use codex_protocol::account::PlanType;
/// Sends messages to the client and manages request callbacks.
pub(crate) struct OutgoingMessageSender {
next_request_id: AtomicI64,
@@ -233,7 +230,6 @@ mod tests {
}),
secondary: None,
credits: None,
plan_type: Some(PlanType::Plus),
},
});
@@ -249,8 +245,7 @@ mod tests {
"resetsAt": 123
},
"secondary": null,
"credits": null,
"planType": "plus"
"credits": null
}
},
}),

View File

@@ -23,10 +23,10 @@ use codex_app_server_protocol::SendUserTurnResponse;
use codex_app_server_protocol::ServerRequest;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;

View File

@@ -10,10 +10,10 @@ use codex_app_server_protocol::Tools;
use codex_app_server_protocol::UserSavedConfig;
use codex_core::protocol::AskForApproval;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use std::path::Path;

View File

@@ -206,7 +206,7 @@ model = "gpt-old"
let write_id = mcp
.send_config_value_write_request(ConfigValueWriteParams {
file_path: None,
file_path: codex_home.path().join("config.toml").display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-new"),
merge_strategy: MergeStrategy::Replace,
@@ -219,16 +219,8 @@ model = "gpt-old"
)
.await??;
let write: ConfigWriteResponse = to_response(write_resp)?;
let expected_file_path = codex_home
.path()
.join("config.toml")
.canonicalize()
.unwrap()
.display()
.to_string();
assert_eq!(write.status, WriteStatus::Ok);
assert_eq!(write.file_path, expected_file_path);
assert!(write.overridden_metadata.is_none());
let verify_id = mcp
@@ -262,7 +254,7 @@ model = "gpt-old"
let write_id = mcp
.send_config_value_write_request(ConfigValueWriteParams {
file_path: Some(codex_home.path().join("config.toml").display().to_string()),
file_path: codex_home.path().join("config.toml").display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-new"),
merge_strategy: MergeStrategy::Replace,
@@ -296,7 +288,7 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
let batch_id = mcp
.send_config_batch_write_request(ConfigBatchWriteParams {
file_path: Some(codex_home.path().join("config.toml").display().to_string()),
file_path: codex_home.path().join("config.toml").display().to_string(),
edits: vec![
ConfigEdit {
key_path: "sandbox_mode".to_string(),
@@ -322,14 +314,6 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
.await??;
let batch_write: ConfigWriteResponse = to_response(batch_resp)?;
assert_eq!(batch_write.status, WriteStatus::Ok);
let expected_file_path = codex_home
.path()
.join("config.toml")
.canonicalize()
.unwrap()
.display()
.to_string();
assert_eq!(batch_write.file_path, expected_file_path);
let read_id = mcp
.send_config_read_request(ConfigReadParams {

View File

@@ -11,7 +11,7 @@ use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_app_server_protocol::RequestId;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::config_types::ReasoningEffort;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;

View File

@@ -11,7 +11,6 @@ use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_protocol::account::PlanType as AccountPlanType;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
@@ -154,7 +153,6 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
resets_at: Some(secondary_reset_timestamp),
}),
credits: None,
plan_type: Some(AccountPlanType::Pro),
},
};
assert_eq!(received, expected);

View File

@@ -93,7 +93,7 @@ async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()
match started.item {
ThreadItem::EnteredReviewMode { id, review } => {
assert_eq!(id, turn_id);
assert_eq!(review, "commit 1234567: Tidy UI colors");
assert_eq!(review, "commit 1234567");
saw_entered_review_mode = true;
break;
}

View File

@@ -11,7 +11,6 @@ use app_test_support::to_response;
use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::FileChangeOutputDeltaNotification;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
@@ -30,8 +29,8 @@ use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
@@ -726,26 +725,6 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
)
.await?;
let output_delta_notif = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("item/fileChange/outputDelta"),
)
.await??;
let output_delta: FileChangeOutputDeltaNotification = serde_json::from_value(
output_delta_notif
.params
.clone()
.expect("item/fileChange/outputDelta params"),
)?;
assert_eq!(output_delta.thread_id, thread.id);
assert_eq!(output_delta.turn_id, turn.id);
assert_eq!(output_delta.item_id, "patch-call");
assert!(
!output_delta.delta.is_empty(),
"expected delta to be non-empty, got: {}",
output_delta.delta
);
let completed_file_change = timeout(DEFAULT_READ_TIMEOUT, async {
loop {
let completed_notif = mcp

View File

@@ -1 +0,0 @@
** text eol=lf

View File

@@ -1 +0,0 @@
This is a new file

View File

@@ -1,4 +0,0 @@
*** Begin Patch
*** Add File: bar.md
+This is a new file
*** End Patch

View File

@@ -1,9 +0,0 @@
*** Begin Patch
*** Add File: nested/new.txt
+created
*** Delete File: delete.txt
*** Update File: modify.txt
@@
-line2
+changed
*** End Patch

View File

@@ -1,4 +0,0 @@
line1
changed2
line3
changed4

View File

@@ -1,4 +0,0 @@
line1
line2
line3
line4

View File

@@ -1,9 +0,0 @@
*** Begin Patch
*** Update File: multi.txt
@@
-line2
+changed2
@@
-line4
+changed4
*** End Patch

View File

@@ -1,7 +0,0 @@
*** Begin Patch
*** Update File: old/name.txt
*** Move to: renamed/dir/name.txt
@@
-old content
+new content
*** End Patch

View File

@@ -1,2 +0,0 @@
*** Begin Patch
*** End Patch

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: modify.txt
@@
-missing
+changed
*** End Patch

View File

@@ -1,3 +0,0 @@
*** Begin Patch
*** Delete File: missing.txt
*** End Patch

View File

@@ -1,3 +0,0 @@
*** Begin Patch
*** Update File: foo.txt
*** End Patch

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: missing.txt
@@
-old
+new
*** End Patch

View File

@@ -1,7 +0,0 @@
*** Begin Patch
*** Update File: old/name.txt
*** Move to: renamed/dir/name.txt
@@
-from
+new
*** End Patch

View File

@@ -1,4 +0,0 @@
*** Begin Patch
*** Add File: duplicate.txt
+new content
*** End Patch

View File

@@ -1,3 +0,0 @@
*** Begin Patch
*** Delete File: dir
*** End Patch

View File

@@ -1,3 +0,0 @@
*** Begin Patch
*** Frobnicate File: foo
*** End Patch

View File

@@ -1,7 +0,0 @@
*** Begin Patch
*** Update File: no_newline.txt
@@
-no newline at end
+first line
+second line
*** End Patch

View File

@@ -1,8 +0,0 @@
*** Begin Patch
*** Add File: created.txt
+hello
*** Update File: missing.txt
@@
-old
+new
*** End Patch

View File

@@ -1,4 +0,0 @@
line1
line2
added line 1
added line 2

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: input.txt
@@
+added line 1
+added line 2
*** End Patch

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: foo.txt
@@
-old
+new
*** End Patch

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: file.txt
@@
-one
+two
*** End Patch

View File

@@ -1,18 +0,0 @@
# Overview
This directory is a collection of end-to-end tests for the apply-patch specification, meant to be easily portable to other languages or platforms.
# Specification
Each test case is one directory, composed of input state (input/), the patch operation (patch.txt), and the expected final state (expected/). This structure is designed to keep tests simple (i.e. each test exercises exactly one patch at a time) while still providing enough flexibility to test any given operation across files.
Here's what this would look like for a simple apply-patch test case that creates a new file (the corresponding patch.txt contents are sketched below the listing):
```
001_add/
input/
foo.md
expected/
foo.md
bar.md
patch.txt
```
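For reference, the patch.txt for a case like this holds a single Add File operation in the apply-patch envelope format; the fixture files deleted earlier in this diff show the exact shape (file names follow the layout above):

```
*** Begin Patch
*** Add File: bar.md
+This is a new file
*** End Patch
```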

View File

@@ -1,4 +1,3 @@
mod cli;
mod scenarios;
#[cfg(not(target_os = "windows"))]
mod tool;

View File

@@ -1,114 +0,0 @@
use assert_cmd::prelude::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use tempfile::tempdir;
#[test]
fn test_apply_patch_scenarios() -> anyhow::Result<()> {
for scenario in fs::read_dir("tests/fixtures/scenarios")? {
let scenario = scenario?;
let path = scenario.path();
if path.is_dir() {
run_apply_patch_scenario(&path)?;
}
}
Ok(())
}
/// Reads a scenario directory, copies the input files to a temporary directory, runs apply-patch,
/// and asserts that the final state matches the expected state exactly.
fn run_apply_patch_scenario(dir: &Path) -> anyhow::Result<()> {
let tmp = tempdir()?;
// Copy the input files to the temporary directory
let input_dir = dir.join("input");
if input_dir.is_dir() {
copy_dir_recursive(&input_dir, tmp.path())?;
}
// Read the patch.txt file
let patch = fs::read_to_string(dir.join("patch.txt"))?;
// Run apply_patch in the temporary directory. We intentionally do not assert
// on the exit status here; the scenarios are specified purely in terms of
// final filesystem state, which we compare below.
Command::cargo_bin("apply_patch")?
.arg(patch)
.current_dir(tmp.path())
.output()?;
// Assert that the final state matches the expected state exactly
let expected_dir = dir.join("expected");
let expected_snapshot = snapshot_dir(&expected_dir)?;
let actual_snapshot = snapshot_dir(tmp.path())?;
assert_eq!(
actual_snapshot,
expected_snapshot,
"Scenario {} did not match expected final state",
dir.display()
);
Ok(())
}
#[derive(Debug, Clone, PartialEq, Eq)]
enum Entry {
File(Vec<u8>),
Dir,
}
fn snapshot_dir(root: &Path) -> anyhow::Result<BTreeMap<PathBuf, Entry>> {
let mut entries = BTreeMap::new();
if root.is_dir() {
snapshot_dir_recursive(root, root, &mut entries)?;
}
Ok(entries)
}
fn snapshot_dir_recursive(
base: &Path,
dir: &Path,
entries: &mut BTreeMap<PathBuf, Entry>,
) -> anyhow::Result<()> {
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
let Some(stripped) = path.strip_prefix(base).ok() else {
continue;
};
let rel = stripped.to_path_buf();
let file_type = entry.file_type()?;
if file_type.is_dir() {
entries.insert(rel.clone(), Entry::Dir);
snapshot_dir_recursive(base, &path, entries)?;
} else if file_type.is_file() {
let contents = fs::read(&path)?;
entries.insert(rel, Entry::File(contents));
}
}
Ok(())
}
fn copy_dir_recursive(src: &Path, dst: &Path) -> anyhow::Result<()> {
for entry in fs::read_dir(src)? {
let entry = entry?;
let path = entry.path();
let file_type = entry.file_type()?;
let dest_path = dst.join(entry.file_name());
if file_type.is_dir() {
fs::create_dir_all(&dest_path)?;
copy_dir_recursive(&path, &dest_path)?;
} else if file_type.is_file() {
if let Some(parent) = dest_path.parent() {
fs::create_dir_all(parent)?;
}
fs::copy(&path, &dest_path)?;
}
}
Ok(())
}

View File

@@ -7,7 +7,6 @@ use crate::types::TurnAttemptsSiblingTurnsResponse;
use anyhow::Result;
use codex_core::auth::CodexAuth;
use codex_core::default_client::get_codex_user_agent;
use codex_protocol::account::PlanType as AccountPlanType;
use codex_protocol::protocol::CreditsSnapshot;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
@@ -292,7 +291,6 @@ impl Client {
primary,
secondary,
credits: Self::map_credits(payload.credits),
plan_type: Some(Self::map_plan_type(payload.plan_type)),
}
}
@@ -327,23 +325,6 @@ impl Client {
})
}
fn map_plan_type(plan_type: crate::types::PlanType) -> AccountPlanType {
match plan_type {
crate::types::PlanType::Free => AccountPlanType::Free,
crate::types::PlanType::Plus => AccountPlanType::Plus,
crate::types::PlanType::Pro => AccountPlanType::Pro,
crate::types::PlanType::Team => AccountPlanType::Team,
crate::types::PlanType::Business => AccountPlanType::Business,
crate::types::PlanType::Enterprise => AccountPlanType::Enterprise,
crate::types::PlanType::Edu | crate::types::PlanType::Education => AccountPlanType::Edu,
crate::types::PlanType::Guest
| crate::types::PlanType::Go
| crate::types::PlanType::FreeWorkspace
| crate::types::PlanType::Quorum
| crate::types::PlanType::K12 => AccountPlanType::Unknown,
}
}
fn window_minutes_from_seconds(seconds: i32) -> Option<i64> {
if seconds <= 0 {
return None;

View File

@@ -18,8 +18,6 @@ use codex_cli::login::run_logout;
use codex_cloud_tasks::Cli as CloudTasksCli;
use codex_common::CliConfigOverrides;
use codex_exec::Cli as ExecCli;
use codex_exec::Command as ExecCommand;
use codex_exec::ReviewArgs;
use codex_execpolicy::ExecPolicyCheckCommand;
use codex_responses_api_proxy::Args as ResponsesApiProxyArgs;
use codex_tui::AppExitInfo;
@@ -74,9 +72,6 @@ enum Subcommand {
#[clap(visible_alias = "e")]
Exec(ExecCli),
/// Run a code review non-interactively.
Review(ReviewArgs),
/// Manage login.
Login(LoginCommand),
@@ -454,15 +449,6 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
);
codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?;
}
Some(Subcommand::Review(review_args)) => {
let mut exec_cli = ExecCli::try_parse_from(["codex", "exec"])?;
exec_cli.command = Some(ExecCommand::Review(review_args));
prepend_config_flags(
&mut exec_cli.config_overrides,
root_config_overrides.clone(),
);
codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?;
}
Some(Subcommand::McpServer) => {
codex_mcp_server::run_main(codex_linux_sandbox_exe, root_config_overrides).await?;
}

View File

@@ -40,15 +40,17 @@ prefix_rule(
assert_eq!(
result,
json!({
"decision": "forbidden",
"matchedRules": [
{
"prefixRuleMatch": {
"matchedPrefix": ["git", "push"],
"decision": "forbidden"
"match": {
"decision": "forbidden",
"matchedRules": [
{
"prefixRuleMatch": {
"matchedPrefix": ["git", "push"],
"decision": "forbidden"
}
}
}
]
]
}
})
);
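Untangled from the interleaved hunk above, the new expected value simply nests the previous payload under a `match` key (reconstruction; whitespace mine):

```json
{
  "match": {
    "decision": "forbidden",
    "matchedRules": [
      {
        "prefixRuleMatch": {
          "matchedPrefix": ["git", "push"],
          "decision": "forbidden"
        }
      }
    ]
  }
}
```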

View File

@@ -28,10 +28,6 @@ pub struct ExecCommand {
#[arg(long = "env", value_name = "ENV_ID")]
pub environment: String,
/// Git branch to run in Codex Cloud.
#[arg(long = "branch", value_name = "BRANCH", default_value = "main")]
pub branch: String,
/// Number of assistant attempts (best-of-N).
#[arg(
long = "attempts",

View File

@@ -101,7 +101,6 @@ async fn run_exec_command(args: crate::cli::ExecCommand) -> anyhow::Result<()> {
let crate::cli::ExecCommand {
query,
environment,
branch,
attempts,
} = args;
let ctx = init_backend("codex_cloud_tasks_exec").await?;
@@ -111,7 +110,7 @@ async fn run_exec_command(args: crate::cli::ExecCommand) -> anyhow::Result<()> {
&*ctx.backend,
&env_id,
&prompt,
&branch,
"main",
false,
attempts,
)

View File

@@ -25,8 +25,6 @@ anyhow = { workspace = true }
assert_matches = { workspace = true }
pretty_assertions = { workspace = true }
tokio-test = { workspace = true }
wiremock = { workspace = true }
reqwest = { workspace = true }
[lints]
workspace = true

View File

@@ -1,8 +1,8 @@
use crate::error::ApiError;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::config_types::Verbosity as VerbosityConfig;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::TokenUsage;
use futures::Stream;

View File

@@ -1,5 +1,4 @@
pub mod chat;
pub mod compact;
pub mod models;
pub mod responses;
mod streaming;

View File

@@ -1,216 +0,0 @@
use crate::auth::AuthProvider;
use crate::auth::add_auth_headers;
use crate::error::ApiError;
use crate::provider::Provider;
use crate::telemetry::run_with_request_telemetry;
use codex_client::HttpTransport;
use codex_client::RequestTelemetry;
use codex_protocol::openai_models::ModelsResponse;
use http::HeaderMap;
use http::Method;
use std::sync::Arc;
pub struct ModelsClient<T: HttpTransport, A: AuthProvider> {
transport: T,
provider: Provider,
auth: A,
request_telemetry: Option<Arc<dyn RequestTelemetry>>,
}
impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
pub fn new(transport: T, provider: Provider, auth: A) -> Self {
Self {
transport,
provider,
auth,
request_telemetry: None,
}
}
pub fn with_telemetry(mut self, request: Option<Arc<dyn RequestTelemetry>>) -> Self {
self.request_telemetry = request;
self
}
fn path(&self) -> &'static str {
"models"
}
pub async fn list_models(
&self,
client_version: &str,
extra_headers: HeaderMap,
) -> Result<ModelsResponse, ApiError> {
let builder = || {
let mut req = self.provider.build_request(Method::GET, self.path());
req.headers.extend(extra_headers.clone());
let separator = if req.url.contains('?') { '&' } else { '?' };
req.url = format!("{}{}client_version={client_version}", req.url, separator);
add_auth_headers(&self.auth, req)
};
let resp = run_with_request_telemetry(
self.provider.retry.to_policy(),
self.request_telemetry.clone(),
builder,
|req| self.transport.execute(req),
)
.await?;
serde_json::from_slice::<ModelsResponse>(&resp.body).map_err(|e| {
ApiError::Stream(format!(
"failed to decode models response: {e}; body: {}",
String::from_utf8_lossy(&resp.body)
))
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::provider::RetryConfig;
use crate::provider::WireApi;
use async_trait::async_trait;
use codex_client::Request;
use codex_client::Response;
use codex_client::StreamResponse;
use codex_client::TransportError;
use http::HeaderMap;
use http::StatusCode;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
#[derive(Clone, Default)]
struct CapturingTransport {
last_request: Arc<Mutex<Option<Request>>>,
body: Arc<ModelsResponse>,
}
#[async_trait]
impl HttpTransport for CapturingTransport {
async fn execute(&self, req: Request) -> Result<Response, TransportError> {
*self.last_request.lock().unwrap() = Some(req);
let body = serde_json::to_vec(&*self.body).unwrap();
Ok(Response {
status: StatusCode::OK,
headers: HeaderMap::new(),
body: body.into(),
})
}
async fn stream(&self, _req: Request) -> Result<StreamResponse, TransportError> {
Err(TransportError::Build("stream should not run".to_string()))
}
}
#[derive(Clone, Default)]
struct DummyAuth;
impl AuthProvider for DummyAuth {
fn bearer_token(&self) -> Option<String> {
None
}
}
fn provider(base_url: &str) -> Provider {
Provider {
name: "test".to_string(),
base_url: base_url.to_string(),
query_params: None,
wire: WireApi::Responses,
headers: HeaderMap::new(),
retry: RetryConfig {
max_attempts: 1,
base_delay: Duration::from_millis(1),
retry_429: false,
retry_5xx: true,
retry_transport: true,
},
stream_idle_timeout: Duration::from_secs(1),
}
}
#[tokio::test]
async fn appends_client_version_query() {
let response = ModelsResponse { models: Vec::new() };
let transport = CapturingTransport {
last_request: Arc::new(Mutex::new(None)),
body: Arc::new(response),
};
let client = ModelsClient::new(
transport.clone(),
provider("https://example.com/api/codex"),
DummyAuth,
);
let result = client
.list_models("0.99.0", HeaderMap::new())
.await
.expect("request should succeed");
assert_eq!(result.models.len(), 0);
let url = transport
.last_request
.lock()
.unwrap()
.as_ref()
.unwrap()
.url
.clone();
assert_eq!(
url,
"https://example.com/api/codex/models?client_version=0.99.0"
);
}
#[tokio::test]
async fn parses_models_response() {
let response = ModelsResponse {
models: vec![
serde_json::from_value(json!({
"slug": "gpt-test",
"display_name": "gpt-test",
"description": "desc",
"default_reasoning_level": "medium",
"supported_reasoning_levels": ["low", "medium", "high"],
"shell_type": "shell_command",
"visibility": "list",
"minimal_client_version": [0, 99, 0],
"supported_in_api": true,
"priority": 1
}))
.unwrap(),
],
};
let transport = CapturingTransport {
last_request: Arc::new(Mutex::new(None)),
body: Arc::new(response),
};
let client = ModelsClient::new(
transport,
provider("https://example.com/api/codex"),
DummyAuth,
);
let result = client
.list_models("0.99.0", HeaderMap::new())
.await
.expect("request should succeed");
assert_eq!(result.models.len(), 1);
assert_eq!(result.models[0].slug, "gpt-test");
assert_eq!(result.models[0].supported_in_api, true);
assert_eq!(result.models[0].priority, 1);
}
}

View File

@@ -22,7 +22,6 @@ pub use crate::common::create_text_param_for_request;
pub use crate::endpoint::chat::AggregateStreamExt;
pub use crate::endpoint::chat::ChatClient;
pub use crate::endpoint::compact::CompactClient;
pub use crate::endpoint::models::ModelsClient;
pub use crate::endpoint::responses::ResponsesClient;
pub use crate::endpoint::responses::ResponsesOptions;
pub use crate::error::ApiError;

View File

@@ -37,7 +37,6 @@ pub fn parse_rate_limit(headers: &HeaderMap) -> Option<RateLimitSnapshot> {
primary,
secondary,
credits,
plan_type: None,
})
}

View File

@@ -161,10 +161,8 @@ pub async fn process_chat_sse<S>(
}
if let Some(func) = tool_call.get("function") {
if let Some(fname) = func.get("name").and_then(|n| n.as_str())
&& !fname.is_empty()
{
call_state.name.get_or_insert_with(|| fname.to_string());
if let Some(fname) = func.get("name").and_then(|n| n.as_str()) {
call_state.name = Some(fname.to_string());
}
if let Some(arguments) = func.get("arguments").and_then(|a| a.as_str())
{
@@ -434,47 +432,6 @@ mod tests {
);
}
#[tokio::test]
async fn preserves_tool_call_name_when_empty_deltas_arrive() {
let delta_with_name = json!({
"choices": [{
"delta": {
"tool_calls": [{
"id": "call_a",
"function": { "name": "do_a" }
}]
}
}]
});
let delta_with_empty_name = json!({
"choices": [{
"delta": {
"tool_calls": [{
"id": "call_a",
"function": { "name": "", "arguments": "{}" }
}]
}
}]
});
let finish = json!({
"choices": [{
"finish_reason": "tool_calls"
}]
});
let body = build_body(&[delta_with_name, delta_with_empty_name, finish]);
let events = collect_events(&body).await;
assert_matches!(
&events[..],
[
ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { name, arguments, .. }),
ResponseEvent::Completed { .. }
] if name == "do_a" && arguments == "{}"
);
}
#[tokio::test]
async fn emits_tool_calls_even_when_content_and_reasoning_present() {
let delta_content_and_tools = json!({

View File

@@ -1,100 +0,0 @@
use codex_api::AuthProvider;
use codex_api::ModelsClient;
use codex_api::provider::Provider;
use codex_api::provider::RetryConfig;
use codex_api::provider::WireApi;
use codex_client::ReqwestTransport;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use http::HeaderMap;
use http::Method;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
#[derive(Clone, Default)]
struct DummyAuth;
impl AuthProvider for DummyAuth {
fn bearer_token(&self) -> Option<String> {
None
}
}
fn provider(base_url: &str) -> Provider {
Provider {
name: "test".to_string(),
base_url: base_url.to_string(),
query_params: None,
wire: WireApi::Responses,
headers: HeaderMap::new(),
retry: RetryConfig {
max_attempts: 1,
base_delay: std::time::Duration::from_millis(1),
retry_429: false,
retry_5xx: true,
retry_transport: true,
},
stream_idle_timeout: std::time::Duration::from_secs(1),
}
}
#[tokio::test]
async fn models_client_hits_models_endpoint() {
let server = MockServer::start().await;
let base_url = format!("{}/api/codex", server.uri());
let response = ModelsResponse {
models: vec![ModelInfo {
slug: "gpt-test".to_string(),
display_name: "gpt-test".to_string(),
description: Some("desc".to_string()),
default_reasoning_level: ReasoningEffort::Medium,
supported_reasoning_levels: vec![
ReasoningEffort::Low,
ReasoningEffort::Medium,
ReasoningEffort::High,
],
shell_type: ConfigShellToolType::ShellCommand,
visibility: ModelVisibility::List,
minimal_client_version: ClientVersion(0, 1, 0),
supported_in_api: true,
priority: 1,
}],
};
Mock::given(method("GET"))
.and(path("/api/codex/models"))
.respond_with(
ResponseTemplate::new(200)
.insert_header("content-type", "application/json")
.set_body_json(&response),
)
.mount(&server)
.await;
let transport = ReqwestTransport::new(reqwest::Client::new());
let client = ModelsClient::new(transport, provider(&base_url), DummyAuth);
let result = client
.list_models("0.1.0", HeaderMap::new())
.await
.expect("models request should succeed");
assert_eq!(result.models.len(), 1);
assert_eq!(result.models[0].slug, "gpt-test");
let received = server
.received_requests()
.await
.expect("should capture requests");
assert_eq!(received.len(), 1);
assert_eq!(received[0].method, Method::GET.as_str());
assert_eq!(received[0].url.path(), "/api/codex/models");
}

View File

@@ -1,22 +1,21 @@
[package]
edition.workspace = true
license.workspace = true
name = "codex-client"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
async-trait = { workspace = true }
bytes = { workspace = true }
eventsource-stream = { workspace = true }
futures = { workspace = true }
http = { workspace = true }
rand = { workspace = true }
reqwest = { workspace = true, features = ["json", "stream"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "time", "sync"] }
tracing = { workspace = true }
rand = { workspace = true }
eventsource-stream = { workspace = true }
[lints]
workspace = true

View File

@@ -8,9 +8,6 @@ use futures::stream::BoxStream;
use http::HeaderMap;
use http::Method;
use http::StatusCode;
use tracing::Level;
use tracing::enabled;
use tracing::trace;
pub type ByteStream = BoxStream<'static, Result<Bytes, TransportError>>;
@@ -86,15 +83,6 @@ impl HttpTransport for ReqwestTransport {
}
async fn stream(&self, req: Request) -> Result<StreamResponse, TransportError> {
if enabled!(Level::TRACE) {
trace!(
"{} to {}: {}",
req.method,
req.url,
req.body.as_ref().unwrap_or_default()
);
}
let builder = self.build(req)?;
let resp = builder.send().await.map_err(Self::map_error)?;
let status = resp.status();

View File

@@ -9,10 +9,12 @@ workspace = true
[dependencies]
clap = { workspace = true, features = ["derive", "wrap_help"], optional = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true }
codex-lmstudio = { workspace = true }
codex-ollama = { workspace = true }
codex-protocol = { workspace = true }
once_cell = { workspace = true }
serde = { workspace = true, optional = true }
toml = { workspace = true, optional = true }

View File

@@ -12,14 +12,15 @@ pub fn create_config_summary_entries(config: &Config) -> Vec<(&'static str, Stri
("approval", config.approval_policy.to_string()),
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
];
if config.model_provider.wire_api == WireApi::Responses {
if config.model_provider.wire_api == WireApi::Responses
&& config.model_family.supports_reasoning_summaries
{
let reasoning_effort = config
.model_reasoning_effort
.map(|effort| effort.to_string());
entries.push((
"reasoning effort",
reasoning_effort.unwrap_or_else(|| "none".to_string()),
));
.or(config.model_family.default_reasoning_effort)
.map(|effort| effort.to_string())
.unwrap_or_else(|| "none".to_string());
entries.push(("reasoning effort", reasoning_effort));
entries.push((
"reasoning summaries",
config.model_reasoning_summary.to_string(),

View File

@@ -32,6 +32,8 @@ mod config_summary;
pub use config_summary::create_config_summary_entries;
// Shared fuzzy matcher (used by TUI selection popups and other UI filtering)
pub mod fuzzy_match;
// Shared model presets used by TUI and MCP server
pub mod model_presets;
// Shared approval presets (AskForApproval + Sandbox) used by TUI and MCP server
// Not to be confused with AskForApproval, which we should probably rename to EscalationPolicy.
pub mod approval_presets;

View File

@@ -1,38 +1,76 @@
use std::collections::HashMap;
use codex_app_server_protocol::AuthMode;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_core::protocol_config_types::ReasoningEffort;
use once_cell::sync::Lazy;
pub const HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt5_1_migration_prompt";
pub const HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG: &str =
"hide_gpt-5.1-codex-max_migration_prompt";
/// A reasoning effort option that can be surfaced for a model.
#[derive(Debug, Clone, Copy)]
pub struct ReasoningEffortPreset {
/// Effort level that the model supports.
pub effort: ReasoningEffort,
/// Short human description shown next to the effort in UIs.
pub description: &'static str,
}
#[derive(Debug, Clone)]
pub struct ModelUpgrade {
pub id: &'static str,
pub reasoning_effort_mapping: Option<HashMap<ReasoningEffort, ReasoningEffort>>,
pub migration_config_key: &'static str,
}
/// Metadata describing a Codex-supported model.
#[derive(Debug, Clone)]
pub struct ModelPreset {
/// Stable identifier for the preset.
pub id: &'static str,
/// Model slug (e.g., "gpt-5").
pub model: &'static str,
/// Display name shown in UIs.
pub display_name: &'static str,
/// Short human description shown in UIs.
pub description: &'static str,
/// Reasoning effort applied when none is explicitly chosen.
pub default_reasoning_effort: ReasoningEffort,
/// Supported reasoning effort options.
pub supported_reasoning_efforts: &'static [ReasoningEffortPreset],
/// Whether this is the default model for new users.
pub is_default: bool,
/// Recommended upgrade model.
pub upgrade: Option<ModelUpgrade>,
/// Whether this preset should appear in the picker UI.
pub show_in_picker: bool,
}
static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
vec![
ModelPreset {
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
display_name: "gpt-5.1-codex-max".to_string(),
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
id: "gpt-5.1-codex-max",
model: "gpt-5.1-codex-max",
display_name: "gpt-5.1-codex-max",
description: "Latest Codex-optimized flagship for deep and fast reasoning.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning".to_string(),
description: "Fast responses with lighter reasoning",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks".to_string(),
description: "Balances speed and reasoning depth for everyday tasks",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex problems".to_string(),
description: "Maximizes reasoning depth for complex problems",
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
description: "Extra high reasoning depth for complex problems",
},
],
is_default: true,
@@ -40,184 +78,184 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
show_in_picker: true,
},
ModelPreset {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
display_name: "gpt-5.1-codex".to_string(),
description: "Optimized for codex.".to_string(),
id: "gpt-5.1-codex",
model: "gpt-5.1-codex",
display_name: "gpt-5.1-codex",
description: "Optimized for codex.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
description: "Fastest responses with limited reasoning",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "gpt-5.1-codex-max",
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: true,
},
ModelPreset {
id: "gpt-5.1-codex-mini".to_string(),
model: "gpt-5.1-codex-mini".to_string(),
display_name: "gpt-5.1-codex-mini".to_string(),
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
id: "gpt-5.1-codex-mini",
model: "gpt-5.1-codex-mini",
display_name: "gpt-5.1-codex-mini",
description: "Optimized for codex. Cheaper, faster, but less capable.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "gpt-5.1-codex-max",
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: true,
},
ModelPreset {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
id: "gpt-5.1",
model: "gpt-5.1",
display_name: "gpt-5.1",
description: "Broad world knowledge with strong general reasoning.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(),
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(),
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "gpt-5.1-codex-max",
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: true,
},
// Deprecated models.
ModelPreset {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for codex.".to_string(),
id: "gpt-5-codex",
model: "gpt-5-codex",
display_name: "gpt-5-codex",
description: "Optimized for codex.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
description: "Fastest responses with limited reasoning",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "gpt-5.1-codex-max",
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: false,
},
ModelPreset {
id: "gpt-5-codex-mini".to_string(),
model: "gpt-5-codex-mini".to_string(),
display_name: "gpt-5-codex-mini".to_string(),
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
id: "gpt-5-codex-mini",
model: "gpt-5-codex-mini",
display_name: "gpt-5-codex-mini",
description: "Optimized for codex. Cheaper, faster, but less capable.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-mini".to_string(),
id: "gpt-5.1-codex-mini",
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: false,
},
ModelPreset {
id: "gpt-5".to_string(),
model: "gpt-5".to_string(),
display_name: "gpt-5".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
id: "gpt-5",
model: "gpt-5",
display_name: "gpt-5",
description: "Broad world knowledge with strong general reasoning.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Minimal,
description: "Fastest responses with little reasoning".to_string(),
description: "Fastest responses with little reasoning",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(),
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(),
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "gpt-5.1-codex-max",
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: false,
},
]
});
pub(crate) fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS
.iter()
.filter(|preset| preset.show_in_picker)
.filter(|preset| match auth_mode {
Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "gpt-5.1-codex-max",
_ => preset.show_in_picker,
})
.cloned()
.collect()
}
// todo(aibrahim): remove this once we migrate tests
pub fn all_model_presets() -> &'static Vec<ModelPreset> {
&PRESETS
}
@@ -225,10 +263,21 @@ pub fn all_model_presets() -> &'static Vec<ModelPreset> {
#[cfg(test)]
mod tests {
use super::*;
use codex_app_server_protocol::AuthMode;
#[test]
fn only_one_default_model_is_configured() {
let default_models = PRESETS.iter().filter(|preset| preset.is_default).count();
assert!(default_models == 1);
}
#[test]
fn gpt_5_1_codex_max_hidden_for_api_key_auth() {
let presets = builtin_model_presets(Some(AuthMode::ApiKey));
assert!(
presets
.iter()
.all(|preset| preset.id != "gpt-5.1-codex-max")
);
}
}
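A short usage sketch of the filter above, mirroring the new test (call sites hypothetical):

```rust
// With API-key auth the gpt-5.1-codex-max preset is withheld; any other
// auth mode (or none) only applies the show_in_picker filter, which still
// includes the default preset.
let api_key_presets = builtin_model_presets(Some(AuthMode::ApiKey));
assert!(api_key_presets.iter().all(|p| p.id != "gpt-5.1-codex-max"));

let default_presets = builtin_model_presets(None);
assert!(default_presets.iter().any(|p| p.is_default));
```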

View File

@@ -52,7 +52,6 @@ regex-lite = { workspace = true }
reqwest = { workspace = true, features = ["json", "stream"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
sha1 = { workspace = true }
sha2 = { workspace = true }
shlex = { workspace = true }

View File

@@ -33,20 +33,12 @@ pub(crate) fn map_api_error(err: ApiError) -> CodexErr {
headers,
body,
} => {
let body_text = body.unwrap_or_default();
if status == http::StatusCode::BAD_REQUEST {
if body_text
.contains("The image data you provided does not represent a valid image")
{
CodexErr::InvalidImageRequest()
} else {
CodexErr::InvalidRequest(body_text)
}
} else if status == http::StatusCode::INTERNAL_SERVER_ERROR {
if status == http::StatusCode::INTERNAL_SERVER_ERROR {
CodexErr::InternalServerError
} else if status == http::StatusCode::TOO_MANY_REQUESTS {
if let Ok(err) = serde_json::from_str::<UsageErrorResponse>(&body_text) {
if let Some(body) = body
&& let Ok(err) = serde_json::from_str::<UsageErrorResponse>(&body)
{
if err.error.error_type.as_deref() == Some("usage_limit_reached") {
let rate_limits = headers.as_ref().and_then(parse_rate_limit);
let resets_at = err
@@ -70,7 +62,7 @@ pub(crate) fn map_api_error(err: ApiError) -> CodexErr {
} else {
CodexErr::UnexpectedStatus(UnexpectedResponseError {
status,
body: body_text,
body: body.unwrap_or_default(),
request_id: extract_request_id(headers.as_ref()),
})
}
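Read without the interleaving, the new control flow for a 429 response is roughly the following sketch; the construction of the usage-limit error itself is elided in the hunk and abbreviated here.

```rust
// status == TOO_MANY_REQUESTS: parse the body only when one was returned,
// and treat it specially only when it decodes to usage_limit_reached.
if let Some(body) = body
    && let Ok(err) = serde_json::from_str::<UsageErrorResponse>(&body)
{
    if err.error.error_type.as_deref() == Some("usage_limit_reached") {
        let rate_limits = headers.as_ref().and_then(parse_rate_limit);
        // ... build the usage-limit error from `err` and `rate_limits`
        // (construction not shown in this hunk)
    }
}
```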

View File

@@ -70,9 +70,7 @@ pub(crate) async fn apply_patch(
)
.await;
match rx_approve.await.unwrap_or_default() {
ReviewDecision::Approved
| ReviewDecision::ApprovedExecpolicyAmendment { .. }
| ReviewDecision::ApprovedForSession => {
ReviewDecision::Approved | ReviewDecision::ApprovedForSession => {
InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
action,
user_explicitly_approved_this_action: true,

View File

@@ -227,6 +227,23 @@ impl CodexAuth {
})
}
/// Raw plan string from the ID token (including unknown/new plan types).
pub fn raw_plan_type(&self) -> Option<String> {
self.get_plan_type().map(|plan| match plan {
InternalPlanType::Known(k) => format!("{k:?}"),
InternalPlanType::Unknown(raw) => raw,
})
}
/// Raw internal plan value from the ID token.
/// Exposes the underlying `token_data::PlanType` without mapping it to the
/// public `AccountPlanType`. Use this when downstream code needs to inspect
/// internal/unknown plan strings exactly as issued in the token.
pub(crate) fn get_plan_type(&self) -> Option<InternalPlanType> {
self.get_current_token_data()
.and_then(|t| t.id_token.chatgpt_plan_type)
}
fn get_current_auth_json(&self) -> Option<AuthDotJson> {
#[expect(clippy::unwrap_used)]
self.auth_dot_json.lock().unwrap().clone()
@@ -1024,6 +1041,10 @@ mod tests {
.expect("auth available");
pretty_assertions::assert_eq!(auth.account_plan_type(), Some(AccountPlanType::Pro));
pretty_assertions::assert_eq!(
auth.get_plan_type(),
Some(InternalPlanType::Known(InternalKnownPlan::Pro))
);
}
#[test]
@@ -1044,6 +1065,10 @@ mod tests {
.expect("auth available");
pretty_assertions::assert_eq!(auth.account_plan_type(), Some(AccountPlanType::Unknown));
pretty_assertions::assert_eq!(
auth.get_plan_type(),
Some(InternalPlanType::Unknown("mystery-tier".to_string()))
);
}
}
@@ -1176,8 +1201,4 @@ impl AuthManager {
self.reload();
Ok(removed)
}
pub fn get_auth_mode(&self) -> Option<AuthMode> {
self.auth().map(|a| a.mode)
}
}
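Following the tests above, the observable behavior of `raw_plan_type` is as follows (illustrative values; `pro_auth` and `mystery_auth` are hypothetical `CodexAuth` instances):

```rust
// Known plans render through Debug formatting of the known variant;
// unknown plans pass the raw token string through unchanged.
assert_eq!(pro_auth.raw_plan_type(), Some("Pro".to_string()));
assert_eq!(mystery_auth.raw_plan_type(), Some("mystery-tier".to_string()));
```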

View File

@@ -20,9 +20,9 @@ use codex_api::error::ApiError;
use codex_app_server_protocol::AuthMode;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::protocol::SessionSource;
use eventsource_stream::Event;
use eventsource_stream::EventStreamError;
@@ -46,10 +46,10 @@ use crate::default_client::build_reqwest_client;
use crate::error::CodexErr;
use crate::error::Result;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_family::ModelFamily;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::openai_model_info::get_model_info;
use crate::openai_models::model_family::ModelFamily;
use crate::tools::spec::create_tools_json_for_chat_completions_api;
use crate::tools::spec::create_tools_json_for_responses_api;
@@ -57,7 +57,6 @@ use crate::tools::spec::create_tools_json_for_responses_api;
pub struct ModelClient {
config: Arc<Config>,
auth_manager: Option<Arc<AuthManager>>,
model_family: ModelFamily,
otel_event_manager: OtelEventManager,
provider: ModelProviderInfo,
conversation_id: ConversationId,
@@ -71,7 +70,6 @@ impl ModelClient {
pub fn new(
config: Arc<Config>,
auth_manager: Option<Arc<AuthManager>>,
model_family: ModelFamily,
otel_event_manager: OtelEventManager,
provider: ModelProviderInfo,
effort: Option<ReasoningEffortConfig>,
@@ -82,7 +80,6 @@ impl ModelClient {
Self {
config,
auth_manager,
model_family,
otel_event_manager,
provider,
conversation_id,
@@ -93,18 +90,16 @@ impl ModelClient {
}
pub fn get_model_context_window(&self) -> Option<i64> {
let model_family = self.get_model_family();
let effective_context_window_percent = model_family.effective_context_window_percent;
let pct = self.config.model_family.effective_context_window_percent;
self.config
.model_context_window
.or_else(|| get_model_info(&model_family).map(|info| info.context_window))
.map(|w| w.saturating_mul(effective_context_window_percent) / 100)
.or_else(|| get_model_info(&self.config.model_family).map(|info| info.context_window))
.map(|w| w.saturating_mul(pct) / 100)
}
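A worked example of the percentage clamp above, with made-up numbers: a 200_000-token context window at an effective_context_window_percent of 95 yields 190_000 usable tokens.

```rust
let window: i64 = 200_000; // hypothetical context window
let pct: i64 = 95;         // hypothetical effective_context_window_percent
assert_eq!(window.saturating_mul(pct) / 100, 190_000);
```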
pub fn get_auto_compact_token_limit(&self) -> Option<i64> {
let model_family = self.get_model_family();
self.config.model_auto_compact_token_limit.or_else(|| {
get_model_info(&model_family).and_then(|info| info.auto_compact_token_limit)
get_model_info(&self.config.model_family).and_then(|info| info.auto_compact_token_limit)
})
}
@@ -154,8 +149,9 @@ impl ModelClient {
}
let auth_manager = self.auth_manager.clone();
let model_family = self.get_model_family();
let instructions = prompt.get_full_instructions(&model_family).into_owned();
let instructions = prompt
.get_full_instructions(&self.config.model_family)
.into_owned();
let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?;
let api_prompt = build_api_prompt(prompt, instructions, tools_json);
let conversation_id = self.conversation_id.to_string();
@@ -208,13 +204,16 @@ impl ModelClient {
}
let auth_manager = self.auth_manager.clone();
let model_family = self.get_model_family();
let instructions = prompt.get_full_instructions(&model_family).into_owned();
let instructions = prompt
.get_full_instructions(&self.config.model_family)
.into_owned();
let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?;
let reasoning = if model_family.supports_reasoning_summaries {
let reasoning = if self.config.model_family.supports_reasoning_summaries {
Some(Reasoning {
effort: self.effort.or(model_family.default_reasoning_effort),
effort: self
.effort
.or(self.config.model_family.default_reasoning_effort),
summary: Some(self.summary),
})
} else {
@@ -227,15 +226,15 @@ impl ModelClient {
vec![]
};
let verbosity = if model_family.support_verbosity {
let verbosity = if self.config.model_family.support_verbosity {
self.config
.model_verbosity
.or(model_family.default_verbosity)
.or(self.config.model_family.default_verbosity)
} else {
if self.config.model_verbosity.is_some() {
warn!(
"model_verbosity is set but ignored as the model does not support verbosity: {}",
model_family.family
self.config.model_family.family
);
}
None
@@ -306,7 +305,7 @@ impl ModelClient {
/// Returns the currently configured model family.
pub fn get_model_family(&self) -> ModelFamily {
self.model_family.clone()
self.config.model_family.clone()
}
/// Returns the current reasoning effort setting.
@@ -343,7 +342,7 @@ impl ModelClient {
.with_telemetry(Some(request_telemetry));
let instructions = prompt
.get_full_instructions(&self.get_model_family())
.get_full_instructions(&self.config.model_family)
.into_owned();
let payload = ApiCompactionInput {
model: &self.config.model,

View File

@@ -1,6 +1,6 @@
use crate::client_common::tools::ToolSpec;
use crate::error::Result;
use crate::openai_models::model_family::ModelFamily;
use crate::model_family::ModelFamily;
pub use codex_api::common::ResponseEvent;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::models::ResponseItem;
@@ -252,7 +252,7 @@ impl Stream for ResponseStream {
#[cfg(test)]
mod tests {
use crate::openai_models::model_family::find_family_for_model;
use crate::model_family::find_family_for_model;
use codex_api::ResponsesApiRequest;
use codex_api::common::OpenAiVerbosity;
use codex_api::common::TextControls;
@@ -309,7 +309,7 @@ mod tests {
},
];
for test_case in test_cases {
let model_family = find_family_for_model(test_case.slug);
let model_family = find_family_for_model(test_case.slug).expect("known model slug");
let expected = if test_case.expects_apply_patch_instructions {
format!(
"{}\n{}",

Some files were not shown because too many files have changed in this diff.