Compare commits


1 Commit

Author          SHA1         Message                   Date
Michael Bolin   e6d1279033   Release 0.35.0-alpha.5    2025-09-11 21:05:08 -07:00
82 changed files with 1160 additions and 5126 deletions

codex-rs/Cargo.lock generated

@@ -212,50 +212,6 @@ dependencies = [
"term",
]
[[package]]
name = "askama"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b79091df18a97caea757e28cd2d5fda49c6cd4bd01ddffd7ff01ace0c0ad2c28"
dependencies = [
"askama_derive",
"askama_escape",
"humansize",
"num-traits",
"percent-encoding",
]
[[package]]
name = "askama_derive"
version = "0.12.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19fe8d6cb13c4714962c072ea496f3392015f0989b1a2847bb4b2d9effd71d83"
dependencies = [
"askama_parser",
"basic-toml",
"mime",
"mime_guess",
"proc-macro2",
"quote",
"serde",
"syn 2.0.104",
]
[[package]]
name = "askama_escape"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "619743e34b5ba4e9703bba34deac3427c72507c7159f5fd030aea8cac0cfe341"
[[package]]
name = "askama_parser"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acb1161c6b64d1c3d83108213c2a2533a342ac225aabd0bda218278c2ddb00c0"
dependencies = [
"nom",
]
[[package]]
name = "assert-json-diff"
version = "2.0.2"
@@ -349,15 +305,6 @@ version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]]
name = "basic-toml"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba62675e8242a4c4e806d12f11d136e626e6c8361d6b829310732241652a178a"
dependencies = [
"serde",
]
[[package]]
name = "beef"
version = "0.5.2"
@@ -659,7 +606,6 @@ name = "codex-core"
version = "0.0.0"
dependencies = [
"anyhow",
"askama",
"assert_cmd",
"async-channel",
"base64",
@@ -2035,15 +1981,6 @@ version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "humansize"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6cb51c9a029ddc91b07a787f1d86b53ccfa49b0e86688c946ebe8d3555685dd7"
dependencies = [
"libm",
]
[[package]]
name = "hyper"
version = "1.7.0"
@@ -2599,12 +2536,6 @@ version = "0.2.175"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
[[package]]
name = "libm"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de"
[[package]]
name = "libredox"
version = "0.1.6"


@@ -22,7 +22,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.35.0-alpha.8"
version = "0.35.0-alpha.5"
# Track the edition for all workspace crates in one place. Individual
# crates can still override this value, but keeping it here means new
# crates created with `cargo new -w ...` automatically inherit the 2024


@@ -40,11 +40,6 @@ pub enum ApplyPatchError {
/// Error that occurs while computing replacements when applying patch chunks
#[error("{0}")]
ComputeReplacements(String),
/// A raw patch body was provided without an explicit `apply_patch` invocation.
#[error(
"patch detected without explicit call to apply_patch. Rerun as [\"apply_patch\", \"<patch>\"]"
)]
ImplicitInvocation,
}
impl From<std::io::Error> for ApplyPatchError {
@@ -98,12 +93,10 @@ pub struct ApplyPatchArgs {
pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
match argv {
// Direct invocation: apply_patch <patch>
[cmd, body] if APPLY_PATCH_COMMANDS.contains(&cmd.as_str()) => match parse_patch(body) {
Ok(source) => MaybeApplyPatch::Body(source),
Err(e) => MaybeApplyPatch::PatchParseError(e),
},
// Bash heredoc form: (optional `cd <path> &&`) apply_patch <<'EOF' ...
[bash, flag, script] if bash == "bash" && flag == "-lc" => {
match extract_apply_patch_from_bash(script) {
Ok((body, workdir)) => match parse_patch(&body) {
@@ -214,26 +207,6 @@ impl ApplyPatchAction {
/// cwd must be an absolute path so that we can resolve relative paths in the
/// patch.
pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApplyPatchVerified {
// Detect a raw patch body passed directly as the command or as the body of a bash -lc
// script. In these cases, report an explicit error rather than applying the patch.
match argv {
[body] => {
if parse_patch(body).is_ok() {
return MaybeApplyPatchVerified::CorrectnessError(
ApplyPatchError::ImplicitInvocation,
);
}
}
[bash, flag, script] if bash == "bash" && flag == "-lc" => {
if parse_patch(script).is_ok() {
return MaybeApplyPatchVerified::CorrectnessError(
ApplyPatchError::ImplicitInvocation,
);
}
}
_ => {}
}
match maybe_parse_apply_patch(argv) {
MaybeApplyPatch::Body(ApplyPatchArgs {
patch,
@@ -902,28 +875,6 @@ mod tests {
));
}
#[test]
fn test_implicit_patch_single_arg_is_error() {
let patch = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch".to_string();
let args = vec![patch];
let dir = tempdir().unwrap();
assert!(matches!(
maybe_parse_apply_patch_verified(&args, dir.path()),
MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation)
));
}
#[test]
fn test_implicit_patch_bash_script_is_error() {
let script = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch";
let args = args_bash(script);
let dir = tempdir().unwrap();
assert!(matches!(
maybe_parse_apply_patch_verified(&args, dir.path()),
MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation)
));
}
#[test]
fn test_literal() {
let args = strs_to_strings(&[
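For reference, the two argv shapes that `maybe_parse_apply_patch` above still accepts can be sketched as plain vectors. The patch text mirrors the test fixtures in this file; the surrounding command strings are placeholders for illustration, not the crate's API.

```rust
// Rough sketch of the two argv shapes matched above; the patch text mirrors
// the test fixtures, and the command strings are placeholders.
fn main() {
    let patch = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch";

    // Direct invocation: apply_patch <patch>
    let direct: Vec<String> = vec!["apply_patch".to_string(), patch.to_string()];

    // Bash heredoc form: bash -lc "apply_patch <<'EOF' ... EOF"
    let heredoc: Vec<String> = vec![
        "bash".to_string(),
        "-lc".to_string(),
        format!("apply_patch <<'EOF'\n{patch}\nEOF"),
    ];

    assert_eq!(direct.len(), 2);
    assert_eq!(heredoc.len(), 3);
}
```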


@@ -17,10 +17,7 @@ pub fn create_config_summary_entries(config: &Config) -> Vec<(&'static str, Stri
{
entries.push((
"reasoning effort",
config
.model_reasoning_effort
.map(|effort| effort.to_string())
.unwrap_or_else(|| "none".to_string()),
config.model_reasoning_effort.to_string(),
));
entries.push((
"reasoning summaries",


@@ -2,7 +2,7 @@ use std::time::Duration;
use std::time::Instant;
/// Returns a string representing the elapsed time since `start_time` like
/// "1m 15s" or "1.50s".
/// "1m15s" or "1.50s".
pub fn format_elapsed(start_time: Instant) -> String {
format_duration(start_time.elapsed())
}
@@ -12,7 +12,7 @@ pub fn format_elapsed(start_time: Instant) -> String {
/// Formatting rules:
/// * < 1 s -> "{milli}ms"
/// * < 60 s -> "{sec:.2}s" (two decimal places)
/// * >= 60 s -> "{min}m {sec:02}s"
/// * >= 60 s -> "{min}m{sec:02}s"
pub fn format_duration(duration: Duration) -> String {
let millis = duration.as_millis() as i64;
format_elapsed_millis(millis)
@@ -26,7 +26,7 @@ fn format_elapsed_millis(millis: i64) -> String {
} else {
let minutes = millis / 60_000;
let seconds = (millis % 60_000) / 1000;
format!("{minutes}m {seconds:02}s")
format!("{minutes}m{seconds:02}s")
}
}
@@ -61,18 +61,12 @@ mod tests {
fn test_format_duration_minutes() {
// Durations ≥ 1 minute should be printed mmss.
let dur = Duration::from_millis(75_000); // 1m15s
assert_eq!(format_duration(dur), "1m 15s");
assert_eq!(format_duration(dur), "1m15s");
let dur_exact = Duration::from_millis(60_000); // 1m0s
assert_eq!(format_duration(dur_exact), "1m 00s");
assert_eq!(format_duration(dur_exact), "1m00s");
let dur_long = Duration::from_millis(3_601_000);
assert_eq!(format_duration(dur_long), "60m 01s");
}
#[test]
fn test_format_duration_one_hour_has_space() {
let dur_hour = Duration::from_millis(3_600_000);
assert_eq!(format_duration(dur_hour), "60m 00s");
assert_eq!(format_duration(dur_long), "60m01s");
}
}
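The doc comment above fully specifies the new output format, so a standalone sketch of those three rules (with the compact `{min}m{sec:02}s` form) may help when reading the test expectations. It is an illustration of the documented behavior, not the crate's code.

```rust
// Standalone sketch of the documented rules, using the compact minute form
// ("1m15s", no space); this mirrors the doc comment, not the crate internals.
use std::time::Duration;

fn format_duration(duration: Duration) -> String {
    let millis = duration.as_millis() as i64;
    if millis < 1_000 {
        format!("{millis}ms")
    } else if millis < 60_000 {
        format!("{:.2}s", millis as f64 / 1_000.0)
    } else {
        let minutes = millis / 60_000;
        let seconds = (millis % 60_000) / 1_000;
        format!("{minutes}m{seconds:02}s")
    }
}

fn main() {
    assert_eq!(format_duration(Duration::from_millis(250)), "250ms");
    assert_eq!(format_duration(Duration::from_millis(1_500)), "1.50s");
    assert_eq!(format_duration(Duration::from_millis(75_000)), "1m15s");
    assert_eq!(format_duration(Duration::from_millis(60_000)), "1m00s");
    assert_eq!(format_duration(Duration::from_millis(3_601_000)), "60m01s");
}
```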


@@ -12,63 +12,49 @@ pub struct ModelPreset {
/// Model slug (e.g., "gpt-5").
pub model: &'static str,
/// Reasoning effort to apply for this preset.
pub effort: Option<ReasoningEffort>,
pub effort: ReasoningEffort,
}
/// Built-in list of model presets that pair a model with a reasoning effort.
///
/// Keep this UI-agnostic so it can be reused by both TUI and MCP server.
pub fn builtin_model_presets() -> &'static [ModelPreset] {
// Order groups swiftfox variants before gpt-5 presets, each from minimal to high.
// Order reflects effort from minimal to high.
const PRESETS: &[ModelPreset] = &[
ModelPreset {
id: "swiftfox-low",
label: "swiftfox low",
description: "",
model: "swiftfox-low",
effort: None,
},
ModelPreset {
id: "swiftfox-medium",
label: "swiftfox medium",
description: "",
model: "swiftfox-medium",
effort: None,
},
ModelPreset {
id: "swiftfox-high",
label: "swiftfox high",
description: "",
model: "swiftfox-high",
effort: None,
},
ModelPreset {
id: "gpt-5-minimal",
label: "gpt-5 minimal",
description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks",
model: "gpt-5",
effort: Some(ReasoningEffort::Minimal),
effort: ReasoningEffort::Minimal,
},
ModelPreset {
id: "gpt-5-low",
label: "gpt-5 low",
description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
model: "gpt-5",
effort: Some(ReasoningEffort::Low),
effort: ReasoningEffort::Low,
},
ModelPreset {
id: "gpt-5-medium",
label: "gpt-5 medium",
description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
model: "gpt-5",
effort: Some(ReasoningEffort::Medium),
effort: ReasoningEffort::Medium,
},
ModelPreset {
id: "gpt-5-high",
label: "gpt-5 high",
description: "— maximizes reasoning depth for complex or ambiguous problems",
model: "gpt-5",
effort: Some(ReasoningEffort::High),
effort: ReasoningEffort::High,
},
ModelPreset {
id: "gpt-5-high-new",
label: "gpt-5 high new",
description: "— our latest release tuned to rely on the model's built-in reasoning defaults",
model: "gpt-5-high-new",
effort: ReasoningEffort::Medium,
},
];
PRESETS
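Callers typically select one of these presets by its `id`; a minimal standalone sketch of such a lookup follows. The struct is simplified (no label, description, or effort) and only a subset of the ids above is reproduced.

```rust
// Standalone sketch of selecting a preset by id; the struct is simplified
// and only a few ids from the list above are reproduced.
#[derive(Clone, Copy, Debug)]
struct ModelPreset {
    id: &'static str,
    model: &'static str,
}

const PRESETS: &[ModelPreset] = &[
    ModelPreset { id: "gpt-5-minimal", model: "gpt-5" },
    ModelPreset { id: "gpt-5-low", model: "gpt-5" },
    ModelPreset { id: "gpt-5-medium", model: "gpt-5" },
    ModelPreset { id: "gpt-5-high", model: "gpt-5" },
];

fn find_preset(id: &str) -> Option<ModelPreset> {
    PRESETS.iter().copied().find(|p| p.id == id)
}

fn main() {
    let preset = find_preset("gpt-5-medium").expect("known preset id");
    assert_eq!(preset.model, "gpt-5");
    println!("{preset:?}");
}
```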


@@ -13,7 +13,6 @@ workspace = true
[dependencies]
anyhow = "1"
askama = "0.12"
async-channel = "2.3.1"
base64 = "0.22"
bytes = "1.10.1"


@@ -1,87 +0,0 @@
# Review guidelines:
You are acting as a reviewer for a proposed code change made by another engineer.
Below are some default guidelines for determining whether the original author would appreciate the issue being flagged.
These are not the final word in determining whether an issue is a bug. In many cases, you will encounter other, more specific guidelines. These may be present elsewhere in a developer message, a user message, a file, or even elsewhere in this system message.
Those guidelines should be considered to override these general instructions.
Here are the general guidelines for determining whether something is a bug and should be flagged.
1. It meaningfully impacts the accuracy, performance, security, or maintainability of the code.
2. The bug is discrete and actionable (i.e. not a general issue with the codebase or a combination of multiple issues).
3. Fixing the bug does not demand a level of rigor that is not present in the rest of the codebase (e.g. one doesn't need very detailed comments and input validation in a repository of one-off scripts in personal projects)
4. The bug was introduced in the commit (pre-existing bugs should not be flagged).
5. The author of the original PR would likely fix the issue if they were made aware of it.
6. The bug does not rely on unstated assumptions about the codebase or author's intent.
7. It is not enough to speculate that a change may disrupt another part of the codebase; to be considered a bug, one must identify the other parts of the code that are provably affected.
8. The bug is clearly not just an intentional change by the original author.
When flagging a bug, you will also provide an accompanying comment. Once again, these guidelines are not the final word on how to construct a comment -- defer to any subsequent guidelines that you encounter.
1. The comment should be clear about why the issue is a bug.
2. The comment should appropriately communicate the severity of the issue. It should not claim that an issue is more severe than it actually is.
3. The comment should be brief. The body should be at most 1 paragraph. It should not introduce line breaks within the natural language flow unless it is necessary for the code fragment.
4. The comment should not include any chunks of code longer than 3 lines. Any code chunks should be wrapped in markdown inline code tags or a code block.
5. The comment should clearly and explicitly communicate the scenarios, environments, or inputs that are necessary for the bug to arise. The comment should immediately indicate that the issue's severity depends on these factors.
6. The comment's tone should be matter-of-fact and not accusatory or overly positive. It should read as a helpful AI assistant suggestion without sounding too much like a human reviewer.
7. The comment should be written such that the original author can immediately grasp the idea without close reading.
8. The comment should avoid excessive flattery and comments that are not helpful to the original author. The comment should avoid phrasing like "Great job ...", "Thanks for ...".
Below are some more detailed guidelines that you should apply to this specific review.
HOW MANY FINDINGS TO RETURN:
Output all findings that the original author would fix if they knew about it. If there is no finding that a person would definitely love to see and fix, prefer outputting no findings. Do not stop at the first qualifying finding. Continue until you've listed every qualifying finding.
GUIDELINES:
- Ignore trivial style unless it obscures meaning or violates documented standards.
- Use one comment per distinct issue (or a multi-line range if necessary).
- Use ```suggestion blocks ONLY for concrete replacement code (minimal lines; no commentary inside the block).
- In every ```suggestion block, preserve the exact leading whitespace of the replaced lines (spaces vs tabs, number of spaces).
- Do NOT introduce or remove outer indentation levels unless that is the actual fix.
The comments will be presented in the code review as inline comments. You should avoid providing unnecessary location details in the comment body. Always keep the line range as short as possible for interpreting the issue. Avoid ranges longer than 5-10 lines; instead, choose the most suitable subrange that pinpoints the problem.
At the beginning of the finding title, tag the bug with priority level. For example "[P1] Un-padding slices along wrong tensor dimensions". [P0] Drop everything to fix. Blocking release, operations, or major usage. Only use for universal issues that do not depend on any assumptions about the inputs. · [P1] Urgent. Should be addressed in the next cycle · [P2] Normal. To be fixed eventually · [P3] Low. Nice to have.
Additionally, include a numeric priority field in the JSON output for each finding: set "priority" to 0 for P0, 1 for P1, 2 for P2, or 3 for P3. If a priority cannot be determined, omit the field or use null.
At the end of your findings, output an "overall correctness" verdict of whether or not the patch should be considered "correct".
Correct implies that existing code and tests will not break, and the patch is free of bugs and other blocking issues.
Ignore non-blocking issues such as style, formatting, typos, documentation, and other nits.
FORMATTING GUIDELINES:
The finding description should be one paragraph.
OUTPUT FORMAT:
## Output schema — MUST MATCH *exactly*
```json
{
"findings": [
{
"title": "<≤ 80 chars, imperative>",
"body": "<valid Markdown explaining *why* this is a problem; cite files/lines/functions>",
"confidence_score": <float 0.0-1.0>,
"priority": <int 0-3, optional>,
"code_location": {
"absolute_file_path": "<file path>",
"line_range": {"start": <int>, "end": <int>}
}
}
],
"overall_correctness": "patch is correct" | "patch is incorrect",
"overall_explanation": "<1-3 sentence explanation justifying the overall_correctness verdict>",
"overall_confidence_score": <float 0.0-1.0>
}
```
* **Do not** wrap the JSON in markdown fences or extra prose.
* The code_location field is required and must include absolute_file_path and line_range.
* Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5-10 lines; pick the most suitable subrange).
* The code_location should overlap with the diff.
* Do not generate a PR fix.


@@ -72,7 +72,7 @@ pub struct ModelClient {
client: reqwest::Client,
provider: ModelProviderInfo,
conversation_id: ConversationId,
effort: Option<ReasoningEffortConfig>,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
}
@@ -81,7 +81,7 @@ impl ModelClient {
config: Arc<Config>,
auth_manager: Option<Arc<AuthManager>>,
provider: ModelProviderInfo,
effort: Option<ReasoningEffortConfig>,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
conversation_id: ConversationId,
) -> Self {
@@ -104,12 +104,6 @@ impl ModelClient {
.or_else(|| get_model_info(&self.config.model_family).map(|info| info.context_window))
}
pub fn get_auto_compact_token_limit(&self) -> Option<i64> {
self.config.model_auto_compact_token_limit.or_else(|| {
get_model_info(&self.config.model_family).and_then(|info| info.auto_compact_token_limit)
})
}
/// Dispatches to either the Responses or Chat implementation depending on
/// the provider config. Public callers always invoke `stream()`; the
/// specialised helpers are private to avoid accidental misuse.
@@ -193,15 +187,6 @@ impl ModelClient {
None
};
// In general, we want to explicitly send `store: false` when using the Responses API,
// but in practice, the Azure Responses API rejects `store: false`:
//
// - If store = false and id is sent an error is thrown that ID is not found
// - If store = false and id is not sent an error is thrown that ID is required
//
// For Azure, we send `store: true` and preserve reasoning item IDs.
let azure_workaround = self.provider.is_azure_responses_endpoint();
let payload = ResponsesApiRequest {
model: &self.config.model,
instructions: &full_instructions,
@@ -210,19 +195,13 @@ impl ModelClient {
tool_choice: "auto",
parallel_tool_calls: false,
reasoning,
store: azure_workaround,
store: false,
stream: true,
include,
prompt_cache_key: Some(self.conversation_id.to_string()),
text,
};
let mut payload_json = serde_json::to_value(&payload)?;
if azure_workaround {
attach_item_ids(&mut payload_json, &input_with_instructions);
}
let payload_body = serde_json::to_string(&payload_json)?;
let mut attempt = 0;
let max_retries = self.provider.request_max_retries();
@@ -235,7 +214,7 @@ impl ModelClient {
trace!(
"POST to {}: {}",
self.provider.get_full_url(&auth),
payload_body.as_str()
serde_json::to_string(&payload)?
);
let mut req_builder = self
@@ -249,7 +228,7 @@ impl ModelClient {
.header("conversation_id", self.conversation_id.to_string())
.header("session_id", self.conversation_id.to_string())
.header(reqwest::header::ACCEPT, "text/event-stream")
.json(&payload_json);
.json(&payload);
if let Some(auth) = auth.as_ref()
&& auth.mode == AuthMode::ChatGPT
@@ -377,7 +356,7 @@ impl ModelClient {
}
/// Returns the current reasoning effort setting.
pub fn get_reasoning_effort(&self) -> Option<ReasoningEffortConfig> {
pub fn get_reasoning_effort(&self) -> ReasoningEffortConfig {
self.effort
}
@@ -446,33 +425,6 @@ struct ResponseCompletedOutputTokensDetails {
reasoning_tokens: u64,
}
fn attach_item_ids(payload_json: &mut Value, original_items: &[ResponseItem]) {
let Some(input_value) = payload_json.get_mut("input") else {
return;
};
let serde_json::Value::Array(items) = input_value else {
return;
};
for (value, item) in items.iter_mut().zip(original_items.iter()) {
if let ResponseItem::Reasoning { id, .. }
| ResponseItem::Message { id: Some(id), .. }
| ResponseItem::WebSearchCall { id: Some(id), .. }
| ResponseItem::FunctionCall { id: Some(id), .. }
| ResponseItem::LocalShellCall { id: Some(id), .. }
| ResponseItem::CustomToolCall { id: Some(id), .. } = item
{
if id.is_empty() {
continue;
}
if let Some(obj) = value.as_object_mut() {
obj.insert("id".to_string(), Value::String(id.clone()));
}
}
}
}
async fn process_sse<S>(
stream: S,
tx_event: mpsc::Sender<Result<ResponseEvent>>,


@@ -19,9 +19,6 @@ use tokio::sync::mpsc;
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");
/// Review thread system prompt. Edit `core/src/review_prompt.md` to customize.
pub const REVIEW_PROMPT: &str = include_str!("../review_prompt.md");
/// API request payload for a single model turn
#[derive(Default, Debug, Clone)]
pub struct Prompt {
@@ -84,10 +81,8 @@ pub enum ResponseEvent {
#[derive(Debug, Serialize)]
pub(crate) struct Reasoning {
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) effort: Option<ReasoningEffortConfig>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) summary: Option<ReasoningSummaryConfig>,
pub(crate) effort: ReasoningEffortConfig,
pub(crate) summary: ReasoningSummaryConfig,
}
/// Controls under the `text` field in the Responses API for GPT-5.
@@ -141,17 +136,14 @@ pub(crate) struct ResponsesApiRequest<'a> {
pub(crate) fn create_reasoning_param_for_request(
model_family: &ModelFamily,
effort: Option<ReasoningEffortConfig>,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
if !model_family.supports_reasoning_summaries {
return None;
if model_family.supports_reasoning_summaries {
Some(Reasoning { effort, summary })
} else {
None
}
Some(Reasoning {
effort,
summary: Some(summary),
})
}
pub(crate) fn create_text_param_for_request(
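Since `effort` and `summary` are no longer `Option`s, the `reasoning` block is always serialized with both fields. The standalone serde sketch below illustrates that shape; the enum variants and the lowercase renaming are assumptions made for this illustration (not taken from the diff), and it requires the `serde` (with `derive`) and `serde_json` crates.

```rust
// Standalone serde sketch: with non-optional fields, both `effort` and
// `summary` always appear in the serialized payload. Variant names and the
// lowercase renaming are assumptions for this illustration.
use serde::Serialize;

#[allow(dead_code)]
#[derive(Serialize)]
#[serde(rename_all = "lowercase")]
enum ReasoningEffort {
    Minimal,
    Low,
    Medium,
    High,
}

#[allow(dead_code)]
#[derive(Serialize)]
#[serde(rename_all = "lowercase")]
enum ReasoningSummary {
    Auto,
    Concise,
    Detailed,
}

#[derive(Serialize)]
struct Reasoning {
    effort: ReasoningEffort,
    summary: ReasoningSummary,
}

fn main() {
    let reasoning = Reasoning {
        effort: ReasoningEffort::Medium,
        summary: ReasoningSummary::Detailed,
    };
    // Prints: {"effort":"medium","summary":"detailed"}
    println!("{}", serde_json::to_string(&reasoning).unwrap());
}
```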

File diff suppressed because it is too large


@@ -1,400 +0,0 @@
use std::sync::Arc;
use super::AgentTask;
use super::MutexExt;
use super::Session;
use super::TurnContext;
use super::get_last_assistant_message_from_turn;
use crate::Prompt;
use crate::client_common::ResponseEvent;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::protocol::AgentMessageEvent;
use crate::protocol::CompactedItem;
use crate::protocol::ErrorEvent;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::InputItem;
use crate::protocol::InputMessageKind;
use crate::protocol::TaskCompleteEvent;
use crate::protocol::TaskStartedEvent;
use crate::protocol::TurnContextItem;
use crate::util::backoff;
use askama::Template;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::RolloutItem;
use futures::prelude::*;
pub(super) const COMPACT_TRIGGER_TEXT: &str = "Start Summarization";
const SUMMARIZATION_PROMPT: &str = include_str!("../../templates/compact/prompt.md");
#[derive(Template)]
#[template(path = "compact/history_bridge.md", escape = "none")]
struct HistoryBridgeTemplate<'a> {
user_messages_text: &'a str,
summary_text: &'a str,
}
pub(super) fn spawn_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
) {
let task = AgentTask::compact(
sess.clone(),
turn_context,
sub_id,
input,
SUMMARIZATION_PROMPT.to_string(),
);
sess.set_task(task);
}
pub(super) async fn run_inline_auto_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
) {
let sub_id = sess.next_internal_sub_id();
let input = vec![InputItem::Text {
text: COMPACT_TRIGGER_TEXT.to_string(),
}];
run_compact_task_inner(
sess,
turn_context,
sub_id,
input,
SUMMARIZATION_PROMPT.to_string(),
false,
)
.await;
}
pub(super) async fn run_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
compact_instructions: String,
) {
run_compact_task_inner(
sess,
turn_context,
sub_id,
input,
compact_instructions,
true,
)
.await;
}
async fn run_compact_task_inner(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
compact_instructions: String,
remove_task_on_completion: bool,
) {
let model_context_window = turn_context.client.get_model_context_window();
let start_event = Event {
id: sub_id.clone(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window,
}),
};
sess.send_event(start_event).await;
let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
let instructions_override = compact_instructions;
let turn_input = sess.turn_input_with_history(vec![initial_input_for_turn.clone().into()]);
let prompt = Prompt {
input: turn_input,
tools: Vec::new(),
base_instructions_override: Some(instructions_override),
};
let max_retries = turn_context.client.get_provider().stream_max_retries();
let mut retries = 0;
let rollout_item = RolloutItem::TurnContext(TurnContextItem {
cwd: turn_context.cwd.clone(),
approval_policy: turn_context.approval_policy,
sandbox_policy: turn_context.sandbox_policy.clone(),
model: turn_context.client.get_model(),
effort: turn_context.client.get_reasoning_effort(),
summary: turn_context.client.get_reasoning_summary(),
});
sess.persist_rollout_items(&[rollout_item]).await;
loop {
let attempt_result = drain_to_completed(&sess, turn_context.as_ref(), &prompt).await;
match attempt_result {
Ok(()) => {
break;
}
Err(CodexErr::Interrupted) => {
return;
}
Err(e) => {
if retries < max_retries {
retries += 1;
let delay = backoff(retries);
sess.notify_stream_error(
&sub_id,
format!(
"stream error: {e}; retrying {retries}/{max_retries} in {delay:?}"
),
)
.await;
tokio::time::sleep(delay).await;
continue;
} else {
let event = Event {
id: sub_id.clone(),
msg: EventMsg::Error(ErrorEvent {
message: e.to_string(),
}),
};
sess.send_event(event).await;
return;
}
}
}
}
if remove_task_on_completion {
sess.remove_task(&sub_id);
}
let history_snapshot = {
let state = sess.state.lock_unchecked();
state.history.contents()
};
let summary_text = get_last_assistant_message_from_turn(&history_snapshot).unwrap_or_default();
let user_messages = collect_user_messages(&history_snapshot);
let initial_context = sess.build_initial_context(turn_context.as_ref());
let new_history = build_compacted_history(initial_context, &user_messages, &summary_text);
{
let mut state = sess.state.lock_unchecked();
state.history.replace(new_history);
}
let rollout_item = RolloutItem::Compacted(CompactedItem {
message: summary_text.clone(),
});
sess.persist_rollout_items(&[rollout_item]).await;
let event = Event {
id: sub_id.clone(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "Compact task completed".to_string(),
}),
};
sess.send_event(event).await;
let event = Event {
id: sub_id.clone(),
msg: EventMsg::TaskComplete(TaskCompleteEvent {
last_agent_message: None,
}),
};
sess.send_event(event).await;
}
fn content_items_to_text(content: &[ContentItem]) -> Option<String> {
let mut pieces = Vec::new();
for item in content {
match item {
ContentItem::InputText { text } | ContentItem::OutputText { text } => {
if !text.is_empty() {
pieces.push(text.as_str());
}
}
ContentItem::InputImage { .. } => {}
}
}
if pieces.is_empty() {
None
} else {
Some(pieces.join("\n"))
}
}
pub(crate) fn collect_user_messages(items: &[ResponseItem]) -> Vec<String> {
items
.iter()
.filter_map(|item| match item {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content)
}
_ => None,
})
.filter(|text| !is_session_prefix_message(text))
.collect()
}
fn is_session_prefix_message(text: &str) -> bool {
matches!(
InputMessageKind::from(("user", text)),
InputMessageKind::UserInstructions | InputMessageKind::EnvironmentContext
)
}
pub(crate) fn build_compacted_history(
initial_context: Vec<ResponseItem>,
user_messages: &[String],
summary_text: &str,
) -> Vec<ResponseItem> {
let mut history = initial_context;
let user_messages_text = if user_messages.is_empty() {
"(none)".to_string()
} else {
user_messages.join("\n\n")
};
let summary_text = if summary_text.is_empty() {
"(no summary available)".to_string()
} else {
summary_text.to_string()
};
let Ok(bridge) = HistoryBridgeTemplate {
user_messages_text: &user_messages_text,
summary_text: &summary_text,
}
.render() else {
return vec![];
};
history.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: bridge }],
});
history
}
async fn drain_to_completed(
sess: &Session,
turn_context: &TurnContext,
prompt: &Prompt,
) -> CodexResult<()> {
let mut stream = turn_context.client.clone().stream(prompt).await?;
loop {
let maybe_event = stream.next().await;
let Some(event) = maybe_event else {
return Err(CodexErr::Stream(
"stream closed before response.completed".into(),
None,
));
};
match event {
Ok(ResponseEvent::OutputItemDone(item)) => {
let mut state = sess.state.lock_unchecked();
state.history.record_items(std::slice::from_ref(&item));
}
Ok(ResponseEvent::Completed { .. }) => {
return Ok(());
}
Ok(_) => continue,
Err(e) => return Err(e),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn content_items_to_text_joins_non_empty_segments() {
let items = vec![
ContentItem::InputText {
text: "hello".to_string(),
},
ContentItem::OutputText {
text: String::new(),
},
ContentItem::OutputText {
text: "world".to_string(),
},
];
let joined = content_items_to_text(&items);
assert_eq!(Some("hello\nworld".to_string()), joined);
}
#[test]
fn content_items_to_text_ignores_image_only_content() {
let items = vec![ContentItem::InputImage {
image_url: "file://image.png".to_string(),
}];
let joined = content_items_to_text(&items);
assert_eq!(None, joined);
}
#[test]
fn collect_user_messages_extracts_user_text_only() {
let items = vec![
ResponseItem::Message {
id: Some("assistant".to_string()),
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: "ignored".to_string(),
}],
},
ResponseItem::Message {
id: Some("user".to_string()),
role: "user".to_string(),
content: vec![
ContentItem::InputText {
text: "first".to_string(),
},
ContentItem::OutputText {
text: "second".to_string(),
},
],
},
ResponseItem::Other,
];
let collected = collect_user_messages(&items);
assert_eq!(vec!["first\nsecond".to_string()], collected);
}
#[test]
fn collect_user_messages_filters_session_prefix_entries() {
let items = vec![
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "<user_instructions>do things</user_instructions>".to_string(),
}],
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "<ENVIRONMENT_CONTEXT>cwd=/tmp</ENVIRONMENT_CONTEXT>".to_string(),
}],
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "real user message".to_string(),
}],
},
];
let collected = collect_user_messages(&items);
assert_eq!(vec!["real user message".to_string()], collected);
}
}


@@ -32,8 +32,7 @@ use toml::Value as TomlValue;
use toml_edit::DocumentMut;
const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5";
pub const SWIFTFOX_MEDIUM_MODEL: &str = "swiftfox-medium";
pub const GPT5_HIGH_MODEL: &str = "gpt-5-high";
/// Maximum number of bytes of the documentation that will be embedded. Larger
/// files are *silently truncated* to this size so we do not take up too much of
@@ -48,9 +47,6 @@ pub struct Config {
/// Optional override of model selection.
pub model: String,
/// Model used specifically for review sessions. Defaults to "gpt-5".
pub review_model: String,
pub model_family: ModelFamily,
/// Size of the context window for the model, in tokens.
@@ -59,9 +55,6 @@ pub struct Config {
/// Maximum number of output tokens.
pub model_max_output_tokens: Option<u64>,
/// Token usage threshold triggering auto-compaction of conversation history.
pub model_auto_compact_token_limit: Option<i64>,
/// Key into the model_providers map that specifies which provider to use.
pub model_provider_id: String,
@@ -147,7 +140,7 @@ pub struct Config {
/// Value to use for `reasoning.effort` when making a request using the
/// Responses API.
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_effort: ReasoningEffort,
/// If not "none", the value to use for `reasoning.summary` when making a
/// request using the Responses API.
@@ -430,24 +423,14 @@ pub async fn persist_model_selection(
if let Some(profile_name) = active_profile {
let profile_table = ensure_profile_table(&mut doc, profile_name)?;
profile_table["model"] = toml_edit::value(model);
match effort {
Some(effort) => {
profile_table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
}
None => {
profile_table.remove("model_reasoning_effort");
}
if let Some(effort) = effort {
profile_table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
}
} else {
let table = doc.as_table_mut();
table["model"] = toml_edit::value(model);
match effort {
Some(effort) => {
table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
}
None => {
table.remove("model_reasoning_effort");
}
if let Some(effort) = effort {
table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
}
}
@@ -512,12 +495,10 @@ fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) {
}
/// Base config deserialized from ~/.codex/config.toml.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ConfigToml {
/// Optional override of model selection.
pub model: Option<String>,
/// Review model override used by the `/review` feature.
pub review_model: Option<String>,
/// Provider to use from the model_providers map.
pub model_provider: Option<String>,
@@ -528,9 +509,6 @@ pub struct ConfigToml {
/// Maximum number of output tokens.
pub model_max_output_tokens: Option<u64>,
/// Token usage threshold triggering auto-compaction of conversation history.
pub model_auto_compact_token_limit: Option<i64>,
/// Default approval policy for executing commands.
pub approval_policy: Option<AskForApproval>,
@@ -649,7 +627,7 @@ pub struct ProjectConfig {
pub trust_level: Option<String>,
}
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ToolsToml {
#[serde(default, alias = "web_search_request")]
pub web_search: Option<bool>,
@@ -746,7 +724,6 @@ impl ConfigToml {
#[derive(Default, Debug, Clone)]
pub struct ConfigOverrides {
pub model: Option<String>,
pub review_model: Option<String>,
pub cwd: Option<PathBuf>,
pub approval_policy: Option<AskForApproval>,
pub sandbox_mode: Option<SandboxMode>,
@@ -774,7 +751,6 @@ impl Config {
// Destructure ConfigOverrides fully to ensure all overrides are applied.
let ConfigOverrides {
model,
review_model: override_review_model,
cwd,
approval_policy,
sandbox_mode,
@@ -891,11 +867,6 @@ impl Config {
.as_ref()
.map(|info| info.max_output_tokens)
});
let model_auto_compact_token_limit = cfg.model_auto_compact_token_limit.or_else(|| {
openai_model_info
.as_ref()
.and_then(|info| info.auto_compact_token_limit)
});
let experimental_resume = cfg.experimental_resume;
@@ -910,18 +881,11 @@ impl Config {
Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
let base_instructions = base_instructions.or(file_base_instructions);
// Default review model when not set in config; allow CLI override to take precedence.
let review_model = override_review_model
.or(cfg.review_model)
.unwrap_or_else(default_review_model);
let config = Self {
model,
review_model,
model_family,
model_context_window,
model_max_output_tokens,
model_auto_compact_token_limit,
model_provider_id,
model_provider,
cwd: resolved_cwd,
@@ -949,7 +913,8 @@ impl Config {
.unwrap_or(false),
model_reasoning_effort: config_profile
.model_reasoning_effort
.or(cfg.model_reasoning_effort),
.or(cfg.model_reasoning_effort)
.unwrap_or_default(),
model_reasoning_summary: config_profile
.model_reasoning_summary
.or(cfg.model_reasoning_summary)
@@ -1041,10 +1006,6 @@ fn default_model() -> String {
OPENAI_DEFAULT_MODEL.to_string()
}
fn default_review_model() -> String {
OPENAI_DEFAULT_REVIEW_MODEL.to_string()
}
/// Returns the path to the Codex configuration directory, which can be
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.codex`.
@@ -1184,7 +1145,7 @@ exclude_slash_tmp = true
persist_model_selection(
codex_home.path(),
None,
"swiftfox-high",
"gpt-5-high-new",
Some(ReasoningEffort::High),
)
.await?;
@@ -1193,7 +1154,7 @@ exclude_slash_tmp = true
tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?;
let parsed: ConfigToml = toml::from_str(&serialized)?;
assert_eq!(parsed.model.as_deref(), Some("swiftfox-high"));
assert_eq!(parsed.model.as_deref(), Some("gpt-5-high-new"));
assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High));
Ok(())
@@ -1247,8 +1208,8 @@ model = "gpt-4.1"
persist_model_selection(
codex_home.path(),
Some("dev"),
"swiftfox-medium",
Some(ReasoningEffort::Medium),
"gpt-5-high-new",
Some(ReasoningEffort::Low),
)
.await?;
@@ -1260,11 +1221,8 @@ model = "gpt-4.1"
.get("dev")
.expect("profile should be created");
assert_eq!(profile.model.as_deref(), Some("swiftfox-medium"));
assert_eq!(
profile.model_reasoning_effort,
Some(ReasoningEffort::Medium)
);
assert_eq!(profile.model.as_deref(), Some("gpt-5-high-new"));
assert_eq!(profile.model_reasoning_effort, Some(ReasoningEffort::Low));
Ok(())
}
@@ -1460,11 +1418,9 @@ model_verbosity = "high"
assert_eq!(
Config {
model: "o3".to_string(),
review_model: "gpt-5".to_string(),
model_family: find_family_for_model("o3").expect("known model slug"),
model_context_window: Some(200_000),
model_max_output_tokens: Some(100_000),
model_auto_compact_token_limit: None,
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: AskForApproval::Never,
@@ -1482,7 +1438,7 @@ model_verbosity = "high"
codex_linux_sandbox_exe: None,
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_effort: ReasoningEffort::High,
model_reasoning_summary: ReasoningSummary::Detailed,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -1518,11 +1474,9 @@ model_verbosity = "high"
)?;
let expected_gpt3_profile_config = Config {
model: "gpt-3.5-turbo".to_string(),
review_model: "gpt-5".to_string(),
model_family: find_family_for_model("gpt-3.5-turbo").expect("known model slug"),
model_context_window: Some(16_385),
model_max_output_tokens: Some(4_096),
model_auto_compact_token_limit: None,
model_provider_id: "openai-chat-completions".to_string(),
model_provider: fixture.openai_chat_completions_provider.clone(),
approval_policy: AskForApproval::UnlessTrusted,
@@ -1540,7 +1494,7 @@ model_verbosity = "high"
codex_linux_sandbox_exe: None,
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: None,
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -1591,11 +1545,9 @@ model_verbosity = "high"
)?;
let expected_zdr_profile_config = Config {
model: "o3".to_string(),
review_model: "gpt-5".to_string(),
model_family: find_family_for_model("o3").expect("known model slug"),
model_context_window: Some(200_000),
model_max_output_tokens: Some(100_000),
model_auto_compact_token_limit: None,
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: AskForApproval::OnFailure,
@@ -1613,7 +1565,7 @@ model_verbosity = "high"
codex_linux_sandbox_exe: None,
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: None,
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -1650,11 +1602,9 @@ model_verbosity = "high"
)?;
let expected_gpt5_profile_config = Config {
model: "gpt-5".to_string(),
review_model: "gpt-5".to_string(),
model_family: find_family_for_model("gpt-5").expect("known model slug"),
model_context_window: Some(272_000),
model_max_output_tokens: Some(128_000),
model_auto_compact_token_limit: None,
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: AskForApproval::OnFailure,
@@ -1672,7 +1622,7 @@ model_verbosity = "high"
codex_linux_sandbox_exe: None,
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_effort: ReasoningEffort::High,
model_reasoning_summary: ReasoningSummary::Detailed,
model_verbosity: Some(Verbosity::High),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),

View File

@@ -7,12 +7,6 @@ use toml_edit::DocumentMut;
pub const CONFIG_KEY_MODEL: &str = "model";
pub const CONFIG_KEY_EFFORT: &str = "model_reasoning_effort";
#[derive(Copy, Clone)]
enum NoneBehavior {
Skip,
Remove,
}
/// Persist overrides into `config.toml` using explicit key segments per
/// override. This avoids ambiguity with keys that contain dots or spaces.
pub async fn persist_overrides(
@@ -20,12 +14,47 @@ pub async fn persist_overrides(
profile: Option<&str>,
overrides: &[(&[&str], &str)],
) -> Result<()> {
let with_options: Vec<(&[&str], Option<&str>)> = overrides
.iter()
.map(|(segments, value)| (*segments, Some(*value)))
.collect();
let config_path = codex_home.join(CONFIG_TOML_FILE);
persist_overrides_with_behavior(codex_home, profile, &with_options, NoneBehavior::Skip).await
let mut doc = match tokio::fs::read_to_string(&config_path).await {
Ok(s) => s.parse::<DocumentMut>()?,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
tokio::fs::create_dir_all(codex_home).await?;
DocumentMut::new()
}
Err(e) => return Err(e.into()),
};
let effective_profile = if let Some(p) = profile {
Some(p.to_owned())
} else {
doc.get("profile")
.and_then(|i| i.as_str())
.map(|s| s.to_string())
};
for (segments, val) in overrides.iter().copied() {
let value = toml_edit::value(val);
if let Some(ref name) = effective_profile {
if segments.first().copied() == Some("profiles") {
apply_toml_edit_override_segments(&mut doc, segments, value);
} else {
let mut seg_buf: Vec<&str> = Vec::with_capacity(2 + segments.len());
seg_buf.push("profiles");
seg_buf.push(name.as_str());
seg_buf.extend_from_slice(segments);
apply_toml_edit_override_segments(&mut doc, &seg_buf, value);
}
} else {
apply_toml_edit_override_segments(&mut doc, segments, value);
}
}
let tmp_file = NamedTempFile::new_in(codex_home)?;
tokio::fs::write(tmp_file.path(), doc.to_string()).await?;
tmp_file.persist(config_path)?;
Ok(())
}
/// Persist overrides where values may be optional. Any entries with `None`
@@ -36,17 +65,16 @@ pub async fn persist_non_null_overrides(
profile: Option<&str>,
overrides: &[(&[&str], Option<&str>)],
) -> Result<()> {
persist_overrides_with_behavior(codex_home, profile, overrides, NoneBehavior::Skip).await
}
let filtered: Vec<(&[&str], &str)> = overrides
.iter()
.filter_map(|(k, v)| v.map(|vv| (*k, vv)))
.collect();
/// Persist overrides where `None` values clear any existing values from the
/// configuration file.
pub async fn persist_overrides_and_clear_if_none(
codex_home: &Path,
profile: Option<&str>,
overrides: &[(&[&str], Option<&str>)],
) -> Result<()> {
persist_overrides_with_behavior(codex_home, profile, overrides, NoneBehavior::Remove).await
if filtered.is_empty() {
return Ok(());
}
persist_overrides(codex_home, profile, &filtered).await
}
/// Apply a single override onto a `toml_edit` document while preserving
@@ -93,125 +121,6 @@ fn apply_toml_edit_override_segments(
current[last] = value;
}
async fn persist_overrides_with_behavior(
codex_home: &Path,
profile: Option<&str>,
overrides: &[(&[&str], Option<&str>)],
none_behavior: NoneBehavior,
) -> Result<()> {
if overrides.is_empty() {
return Ok(());
}
let should_skip = match none_behavior {
NoneBehavior::Skip => overrides.iter().all(|(_, value)| value.is_none()),
NoneBehavior::Remove => false,
};
if should_skip {
return Ok(());
}
let config_path = codex_home.join(CONFIG_TOML_FILE);
let read_result = tokio::fs::read_to_string(&config_path).await;
let mut doc = match read_result {
Ok(contents) => contents.parse::<DocumentMut>()?,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
if overrides
.iter()
.all(|(_, value)| value.is_none() && matches!(none_behavior, NoneBehavior::Remove))
{
return Ok(());
}
tokio::fs::create_dir_all(codex_home).await?;
DocumentMut::new()
}
Err(e) => return Err(e.into()),
};
let effective_profile = if let Some(p) = profile {
Some(p.to_owned())
} else {
doc.get("profile")
.and_then(|i| i.as_str())
.map(|s| s.to_string())
};
let mut mutated = false;
for (segments, value) in overrides.iter().copied() {
let mut seg_buf: Vec<&str> = Vec::new();
let segments_to_apply: &[&str];
if let Some(ref name) = effective_profile {
if segments.first().copied() == Some("profiles") {
segments_to_apply = segments;
} else {
seg_buf.reserve(2 + segments.len());
seg_buf.push("profiles");
seg_buf.push(name.as_str());
seg_buf.extend_from_slice(segments);
segments_to_apply = seg_buf.as_slice();
}
} else {
segments_to_apply = segments;
}
match value {
Some(v) => {
let item_value = toml_edit::value(v);
apply_toml_edit_override_segments(&mut doc, segments_to_apply, item_value);
mutated = true;
}
None => {
if matches!(none_behavior, NoneBehavior::Remove)
&& remove_toml_edit_segments(&mut doc, segments_to_apply)
{
mutated = true;
}
}
}
}
if !mutated {
return Ok(());
}
let tmp_file = NamedTempFile::new_in(codex_home)?;
tokio::fs::write(tmp_file.path(), doc.to_string()).await?;
tmp_file.persist(config_path)?;
Ok(())
}
fn remove_toml_edit_segments(doc: &mut DocumentMut, segments: &[&str]) -> bool {
use toml_edit::Item;
if segments.is_empty() {
return false;
}
let mut current = doc.as_table_mut();
for seg in &segments[..segments.len() - 1] {
let Some(item) = current.get_mut(seg) else {
return false;
};
match item {
Item::Table(table) => {
current = table;
}
_ => {
return false;
}
}
}
current.remove(segments[segments.len() - 1]).is_some()
}
#[cfg(test)]
mod tests {
use super::*;
@@ -665,81 +574,6 @@ model = "o3"
assert_eq!(contents, expected);
}
#[tokio::test]
async fn persist_clear_none_removes_top_level_value() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
let seed = r#"model = "gpt-5"
model_reasoning_effort = "medium"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
persist_overrides_and_clear_if_none(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], None),
(&[CONFIG_KEY_EFFORT], Some("high")),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = "model_reasoning_effort = \"high\"\n";
assert_eq!(contents, expected);
}
#[tokio::test]
async fn persist_clear_none_respects_active_profile() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
let seed = r#"profile = "team"
[profiles.team]
model = "gpt-4"
model_reasoning_effort = "minimal"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
persist_overrides_and_clear_if_none(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], None),
(&[CONFIG_KEY_EFFORT], Some("high")),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"profile = "team"
[profiles.team]
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
#[tokio::test]
async fn persist_clear_none_noop_when_file_missing() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
persist_overrides_and_clear_if_none(codex_home, None, &[(&[CONFIG_KEY_MODEL], None)])
.await
.expect("persist");
assert!(!codex_home.join(CONFIG_TOML_FILE).exists());
}
// Test helper moved to bottom per review guidance.
async fn read_config(codex_home: &Path) -> String {
let p = codex_home.join(CONFIG_TOML_FILE);
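Both persistence paths in this file funnel into `toml_edit` document edits. For readers unfamiliar with that crate, a minimal standalone sketch of the pattern follows; the keys and values are illustrative examples rather than the exact overrides the code writes, and it requires the `toml_edit` crate.

```rust
// Minimal toml_edit sketch: parse a document, set keys, and print it back
// with formatting preserved. The keys and values are illustrative examples.
use toml_edit::DocumentMut;

fn main() {
    let mut doc: DocumentMut = "model = \"gpt-4.1\"\n".parse().expect("valid TOML");
    doc["model"] = toml_edit::value("gpt-5");
    doc["model_reasoning_effort"] = toml_edit::value("high");
    print!("{doc}");
    // model = "gpt-5"
    // model_reasoning_effort = "high"
}
```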


@@ -1,3 +1,4 @@
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
/// Transcript of conversation history
@@ -32,8 +33,52 @@ impl ConversationHistory {
}
}
pub(crate) fn replace(&mut self, items: Vec<ResponseItem>) {
self.items = items;
pub(crate) fn keep_last_messages(&mut self, n: usize) {
if n == 0 {
self.items.clear();
return;
}
// Collect the last N message items (assistant/user), newest to oldest.
let mut kept: Vec<ResponseItem> = Vec::with_capacity(n);
for item in self.items.iter().rev() {
if let ResponseItem::Message { role, content, .. } = item {
kept.push(ResponseItem::Message {
// we need to remove the id or the model will complain that messages are sent without
// their reasonings
id: None,
role: role.clone(),
content: content.clone(),
});
if kept.len() == n {
break;
}
}
}
// Preserve chronological order (oldest to newest) within the kept slice.
kept.reverse();
self.items = kept;
}
pub(crate) fn last_agent_message(&self) -> String {
for item in self.items.iter().rev() {
if let ResponseItem::Message { role, content, .. } = item
&& role == "assistant"
{
return content
.iter()
.find_map(|ci| {
if let ContentItem::OutputText { text } = ci {
Some(text.clone())
} else {
None
}
})
.unwrap_or_default();
}
}
String::new()
}
}
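As a rough illustration of the `keep_last_messages` semantics above (last N messages kept, `id` cleared, chronological order preserved), here is a standalone sketch with a simplified message type; it is not the crate's `ResponseItem`.

```rust
// Standalone sketch of keep_last_messages with a simplified message type:
// keep the last N messages, clear their ids, preserve chronological order.
#[derive(Clone, Debug)]
struct Msg {
    id: Option<String>,
    role: String,
    text: String,
}

fn keep_last_messages(items: &mut Vec<Msg>, n: usize) {
    if n == 0 {
        items.clear();
        return;
    }
    let mut kept: Vec<Msg> = items
        .iter()
        .rev()
        .take(n)
        .map(|m| Msg { id: None, ..m.clone() })
        .collect();
    kept.reverse(); // oldest to newest within the kept slice
    *items = kept;
}

fn main() {
    let mut history = vec![
        Msg { id: Some("1".into()), role: "user".into(), text: "first".into() },
        Msg { id: Some("2".into()), role: "assistant".into(), text: "second".into() },
        Msg { id: Some("3".into()), role: "user".into(), text: "third".into() },
    ];
    keep_last_messages(&mut history, 2);
    assert_eq!(history.len(), 2);
    assert_eq!(history[0].text, "second"); // chronological order preserved
    assert_eq!(history[1].role, "user");
    assert!(history.iter().all(|m| m.id.is_none()));
}
```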


@@ -144,19 +144,19 @@ impl ConversationManager {
self.conversations.write().await.remove(conversation_id)
}
/// Fork an existing conversation by taking messages up to the given position
/// (not including the message at the given position) and starting a new
/// Fork an existing conversation by dropping the last `drop_last_messages`
/// user/assistant messages from its transcript and starting a new
/// conversation with identical configuration (unless overridden by the
/// caller's `config`). The new conversation will have a fresh id.
pub async fn fork_conversation(
&self,
nth_user_message: usize,
num_messages_to_drop: usize,
config: Config,
path: PathBuf,
) -> CodexResult<NewConversation> {
// Compute the prefix up to the cut point.
let history = RolloutRecorder::get_rollout_history(&path).await?;
let history = truncate_after_nth_user_message(history, nth_user_message);
let history = truncate_after_dropping_last_messages(history, num_messages_to_drop);
// Spawn a new conversation with the computed initial history.
let auth_manager = self.auth_manager.clone();
@@ -169,10 +169,14 @@ impl ConversationManager {
}
}
/// Return a prefix of `items` obtained by cutting strictly before the nth user message
/// (0-based) and all items that follow it.
fn truncate_after_nth_user_message(history: InitialHistory, n: usize) -> InitialHistory {
// Work directly on rollout items, and cut the vector at the nth user message input.
/// Return a prefix of `items` obtained by dropping the last `n` user messages
/// and all items that follow them.
fn truncate_after_dropping_last_messages(history: InitialHistory, n: usize) -> InitialHistory {
if n == 0 {
return InitialHistory::Forked(history.get_rollout_items());
}
// Work directly on rollout items, and cut the vector at the nth-from-last user message input.
let items: Vec<RolloutItem> = history.get_rollout_items();
// Find indices of user message inputs in rollout order.
@@ -185,13 +189,13 @@ fn truncate_after_nth_user_message(history: InitialHistory, n: usize) -> Initial
}
}
// If fewer than or equal to n user messages exist, treat as empty (out of range).
if user_positions.len() <= n {
// If fewer than n user messages exist, treat as empty.
if user_positions.len() < n {
return InitialHistory::New;
}
// Cut strictly before the nth user message (do not keep the nth itself).
let cut_idx = user_positions[n];
// Cut strictly before the nth-from-last user message (do not keep the nth itself).
let cut_idx = user_positions[user_positions.len() - n];
let rolled: Vec<RolloutItem> = items.into_iter().take(cut_idx).collect();
if rolled.is_empty() {
@@ -258,7 +262,7 @@ mod tests {
.cloned()
.map(RolloutItem::ResponseItem)
.collect();
let truncated = truncate_after_nth_user_message(InitialHistory::Forked(initial), 1);
let truncated = truncate_after_dropping_last_messages(InitialHistory::Forked(initial), 1);
let got_items = truncated.get_rollout_items();
let expected_items = vec![
RolloutItem::ResponseItem(items[0].clone()),
@@ -275,7 +279,7 @@ mod tests {
.cloned()
.map(RolloutItem::ResponseItem)
.collect();
let truncated2 = truncate_after_nth_user_message(InitialHistory::Forked(initial2), 2);
let truncated2 = truncate_after_dropping_last_messages(InitialHistory::Forked(initial2), 2);
assert!(matches!(truncated2, InitialHistory::New));
}
}
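The cut-index arithmetic above (`user_positions[user_positions.len() - n]`) is easier to see on a small concrete input. Below is a standalone sketch over simplified `(text, is_user_message)` pairs rather than the real `RolloutItem` type.

```rust
// Standalone sketch over simplified (text, is_user_message) pairs: drop the
// last `n` user messages and everything that follows them.
fn truncate_after_dropping_last_messages(
    items: Vec<(String, bool)>,
    n: usize,
) -> Vec<(String, bool)> {
    if n == 0 {
        return items;
    }
    // Positions of user messages, in rollout order.
    let user_positions: Vec<usize> = items
        .iter()
        .enumerate()
        .filter_map(|(i, (_text, is_user))| if *is_user { Some(i) } else { None })
        .collect();
    // Fewer than n user messages: nothing survives the cut.
    if user_positions.len() < n {
        return Vec::new();
    }
    // Cut strictly before the nth-from-last user message.
    let cut_idx = user_positions[user_positions.len() - n];
    items.into_iter().take(cut_idx).collect()
}

fn main() {
    let items = vec![
        ("u1".to_string(), true),
        ("a1".to_string(), false),
        ("u2".to_string(), true),
        ("a2".to_string(), false),
    ];
    // Dropping the last user message removes "u2" and everything after it.
    let kept = truncate_after_dropping_last_messages(items, 1);
    assert_eq!(
        kept,
        vec![("u1".to_string(), true), ("a1".to_string(), false)]
    );
}
```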


@@ -10,8 +10,8 @@ pub(crate) const INTERNAL_STORAGE_FILE: &str = "internal_storage.json";
pub struct InternalStorage {
#[serde(skip)]
storage_path: PathBuf,
#[serde(default, alias = "gpt_5_high_model_prompt_seen")]
pub swiftfox_model_prompt_seen: bool,
#[serde(default)]
pub gpt_5_high_model_prompt_seen: bool,
}
// TODO(jif) generalise all the file writers and build proper async channel inserters.


@@ -92,6 +92,12 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
supports_reasoning_summaries: true,
uses_local_shell_tool: true,
)
} else if slug.starts_with("codex-") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
)
} else if slug.starts_with("gpt-4.1") {
model_family!(
slug, "gpt-4.1",
@@ -103,12 +109,6 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
simple_model_family!(slug, "gpt-4o")
} else if slug.starts_with("gpt-3.5") {
simple_model_family!(slug, "gpt-3.5")
} else if slug.starts_with("codex-") || slug.starts_with("swiftfox") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
)
} else if slug.starts_with("gpt-5") {
model_family!(
slug, "gpt-5",


@@ -162,21 +162,6 @@ impl ModelProviderInfo {
}
}
pub(crate) fn is_azure_responses_endpoint(&self) -> bool {
if self.wire_api != WireApi::Responses {
return false;
}
if self.name.eq_ignore_ascii_case("azure") {
return true;
}
self.base_url
.as_ref()
.map(|base| matches_azure_responses_base_url(base))
.unwrap_or(false)
}
/// Apply provider-specific HTTP headers (both static and environment-based)
/// onto an existing `reqwest::RequestBuilder` and return the updated
/// builder.
@@ -344,18 +329,6 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
}
}
fn matches_azure_responses_base_url(base_url: &str) -> bool {
let base = base_url.to_ascii_lowercase();
const AZURE_MARKERS: [&str; 5] = [
"openai.azure.",
"cognitiveservices.azure.",
"aoai.azure.",
"azure-api.",
"azurefd.",
];
AZURE_MARKERS.iter().any(|marker| base.contains(marker))
}
#[cfg(test)]
mod tests {
use super::*;
@@ -446,69 +419,4 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
assert_eq!(expected_provider, provider);
}
#[test]
fn detects_azure_responses_base_urls() {
fn provider_for(base_url: &str) -> ModelProviderInfo {
ModelProviderInfo {
name: "test".into(),
base_url: Some(base_url.into()),
env_key: None,
env_key_instructions: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
}
}
let positive_cases = [
"https://foo.openai.azure.com/openai",
"https://foo.openai.azure.us/openai/deployments/bar",
"https://foo.cognitiveservices.azure.cn/openai",
"https://foo.aoai.azure.com/openai",
"https://foo.openai.azure-api.net/openai",
"https://foo.z01.azurefd.net/",
];
for base_url in positive_cases {
let provider = provider_for(base_url);
assert!(
provider.is_azure_responses_endpoint(),
"expected {base_url} to be detected as Azure"
);
}
let named_provider = ModelProviderInfo {
name: "Azure".into(),
base_url: Some("https://example.com".into()),
env_key: None,
env_key_instructions: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
};
assert!(named_provider.is_azure_responses_endpoint());
let negative_cases = [
"https://api.openai.com/v1",
"https://example.com/openai",
"https://myproxy.azurewebsites.net/openai",
];
for base_url in negative_cases {
let provider = provider_for(base_url);
assert!(
!provider.is_azure_responses_endpoint(),
"expected {base_url} not to be detected as Azure"
);
}
}
}


@@ -12,19 +12,6 @@ pub(crate) struct ModelInfo {
/// Maximum number of output tokens that can be generated for the model.
pub(crate) max_output_tokens: u64,
/// Token threshold where we should automatically compact conversation history.
pub(crate) auto_compact_token_limit: Option<i64>,
}
impl ModelInfo {
const fn new(context_window: u64, max_output_tokens: u64) -> Self {
Self {
context_window,
max_output_tokens,
auto_compact_token_limit: None,
}
}
}
pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
@@ -33,37 +20,73 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
// OSS models have a 128k shared token pool.
// Arbitrarily splitting it: 3/4 input context, 1/4 output.
// https://openai.com/index/gpt-oss-model-card/
"gpt-oss-20b" => Some(ModelInfo::new(96_000, 32_000)),
"gpt-oss-120b" => Some(ModelInfo::new(96_000, 32_000)),
"gpt-oss-20b" => Some(ModelInfo {
context_window: 96_000,
max_output_tokens: 32_000,
}),
"gpt-oss-120b" => Some(ModelInfo {
context_window: 96_000,
max_output_tokens: 32_000,
}),
// https://platform.openai.com/docs/models/o3
"o3" => Some(ModelInfo::new(200_000, 100_000)),
"o3" => Some(ModelInfo {
context_window: 200_000,
max_output_tokens: 100_000,
}),
// https://platform.openai.com/docs/models/o4-mini
"o4-mini" => Some(ModelInfo::new(200_000, 100_000)),
"o4-mini" => Some(ModelInfo {
context_window: 200_000,
max_output_tokens: 100_000,
}),
// https://platform.openai.com/docs/models/codex-mini-latest
"codex-mini-latest" => Some(ModelInfo::new(200_000, 100_000)),
"codex-mini-latest" => Some(ModelInfo {
context_window: 200_000,
max_output_tokens: 100_000,
}),
// As of Jun 25, 2025, gpt-4.1 defaults to gpt-4.1-2025-04-14.
// https://platform.openai.com/docs/models/gpt-4.1
"gpt-4.1" | "gpt-4.1-2025-04-14" => Some(ModelInfo::new(1_047_576, 32_768)),
"gpt-4.1" | "gpt-4.1-2025-04-14" => Some(ModelInfo {
context_window: 1_047_576,
max_output_tokens: 32_768,
}),
// As of Jun 25, 2025, gpt-4o defaults to gpt-4o-2024-08-06.
// https://platform.openai.com/docs/models/gpt-4o
"gpt-4o" | "gpt-4o-2024-08-06" => Some(ModelInfo::new(128_000, 16_384)),
"gpt-4o" | "gpt-4o-2024-08-06" => Some(ModelInfo {
context_window: 128_000,
max_output_tokens: 16_384,
}),
// https://platform.openai.com/docs/models/gpt-4o?snapshot=gpt-4o-2024-05-13
"gpt-4o-2024-05-13" => Some(ModelInfo::new(128_000, 4_096)),
"gpt-4o-2024-05-13" => Some(ModelInfo {
context_window: 128_000,
max_output_tokens: 4_096,
}),
// https://platform.openai.com/docs/models/gpt-4o?snapshot=gpt-4o-2024-11-20
"gpt-4o-2024-11-20" => Some(ModelInfo::new(128_000, 16_384)),
"gpt-4o-2024-11-20" => Some(ModelInfo {
context_window: 128_000,
max_output_tokens: 16_384,
}),
// https://platform.openai.com/docs/models/gpt-3.5-turbo
"gpt-3.5-turbo" => Some(ModelInfo::new(16_385, 4_096)),
"gpt-3.5-turbo" => Some(ModelInfo {
context_window: 16_385,
max_output_tokens: 4_096,
}),
_ if slug.starts_with("gpt-5") => Some(ModelInfo::new(272_000, 128_000)),
_ if slug.starts_with("gpt-5") => Some(ModelInfo {
context_window: 272_000,
max_output_tokens: 128_000,
}),
_ if slug.starts_with("codex-") => Some(ModelInfo::new(272_000, 128_000)),
_ if slug.starts_with("codex-") => Some(ModelInfo {
context_window: 272_000,
max_output_tokens: 128_000,
}),
_ => None,
}

View File

@@ -288,9 +288,58 @@ fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
);
}
let description = match sandbox_policy {
SandboxPolicy::WorkspaceWrite {
network_access,
..
} => {
let network_line = if !network_access {
"\n - Commands that require network access"
} else {
""
};
format!(
r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
- Types of actions that require escalated privileges:
- Writing files other than those in the writable roots (see the environment context for the allowed directories){network_line}
- Examples of commands that require escalated privileges:
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#,
)
}
SandboxPolicy::DangerFullAccess => {
"Runs a shell command and returns its output.".to_string()
}
SandboxPolicy::ReadOnly => {
r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
- Types of actions that require escalated privileges:
- Writing files
- Applying patches
- Examples of commands that require escalated privileges:
- apply_patch
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#.to_string()
}
};
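For illustration only (not part of this diff), a shell call that follows the escalated-permissions guidance above might carry arguments along these lines. Only `with_escalated_permissions` and `justification` are named in the description text; the `command` field is an assumption about the rest of the tool schema:
// Hypothetical arguments for a shell call that needs escalated privileges (sketch only;
// the "command" field is assumed, it is not shown in this hunk).
let hypothetical_args = serde_json::json!({
    "command": ["cargo", "test"],
    "with_escalated_permissions": true,
    "justification": "cargo test writes build artifacts outside the writable roots."
});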
OpenAiTool::Function(ResponsesApiTool {
name: "shell".to_string(),
description: "Runs a shell command and returns its output.".to_string(),
description,
strict: false,
parameters: JsonSchema::Object {
properties,
@@ -1116,7 +1165,20 @@ mod tests {
};
assert_eq!(name, "shell");
let expected = "Runs a shell command and returns its output.";
let expected = r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
- Types of actions that require escalated privileges:
- Writing files other than those in the writable roots (see the environment context for the allowed directories)
- Commands that require network access
- Examples of commands that require escalated privileges:
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#;
assert_eq!(description, expected);
}
@@ -1131,7 +1193,21 @@ mod tests {
};
assert_eq!(name, "shell");
let expected = "Runs a shell command and returns its output.";
let expected = r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
- Types of actions that require escalated privileges:
- Writing files
- Applying patches
- Examples of commands that require escalated privileges:
- apply_patch
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#;
assert_eq!(description, expected);
}

View File

@@ -0,0 +1,21 @@
You are a summarization assistant. A conversation follows between a user and a coding-focused AI (Codex). Your task is to generate a clear summary capturing:
• High-level objective or problem being solved
• Key instructions or design decisions given by the user
• Main code actions or behaviors from the AI
• Important variables, functions, modules, or outputs discussed
• Any unresolved questions or next steps
Produce the summary in a structured format like:
**Objective:**
**User instructions:** … (bulleted)
**AI actions / code behavior:** … (bulleted)
**Important entities:** … (e.g. function names, variables, files)
**Open issues / next steps:** … (if any)
**Summary (concise):** (one or two sentences)

View File

@@ -38,9 +38,7 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
| EventMsg::AgentMessage(_)
| EventMsg::AgentReasoning(_)
| EventMsg::AgentReasoningRawContent(_)
| EventMsg::TokenCount(_)
| EventMsg::EnteredReviewMode(_)
| EventMsg::ExitedReviewMode(_) => true,
| EventMsg::TokenCount(_) => true,
EventMsg::Error(_)
| EventMsg::TaskStarted(_)
| EventMsg::TaskComplete(_)

View File

@@ -2,7 +2,6 @@
; inspired by Chrome's sandbox policy:
; https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/common.sb;l=273-319;drc=7b3962fe2e5fc9e2ee58000dc8fbf3429d84d3bd
; https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/renderer.sb;l=64;drc=7b3962fe2e5fc9e2ee58000dc8fbf3429d84d3bd
; start with closed-by-default
(deny default)
@@ -10,13 +9,7 @@
; child processes inherit the policy of their parent
(allow process-exec)
(allow process-fork)
(allow signal (target same-sandbox))
; Allow cf prefs to work.
(allow user-preference-read)
; process-info
(allow process-info* (target same-sandbox))
(allow signal (target self))
(allow file-write-data
(require-all
@@ -39,22 +32,28 @@
(sysctl-name "hw.l3cachesize_compat")
(sysctl-name "hw.logicalcpu_max")
(sysctl-name "hw.machine")
(sysctl-name "hw.memsize")
(sysctl-name "hw.ncpu")
(sysctl-name "hw.nperflevels")
; Chrome locks this CPU feature detection down a bit more tightly,
; but mostly for fingerprinting concerns, which aren't an issue for codex.
(sysctl-name-prefix "hw.optional.arm.")
(sysctl-name-prefix "hw.optional.armv8_")
(sysctl-name "hw.optional.arm.FEAT_BF16")
(sysctl-name "hw.optional.arm.FEAT_DotProd")
(sysctl-name "hw.optional.arm.FEAT_FCMA")
(sysctl-name "hw.optional.arm.FEAT_FHM")
(sysctl-name "hw.optional.arm.FEAT_FP16")
(sysctl-name "hw.optional.arm.FEAT_I8MM")
(sysctl-name "hw.optional.arm.FEAT_JSCVT")
(sysctl-name "hw.optional.arm.FEAT_LSE")
(sysctl-name "hw.optional.arm.FEAT_RDM")
(sysctl-name "hw.optional.arm.FEAT_SHA512")
(sysctl-name "hw.optional.armv8_2_sha512")
(sysctl-name "hw.memsize")
(sysctl-name "hw.pagesize")
(sysctl-name "hw.packages")
(sysctl-name "hw.pagesize_compat")
(sysctl-name "hw.pagesize")
(sysctl-name "hw.physicalcpu_max")
(sysctl-name "hw.tbfrequency_compat")
(sysctl-name "hw.vectorunit")
(sysctl-name "kern.hostname")
(sysctl-name "kern.maxfilesperproc")
(sysctl-name "kern.maxproc")
(sysctl-name "kern.osproductversion")
(sysctl-name "kern.osrelease")
(sysctl-name "kern.ostype")
@@ -64,27 +63,14 @@
(sysctl-name "kern.usrstack64")
(sysctl-name "kern.version")
(sysctl-name "sysctl.proc_cputype")
(sysctl-name "vm.loadavg")
(sysctl-name-prefix "hw.perflevel")
(sysctl-name-prefix "kern.proc.pgrp.")
(sysctl-name-prefix "kern.proc.pid.")
(sysctl-name-prefix "net.routetable.")
)
; IOKit
(allow iokit-open
(iokit-registry-entry-class "RootDomainUserClient")
)
; needed to look up user info, see https://crbug.com/792228
(allow mach-lookup
(global-name "com.apple.system.opendirectoryd.libinfo")
)
; Added on top of Chrome profile
; Needed for python multiprocessing on MacOS for the SemLock
(allow ipc-posix-sem)
; needed to look up user info, see https://crbug.com/792228
(allow mach-lookup
(global-name "com.apple.PowerManagement.control")
(global-name "com.apple.system.opendirectoryd.libinfo")
)

View File

@@ -421,7 +421,7 @@ mod tests {
.handle_request(UnifiedExecRequest {
session_id: None,
input_chunks: &["bash".to_string(), "-i".to_string()],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
let session_id = open_shell.session_id.expect("expected session_id");
@@ -441,7 +441,7 @@ mod tests {
.handle_request(UnifiedExecRequest {
session_id: Some(session_id),
input_chunks: &["echo $CODEX_INTERACTIVE_SHELL_VAR\n".to_string()],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
assert!(out_2.output.contains("codex"));
@@ -458,7 +458,7 @@ mod tests {
.handle_request(UnifiedExecRequest {
session_id: None,
input_chunks: &["/bin/bash".to_string(), "-i".to_string()],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
let session_a = shell_a.session_id.expect("expected session id");
@@ -467,7 +467,7 @@ mod tests {
.handle_request(UnifiedExecRequest {
session_id: Some(session_a),
input_chunks: &["export CODEX_INTERACTIVE_SHELL_VAR=codex\n".to_string()],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
@@ -478,7 +478,7 @@ mod tests {
"echo".to_string(),
"$CODEX_INTERACTIVE_SHELL_VAR\n".to_string(),
],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
assert!(!out_2.output.contains("codex"));
@@ -487,7 +487,7 @@ mod tests {
.handle_request(UnifiedExecRequest {
session_id: Some(session_a),
input_chunks: &["echo $CODEX_INTERACTIVE_SHELL_VAR\n".to_string()],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
assert!(out_3.output.contains("codex"));
@@ -504,7 +504,7 @@ mod tests {
.handle_request(UnifiedExecRequest {
session_id: None,
input_chunks: &["bash".to_string(), "-i".to_string()],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
let session_id = open_shell.session_id.expect("expected session id");
@@ -516,7 +516,7 @@ mod tests {
"export".to_string(),
"CODEX_INTERACTIVE_SHELL_VAR=codex\n".to_string(),
],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
@@ -574,7 +574,7 @@ mod tests {
.handle_request(UnifiedExecRequest {
session_id: None,
input_chunks: &["/bin/echo".to_string(), "codex".to_string()],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
@@ -595,7 +595,7 @@ mod tests {
.handle_request(UnifiedExecRequest {
session_id: None,
input_chunks: &["/bin/bash".to_string(), "-i".to_string()],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;
let session_id = open_shell.session_id.expect("expected session id");
@@ -604,7 +604,7 @@ mod tests {
.handle_request(UnifiedExecRequest {
session_id: Some(session_id),
input_chunks: &["exit\n".to_string()],
timeout_ms: Some(2_500),
timeout_ms: Some(1_500),
})
.await?;

View File

@@ -1,7 +0,0 @@
You were originally given instructions from a user over one or more turns. Here were the user messages:
{{ user_messages_text }}
Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:
{{ summary_text }}

View File

@@ -1,5 +0,0 @@
You have exceeded the maximum number of tokens, please stop coding and instead write a short memento message for the next agent. Your note should:
- Summarize what you finished and what still needs work. If there was a recent update_plan call, repeat its steps verbatim.
- List outstanding TODOs with file paths / line numbers so they're easy to find.
- Flag code that needs more tests (edge cases, performance, integration, etc.).
- Record any open bugs, quirks, or setup steps that will make it easier for the next agent to pick up where you left off.

View File

@@ -1,32 +1,18 @@
use codex_core::CodexAuth;
use codex_core::ContentItem;
use codex_core::ConversationManager;
use codex_core::LocalShellAction;
use codex_core::LocalShellExecAction;
use codex_core::LocalShellStatus;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::Prompt;
use codex_core::ReasoningItemContent;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::built_in_model_providers;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_protocol::mcp_protocol::ConversationId;
use codex_protocol::models::ReasoningItemReasoningSummary;
use codex_protocol::models::WebSearchAction;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
use core_test_support::wait_for_event;
use futures::StreamExt;
use serde_json::json;
use std::io::Write;
use std::sync::Arc;
use tempfile::TempDir;
use uuid::Uuid;
use wiremock::Mock;
@@ -634,147 +620,6 @@ async fn includes_user_instructions_message_in_request() {
assert_message_ends_with(&request_body["input"][1], "</environment_context>");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn azure_responses_request_includes_store_and_reasoning_ids() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
let server = MockServer::start().await;
let sse_body = concat!(
"data: {\"type\":\"response.created\",\"response\":{}}\n\n",
"data: {\"type\":\"response.completed\",\"response\":{\"id\":\"resp_1\"}}\n\n",
);
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_body, "text/event-stream");
Mock::given(method("POST"))
.and(path("/openai/responses"))
.respond_with(template)
.expect(1)
.mount(&server)
.await;
let provider = ModelProviderInfo {
name: "azure".into(),
base_url: Some(format!("{}/openai", server.uri())),
env_key: None,
env_key_instructions: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
requires_openai_auth: false,
};
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider_id = provider.name.clone();
config.model_provider = provider.clone();
let effort = config.model_reasoning_effort;
let summary = config.model_reasoning_summary;
let config = Arc::new(config);
let client = ModelClient::new(
Arc::clone(&config),
None,
provider,
effort,
summary,
ConversationId::new(),
);
let mut prompt = Prompt::default();
prompt.input.push(ResponseItem::Reasoning {
id: "reasoning-id".into(),
summary: vec![ReasoningItemReasoningSummary::SummaryText {
text: "summary".into(),
}],
content: Some(vec![ReasoningItemContent::ReasoningText {
text: "content".into(),
}]),
encrypted_content: None,
});
prompt.input.push(ResponseItem::Message {
id: Some("message-id".into()),
role: "assistant".into(),
content: vec![ContentItem::OutputText {
text: "message".into(),
}],
});
prompt.input.push(ResponseItem::WebSearchCall {
id: Some("web-search-id".into()),
status: Some("completed".into()),
action: WebSearchAction::Search {
query: "weather".into(),
},
});
prompt.input.push(ResponseItem::FunctionCall {
id: Some("function-id".into()),
name: "do_thing".into(),
arguments: "{}".into(),
call_id: "function-call-id".into(),
});
prompt.input.push(ResponseItem::LocalShellCall {
id: Some("local-shell-id".into()),
call_id: Some("local-shell-call-id".into()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".into(), "hello".into()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
});
prompt.input.push(ResponseItem::CustomToolCall {
id: Some("custom-tool-id".into()),
status: Some("completed".into()),
call_id: "custom-tool-call-id".into(),
name: "custom_tool".into(),
input: "{}".into(),
});
let mut stream = client
.stream(&prompt)
.await
.expect("responses stream to start");
while let Some(event) = stream.next().await {
if let Ok(ResponseEvent::Completed { .. }) = event {
break;
}
}
let requests = server
.received_requests()
.await
.expect("mock server collected requests");
assert_eq!(requests.len(), 1, "expected a single request");
let body: serde_json::Value = requests[0]
.body_json()
.expect("request body to be valid JSON");
assert_eq!(body["store"], serde_json::Value::Bool(true));
assert_eq!(body["stream"], serde_json::Value::Bool(true));
assert_eq!(body["input"].as_array().map(Vec::len), Some(6));
assert_eq!(body["input"][0]["id"].as_str(), Some("reasoning-id"));
assert_eq!(body["input"][1]["id"].as_str(), Some("message-id"));
assert_eq!(body["input"][2]["id"].as_str(), Some("web-search-id"));
assert_eq!(body["input"][3]["id"].as_str(), Some("function-id"));
assert_eq!(body["input"][4]["id"].as_str(), Some("local-shell-id"));
assert_eq!(body["input"][5]["id"].as_str(), Some("custom-tool-id"));
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn azure_overrides_assign_properties_used_for_responses_url() {
let existing_env_var_with_random_value = if cfg!(windows) { "USERNAME" } else { "USER" };

View File

@@ -5,7 +5,6 @@ use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::protocol::ErrorEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
@@ -16,25 +15,18 @@ use core_test_support::load_default_config_for_test;
use core_test_support::wait_for_event;
use serde_json::Value;
use tempfile::TempDir;
use wiremock::BodyPrintLimit;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::Request;
use wiremock::Respond;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
use pretty_assertions::assert_eq;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
// --- Test helpers -----------------------------------------------------------
/// Build an SSE stream body from a list of JSON events.
pub(super) fn sse(events: Vec<Value>) -> String {
fn sse(events: Vec<Value>) -> String {
use std::fmt::Write as _;
let mut out = String::new();
for ev in events {
@@ -50,7 +42,7 @@ pub(super) fn sse(events: Vec<Value>) -> String {
}
/// Convenience: SSE event for a completed response with a specific id.
pub(super) fn ev_completed(id: &str) -> Value {
fn ev_completed(id: &str) -> Value {
serde_json::json!({
"type": "response.completed",
"response": {
@@ -60,24 +52,8 @@ pub(super) fn ev_completed(id: &str) -> Value {
})
}
fn ev_completed_with_tokens(id: &str, total_tokens: u64) -> Value {
serde_json::json!({
"type": "response.completed",
"response": {
"id": id,
"usage": {
"input_tokens": total_tokens,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": total_tokens
}
}
})
}
/// Convenience: SSE event for a single assistant message output item.
pub(super) fn ev_assistant_message(id: &str, text: &str) -> Value {
fn ev_assistant_message(id: &str, text: &str) -> Value {
serde_json::json!({
"type": "response.output_item.done",
"item": {
@@ -89,25 +65,13 @@ pub(super) fn ev_assistant_message(id: &str, text: &str) -> Value {
})
}
fn ev_function_call(call_id: &str, name: &str, arguments: &str) -> Value {
serde_json::json!({
"type": "response.output_item.done",
"item": {
"type": "function_call",
"call_id": call_id,
"name": name,
"arguments": arguments
}
})
}
pub(super) fn sse_response(body: String) -> ResponseTemplate {
fn sse_response(body: String) -> ResponseTemplate {
ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(body, "text/event-stream")
}
pub(super) async fn mount_sse_once<M>(server: &MockServer, matcher: M, body: String)
async fn mount_sse_once<M>(server: &MockServer, matcher: M, body: String)
where
M: wiremock::Match + Send + Sync + 'static,
{
@@ -115,32 +79,15 @@ where
.and(path("/v1/responses"))
.and(matcher)
.respond_with(sse_response(body))
.expect(1)
.mount(server)
.await;
}
async fn start_mock_server() -> MockServer {
MockServer::builder()
.body_print_limit(BodyPrintLimit::Limited(80_000))
.start()
.await
}
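For orientation, a minimal sketch (not part of the diff) of how the SSE helpers above compose into a mocked response:
// Illustrative only: build an SSE body from events, then wrap it in a 200 event-stream template.
let body = sse(vec![
    ev_assistant_message("m1", "hello"),
    ev_completed("r1"),
]);
let template = sse_response(body);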
pub(super) const FIRST_REPLY: &str = "FIRST_REPLY";
pub(super) const SUMMARY_TEXT: &str = "SUMMARY_ONLY_CONTEXT";
pub(super) const SUMMARIZE_TRIGGER: &str = "Start Summarization";
const FIRST_REPLY: &str = "FIRST_REPLY";
const SUMMARY_TEXT: &str = "SUMMARY_ONLY_CONTEXT";
const SUMMARIZE_TRIGGER: &str = "Start Summarization";
const THIRD_USER_MSG: &str = "next turn";
const AUTO_SUMMARY_TEXT: &str = "AUTO_SUMMARY";
const FIRST_AUTO_MSG: &str = "token limit start";
const SECOND_AUTO_MSG: &str = "token limit push";
const STILL_TOO_BIG_REPLY: &str = "STILL_TOO_BIG";
const MULTI_AUTO_MSG: &str = "multi auto";
const SECOND_LARGE_REPLY: &str = "SECOND_LARGE_REPLY";
const FIRST_AUTO_SUMMARY: &str = "FIRST_AUTO_SUMMARY";
const SECOND_AUTO_SUMMARY: &str = "SECOND_AUTO_SUMMARY";
const FINAL_REPLY: &str = "FINAL_REPLY";
const DUMMY_FUNCTION_NAME: &str = "unsupported_tool";
const DUMMY_CALL_ID: &str = "call-multi-auto";
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn summarize_context_three_requests_and_instructions() {
@@ -152,7 +99,7 @@ async fn summarize_context_three_requests_and_instructions() {
}
// Set up a mock server that we can inspect after the run.
let server = start_mock_server().await;
let server = MockServer::start().await;
// SSE 1: assistant replies normally so it is recorded in history.
let sse1 = sse(vec![
@@ -197,7 +144,6 @@ async fn summarize_context_three_requests_and_instructions() {
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.model_auto_compact_token_limit = Some(200_000);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let NewConversation {
conversation: codex,
@@ -252,7 +198,7 @@ async fn summarize_context_three_requests_and_instructions() {
"summarization should override base instructions"
);
assert!(
instr2.contains("You have exceeded the maximum number of tokens"),
instr2.contains("You are a summarization assistant"),
"summarization instructions not applied"
);
@@ -263,17 +209,14 @@ async fn summarize_context_three_requests_and_instructions() {
assert_eq!(last2.get("type").unwrap().as_str().unwrap(), "message");
assert_eq!(last2.get("role").unwrap().as_str().unwrap(), "user");
let text2 = last2["content"][0]["text"].as_str().unwrap();
assert!(
text2.contains(SUMMARIZE_TRIGGER),
"expected summarize trigger, got `{text2}`"
);
assert!(text2.contains(SUMMARIZE_TRIGGER));
// Third request must contain the refreshed instructions, bridge summary message and new user msg.
// Third request must contain only the summary from step 2 as prior history plus new user msg.
let input3 = body3.get("input").and_then(|v| v.as_array()).unwrap();
println!("third request body: {body3}");
assert!(
input3.len() >= 3,
"expected refreshed context and new user message in third request"
input3.len() >= 2,
"expected summary + new user message in third request"
);
// Collect all (role, text) message tuples.
@@ -289,35 +232,24 @@ async fn summarize_context_three_requests_and_instructions() {
}
}
// No previous assistant messages should remain and the new user message is present.
// Exactly one assistant message should remain after compaction and the new user message is present.
let assistant_count = messages.iter().filter(|(r, _)| r == "assistant").count();
assert_eq!(assistant_count, 0, "assistant history should be cleared");
assert_eq!(
assistant_count, 1,
"exactly one assistant message should remain after compaction"
);
assert!(
messages
.iter()
.any(|(r, t)| r == "user" && t == THIRD_USER_MSG),
"third request should include the new user message"
);
let Some((_, bridge_text)) = messages.iter().find(|(role, text)| {
role == "user"
&& (text.contains("Here were the user messages")
|| text.contains("Here are all the user messages"))
&& text.contains(SUMMARY_TEXT)
}) else {
panic!("expected a bridge message containing the summary");
};
assert!(
bridge_text.contains("hello world"),
"bridge should capture earlier user messages"
!messages.iter().any(|(_, t)| t.contains("hello world")),
"third request should not include the original user input"
);
assert!(
!bridge_text.contains(SUMMARIZE_TRIGGER),
"bridge text should not echo the summarize trigger"
);
assert!(
!messages
.iter()
.any(|(_, text)| text.contains(SUMMARIZE_TRIGGER)),
!messages.iter().any(|(_, t)| t.contains(SUMMARIZE_TRIGGER)),
"third request should not include the summarize trigger"
);
@@ -326,7 +258,6 @@ async fn summarize_context_three_requests_and_instructions() {
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;
// Verify rollout contains APITurn entries for each API call and a Compacted entry.
println!("rollout path: {}", rollout_path.display());
let text = std::fs::read_to_string(&rollout_path).unwrap_or_else(|e| {
panic!(
"failed to read rollout file {}: {e}",
@@ -365,506 +296,3 @@ async fn summarize_context_three_requests_and_instructions() {
"expected a Compacted entry containing the summarizer output"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compact_runs_after_token_limit_hit() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
let server = start_mock_server().await;
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 70_000),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", "SECOND_REPLY"),
ev_completed_with_tokens("r2", 330_000),
]);
let sse3 = sse(vec![
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
ev_completed_with_tokens("r3", 200),
]);
let first_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(FIRST_AUTO_MSG)
&& !body.contains(SECOND_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(first_matcher)
.respond_with(sse_response(sse1))
.mount(&server)
.await;
let second_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(SECOND_AUTO_MSG)
&& body.contains(FIRST_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(second_matcher)
.respond_with(sse_response(sse2))
.mount(&server)
.await;
let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(third_matcher)
.respond_with(sse_response(sse3))
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.model_auto_compact_token_limit = Some(200_000);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
.new_conversation(config)
.await
.unwrap()
.conversation;
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: FIRST_AUTO_MSG.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: SECOND_AUTO_MSG.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 3, "auto compact should add a third request");
let body3 = requests[2].body_json::<serde_json::Value>().unwrap();
let instructions = body3
.get("instructions")
.and_then(|v| v.as_str())
.unwrap_or_default();
assert!(
instructions.contains("You have exceeded the maximum number of tokens"),
"auto compact should reuse summarization instructions"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compact_persists_rollout_entries() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
let server = start_mock_server().await;
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 70_000),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", "SECOND_REPLY"),
ev_completed_with_tokens("r2", 330_000),
]);
let sse3 = sse(vec![
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
ev_completed_with_tokens("r3", 200),
]);
let first_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(FIRST_AUTO_MSG)
&& !body.contains(SECOND_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(first_matcher)
.respond_with(sse_response(sse1))
.mount(&server)
.await;
let second_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(SECOND_AUTO_MSG)
&& body.contains(FIRST_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(second_matcher)
.respond_with(sse_response(sse2))
.mount(&server)
.await;
let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(third_matcher)
.respond_with(sse_response(sse3))
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let NewConversation {
conversation: codex,
session_configured,
..
} = conversation_manager.new_conversation(config).await.unwrap();
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: FIRST_AUTO_MSG.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: SECOND_AUTO_MSG.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex.submit(Op::Shutdown).await.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;
let rollout_path = session_configured.rollout_path;
let text = std::fs::read_to_string(&rollout_path).unwrap_or_else(|e| {
panic!(
"failed to read rollout file {}: {e}",
rollout_path.display()
)
});
let mut turn_context_count = 0usize;
for line in text.lines() {
let trimmed = line.trim();
if trimmed.is_empty() {
continue;
}
let Ok(entry): Result<RolloutLine, _> = serde_json::from_str(trimmed) else {
continue;
};
match entry.item {
RolloutItem::TurnContext(_) => {
turn_context_count += 1;
}
RolloutItem::Compacted(_) => {}
_ => {}
}
}
assert!(
turn_context_count >= 2,
"expected at least two turn context entries, got {turn_context_count}"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compact_stops_after_failed_attempt() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
let server = start_mock_server().await;
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 500),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", SUMMARY_TEXT),
ev_completed_with_tokens("r2", 50),
]);
let sse3 = sse(vec![
ev_assistant_message("m3", STILL_TOO_BIG_REPLY),
ev_completed_with_tokens("r3", 500),
]);
let first_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(FIRST_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(first_matcher)
.respond_with(sse_response(sse1.clone()))
.mount(&server)
.await;
let second_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(second_matcher)
.respond_with(sse_response(sse2.clone()))
.mount(&server)
.await;
let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
!body.contains("You have exceeded the maximum number of tokens")
&& body.contains(SUMMARY_TEXT)
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(third_matcher)
.respond_with(sse_response(sse3.clone()))
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.model_auto_compact_token_limit = Some(200);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
.new_conversation(config)
.await
.unwrap()
.conversation;
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: FIRST_AUTO_MSG.into(),
}],
})
.await
.unwrap();
let error_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Error(_))).await;
let EventMsg::Error(ErrorEvent { message }) = error_event else {
panic!("expected error event");
};
assert!(
message.contains("limit"),
"error message should include limit information: {message}"
);
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.unwrap();
assert_eq!(
requests.len(),
3,
"auto compact should attempt at most one summarization before erroring"
);
let last_body = requests[2].body_json::<serde_json::Value>().unwrap();
let instructions = last_body
.get("instructions")
.and_then(|v| v.as_str())
.unwrap_or_default();
assert!(
!instructions.contains("You have exceeded the maximum number of tokens"),
"third request should be the follow-up turn, not another summarization"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_events() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
let server = start_mock_server().await;
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 500),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", FIRST_AUTO_SUMMARY),
ev_completed_with_tokens("r2", 50),
]);
let sse3 = sse(vec![
ev_function_call(DUMMY_CALL_ID, DUMMY_FUNCTION_NAME, "{}"),
ev_completed_with_tokens("r3", 150),
]);
let sse4 = sse(vec![
ev_assistant_message("m4", SECOND_LARGE_REPLY),
ev_completed_with_tokens("r4", 450),
]);
let sse5 = sse(vec![
ev_assistant_message("m5", SECOND_AUTO_SUMMARY),
ev_completed_with_tokens("r5", 60),
]);
let sse6 = sse(vec![
ev_assistant_message("m6", FINAL_REPLY),
ev_completed_with_tokens("r6", 120),
]);
#[derive(Clone)]
struct SeqResponder {
bodies: Arc<Vec<String>>,
calls: Arc<AtomicUsize>,
requests: Arc<Mutex<Vec<Vec<u8>>>>,
}
impl SeqResponder {
fn new(bodies: Vec<String>) -> Self {
Self {
bodies: Arc::new(bodies),
calls: Arc::new(AtomicUsize::new(0)),
requests: Arc::new(Mutex::new(Vec::new())),
}
}
fn recorded_requests(&self) -> Vec<Vec<u8>> {
self.requests.lock().unwrap().clone()
}
}
impl Respond for SeqResponder {
fn respond(&self, req: &Request) -> ResponseTemplate {
let idx = self.calls.fetch_add(1, Ordering::SeqCst);
self.requests.lock().unwrap().push(req.body.clone());
let body = self
.bodies
.get(idx)
.unwrap_or_else(|| panic!("unexpected request index {idx}"))
.clone();
ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(body, "text/event-stream")
}
}
let responder = SeqResponder::new(vec![sse1, sse2, sse3, sse4, sse5, sse6]);
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(responder.clone())
.expect(6)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.model_auto_compact_token_limit = Some(200);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
.new_conversation(config)
.await
.unwrap()
.conversation;
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: MULTI_AUTO_MSG.into(),
}],
})
.await
.unwrap();
loop {
let event = codex.next_event().await.unwrap();
if let EventMsg::TaskComplete(_) = &event.msg
&& !event.id.starts_with("auto-compact-")
{
break;
}
}
let request_bodies: Vec<String> = responder
.recorded_requests()
.into_iter()
.map(|body| String::from_utf8(body).unwrap_or_default())
.collect();
assert_eq!(
request_bodies.len(),
6,
"expected six requests including two auto compactions"
);
assert!(
request_bodies[0].contains(MULTI_AUTO_MSG),
"first request should contain the user input"
);
assert!(
request_bodies[1].contains("You have exceeded the maximum number of tokens"),
"first auto compact request should use summarization instructions"
);
assert!(
request_bodies[3].contains(&format!("unsupported call: {DUMMY_FUNCTION_NAME}")),
"function call output should be sent before the second auto compact"
);
assert!(
request_bodies[4].contains("You have exceeded the maximum number of tokens"),
"second auto compact request should reuse summarization instructions"
);
}

View File

@@ -1,838 +0,0 @@
#![allow(clippy::expect_used)]
//! Integration tests that cover compacting, resuming, and forking conversations.
//!
//! Each test sets up a mocked SSE conversation and drives the conversation through
//! a specific sequence of operations. After every operation we capture the
//! request payload that Codex would send to the model and assert that the
//! model-visible history matches the expected sequence of messages.
use super::compact::FIRST_REPLY;
use super::compact::SUMMARIZE_TRIGGER;
use super::compact::SUMMARY_TEXT;
use super::compact::ev_assistant_message;
use super::compact::ev_completed;
use super::compact::mount_sse_once;
use super::compact::sse;
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use core_test_support::load_default_config_for_test;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use std::sync::Arc;
use tempfile::TempDir;
use wiremock::MockServer;
const AFTER_SECOND_RESUME: &str = "AFTER_SECOND_RESUME";
fn network_disabled() -> bool {
std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok()
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
/// Scenario: compact an initial conversation, resume it, fork one turn back, and
/// ensure the model-visible history matches expectations at each request.
async fn compact_resume_and_fork_preserve_model_history_view() {
if network_disabled() {
println!("Skipping test because network is disabled in this sandbox");
return;
}
// 1. Arrange mocked SSE responses for the initial compact/resume/fork flow.
let server = MockServer::start().await;
mount_initial_flow(&server).await;
// 2. Start a new conversation and drive it through the compact/resume/fork steps.
let (_home, config, manager, base) = start_test_conversation(&server).await;
user_turn(&base, "hello world").await;
compact_conversation(&base).await;
user_turn(&base, "AFTER_COMPACT").await;
let base_path = fetch_conversation_path(&base, "base conversation").await;
assert!(
base_path.exists(),
"compact+resume test expects base path {base_path:?} to exist",
);
let resumed = resume_conversation(&manager, &config, base_path).await;
user_turn(&resumed, "AFTER_RESUME").await;
let resumed_path = fetch_conversation_path(&resumed, "resumed conversation").await;
assert!(
resumed_path.exists(),
"compact+resume test expects resumed path {resumed_path:?} to exist",
);
let forked = fork_conversation(&manager, &config, resumed_path, 4).await;
user_turn(&forked, "AFTER_FORK").await;
// 3. Capture the requests to the model and validate the history slices.
let requests = gather_request_bodies(&server).await;
// input after compact is a prefix of input after resume/fork
let input_after_compact = json!(requests[requests.len() - 3]["input"]);
let input_after_resume = json!(requests[requests.len() - 2]["input"]);
let input_after_fork = json!(requests[requests.len() - 1]["input"]);
let compact_arr = input_after_compact
.as_array()
.expect("input after compact should be an array");
let resume_arr = input_after_resume
.as_array()
.expect("input after resume should be an array");
let fork_arr = input_after_fork
.as_array()
.expect("input after fork should be an array");
assert!(
compact_arr.len() <= resume_arr.len(),
"after-resume input should have at least as many items as after-compact",
);
assert_eq!(compact_arr.as_slice(), &resume_arr[..compact_arr.len()]);
eprint!(
"len of compact: {}, len of fork: {}",
compact_arr.len(),
fork_arr.len()
);
eprintln!("input_after_fork:{}", json!(input_after_fork));
assert!(
compact_arr.len() <= fork_arr.len(),
"after-fork input should have at least as many items as after-compact",
);
assert_eq!(compact_arr.as_slice(), &fork_arr[..compact_arr.len()]);
let prompt = requests[0]["instructions"]
.as_str()
.unwrap_or_default()
.to_string();
let user_instructions = requests[0]["input"][0]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
let environment_context = requests[0]["input"][1]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
let tool_calls = json!(requests[0]["tools"].as_array());
let prompt_cache_key = requests[0]["prompt_cache_key"]
.as_str()
.unwrap_or_default()
.to_string();
let fork_prompt_cache_key = requests[requests.len() - 1]["prompt_cache_key"]
.as_str()
.unwrap_or_default()
.to_string();
let user_turn_1 = json!(
{
"model": "gpt-5",
"instructions": prompt,
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "hello world"
}
]
}
],
"tools": tool_calls,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
},
"store": false,
"stream": true,
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
});
let compact_1 = json!(
{
"model": "gpt-5",
"instructions": "You have exceeded the maximum number of tokens, please stop coding and instead write a short memento message for the next agent. Your note should:
- Summarize what you finished and what still needs work. If there was a recent update_plan call, repeat its steps verbatim.
- List outstanding TODOs with file paths / line numbers so they're easy to find.
- Flag code that needs more tests (edge cases, performance, integration, etc.).
- Record any open bugs, quirks, or setup steps that will make it easier for the next agent to pick up where you left off.",
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "hello world"
}
]
},
{
"type": "message",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "FIRST_REPLY"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "Start Summarization"
}
]
}
],
"tools": [],
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
},
"store": false,
"stream": true,
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
});
let user_turn_2_after_compact = json!(
{
"model": "gpt-5",
"instructions": prompt,
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:
hello world
Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:
SUMMARY_ONLY_CONTEXT"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_COMPACT"
}
]
}
],
"tools": tool_calls,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
},
"store": false,
"stream": true,
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
});
let user_turn_3_after_resume = json!(
{
"model": "gpt-5",
"instructions": prompt,
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:
hello world
Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:
SUMMARY_ONLY_CONTEXT"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_COMPACT"
}
]
},
{
"type": "message",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "AFTER_COMPACT_REPLY"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_RESUME"
}
]
}
],
"tools": tool_calls,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
},
"store": false,
"stream": true,
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
});
let user_turn_3_after_fork = json!(
{
"model": "gpt-5",
"instructions": prompt,
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:
hello world
Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:
SUMMARY_ONLY_CONTEXT"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_COMPACT"
}
]
},
{
"type": "message",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "AFTER_COMPACT_REPLY"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_FORK"
}
]
}
],
"tools": tool_calls,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
},
"store": false,
"stream": true,
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": fork_prompt_cache_key
});
let expected = json!([
user_turn_1,
compact_1,
user_turn_2_after_compact,
user_turn_3_after_resume,
user_turn_3_after_fork
]);
assert_eq!(requests.len(), 5);
assert_eq!(json!(requests), expected);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
/// Scenario: after the forked branch is compacted, resuming again should reuse
/// the compacted history and only append the new user message.
async fn compact_resume_after_second_compaction_preserves_history() {
if network_disabled() {
println!("Skipping test because network is disabled in this sandbox");
return;
}
// 1. Arrange mocked SSE responses for the initial flow plus the second compact.
let server = MockServer::start().await;
mount_initial_flow(&server).await;
mount_second_compact_flow(&server).await;
// 2. Drive the conversation through compact -> resume -> fork -> compact -> resume.
let (_home, config, manager, base) = start_test_conversation(&server).await;
user_turn(&base, "hello world").await;
compact_conversation(&base).await;
user_turn(&base, "AFTER_COMPACT").await;
let base_path = fetch_conversation_path(&base, "base conversation").await;
assert!(
base_path.exists(),
"second compact test expects base path {base_path:?} to exist",
);
let resumed = resume_conversation(&manager, &config, base_path).await;
user_turn(&resumed, "AFTER_RESUME").await;
let resumed_path = fetch_conversation_path(&resumed, "resumed conversation").await;
assert!(
resumed_path.exists(),
"second compact test expects resumed path {resumed_path:?} to exist",
);
let forked = fork_conversation(&manager, &config, resumed_path, 1).await;
user_turn(&forked, "AFTER_FORK").await;
compact_conversation(&forked).await;
user_turn(&forked, "AFTER_COMPACT_2").await;
let forked_path = fetch_conversation_path(&forked, "forked conversation").await;
assert!(
forked_path.exists(),
"second compact test expects forked path {forked_path:?} to exist",
);
let resumed_again = resume_conversation(&manager, &config, forked_path).await;
user_turn(&resumed_again, AFTER_SECOND_RESUME).await;
let requests = gather_request_bodies(&server).await;
let input_after_compact = json!(requests[requests.len() - 2]["input"]);
let input_after_resume = json!(requests[requests.len() - 1]["input"]);
// test input after compact before resume is the same as input after resume
let compact_input_array = input_after_compact
.as_array()
.expect("input after compact should be an array");
let resume_input_array = input_after_resume
.as_array()
.expect("input after resume should be an array");
assert!(
compact_input_array.len() <= resume_input_array.len(),
"after-resume input should have at least as many items as after-compact"
);
assert_eq!(
compact_input_array.as_slice(),
&resume_input_array[..compact_input_array.len()]
);
// Hard-coded expectation for the final request after the second compaction.
let prompt = requests[0]["instructions"]
.as_str()
.unwrap_or_default()
.to_string();
let user_instructions = requests[0]["input"][0]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
let environment_instructions = requests[0]["input"][1]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
let expected = json!([
{
"instructions": prompt,
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:\n\nAFTER_FORK\n\nAnother language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:\n\nSUMMARY_ONLY_CONTEXT"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_COMPACT_2"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_SECOND_RESUME"
}
]
}
],
}
]);
let last_request_after_2_compacts = json!([{
"instructions": requests[requests.len() -1]["instructions"],
"input": requests[requests.len() -1]["input"],
}]);
assert_eq!(expected, last_request_after_2_compacts);
}
fn normalize_line_endings(value: &mut Value) {
match value {
Value::String(text) => {
if text.contains('\r') {
*text = text.replace("\r\n", "\n").replace('\r', "\n");
}
}
Value::Array(items) => {
for item in items {
normalize_line_endings(item);
}
}
Value::Object(map) => {
for item in map.values_mut() {
normalize_line_endings(item);
}
}
_ => {}
}
}
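As a quick illustration of the helper above, a minimal sketch (using the same `json!` macro these tests already import) of how a CRLF-laden request body is normalized:
let mut body = json!({ "text": "line one\r\nline two\r" });
normalize_line_endings(&mut body);
// Both the CRLF pair and the stray CR collapse to plain LF.
assert_eq!(body, json!({ "text": "line one\nline two\n" }));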
async fn gather_request_bodies(server: &MockServer) -> Vec<Value> {
server
.received_requests()
.await
.expect("mock server should not fail")
.into_iter()
.map(|req| {
let mut value = req.body_json::<Value>().expect("valid JSON body");
normalize_line_endings(&mut value);
value
})
.collect()
}
async fn mount_initial_flow(server: &MockServer) {
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed("r1"),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", SUMMARY_TEXT),
ev_completed("r2"),
]);
let sse3 = sse(vec![
ev_assistant_message("m3", "AFTER_COMPACT_REPLY"),
ev_completed("r3"),
]);
let sse4 = sse(vec![ev_completed("r4")]);
let sse5 = sse(vec![ev_completed("r5")]);
let match_first = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("\"text\":\"hello world\"")
&& !body.contains(&format!("\"text\":\"{SUMMARIZE_TRIGGER}\""))
&& !body.contains("\"text\":\"AFTER_COMPACT\"")
&& !body.contains("\"text\":\"AFTER_RESUME\"")
&& !body.contains("\"text\":\"AFTER_FORK\"")
};
mount_sse_once(server, match_first, sse1).await;
let match_compact = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(&format!("\"text\":\"{SUMMARIZE_TRIGGER}\""))
};
mount_sse_once(server, match_compact, sse2).await;
let match_after_compact = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("\"text\":\"AFTER_COMPACT\"")
&& !body.contains("\"text\":\"AFTER_RESUME\"")
&& !body.contains("\"text\":\"AFTER_FORK\"")
};
mount_sse_once(server, match_after_compact, sse3).await;
let match_after_resume = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("\"text\":\"AFTER_RESUME\"")
};
mount_sse_once(server, match_after_resume, sse4).await;
let match_after_fork = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("\"text\":\"AFTER_FORK\"")
};
mount_sse_once(server, match_after_fork, sse5).await;
}
async fn mount_second_compact_flow(server: &MockServer) {
let sse6 = sse(vec![
ev_assistant_message("m4", SUMMARY_TEXT),
ev_completed("r6"),
]);
let sse7 = sse(vec![ev_completed("r7")]);
let match_second_compact = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(&format!("\"text\":\"{SUMMARIZE_TRIGGER}\"")) && body.contains("AFTER_FORK")
};
mount_sse_once(server, match_second_compact, sse6).await;
let match_after_second_resume = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(&format!("\"text\":\"{AFTER_SECOND_RESUME}\""))
};
mount_sse_once(server, match_after_second_resume, sse7).await;
}
async fn start_test_conversation(
server: &MockServer,
) -> (TempDir, Config, ConversationManager, Arc<CodexConversation>) {
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let home = TempDir::new().expect("create temp dir");
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
let manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let NewConversation { conversation, .. } = manager
.new_conversation(config.clone())
.await
.expect("create conversation");
(home, config, manager, conversation)
}
async fn user_turn(conversation: &Arc<CodexConversation>, text: &str) {
conversation
.submit(Op::UserInput {
items: vec![InputItem::Text { text: text.into() }],
})
.await
.expect("submit user turn");
wait_for_event(conversation, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}
async fn compact_conversation(conversation: &Arc<CodexConversation>) {
conversation
.submit(Op::Compact)
.await
.expect("compact conversation");
wait_for_event(conversation, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}
async fn fetch_conversation_path(
conversation: &Arc<CodexConversation>,
context: &str,
) -> std::path::PathBuf {
conversation
.submit(Op::GetPath)
.await
.expect("request conversation path");
match wait_for_event(conversation, |ev| {
matches!(ev, EventMsg::ConversationPath(_))
})
.await
{
EventMsg::ConversationPath(ConversationPathResponseEvent { path, .. }) => path,
_ => panic!("expected ConversationPath event for {context}"),
}
}
async fn resume_conversation(
manager: &ConversationManager,
config: &Config,
path: std::path::PathBuf,
) -> Arc<CodexConversation> {
let auth_manager =
codex_core::AuthManager::from_auth_for_testing(CodexAuth::from_api_key("dummy"));
let NewConversation { conversation, .. } = manager
.resume_conversation_from_rollout(config.clone(), path, auth_manager)
.await
.expect("resume conversation");
conversation
}
async fn fork_conversation(
manager: &ConversationManager,
config: &Config,
path: std::path::PathBuf,
back_steps: usize,
) -> Arc<CodexConversation> {
let NewConversation { conversation, .. } = manager
.fork_conversation(back_steps, config.clone(), path)
.await
.expect("fork conversation");
conversation
}

View File

@@ -104,8 +104,7 @@ async fn fork_conversation_twice_drops_to_first_message() {
items
};
// Compute expected prefixes after each fork by truncating base rollout
// strictly before the nth user input (0-based).
// Compute expected prefixes after each fork by truncating base rollout at nth-from-last user input.
let base_items = read_items(&base_path);
let find_user_input_positions = |items: &[RolloutItem]| -> Vec<usize> {
let mut pos = Vec::new();
@@ -127,8 +126,11 @@ async fn fork_conversation_twice_drops_to_first_message() {
};
let user_inputs = find_user_input_positions(&base_items);
// After cutting at nth user input (n=1 → second user message), cut strictly before that input.
let cut1 = user_inputs.get(1).copied().unwrap_or(0);
// After dropping last user input (n=1), cut strictly before that input if present, else empty.
let cut1 = user_inputs
.get(user_inputs.len().saturating_sub(1))
.copied()
.unwrap_or(0);
let expected_after_first: Vec<RolloutItem> = base_items[..cut1].to_vec();
// After dropping again (n=1 on fork1), compute expected relative to fork1's rollout.
@@ -159,12 +161,12 @@ async fn fork_conversation_twice_drops_to_first_message() {
serde_json::to_value(&expected_after_first).unwrap()
);
// Fork again with n=0 → drops the (new) last user message, leaving only the first.
// Fork again with n=1 → drops the (new) last user message, leaving only the first.
let NewConversation {
conversation: codex_fork2,
..
} = conversation_manager
.fork_conversation(0, config_for_fork.clone(), fork1_path.clone())
.fork_conversation(1, config_for_fork.clone(), fork1_path.clone())
.await
.expect("fork 2");

View File

@@ -3,14 +3,11 @@
mod cli_stream;
mod client;
mod compact;
mod compact_resume_fork;
mod exec;
mod exec_stream_events;
mod fork_conversation;
mod live_cli;
mod model_overrides;
mod prompt_caching;
mod review;
mod seatbelt;
mod stream_error_allows_next_turn;
mod stream_no_completed;

View File

@@ -1,92 +0,0 @@
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol_config_types::ReasoningEffort;
use core_test_support::load_default_config_for_test;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
const CONFIG_TOML: &str = "config.toml";
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn override_turn_context_does_not_persist_when_config_exists() {
let codex_home = TempDir::new().unwrap();
let config_path = codex_home.path().join(CONFIG_TOML);
let initial_contents = "model = \"gpt-4o\"\n";
tokio::fs::write(&config_path, initial_contents)
.await
.expect("seed config.toml");
let mut config = load_default_config_for_test(&codex_home);
config.model = "gpt-4o".to_string();
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create conversation")
.conversation;
codex
.submit(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::High)),
summary: None,
})
.await
.expect("submit override");
codex.submit(Op::Shutdown).await.expect("request shutdown");
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;
let contents = tokio::fs::read_to_string(&config_path)
.await
.expect("read config.toml after override");
assert_eq!(contents, initial_contents);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn override_turn_context_does_not_create_config_file() {
let codex_home = TempDir::new().unwrap();
let config_path = codex_home.path().join(CONFIG_TOML);
assert!(
!config_path.exists(),
"test setup should start without config"
);
let config = load_default_config_for_test(&codex_home);
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create conversation")
.conversation;
codex
.submit(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::Medium)),
summary: None,
})
.await
.expect("submit override");
codex.submit(Op::Shutdown).await.expect("request shutdown");
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;
assert!(
!config_path.exists(),
"override should not create config.toml"
);
}

View File

@@ -387,7 +387,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
exclude_slash_tmp: true,
}),
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::High)),
effort: Some(ReasoningEffort::High),
summary: Some(ReasoningSummary::Detailed),
})
.await
@@ -519,7 +519,7 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
exclude_slash_tmp: true,
},
model: "o3".to_string(),
effort: Some(ReasoningEffort::High),
effort: ReasoningEffort::High,
summary: ReasoningSummary::Detailed,
})
.await

View File

@@ -1,542 +0,0 @@
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::ReviewCodeLocation;
use codex_core::protocol::ReviewFinding;
use codex_core::protocol::ReviewLineRange;
use codex_core::protocol::ReviewOutputEvent;
use codex_core::protocol::ReviewRequest;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id_from_str;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use std::path::PathBuf;
use std::sync::Arc;
use tempfile::TempDir;
use tokio::io::AsyncWriteExt as _;
use uuid::Uuid;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
/// Verify that submitting `Op::Review` spawns a child task and emits
/// EnteredReviewMode -> ExitedReviewMode(Some(..)) -> TaskComplete
/// in that order when the model returns a structured review JSON payload.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_op_emits_lifecycle_and_review_output() {
// Skip under Codex sandbox network restrictions.
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Start mock Responses API server. Return a single assistant message whose
// text is a JSON-encoded ReviewOutputEvent.
let review_json = serde_json::json!({
"findings": [
{
"title": "Prefer Stylize helpers",
"body": "Use .dim()/.bold() chaining instead of manual Style where possible.",
"confidence_score": 0.9,
"priority": 1,
"code_location": {
"absolute_file_path": "/tmp/file.rs",
"line_range": {"start": 10, "end": 20}
}
}
],
"overall_correctness": "good",
"overall_explanation": "All good with some improvements suggested.",
"overall_confidence_score": 0.8
})
.to_string();
let sse_template = r#"[
{"type":"response.output_item.done", "item":{
"type":"message", "role":"assistant",
"content":[{"type":"output_text","text":__REVIEW__}]
}},
{"type":"response.completed", "response": {"id": "__ID__"}}
]"#;
let review_json_escaped = serde_json::to_string(&review_json).unwrap();
let sse_raw = sse_template.replace("__REVIEW__", &review_json_escaped);
let server = start_responses_server_with_sse(&sse_raw, 1).await;
let codex_home = TempDir::new().unwrap();
let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await;
// Submit review request.
codex
.submit(Op::Review {
review_request: ReviewRequest {
prompt: "Please review my changes".to_string(),
user_facing_hint: "my changes".to_string(),
},
})
.await
.unwrap();
// Verify lifecycle: Entered -> Exited(Some(review)) -> TaskComplete.
let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
let closed = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExitedReviewMode(_))).await;
let review = match closed {
EventMsg::ExitedReviewMode(Some(r)) => r,
other => panic!("expected ExitedReviewMode(Some(..)), got {other:?}"),
};
// Deep compare full structure using PartialEq (floats are f32 on both sides).
let expected = ReviewOutputEvent {
findings: vec![ReviewFinding {
title: "Prefer Stylize helpers".to_string(),
body: "Use .dim()/.bold() chaining instead of manual Style where possible.".to_string(),
confidence_score: 0.9,
priority: 1,
code_location: ReviewCodeLocation {
absolute_file_path: PathBuf::from("/tmp/file.rs"),
line_range: ReviewLineRange { start: 10, end: 20 },
},
}],
overall_correctness: "good".to_string(),
overall_explanation: "All good with some improvements suggested.".to_string(),
overall_confidence_score: 0.8,
};
assert_eq!(expected, review);
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
server.verify().await;
}
/// When the model returns plain text that is not JSON, ensure the child
/// lifecycle still occurs and the plain text is surfaced via
/// ExitedReviewMode(Some(..)) as the overall_explanation.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_op_with_plain_text_emits_review_fallback() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
let sse_raw = r#"[
{"type":"response.output_item.done", "item":{
"type":"message", "role":"assistant",
"content":[{"type":"output_text","text":"just plain text"}]
}},
{"type":"response.completed", "response": {"id": "__ID__"}}
]"#;
let server = start_responses_server_with_sse(sse_raw, 1).await;
let codex_home = TempDir::new().unwrap();
let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await;
codex
.submit(Op::Review {
review_request: ReviewRequest {
prompt: "Plain text review".to_string(),
user_facing_hint: "plain text review".to_string(),
},
})
.await
.unwrap();
let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
let closed = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExitedReviewMode(_))).await;
let review = match closed {
EventMsg::ExitedReviewMode(Some(r)) => r,
other => panic!("expected ExitedReviewMode(Some(..)), got {other:?}"),
};
// Expect a structured fallback carrying the plain text.
let expected = ReviewOutputEvent {
overall_explanation: "just plain text".to_string(),
..Default::default()
};
assert_eq!(expected, review);
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
server.verify().await;
}
/// When the model returns structured JSON in a review, ensure no AgentMessage
/// is emitted; the UI consumes the structured result via ExitedReviewMode.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_does_not_emit_agent_message_on_structured_output() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
let review_json = serde_json::json!({
"findings": [
{
"title": "Example",
"body": "Structured review output.",
"confidence_score": 0.5,
"priority": 1,
"code_location": {
"absolute_file_path": "/tmp/file.rs",
"line_range": {"start": 1, "end": 2}
}
}
],
"overall_correctness": "ok",
"overall_explanation": "ok",
"overall_confidence_score": 0.5
})
.to_string();
let sse_template = r#"[
{"type":"response.output_item.done", "item":{
"type":"message", "role":"assistant",
"content":[{"type":"output_text","text":__REVIEW__}]
}},
{"type":"response.completed", "response": {"id": "__ID__"}}
]"#;
let review_json_escaped = serde_json::to_string(&review_json).unwrap();
let sse_raw = sse_template.replace("__REVIEW__", &review_json_escaped);
let server = start_responses_server_with_sse(&sse_raw, 1).await;
let codex_home = TempDir::new().unwrap();
let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await;
codex
.submit(Op::Review {
review_request: ReviewRequest {
prompt: "check structured".to_string(),
user_facing_hint: "check structured".to_string(),
},
})
.await
.unwrap();
// Drain events until TaskComplete; ensure none are AgentMessage.
use tokio::time::Duration;
use tokio::time::timeout;
let mut saw_entered = false;
let mut saw_exited = false;
loop {
let ev = timeout(Duration::from_secs(5), codex.next_event())
.await
.expect("timeout waiting for event")
.expect("stream ended unexpectedly");
match ev.msg {
EventMsg::TaskComplete(_) => break,
EventMsg::AgentMessage(_) => {
panic!("unexpected AgentMessage during review with structured output")
}
EventMsg::EnteredReviewMode(_) => saw_entered = true,
EventMsg::ExitedReviewMode(_) => saw_exited = true,
_ => {}
}
}
assert!(saw_entered && saw_exited, "missing review lifecycle events");
server.verify().await;
}
/// Ensure that when a custom `review_model` is set in the config, the review
/// request uses that model (and not the main chat model).
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_uses_custom_review_model_from_config() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Minimal stream: just a completed event
let sse_raw = r#"[
{"type":"response.completed", "response": {"id": "__ID__"}}
]"#;
let server = start_responses_server_with_sse(sse_raw, 1).await;
let codex_home = TempDir::new().unwrap();
// Choose a review model different from the main model; ensure it is used.
let codex = new_conversation_for_server(&server, &codex_home, |cfg| {
cfg.model = "gpt-4.1".to_string();
cfg.review_model = "gpt-5".to_string();
})
.await;
codex
.submit(Op::Review {
review_request: ReviewRequest {
prompt: "use custom model".to_string(),
user_facing_hint: "use custom model".to_string(),
},
})
.await
.unwrap();
// Wait for completion
let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
let _closed = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExitedReviewMode(None))).await;
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Assert the request body model equals the configured review model
let request = &server.received_requests().await.unwrap()[0];
let body = request.body_json::<serde_json::Value>().unwrap();
assert_eq!(body["model"].as_str().unwrap(), "gpt-5");
server.verify().await;
}
/// When a review session begins, it must not prepend prior chat history from
/// the parent session. The request `input` should contain only the review
/// prompt from the user.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_input_isolated_from_parent_history() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Mock server for the single review request
let sse_raw = r#"[
{"type":"response.completed", "response": {"id": "__ID__"}}
]"#;
let server = start_responses_server_with_sse(sse_raw, 1).await;
// Seed a parent session history via resume file with both user + assistant items.
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let session_file = codex_home.path().join("resume.jsonl");
{
let mut f = tokio::fs::File::create(&session_file).await.unwrap();
let convo_id = Uuid::new_v4();
// Proper session_meta line (enveloped) with a conversation id
let meta_line = serde_json::json!({
"timestamp": "2024-01-01T00:00:00.000Z",
"type": "session_meta",
"payload": {
"id": convo_id,
"timestamp": "2024-01-01T00:00:00Z",
"instructions": null,
"cwd": ".",
"originator": "test_originator",
"cli_version": "test_version"
}
});
f.write_all(format!("{meta_line}\n").as_bytes())
.await
.unwrap();
// Prior user message (enveloped response_item)
let user = codex_protocol::models::ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![codex_protocol::models::ContentItem::InputText {
text: "parent: earlier user message".to_string(),
}],
};
let user_json = serde_json::to_value(&user).unwrap();
let user_line = serde_json::json!({
"timestamp": "2024-01-01T00:00:01.000Z",
"type": "response_item",
"payload": user_json
});
f.write_all(format!("{user_line}\n").as_bytes())
.await
.unwrap();
// Prior assistant message (enveloped response_item)
let assistant = codex_protocol::models::ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![codex_protocol::models::ContentItem::OutputText {
text: "parent: assistant reply".to_string(),
}],
};
let assistant_json = serde_json::to_value(&assistant).unwrap();
let assistant_line = serde_json::json!({
"timestamp": "2024-01-01T00:00:02.000Z",
"type": "response_item",
"payload": assistant_json
});
f.write_all(format!("{assistant_line}\n").as_bytes())
.await
.unwrap();
}
config.experimental_resume = Some(session_file);
let codex = new_conversation_for_server(&server, &codex_home, |cfg| {
// apply resume file
cfg.experimental_resume = config.experimental_resume.clone();
})
.await;
// Submit review request; it must start fresh (no parent history in `input`).
let review_prompt = "Please review only this".to_string();
codex
.submit(Op::Review {
review_request: ReviewRequest {
prompt: review_prompt.clone(),
user_facing_hint: review_prompt.clone(),
},
})
.await
.unwrap();
let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
let _closed = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExitedReviewMode(None))).await;
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Assert the request `input` contains only the single review user message.
let request = &server.received_requests().await.unwrap()[0];
let body = request.body_json::<serde_json::Value>().unwrap();
let expected_input = serde_json::json!([
{
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": review_prompt}]
}
]);
assert_eq!(body["input"], expected_input);
server.verify().await;
}
/// After a review thread finishes, its conversation should not leak into the
/// parent session. A subsequent parent turn must not include any review
/// messages in its request `input`.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_history_does_not_leak_into_parent_session() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Respond to both the review request and the subsequent parent request.
let sse_raw = r#"[
{"type":"response.output_item.done", "item":{
"type":"message", "role":"assistant",
"content":[{"type":"output_text","text":"review assistant output"}]
}},
{"type":"response.completed", "response": {"id": "__ID__"}}
]"#;
let server = start_responses_server_with_sse(sse_raw, 2).await;
let codex_home = TempDir::new().unwrap();
let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await;
// 1) Run a review turn that produces an assistant message (isolated in child).
codex
.submit(Op::Review {
review_request: ReviewRequest {
prompt: "Start a review".to_string(),
user_facing_hint: "Start a review".to_string(),
},
})
.await
.unwrap();
let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
let _closed = wait_for_event(&codex, |ev| {
matches!(ev, EventMsg::ExitedReviewMode(Some(_)))
})
.await;
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// 2) Continue in the parent session; request input must not include any review items.
let followup = "back to parent".to_string();
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: followup.clone(),
}],
})
.await
.unwrap();
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Inspect the second request (parent turn) input contents.
// Parent turns include session initial messages (user_instructions, environment_context).
// Critically, no messages from the review thread should appear.
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2);
let body = requests[1].body_json::<serde_json::Value>().unwrap();
let input = body["input"].as_array().expect("input array");
// Must include the followup as the last item for this turn
let last = input.last().expect("at least one item in input");
assert_eq!(last["role"].as_str().unwrap(), "user");
let last_text = last["content"][0]["text"].as_str().unwrap();
assert_eq!(last_text, followup);
// Ensure no review-thread content leaked into the parent request
let contains_review_prompt = input
.iter()
.any(|msg| msg["content"][0]["text"].as_str().unwrap_or_default() == "Start a review");
let contains_review_assistant = input.iter().any(|msg| {
msg["content"][0]["text"].as_str().unwrap_or_default() == "review assistant output"
});
assert!(
!contains_review_prompt,
"review prompt leaked into parent turn input"
);
assert!(
!contains_review_assistant,
"review assistant output leaked into parent turn input"
);
server.verify().await;
}
/// Start a mock Responses API server and mount the given SSE stream body.
async fn start_responses_server_with_sse(sse_raw: &str, expected_requests: usize) -> MockServer {
let server = MockServer::start().await;
let sse = load_sse_fixture_with_id_from_str(sse_raw, &Uuid::new_v4().to_string());
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(
ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse.clone(), "text/event-stream"),
)
.expect(expected_requests as u64)
.mount(&server)
.await;
server
}
/// Create a conversation configured to talk to the provided mock server.
#[expect(clippy::expect_used)]
async fn new_conversation_for_server<F>(
server: &MockServer,
codex_home: &TempDir,
mutator: F,
) -> Arc<CodexConversation>
where
F: FnOnce(&mut Config),
{
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let mut config = load_default_config_for_test(codex_home);
config.model_provider = model_provider;
mutator(&mut config);
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
conversation_manager
.new_conversation(config)
.await
.expect("create conversation")
.conversation
}

View File

@@ -562,8 +562,6 @@ impl EventProcessor for EventProcessorWithHumanOutput {
EventMsg::ShutdownComplete => return CodexStatus::Shutdown,
EventMsg::ConversationPath(_) => {}
EventMsg::UserMessage(_) => {}
EventMsg::EnteredReviewMode(_) => {}
EventMsg::ExitedReviewMode(_) => {}
}
CodexStatus::Running
}

View File

@@ -137,7 +137,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
// Load configuration and determine approval policy
let overrides = ConfigOverrides {
model,
review_model: None,
config_profile,
// This CLI is intended to be headless and has no affordances for asking
// the user for approval.

View File

@@ -31,13 +31,6 @@ install:
rustup show active-toolchain
cargo fetch
# Run `cargo nextest` since it's faster than `cargo test`, though including
# --no-fail-fast is important to ensure all tests are run.
#
# Run `cargo install cargo-nextest` if you don't have it installed.
test:
cargo nextest run --no-fail-fast
# Run the MCP server
mcp-server-run *args:
cargo run -p codex-mcp-server -- "$@"

View File

@@ -18,9 +18,6 @@ use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use codex_core::config::load_config_as_toml;
use codex_core::config_edit::CONFIG_KEY_EFFORT;
use codex_core::config_edit::CONFIG_KEY_MODEL;
use codex_core::config_edit::persist_overrides_and_clear_if_none;
use codex_core::default_client::get_codex_user_agent;
use codex_core::exec::ExecParams;
use codex_core::exec_env::create_env;
@@ -74,8 +71,6 @@ use codex_protocol::mcp_protocol::SendUserMessageResponse;
use codex_protocol::mcp_protocol::SendUserTurnParams;
use codex_protocol::mcp_protocol::SendUserTurnResponse;
use codex_protocol::mcp_protocol::ServerNotification;
use codex_protocol::mcp_protocol::SetDefaultModelParams;
use codex_protocol::mcp_protocol::SetDefaultModelResponse;
use codex_protocol::mcp_protocol::UserInfoResponse;
use codex_protocol::mcp_protocol::UserSavedConfig;
use codex_protocol::models::ContentItem;
@@ -197,9 +192,6 @@ impl CodexMessageProcessor {
ClientRequest::GetUserSavedConfig { request_id } => {
self.get_user_saved_config(request_id).await;
}
ClientRequest::SetDefaultModel { request_id, params } => {
self.set_default_model(request_id, params).await;
}
ClientRequest::GetUserAgent { request_id } => {
self.get_user_agent(request_id).await;
}
@@ -507,40 +499,6 @@ impl CodexMessageProcessor {
self.outgoing.send_response(request_id, response).await;
}
async fn set_default_model(&self, request_id: RequestId, params: SetDefaultModelParams) {
let SetDefaultModelParams {
model,
reasoning_effort,
} = params;
let effort_str = reasoning_effort.map(|effort| effort.to_string());
let overrides: [(&[&str], Option<&str>); 2] = [
(&[CONFIG_KEY_MODEL], model.as_deref()),
(&[CONFIG_KEY_EFFORT], effort_str.as_deref()),
];
match persist_overrides_and_clear_if_none(
&self.config.codex_home,
self.config.active_profile.as_deref(),
&overrides,
)
.await
{
Ok(()) => {
let response = SetDefaultModelResponse {};
self.outgoing.send_response(request_id, response).await;
}
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to persist overrides: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
}
}
async fn exec_one_off_command(&self, request_id: RequestId, params: ExecOneOffCommandParams) {
tracing::debug!("ExecOneOffCommand params: {params:?}");
@@ -1248,7 +1206,6 @@ fn derive_config_from_params(
} = params;
let overrides = ConfigOverrides {
model,
review_model: None,
config_profile: profile,
cwd: cwd.map(PathBuf::from),
approval_policy,

View File

@@ -152,7 +152,6 @@ impl CodexToolCallParam {
// Build the `ConfigOverrides` recognized by codex-core.
let overrides = codex_core::config::ConfigOverrides {
model,
review_model: None,
config_profile: profile,
cwd: cwd.map(PathBuf::from),
approval_policy: approval_policy.map(Into::into),

View File

@@ -279,9 +279,7 @@ async fn run_codex_tool_session_inner(
| EventMsg::TurnAborted(_)
| EventMsg::ConversationPath(_)
| EventMsg::UserMessage(_)
| EventMsg::ShutdownComplete
| EventMsg::EnteredReviewMode(_)
| EventMsg::ExitedReviewMode(_) => {
| EventMsg::ShutdownComplete => {
// For now, we do not do anything extra for these
// events. Note that
// send(codex_event_to_notification(&event)) above has

View File

@@ -280,7 +280,7 @@ mod tests {
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: conversation_id,
model: "gpt-4o".to_string(),
reasoning_effort: Some(ReasoningEffort::default()),
reasoning_effort: ReasoningEffort::default(),
history_log_id: 1,
history_entry_count: 1000,
initial_messages: None,
@@ -314,7 +314,7 @@ mod tests {
let session_configured_event = SessionConfiguredEvent {
session_id: conversation_id,
model: "gpt-4o".to_string(),
reasoning_effort: Some(ReasoningEffort::default()),
reasoning_effort: ReasoningEffort::default(),
history_log_id: 1,
history_entry_count: 1000,
initial_messages: None,

View File

@@ -24,7 +24,6 @@ use codex_protocol::mcp_protocol::RemoveConversationListenerParams;
use codex_protocol::mcp_protocol::ResumeConversationParams;
use codex_protocol::mcp_protocol::SendUserMessageParams;
use codex_protocol::mcp_protocol::SendUserTurnParams;
use codex_protocol::mcp_protocol::SetDefaultModelParams;
use mcp_types::CallToolRequestParams;
use mcp_types::ClientCapabilities;
@@ -302,15 +301,6 @@ impl McpProcess {
self.send_request("userInfo", None).await
}
/// Send a `setDefaultModel` JSON-RPC request.
pub async fn send_set_default_model_request(
&mut self,
params: SetDefaultModelParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("setDefaultModel", params).await
}
/// Send a `listConversations` JSON-RPC request.
pub async fn send_list_conversations_request(
&mut self,

View File

@@ -320,7 +320,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() {
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
effort: ReasoningEffort::Medium,
summary: ReasoningSummary::Auto,
})
.await

View File

@@ -9,6 +9,5 @@ mod interrupt;
mod list_resume;
mod login;
mod send_message;
mod set_default_model;
mod user_agent;
mod user_info;

View File

@@ -1,76 +0,0 @@
use std::path::Path;
use codex_core::config::ConfigToml;
use codex_protocol::mcp_protocol::SetDefaultModelParams;
use codex_protocol::mcp_protocol::SetDefaultModelResponse;
use mcp_test_support::McpProcess;
use mcp_test_support::to_response;
use mcp_types::JSONRPCResponse;
use mcp_types::RequestId;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn set_default_model_persists_overrides() {
let codex_home = TempDir::new().expect("create tempdir");
create_config_toml(codex_home.path()).expect("write config.toml");
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let params = SetDefaultModelParams {
model: Some("gpt-4.1".to_string()),
reasoning_effort: None,
};
let request_id = mcp
.send_set_default_model_request(params)
.await
.expect("send setDefaultModel");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await
.expect("setDefaultModel timeout")
.expect("setDefaultModel response");
let _: SetDefaultModelResponse =
to_response(resp).expect("deserialize setDefaultModel response");
let config_path = codex_home.path().join("config.toml");
let config_contents = tokio::fs::read_to_string(&config_path)
.await
.expect("read config.toml");
let config_toml: ConfigToml = toml::from_str(&config_contents).expect("parse config.toml");
assert_eq!(
ConfigToml {
model: Some("gpt-4.1".to_string()),
model_reasoning_effort: None,
..Default::default()
},
config_toml,
);
}
// Helper to create a config.toml; mirrors create_conversation.rs
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
r#"
model = "gpt-5"
model_reasoning_effort = "medium"
"#,
)
}

View File

@@ -40,7 +40,6 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
codex_protocol::mcp_protocol::ApplyPatchApprovalResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::ExecCommandApprovalResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::GetUserSavedConfigResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::SetDefaultModelResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::GetUserAgentResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::UserInfoResponse::export_all_to(out_dir)?;

View File

@@ -153,11 +153,6 @@ pub enum ClientRequest {
#[serde(rename = "id")]
request_id: RequestId,
},
SetDefaultModel {
#[serde(rename = "id")]
request_id: RequestId,
params: SetDefaultModelParams,
},
GetUserAgent {
#[serde(rename = "id")]
request_id: RequestId,
@@ -223,8 +218,7 @@ pub struct NewConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
/// Note this could be ignored by the model.
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffort>,
pub reasoning_effort: ReasoningEffort,
pub rollout_path: PathBuf,
}
@@ -422,22 +416,6 @@ pub struct GetUserSavedConfigResponse {
pub config: UserSavedConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelParams {
/// If set to None, this means `model` should be cleared in config.toml.
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// If set to None, this means `model_reasoning_effort` should be cleared
/// in config.toml.
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelResponse {}
/// UserSavedConfig contains a subset of the config. It is meant to expose mcp
/// client-configurable settings that can be specified in the NewConversation
/// and SendUserTurn requests.
@@ -527,8 +505,7 @@ pub struct SendUserTurnParams {
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
pub model: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub effort: Option<ReasoningEffort>,
pub effort: ReasoningEffort,
pub summary: ReasoningSummary,
}

View File

@@ -15,7 +15,6 @@ use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::custom_prompts::CustomPrompt;
use crate::mcp_protocol::ConversationId;
use crate::message_history::HistoryEntry;
use crate::models::ContentItem;
use crate::models::ResponseItem;
use crate::num_format::format_with_separators;
use crate::parse_command::ParsedCommand;
@@ -82,8 +81,7 @@ pub enum Op {
model: String,
/// Will only be honored if the model is configured to use reasoning.
#[serde(skip_serializing_if = "Option::is_none")]
effort: Option<ReasoningEffortConfig>,
effort: ReasoningEffortConfig,
/// Will only be honored if the model is configured to use reasoning.
summary: ReasoningSummaryConfig,
@@ -113,11 +111,8 @@ pub enum Op {
model: Option<String>,
/// Updated reasoning effort (honored only for reasoning-capable models).
///
/// Use `Some(Some(_))` to set a specific effort, `Some(None)` to clear
/// the effort, or `None` to leave the existing value unchanged.
#[serde(skip_serializing_if = "Option::is_none")]
effort: Option<Option<ReasoningEffortConfig>>,
effort: Option<ReasoningEffortConfig>,
/// Updated reasoning summary preference (honored only for reasoning-capable models).
#[serde(skip_serializing_if = "Option::is_none")]
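For reference, a minimal sketch of how the three states of the nested-`Option` `effort` field documented above are distinguished; the helper below is illustrative only and not part of either side of this diff:
fn describe_effort_override<T>(effort: Option<Option<T>>) -> &'static str {
    match effort {
        // Field omitted entirely: leave the current effort unchanged.
        None => "leave unchanged",
        // Explicit null: clear the configured effort.
        Some(None) => "clear",
        // Explicit value: set a specific effort.
        Some(Some(_)) => "set",
    }
}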
@@ -167,10 +162,6 @@ pub enum Op {
/// The agent will use its existing context (either conversation history or previous response id)
/// to generate a summary which will be returned as an AgentMessage event.
Compact,
/// Request a code review from the agent.
Review { review_request: ReviewRequest },
/// Request to shut down codex instance.
Shutdown,
}
@@ -509,12 +500,6 @@ pub enum EventMsg {
ShutdownComplete,
ConversationPath(ConversationPathResponseEvent),
/// Entered review mode.
EnteredReviewMode(ReviewRequest),
/// Exited review mode with an optional final result to apply.
ExitedReviewMode(Option<ReviewOutputEvent>),
}
// Individual event payload types matching each `EventMsg` variant.
@@ -723,12 +708,12 @@ where
let (_role, message) = value;
let message = message.as_ref();
let trimmed = message.trim();
if starts_with_ignore_ascii_case(trimmed, ENVIRONMENT_CONTEXT_OPEN_TAG)
&& ends_with_ignore_ascii_case(trimmed, ENVIRONMENT_CONTEXT_CLOSE_TAG)
if trimmed.starts_with(ENVIRONMENT_CONTEXT_OPEN_TAG)
&& trimmed.ends_with(ENVIRONMENT_CONTEXT_CLOSE_TAG)
{
InputMessageKind::EnvironmentContext
} else if starts_with_ignore_ascii_case(trimmed, USER_INSTRUCTIONS_OPEN_TAG)
&& ends_with_ignore_ascii_case(trimmed, USER_INSTRUCTIONS_CLOSE_TAG)
} else if trimmed.starts_with(USER_INSTRUCTIONS_OPEN_TAG)
&& trimmed.ends_with(USER_INSTRUCTIONS_CLOSE_TAG)
{
InputMessageKind::UserInstructions
} else {
@@ -737,26 +722,6 @@ where
}
}
fn starts_with_ignore_ascii_case(text: &str, prefix: &str) -> bool {
let text_bytes = text.as_bytes();
let prefix_bytes = prefix.as_bytes();
text_bytes.len() >= prefix_bytes.len()
&& text_bytes
.iter()
.zip(prefix_bytes.iter())
.all(|(a, b)| a.eq_ignore_ascii_case(b))
}
fn ends_with_ignore_ascii_case(text: &str, suffix: &str) -> bool {
let text_bytes = text.as_bytes();
let suffix_bytes = suffix.as_bytes();
text_bytes.len() >= suffix_bytes.len()
&& text_bytes[text_bytes.len() - suffix_bytes.len()..]
.iter()
.zip(suffix_bytes.iter())
.all(|(a, b)| a.eq_ignore_ascii_case(b))
}
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
pub struct AgentMessageDeltaEvent {
pub delta: String,
@@ -863,7 +828,26 @@ impl InitialHistory {
InitialHistory::Forked(items) => items.clone(),
}
}
pub fn get_response_items(&self) -> Vec<ResponseItem> {
match self {
InitialHistory::New => Vec::new(),
InitialHistory::Resumed(resumed) => resumed
.history
.iter()
.filter_map(|ri| match ri {
RolloutItem::ResponseItem(item) => Some(item.clone()),
_ => None,
})
.collect(),
InitialHistory::Forked(items) => items
.iter()
.filter_map(|ri| match ri {
RolloutItem::ResponseItem(item) => Some(item.clone()),
_ => None,
})
.collect(),
}
}
pub fn get_event_msgs(&self) -> Option<Vec<EventMsg>> {
match self {
InitialHistory::New => None,
@@ -923,26 +907,13 @@ pub struct CompactedItem {
pub message: String,
}
impl From<CompactedItem> for ResponseItem {
fn from(value: CompactedItem) -> Self {
ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: value.message,
}],
}
}
}
#[derive(Serialize, Deserialize, Clone, Debug, TS)]
pub struct TurnContextItem {
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
pub model: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub effort: Option<ReasoningEffortConfig>,
pub effort: ReasoningEffortConfig,
pub summary: ReasoningSummaryConfig,
}
@@ -966,57 +937,6 @@ pub struct GitInfo {
pub repository_url: Option<String>,
}
/// Review request sent to the review session.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, TS)]
pub struct ReviewRequest {
pub prompt: String,
pub user_facing_hint: String,
}
/// Structured review result produced by a child review session.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, TS)]
pub struct ReviewOutputEvent {
pub findings: Vec<ReviewFinding>,
pub overall_correctness: String,
pub overall_explanation: String,
pub overall_confidence_score: f32,
}
impl Default for ReviewOutputEvent {
fn default() -> Self {
Self {
findings: Vec::new(),
overall_correctness: String::default(),
overall_explanation: String::default(),
overall_confidence_score: 0.0,
}
}
}
/// A single review finding describing an observed issue or recommendation.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, TS)]
pub struct ReviewFinding {
pub title: String,
pub body: String,
pub confidence_score: f32,
pub priority: i32,
pub code_location: ReviewCodeLocation,
}
/// Location of the code related to a review finding.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, TS)]
pub struct ReviewCodeLocation {
pub absolute_file_path: PathBuf,
pub line_range: ReviewLineRange,
}
/// Inclusive line range in a file associated with the finding.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, TS)]
pub struct ReviewLineRange {
pub start: u32,
pub end: u32,
}
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
pub struct ExecCommandBeginEvent {
/// Identifier so this can be paired with the ExecCommandEnd event.
@@ -1162,8 +1082,7 @@ pub struct SessionConfiguredEvent {
pub model: String,
/// The effort the model is putting into reasoning about the user's request.
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffortConfig>,
pub reasoning_effort: ReasoningEffortConfig,
/// Identifier of the history log file (inode on Unix, 0 otherwise).
pub history_log_id: u64,
@@ -1253,7 +1172,7 @@ mod tests {
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: conversation_id,
model: "codex-mini-latest".to_string(),
reasoning_effort: Some(ReasoningEffortConfig::default()),
reasoning_effort: ReasoningEffortConfig::default(),
history_log_id: 0,
history_entry_count: 0,
initial_messages: None,

View File

@@ -3,7 +3,6 @@ use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
use crate::chatwidget::ChatWidget;
use crate::file_search::FileSearchManager;
use crate::history_cell::HistoryCell;
use crate::pager_overlay::Overlay;
use crate::resume_picker::ResumeSelection;
use crate::tui;
@@ -15,7 +14,6 @@ use codex_core::config::Config;
use codex_core::config::persist_model_selection;
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::TokenUsage;
use codex_core::protocol_config_types::ReasoningEffort as ReasoningEffortConfig;
use color_eyre::eyre::Result;
use color_eyre::eyre::WrapErr;
use crossterm::event::KeyCode;
@@ -47,7 +45,7 @@ pub(crate) struct App {
pub(crate) file_search: FileSearchManager,
pub(crate) transcript_cells: Vec<Arc<dyn HistoryCell>>,
pub(crate) transcript_lines: Vec<Line<'static>>,
// Pager overlay state (Transcript or Static like Diff)
pub(crate) overlay: Option<Overlay>,
@@ -132,7 +130,7 @@ impl App {
model_saved_to_global: false,
file_search,
enhanced_keys_supported,
transcript_cells: Vec::new(),
transcript_lines: Vec::new(),
overlay: None,
deferred_history_lines: Vec::new(),
has_emitted_history_lines: false,
@@ -214,12 +212,15 @@ impl App {
tui.frame_requester().schedule_frame();
}
AppEvent::InsertHistoryCell(cell) => {
let cell: Arc<dyn HistoryCell> = cell.into();
let mut cell_transcript = cell.transcript_lines();
if !cell.is_stream_continuation() && !self.transcript_lines.is_empty() {
cell_transcript.insert(0, Line::from(""));
}
if let Some(Overlay::Transcript(t)) = &mut self.overlay {
t.insert_cell(cell.clone());
t.insert_lines(cell_transcript.clone());
tui.frame_requester().schedule_frame();
}
self.transcript_cells.push(cell.clone());
self.transcript_lines.extend(cell_transcript.clone());
let mut display = cell.display_lines(tui.terminal.last_known_screen_size.width);
if !display.is_empty() {
// Only insert a separating blank line for new cells that are not
@@ -296,7 +297,7 @@ impl App {
self.chat_widget.apply_file_search_result(query, matches);
}
AppEvent::UpdateReasoningEffort(effort) => {
self.on_update_reasoning_effort(effort);
self.chat_widget.set_reasoning_effort(effort);
}
AppEvent::UpdateModel(model) => {
self.chat_widget.set_model(model.clone());
@@ -325,29 +326,13 @@ impl App {
fn show_model_save_hint(&mut self) {
let model = self.config.model.clone();
if self.active_profile.is_some() {
self.chat_widget.add_info_message(
format!("Model changed to {model} for the current session"),
Some("(ctrl+s to set as profile default)".to_string()),
);
self.chat_widget.add_info_message(format!(
"Model switched to {model}. Press Ctrl+S to save it for this profile, then press Ctrl+S again to set it as your global default."
));
} else {
self.chat_widget.add_info_message(
format!("Model changed to {model} for the current session"),
Some("(ctrl+s to set as default)".to_string()),
);
}
}
fn on_update_reasoning_effort(&mut self, effort: Option<ReasoningEffortConfig>) {
let changed = self.config.model_reasoning_effort != effort;
self.chat_widget.set_reasoning_effort(effort);
self.config.model_reasoning_effort = effort;
if changed {
let show_hint = self.model_saved_to_profile || self.model_saved_to_global;
self.model_saved_to_profile = false;
self.model_saved_to_global = false;
if show_hint {
self.show_model_save_hint();
}
self.chat_widget.add_info_message(format!(
"Model switched to {model}. Press Ctrl+S to save it as your global default."
));
}
}
@@ -376,13 +361,14 @@ impl App {
match scope {
SaveScope::Profile(profile) => {
match persist_model_selection(&codex_home, Some(profile), &model, effort).await {
match persist_model_selection(&codex_home, Some(profile), &model, Some(effort))
.await
{
Ok(()) => {
self.model_saved_to_profile = true;
self.chat_widget.add_info_message(
format!("Profile model changed to {model} for all sessions"),
Some("(view global config in config.toml)".to_string()),
);
self.chat_widget.add_info_message(format!(
"Saved model {model} ({effort}) for profile `{profile}`. Press Ctrl+S again to make this your global default."
));
}
Err(err) => {
tracing::error!(
@@ -396,13 +382,12 @@ impl App {
}
}
SaveScope::Global => {
match persist_model_selection(&codex_home, None, &model, effort).await {
match persist_model_selection(&codex_home, None, &model, Some(effort)).await {
Ok(()) => {
self.model_saved_to_global = true;
self.chat_widget.add_info_message(
format!("Default model changed to {model} for all sessions"),
Some("(view global config in config.toml)".to_string()),
)
self.chat_widget.add_info_message(format!(
"Saved model {model} ({effort}) as your global default."
));
}
Err(err) => {
tracing::error!(
@@ -419,7 +404,6 @@ impl App {
self.chat_widget.add_info_message(
"Model preference already saved globally; no further action needed."
.to_string(),
None,
);
}
}
@@ -435,7 +419,7 @@ impl App {
} => {
// Enter alternate screen and set viewport to full size.
let _ = tui.enter_alt_screen();
self.overlay = Some(Overlay::new_transcript(self.transcript_cells.clone()));
self.overlay = Some(Overlay::new_transcript(self.transcript_lines.clone()));
tui.frame_requester().schedule_frame();
}
KeyEvent {
@@ -468,7 +452,7 @@ impl App {
kind: KeyEventKind::Press,
..
} if self.backtrack.primed
&& self.backtrack.nth_user_message != usize::MAX
&& self.backtrack.count > 0
&& self.chat_widget.composer_is_empty() =>
{
// Delegate to helper for clarity; preserves behavior.
@@ -492,66 +476,3 @@ impl App {
};
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::app_backtrack::BacktrackState;
use crate::chatwidget::tests::make_chatwidget_manual_with_sender;
use crate::file_search::FileSearchManager;
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
fn make_test_app() -> App {
let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender();
let config = chat_widget.config_ref().clone();
let server = Arc::new(ConversationManager::with_auth(CodexAuth::from_api_key(
"Test API Key",
)));
let file_search = FileSearchManager::new(config.cwd.clone(), app_event_tx.clone());
App {
server,
app_event_tx,
chat_widget,
config,
active_profile: None,
model_saved_to_profile: false,
model_saved_to_global: false,
file_search,
transcript_cells: Vec::new(),
overlay: None,
deferred_history_lines: Vec::new(),
has_emitted_history_lines: false,
enhanced_keys_supported: false,
commit_anim_running: Arc::new(AtomicBool::new(false)),
backtrack: BacktrackState::default(),
}
}
#[test]
fn update_reasoning_effort_updates_config_and_resets_flags() {
let mut app = make_test_app();
app.model_saved_to_profile = true;
app.model_saved_to_global = true;
app.config.model_reasoning_effort = Some(ReasoningEffortConfig::Medium);
app.chat_widget
.set_reasoning_effort(Some(ReasoningEffortConfig::Medium));
app.on_update_reasoning_effort(Some(ReasoningEffortConfig::High));
assert_eq!(
app.config.model_reasoning_effort,
Some(ReasoningEffortConfig::High)
);
assert_eq!(
app.chat_widget.config_ref().model_reasoning_effort,
Some(ReasoningEffortConfig::High)
);
assert!(!app.model_saved_to_profile);
assert!(!app.model_saved_to_global);
}
}

View File

@@ -1,7 +1,7 @@
use std::path::PathBuf;
use crate::app::App;
use crate::history_cell::UserHistoryCell;
use crate::backtrack_helpers;
use crate::pager_overlay::Overlay;
use crate::tui;
use crate::tui::TuiEvent;
@@ -19,11 +19,11 @@ pub(crate) struct BacktrackState {
pub(crate) primed: bool,
/// Session id of the base conversation to fork from.
pub(crate) base_id: Option<ConversationId>,
/// Index in the transcript of the last user message.
pub(crate) nth_user_message: usize,
/// Current step count (Nth last user message).
pub(crate) count: usize,
/// True when the transcript overlay is showing a backtrack preview.
pub(crate) overlay_preview_active: bool,
/// Pending fork request: (base_id, nth_user_message, prefill).
/// Pending fork request: (base_id, drop_count, prefill).
pub(crate) pending: Option<(ConversationId, usize, String)>,
}
@@ -96,9 +96,9 @@ impl App {
&mut self,
prefill: String,
base_id: ConversationId,
nth_user_message: usize,
drop_last_messages: usize,
) {
self.backtrack.pending = Some((base_id, nth_user_message, prefill));
self.backtrack.pending = Some((base_id, drop_last_messages, prefill));
self.app_event_tx.send(crate::app_event::AppEvent::CodexOp(
codex_core::protocol::Op::GetPath,
));
@@ -107,7 +107,7 @@ impl App {
/// Open transcript overlay (enters alternate screen and shows full transcript).
pub(crate) fn open_transcript_overlay(&mut self, tui: &mut tui::Tui) {
let _ = tui.enter_alt_screen();
self.overlay = Some(Overlay::new_transcript(self.transcript_cells.clone()));
self.overlay = Some(Overlay::new_transcript(self.transcript_lines.clone()));
tui.frame_requester().schedule_frame();
}
@@ -130,17 +130,15 @@ impl App {
/// Re-render the full transcript into the terminal scrollback in one call.
/// Useful when switching sessions to ensure prior history remains visible.
pub(crate) fn render_transcript_once(&mut self, tui: &mut tui::Tui) {
if !self.transcript_cells.is_empty() {
for cell in &self.transcript_cells {
tui.insert_history_lines(cell.transcript_lines());
}
if !self.transcript_lines.is_empty() {
tui.insert_history_lines(self.transcript_lines.clone());
}
}
/// Initialize backtrack state and show composer hint.
fn prime_backtrack(&mut self) {
self.backtrack.primed = true;
self.backtrack.nth_user_message = usize::MAX;
self.backtrack.count = 0;
self.backtrack.base_id = self.chat_widget.conversation_id();
self.chat_widget.show_esc_backtrack_hint();
}
@@ -159,44 +157,51 @@ impl App {
self.backtrack.primed = true;
self.backtrack.base_id = self.chat_widget.conversation_id();
self.backtrack.overlay_preview_active = true;
let last_user_cell_position = self
.transcript_cells
.iter()
.filter_map(|c| c.as_any().downcast_ref::<UserHistoryCell>())
.count() as i64
- 1;
if last_user_cell_position >= 0 {
self.apply_backtrack_selection(last_user_cell_position as usize);
}
let sel = self.compute_backtrack_selection(tui, 1);
self.apply_backtrack_selection(sel);
tui.frame_requester().schedule_frame();
}
/// Step selection to the next older user message and update overlay.
fn step_backtrack_and_highlight(&mut self, tui: &mut tui::Tui) {
let last_user_cell_position = self
.transcript_cells
.iter()
.filter(|c| c.as_any().is::<UserHistoryCell>())
.take(self.backtrack.nth_user_message)
.count()
.saturating_sub(1);
self.apply_backtrack_selection(last_user_cell_position);
let next = self.backtrack.count.saturating_add(1);
let sel = self.compute_backtrack_selection(tui, next);
self.apply_backtrack_selection(sel);
tui.frame_requester().schedule_frame();
}
/// Compute normalized target, scroll offset, and highlight for requested step.
fn compute_backtrack_selection(
&self,
tui: &tui::Tui,
requested_n: usize,
) -> (usize, Option<usize>, Option<(usize, usize)>) {
let nth = backtrack_helpers::normalize_backtrack_n(&self.transcript_lines, requested_n);
let header_idx =
backtrack_helpers::find_nth_last_user_header_index(&self.transcript_lines, nth);
let offset = header_idx.map(|idx| {
backtrack_helpers::wrapped_offset_before(
&self.transcript_lines,
idx,
tui.terminal.viewport_area.width,
)
});
let hl = backtrack_helpers::highlight_range_for_nth_last_user(&self.transcript_lines, nth);
(nth, offset, hl)
}
/// Apply a computed backtrack selection to the overlay and internal counter.
fn apply_backtrack_selection(&mut self, nth_user_message: usize) {
self.backtrack.nth_user_message = nth_user_message;
fn apply_backtrack_selection(
&mut self,
selection: (usize, Option<usize>, Option<(usize, usize)>),
) {
let (nth, offset, hl) = selection;
self.backtrack.count = nth;
if let Some(Overlay::Transcript(t)) = &mut self.overlay {
let cell = self
.transcript_cells
.iter()
.enumerate()
.filter(|(_, c)| c.as_any().is::<UserHistoryCell>())
.nth(nth_user_message);
if let Some((idx, _)) = cell {
t.set_highlight_cell(Some(idx));
if let Some(off) = offset {
t.set_scroll_offset(off);
}
t.set_highlight_range(hl);
}
}
@@ -214,19 +219,13 @@ impl App {
/// Handle Enter in overlay backtrack preview: confirm selection and reset state.
fn overlay_confirm_backtrack(&mut self, tui: &mut tui::Tui) {
let nth_user_message = self.backtrack.nth_user_message;
if let Some(base_id) = self.backtrack.base_id {
let user_cells = self
.transcript_cells
.iter()
.filter_map(|c| c.as_any().downcast_ref::<UserHistoryCell>())
.collect::<Vec<_>>();
let prefill = user_cells
.get(nth_user_message)
.map(|c| c.message.clone())
.unwrap_or_default();
let drop_last_messages = self.backtrack.count;
let prefill =
backtrack_helpers::nth_last_user_text(&self.transcript_lines, drop_last_messages)
.unwrap_or_default();
self.close_transcript_overlay(tui);
self.request_backtrack(prefill, base_id, nth_user_message);
self.request_backtrack(prefill, base_id, drop_last_messages);
}
self.reset_backtrack_state();
}
@@ -245,15 +244,11 @@ impl App {
/// Computes the prefill from the selected user message and requests history.
pub(crate) fn confirm_backtrack_from_main(&mut self) {
if let Some(base_id) = self.backtrack.base_id {
let prefill = self
.transcript_cells
.iter()
.filter(|c| c.as_any().is::<UserHistoryCell>())
.nth(self.backtrack.nth_user_message)
.and_then(|c| c.as_any().downcast_ref::<UserHistoryCell>())
.map(|c| c.message.clone())
.unwrap_or_default();
self.request_backtrack(prefill, base_id, self.backtrack.nth_user_message);
let drop_last_messages = self.backtrack.count;
let prefill =
backtrack_helpers::nth_last_user_text(&self.transcript_lines, drop_last_messages)
.unwrap_or_default();
self.request_backtrack(prefill, base_id, drop_last_messages);
}
self.reset_backtrack_state();
}
@@ -262,7 +257,7 @@ impl App {
pub(crate) fn reset_backtrack_state(&mut self) {
self.backtrack.primed = false;
self.backtrack.base_id = None;
self.backtrack.nth_user_message = usize::MAX;
self.backtrack.count = 0;
// In case a hint is somehow still visible (e.g., race with overlay open/close).
self.chat_widget.clear_esc_backtrack_hint();
}
@@ -276,9 +271,9 @@ impl App {
) -> Result<()> {
if let Some((base_id, _, _)) = self.backtrack.pending.as_ref()
&& ev.conversation_id == *base_id
&& let Some((_, nth_user_message, prefill)) = self.backtrack.pending.take()
&& let Some((_, drop_count, prefill)) = self.backtrack.pending.take()
{
self.fork_and_switch_to_new_conversation(tui, ev, nth_user_message, prefill)
self.fork_and_switch_to_new_conversation(tui, ev, drop_count, prefill)
.await;
}
Ok(())
@@ -289,17 +284,17 @@ impl App {
&mut self,
tui: &mut tui::Tui,
ev: ConversationPathResponseEvent,
nth_user_message: usize,
drop_count: usize,
prefill: String,
) {
let cfg = self.chat_widget.config_ref().clone();
// Perform the fork via a thin wrapper for clarity/testability.
let result = self
.perform_fork(ev.path.clone(), nth_user_message, cfg.clone())
.perform_fork(ev.path.clone(), drop_count, cfg.clone())
.await;
match result {
Ok(new_conv) => {
self.install_forked_conversation(tui, cfg, new_conv, nth_user_message, &prefill)
self.install_forked_conversation(tui, cfg, new_conv, drop_count, &prefill)
}
Err(e) => tracing::error!("error forking conversation: {e:#}"),
}
@@ -309,12 +304,10 @@ impl App {
async fn perform_fork(
&self,
path: PathBuf,
nth_user_message: usize,
drop_count: usize,
cfg: codex_core::config::Config,
) -> codex_core::error::Result<codex_core::NewConversation> {
self.server
.fork_conversation(nth_user_message, cfg, path)
.await
self.server.fork_conversation(drop_count, cfg, path).await
}
/// Install a forked conversation into the ChatWidget and update UI to reflect selection.
@@ -323,7 +316,7 @@ impl App {
tui: &mut tui::Tui,
cfg: codex_core::config::Config,
new_conv: codex_core::NewConversation,
nth_user_message: usize,
drop_count: usize,
prefill: &str,
) {
let conv = new_conv.conversation;
@@ -339,7 +332,7 @@ impl App {
self.chat_widget =
crate::chatwidget::ChatWidget::new_from_existing(init, conv, session_configured);
// Trim transcript up to the selected user message and re-render it.
self.trim_transcript_for_backtrack(nth_user_message);
self.trim_transcript_for_backtrack(drop_count);
self.render_transcript_once(tui);
if !prefill.is_empty() {
self.chat_widget.set_composer_text(prefill.to_string());
@@ -347,21 +340,14 @@ impl App {
tui.frame_requester().schedule_frame();
}
/// Trim transcript_cells to preserve only content up to the selected user message.
fn trim_transcript_for_backtrack(&mut self, nth_user_message: usize) {
let cut_idx = self
.transcript_cells
.iter()
.enumerate()
.filter_map(|(idx, cell)| {
if cell.as_any().is::<UserHistoryCell>() {
Some(idx)
} else {
None
}
})
.nth(nth_user_message - 1)
.unwrap_or(self.transcript_cells.len());
self.transcript_cells.truncate(cut_idx);
/// Trim transcript_lines to preserve only content up to the selected user message.
fn trim_transcript_for_backtrack(&mut self, drop_count: usize) {
if let Some(cut_idx) =
backtrack_helpers::find_nth_last_user_header_index(&self.transcript_lines, drop_count)
{
self.transcript_lines.truncate(cut_idx);
} else {
self.transcript_lines.clear();
}
}
}

View File

@@ -46,7 +46,7 @@ pub(crate) enum AppEvent {
CommitTick,
/// Update the current reasoning effort in the running app and widget.
UpdateReasoningEffort(Option<ReasoningEffort>),
UpdateReasoningEffort(ReasoningEffort),
/// Update the current model slug in the running app and widget.
UpdateModel(String),

View File

@@ -0,0 +1,153 @@
use ratatui::text::Line;
/// Convenience: compute the highlight range for the Nth last user message.
pub(crate) fn highlight_range_for_nth_last_user(
lines: &[Line<'_>],
n: usize,
) -> Option<(usize, usize)> {
let header = find_nth_last_user_header_index(lines, n)?;
Some(highlight_range_from_header(lines, header))
}
/// Compute the wrapped display-line offset before `header_idx`, for a given width.
pub(crate) fn wrapped_offset_before(lines: &[Line<'_>], header_idx: usize, width: u16) -> usize {
let before = &lines[0..header_idx];
crate::wrapping::word_wrap_lines(before, width as usize).len()
}
/// Find the header index for the Nth last user message in the transcript.
/// Returns `None` if `n == 0` or there are fewer than `n` user messages.
pub(crate) fn find_nth_last_user_header_index(lines: &[Line<'_>], n: usize) -> Option<usize> {
if n == 0 {
return None;
}
let mut found = 0usize;
for (idx, line) in lines.iter().enumerate().rev() {
let content: String = line
.spans
.iter()
.map(|s| s.content.as_ref())
.collect::<Vec<_>>()
.join("");
if content.trim() == "user" {
found += 1;
if found == n {
return Some(idx);
}
}
}
None
}
/// Normalize a requested backtrack step `n` against the available user messages.
/// - Returns `0` if there are no user messages.
/// - Returns `n` if the Nth last user message exists.
/// - Otherwise wraps to `1` (the most recent user message).
pub(crate) fn normalize_backtrack_n(lines: &[Line<'_>], n: usize) -> usize {
if n == 0 {
return 0;
}
if find_nth_last_user_header_index(lines, n).is_some() {
return n;
}
if find_nth_last_user_header_index(lines, 1).is_some() {
1
} else {
0
}
}
/// Extract the text content of the Nth last user message.
/// The message body is considered to be the lines following the "user" header
/// until the first blank line.
pub(crate) fn nth_last_user_text(lines: &[Line<'_>], n: usize) -> Option<String> {
let header_idx = find_nth_last_user_header_index(lines, n)?;
extract_message_text_after_header(lines, header_idx)
}
/// Extract message text starting after `header_idx` until the first blank line.
fn extract_message_text_after_header(lines: &[Line<'_>], header_idx: usize) -> Option<String> {
let start = header_idx + 1;
let mut out: Vec<String> = Vec::new();
for line in lines.iter().skip(start) {
let is_blank = line
.spans
.iter()
.all(|s| s.content.as_ref().trim().is_empty());
if is_blank {
break;
}
let text: String = line
.spans
.iter()
.map(|s| s.content.as_ref())
.collect::<Vec<_>>()
.join("");
out.push(text);
}
if out.is_empty() {
None
} else {
Some(out.join("\n"))
}
}
/// Given a header index, return the half-open range for the message block
/// [header_idx, end) where `end` is the first blank line after the header or the
/// end of the transcript.
fn highlight_range_from_header(lines: &[Line<'_>], header_idx: usize) -> (usize, usize) {
let mut end = header_idx + 1;
while end < lines.len() {
let is_blank = lines[end]
.spans
.iter()
.all(|s| s.content.as_ref().trim().is_empty());
if is_blank {
break;
}
end += 1;
}
(header_idx, end)
}
#[cfg(test)]
mod tests {
use super::*;
fn line(s: &str) -> Line<'static> {
s.to_string().into()
}
fn transcript_with_users(count: usize) -> Vec<Line<'static>> {
// Build a transcript with `count` user messages, each followed by one body line and a blank line.
let mut v = Vec::new();
for i in 0..count {
v.push(line("user"));
v.push(line(&format!("message {i}")));
v.push(line(""));
}
v
}
#[test]
fn normalize_wraps_to_one_when_past_oldest() {
let lines = transcript_with_users(2);
assert_eq!(normalize_backtrack_n(&lines, 1), 1);
assert_eq!(normalize_backtrack_n(&lines, 2), 2);
// Requesting 3rd when only 2 exist wraps to 1
assert_eq!(normalize_backtrack_n(&lines, 3), 1);
}
#[test]
fn normalize_returns_zero_when_no_user_messages() {
let lines = transcript_with_users(0);
assert_eq!(normalize_backtrack_n(&lines, 1), 0);
assert_eq!(normalize_backtrack_n(&lines, 5), 0);
}
#[test]
fn normalize_keeps_valid_n() {
let lines = transcript_with_users(3);
assert_eq!(normalize_backtrack_n(&lines, 2), 2);
}
}
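
The backtrack_helpers functions above scan the rendered transcript from the bottom: any line whose trimmed content is exactly "user" counts as a message header, and the lines up to the next blank line form that message's body. A minimal standalone sketch of the same scan over plain strings, for following the logic outside the crate (function and variable names here are illustrative, not part of the crate's API):

```rust
/// Sketch: find the Nth-last "user" header and extract its body,
/// mirroring the scan in backtrack_helpers but over plain strings.
fn nth_last_user_text(lines: &[String], n: usize) -> Option<String> {
    if n == 0 {
        return None;
    }
    // Walk from the end, counting header lines whose trimmed text is "user".
    let header_idx = lines
        .iter()
        .enumerate()
        .rev()
        .filter(|(_, l)| l.trim() == "user")
        .nth(n - 1)
        .map(|(idx, _)| idx)?;
    // The body is everything after the header up to the first blank line.
    let body: Vec<&str> = lines[header_idx + 1..]
        .iter()
        .map(String::as_str)
        .take_while(|l| !l.trim().is_empty())
        .collect();
    if body.is_empty() { None } else { Some(body.join("\n")) }
}

fn main() {
    let transcript = vec![
        "user".to_string(),
        "first question".to_string(),
        String::new(),
        "user".to_string(),
        "second question".to_string(),
        String::new(),
    ];
    assert_eq!(nth_last_user_text(&transcript, 1).as_deref(), Some("second question"));
    assert_eq!(nth_last_user_text(&transcript, 2).as_deref(), Some("first question"));
    assert_eq!(nth_last_user_text(&transcript, 3), None);
}
```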

View File

@@ -38,7 +38,6 @@ use crate::bottom_pane::textarea::TextAreaState;
use crate::clipboard_paste::normalize_pasted_path;
use crate::clipboard_paste::pasted_image_format;
use crate::key_hint;
use crate::ui_consts::LIVE_PREFIX_COLS;
use codex_file_search::FileMatch;
use std::cell::RefCell;
use std::collections::HashMap;
@@ -96,10 +95,6 @@ enum ActivePopup {
File(FileSearchPopup),
}
const FOOTER_HINT_HEIGHT: u16 = 1;
const FOOTER_SPACING_HEIGHT: u16 = 1;
const FOOTER_HEIGHT_WITH_HINT: u16 = FOOTER_HINT_HEIGHT + FOOTER_SPACING_HEIGHT;
impl ChatComposer {
pub fn new(
has_input_focus: bool,
@@ -137,28 +132,25 @@ impl ChatComposer {
}
pub fn desired_height(&self, width: u16) -> u16 {
// Leave 1 column for the left border and 1 column for left padding
self.textarea
.desired_height(width.saturating_sub(LIVE_PREFIX_COLS))
self.textarea.desired_height(width - 1)
+ match &self.active_popup {
ActivePopup::None => FOOTER_HEIGHT_WITH_HINT,
ActivePopup::None => 1u16,
ActivePopup::Command(c) => c.calculate_required_height(),
ActivePopup::File(c) => c.calculate_required_height(),
}
}
pub fn cursor_pos(&self, area: Rect) -> Option<(u16, u16)> {
let popup_constraint = match &self.active_popup {
ActivePopup::Command(popup) => Constraint::Max(popup.calculate_required_height()),
ActivePopup::File(popup) => Constraint::Max(popup.calculate_required_height()),
ActivePopup::None => Constraint::Max(FOOTER_HEIGHT_WITH_HINT),
let popup_height = match &self.active_popup {
ActivePopup::Command(popup) => popup.calculate_required_height(),
ActivePopup::File(popup) => popup.calculate_required_height(),
ActivePopup::None => 1,
};
let [textarea_rect, _] =
Layout::vertical([Constraint::Min(1), popup_constraint]).areas(area);
Layout::vertical([Constraint::Min(1), Constraint::Max(popup_height)]).areas(area);
let mut textarea_rect = textarea_rect;
// Leave 1 for border and 1 for padding
textarea_rect.width = textarea_rect.width.saturating_sub(LIVE_PREFIX_COLS);
textarea_rect.x = textarea_rect.x.saturating_add(LIVE_PREFIX_COLS);
textarea_rect.width = textarea_rect.width.saturating_sub(1);
textarea_rect.x += 1;
let state = self.textarea_state.borrow();
self.textarea.cursor_pos_with_state(textarea_rect, &state)
}
@@ -1231,16 +1223,13 @@ impl ChatComposer {
impl WidgetRef for ChatComposer {
fn render_ref(&self, area: Rect, buf: &mut Buffer) {
let (popup_constraint, hint_spacing) = match &self.active_popup {
ActivePopup::Command(popup) => (Constraint::Max(popup.calculate_required_height()), 0),
ActivePopup::File(popup) => (Constraint::Max(popup.calculate_required_height()), 0),
ActivePopup::None => (
Constraint::Length(FOOTER_HEIGHT_WITH_HINT),
FOOTER_SPACING_HEIGHT,
),
let popup_height = match &self.active_popup {
ActivePopup::Command(popup) => popup.calculate_required_height(),
ActivePopup::File(popup) => popup.calculate_required_height(),
ActivePopup::None => 1,
};
let [textarea_rect, popup_rect] =
Layout::vertical([Constraint::Min(1), popup_constraint]).areas(area);
Layout::vertical([Constraint::Min(1), Constraint::Max(popup_height)]).areas(area);
match &self.active_popup {
ActivePopup::Command(popup) => {
popup.render_ref(popup_rect, buf);
@@ -1249,16 +1238,7 @@ impl WidgetRef for ChatComposer {
popup.render_ref(popup_rect, buf);
}
ActivePopup::None => {
let hint_rect = if hint_spacing > 0 {
let [_, hint_rect] = Layout::vertical([
Constraint::Length(hint_spacing),
Constraint::Length(FOOTER_HINT_HEIGHT),
])
.areas(popup_rect);
hint_rect
} else {
popup_rect
};
let bottom_line_rect = popup_rect;
let mut hint: Vec<Span<'static>> = if self.ctrl_c_quit_hint {
let ctrl_c_followup = if self.is_task_running {
" to interrupt"
@@ -1278,6 +1258,7 @@ impl WidgetRef for ChatComposer {
key_hint::ctrl('J')
};
vec![
" ".into(),
key_hint::plain('⏎'),
" send ".into(),
newline_hint_key,
@@ -1328,7 +1309,7 @@ impl WidgetRef for ChatComposer {
Line::from(hint)
.style(Style::default().dim())
.render_ref(hint_rect, buf);
.render_ref(bottom_line_rect, buf);
}
}
let border_style = if self.has_focus {
@@ -1345,16 +1326,15 @@ impl WidgetRef for ChatComposer {
buf,
);
let mut textarea_rect = textarea_rect;
// Leave 1 for border and 1 for padding
textarea_rect.width = textarea_rect.width.saturating_sub(LIVE_PREFIX_COLS);
textarea_rect.x = textarea_rect.x.saturating_add(LIVE_PREFIX_COLS);
textarea_rect.width = textarea_rect.width.saturating_sub(1);
textarea_rect.x += 1;
let mut state = self.textarea_state.borrow_mut();
StatefulWidgetRef::render_ref(&(&self.textarea), textarea_rect, buf, &mut state);
if self.textarea.text().is_empty() {
Line::from(self.placeholder_text.as_str())
.style(Style::default().dim())
.render_ref(textarea_rect.inner(Margin::new(0, 0)), buf);
.render_ref(textarea_rect.inner(Margin::new(1, 0)), buf);
}
}
}
@@ -1364,7 +1344,6 @@ mod tests {
use super::*;
use image::ImageBuffer;
use image::Rgba;
use pretty_assertions::assert_eq;
use std::path::PathBuf;
use tempfile::tempdir;
@@ -1377,60 +1356,6 @@ mod tests {
use crate::bottom_pane::textarea::TextArea;
use tokio::sync::mpsc::unbounded_channel;
#[test]
fn footer_hint_row_is_separated_from_composer() {
let (tx, _rx) = unbounded_channel::<AppEvent>();
let sender = AppEventSender::new(tx);
let composer = ChatComposer::new(
true,
sender,
false,
"Ask Codex to do anything".to_string(),
false,
);
let area = Rect::new(0, 0, 40, 6);
let mut buf = Buffer::empty(area);
composer.render_ref(area, &mut buf);
let row_to_string = |y: u16| {
let mut row = String::new();
for x in 0..area.width {
row.push(buf[(x, y)].symbol().chars().next().unwrap_or(' '));
}
row
};
let mut hint_row: Option<(u16, String)> = None;
for y in 0..area.height {
let row = row_to_string(y);
if row.contains(" send") {
hint_row = Some((y, row));
break;
}
}
let (hint_row_idx, hint_row_contents) =
hint_row.expect("expected footer hint row to be rendered");
assert_eq!(
hint_row_idx,
area.height - 1,
"hint row should occupy the bottom line: {hint_row_contents:?}",
);
assert!(
hint_row_idx > 0,
"expected a spacing row above the footer hints",
);
let spacing_row = row_to_string(hint_row_idx - 1);
assert_eq!(
spacing_row.trim(),
"",
"expected blank spacing row above hints but saw: {spacing_row:?}",
);
}
#[test]
fn test_current_at_token_basic_cases() {
let test_cases = vec![

View File

@@ -137,12 +137,11 @@ impl BottomPaneView for ListSelectionView {
fn desired_height(&self, _width: u16) -> u16 {
let rows = (self.items.len()).clamp(1, MAX_POPUP_ROWS);
// +1 for the title row, +1 for a spacer line beneath the header,
// +1 for optional subtitle, +1 for optional footer
let mut height = rows as u16 + 2;
// +1 for the title row, +1 for optional subtitle, +1 for optional footer
let mut height = rows as u16 + 1;
if self.subtitle.is_some() {
// +1 for subtitle (the spacer is accounted for above)
height = height.saturating_add(1);
// +1 for subtitle, +1 for a blank spacer line beneath it
height = height.saturating_add(2);
}
if self.footer_hint.is_some() {
height = height.saturating_add(2);
@@ -179,18 +178,17 @@ impl BottomPaneView for ListSelectionView {
vec![Self::dim_prefix_span(), sub.clone().dim()];
let subtitle_para = Paragraph::new(Line::from(subtitle_spans));
subtitle_para.render(subtitle_area, buf);
next_y = next_y.saturating_add(1);
// Render the extra spacer line with the dimmed prefix to align with title/subtitle
let spacer_area = Rect {
x: area.x,
y: next_y.saturating_add(1),
width: area.width,
height: 1,
};
Self::render_dim_prefix_line(spacer_area, buf);
next_y = next_y.saturating_add(2);
}
let spacer_area = Rect {
x: area.x,
y: next_y,
width: area.width,
height: 1,
};
Self::render_dim_prefix_line(spacer_area, buf);
next_y = next_y.saturating_add(1);
let footer_reserved = if self.footer_hint.is_some() { 2 } else { 0 };
let rows_area = Rect {
x: area.x,
@@ -247,78 +245,3 @@ impl BottomPaneView for ListSelectionView {
}
}
}
#[cfg(test)]
mod tests {
use super::BottomPaneView;
use super::*;
use crate::app_event::AppEvent;
use insta::assert_snapshot;
use ratatui::layout::Rect;
use tokio::sync::mpsc::unbounded_channel;
fn make_selection_view(subtitle: Option<&str>) -> ListSelectionView {
let (tx_raw, _rx) = unbounded_channel::<AppEvent>();
let tx = AppEventSender::new(tx_raw);
let items = vec![
SelectionItem {
name: "Read Only".to_string(),
description: Some("Codex can read files".to_string()),
is_current: true,
actions: vec![],
},
SelectionItem {
name: "Full Access".to_string(),
description: Some("Codex can edit files".to_string()),
is_current: false,
actions: vec![],
},
];
ListSelectionView::new(
"Select Approval Mode".to_string(),
subtitle.map(str::to_string),
Some("Press Enter to confirm or Esc to go back".to_string()),
items,
tx,
)
}
fn render_lines(view: &ListSelectionView) -> String {
let width = 48;
let height = BottomPaneView::desired_height(view, width);
let area = Rect::new(0, 0, width, height);
let mut buf = Buffer::empty(area);
view.render(area, &mut buf);
let lines: Vec<String> = (0..area.height)
.map(|row| {
let mut line = String::new();
for col in 0..area.width {
let symbol = buf[(area.x + col, area.y + row)].symbol();
if symbol.is_empty() {
line.push(' ');
} else {
line.push_str(symbol);
}
}
line
})
.collect();
lines.join("\n")
}
#[test]
fn renders_blank_line_between_title_and_items_without_subtitle() {
let view = make_selection_view(None);
assert_snapshot!(
"list_selection_spacing_without_subtitle",
render_lines(&view)
);
}
#[test]
fn renders_blank_line_between_subtitle_and_items() {
let view = make_selection_view(Some("Switch between Codex approval presets"));
assert_snapshot!("list_selection_spacing_with_subtitle", render_lines(&view));
}
}

View File

@@ -2,7 +2,7 @@
source: tui/src/bottom_pane/chat_composer.rs
expression: terminal.backend()
---
"▌ [Pasted Content 1002 chars][Pasted Content 1004 chars] "
"▌[Pasted Content 1002 chars][Pasted Content 1004 chars] "
"▌ "
"▌ "
"▌ "
@@ -10,5 +10,5 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" "
"⏎ send ⌃J newline ⌃T transcript ⌃C quit "
" "
" ⏎ send ⌃J newline ⌃T transcript ⌃C quit "

View File

@@ -10,5 +10,5 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" "
"⏎ send ⌃J newline ⌃T transcript ⌃C quit "
" "
" ⏎ send ⌃J newline ⌃T transcript ⌃C quit "

View File

@@ -2,7 +2,7 @@
source: tui/src/bottom_pane/chat_composer.rs
expression: terminal.backend()
---
"▌ [Pasted Content 1005 chars] "
"▌[Pasted Content 1005 chars] "
"▌ "
"▌ "
"▌ "
@@ -10,5 +10,5 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" "
"⏎ send ⌃J newline ⌃T transcript ⌃C quit "
" "
" ⏎ send ⌃J newline ⌃T transcript ⌃C quit "

View File

@@ -2,7 +2,7 @@
source: tui/src/bottom_pane/chat_composer.rs
expression: terminal.backend()
---
"▌ [Pasted Content 1003 chars][Pasted Content 1007 chars] another short paste "
"▌[Pasted Content 1003 chars][Pasted Content 1007 chars] another short paste "
"▌ "
"▌ "
"▌ "
@@ -10,5 +10,5 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" "
"⏎ send ⌃J newline ⌃T transcript ⌃C quit "
" "
" ⏎ send ⌃J newline ⌃T transcript ⌃C quit "

View File

@@ -2,7 +2,7 @@
source: tui/src/bottom_pane/chat_composer.rs
expression: terminal.backend()
---
"▌ /mo "
"▌/mo "
"▌ "
"▌/model choose what model and reasoning effort to use "
"▌/mention mention a file "

View File

@@ -2,7 +2,7 @@
source: tui/src/bottom_pane/chat_composer.rs
expression: terminal.backend()
---
"▌ short "
"▌short "
"▌ "
"▌ "
"▌ "
@@ -10,5 +10,5 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" "
"⏎ send ⌃J newline ⌃T transcript ⌃C quit "
" "
" ⏎ send ⌃J newline ⌃T transcript ⌃C quit "

View File

@@ -1,11 +0,0 @@
---
source: tui/src/bottom_pane/list_selection_view.rs
expression: render_lines(&view)
---
▌ Select Approval Mode
▌ Switch between Codex approval presets
▌> 1. Read Only (current) Codex can read files
▌ 2. Full Access Codex can edit files
Press Enter to confirm or Esc to go back

View File

@@ -1,10 +0,0 @@
---
source: tui/src/bottom_pane/list_selection_view.rs
expression: render_lines(&view)
---
▌ Select Approval Mode
▌> 1. Read Only (current) Codex can read files
▌ 2. Full Access Codex can edit files
Press Enter to confirm or Esc to go back

View File

@@ -289,9 +289,7 @@ impl ChatWidget {
/// separated by newlines rather than autosubmitting the next one.
fn on_interrupted_turn(&mut self) {
// Finalize, log a gentle prompt, and clear running state.
self.finalize_turn_with_error_message(
"Conversation interrupted - tell the model what to do differently".to_owned(),
);
self.finalize_turn_with_error_message("Tell the model what to do differently".to_owned());
// If any messages were queued during the task, restore them into the composer.
if !self.queued_user_messages.is_empty() {
@@ -1089,8 +1087,6 @@ impl ChatWidget {
self.app_event_tx
.send(crate::app_event::AppEvent::ConversationHistory(ev));
}
EventMsg::EnteredReviewMode(_) => {}
EventMsg::ExitedReviewMode(_) => {}
}
}
@@ -1195,13 +1191,9 @@ impl ChatWidget {
tracing::info!(
"New model: {}, New effort: {}, Current model: {}, Current effort: {}",
model_slug.clone(),
effort
.map(|effort| effort.to_string())
.unwrap_or_else(|| "none".to_string()),
effort,
current_model,
current_effort
.map(|effort| effort.to_string())
.unwrap_or_else(|| "none".to_string())
);
})];
items.push(SelectionItem {
@@ -1272,7 +1264,7 @@ impl ChatWidget {
}
/// Set the reasoning effort in the widget's config copy.
pub(crate) fn set_reasoning_effort(&mut self, effort: Option<ReasoningEffortConfig>) {
pub(crate) fn set_reasoning_effort(&mut self, effort: ReasoningEffortConfig) {
self.config.model_reasoning_effort = effort;
}
@@ -1281,8 +1273,8 @@ impl ChatWidget {
self.config.model = model;
}
pub(crate) fn add_info_message(&mut self, message: String, hint: Option<String>) {
self.add_to_history(history_cell::new_info_event(message, hint));
pub(crate) fn add_info_message(&mut self, message: String) {
self.add_to_history(history_cell::new_info_event(message));
self.request_redraw();
}
@@ -1459,4 +1451,4 @@ fn extract_first_bold(s: &str) -> Option<String> {
}
#[cfg(test)]
pub(crate) mod tests;
mod tests;

View File

@@ -3,4 +3,4 @@ source: tui/src/chatwidget/tests.rs
expression: terminal.backend()
---
"▌ Ask Codex to do anything "
" "
" ⏎ send ⌃J newline ⌃T transcript "

View File

@@ -2,5 +2,5 @@
source: tui/src/chatwidget/tests.rs
expression: terminal.backend()
---
" Thinking (0s • Esc to interrupt) "
" Thinking (0s • Esc to interrupt) "
"▌ Ask Codex to do anything "

View File

@@ -9,8 +9,7 @@ expression: visual
└ Search Change Approved
Read diff_render.rs
Investigating rendering code (0s • Esc to interrupt)
Investigating rendering code (0s • Esc to interrupt)
Summarize recent commits
⏎ send ⌃J newline ⌃T transcript ⌃C quit
▌Summarize recent commits
⏎ send ⌃J newline ⌃T transcript ⌃C quit

View File

@@ -2,4 +2,4 @@
source: tui/src/chatwidget/tests.rs
expression: blob
---
'/model' is disabled while a task is in progress.
🖐  '/model' is disabled while a task is in progress.

View File

@@ -3,9 +3,8 @@ source: tui/src/chatwidget/tests.rs
expression: terminal.backend()
---
" "
" Analyzing (0s • Esc to interrupt) "
" Analyzing (0s • Esc to interrupt) "
" "
"▌ Ask Codex to do anything "
" "
"⏎ send ⌃J newline ⌃T transcript ⌃C quit "
" ⏎ send ⌃J newline ⌃T transcript ⌃C quit "
" "

View File

@@ -138,7 +138,7 @@ fn resumed_initial_messages_render_history() {
let configured = codex_core::protocol::SessionConfiguredEvent {
session_id: conversation_id,
model: "test-model".to_string(),
reasoning_effort: Some(ReasoningEffortConfig::default()),
reasoning_effort: ReasoningEffortConfig::default(),
history_log_id: 0,
history_entry_count: 0,
initial_messages: Some(vec![
@@ -247,17 +247,6 @@ fn make_chatwidget_manual() -> (
(widget, rx, op_rx)
}
pub(crate) fn make_chatwidget_manual_with_sender() -> (
ChatWidget,
AppEventSender,
tokio::sync::mpsc::UnboundedReceiver<AppEvent>,
tokio::sync::mpsc::UnboundedReceiver<Op>,
) {
let (widget, rx, op_rx) = make_chatwidget_manual();
let app_event_tx = widget.app_event_tx.clone();
(widget, app_event_tx, rx, op_rx)
}
fn drain_insert_history(
rx: &mut tokio::sync::mpsc::UnboundedReceiver<AppEvent>,
) -> Vec<Vec<ratatui::text::Line<'static>>> {

View File

@@ -7,7 +7,6 @@ use crate::render::line_utils::prefix_lines;
use crate::render::line_utils::push_owned_lines;
use crate::slash_command::SlashCommand;
use crate::text_formatting::format_and_truncate_tool_result;
use crate::ui_consts::LIVE_PREFIX_COLS;
use crate::wrapping::RtOptions;
use crate::wrapping::word_wrap_line;
use crate::wrapping::word_wrap_lines;
@@ -44,7 +43,6 @@ use ratatui::style::Stylize;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
use std::any::Any;
use std::collections::HashMap;
use std::io::Cursor;
use std::path::Path;
@@ -71,7 +69,7 @@ pub(crate) enum PatchEventType {
/// Represents an event to display in the conversation history. Returns its
/// `Vec<Line<'static>>` representation to make it easier to display in a
/// scrollable list.
pub(crate) trait HistoryCell: std::fmt::Debug + Send + Sync + Any {
pub(crate) trait HistoryCell: std::fmt::Debug + Send + Sync {
fn display_lines(&self, width: u16) -> Vec<Line<'static>>;
fn transcript_lines(&self) -> Vec<Line<'static>> {
@@ -91,15 +89,9 @@ pub(crate) trait HistoryCell: std::fmt::Debug + Send + Sync + Any {
}
}
impl dyn HistoryCell {
pub(crate) fn as_any(&self) -> &dyn Any {
self
}
}
#[derive(Debug)]
pub(crate) struct UserHistoryCell {
pub message: String,
message: String,
}
impl HistoryCell for UserHistoryCell {
@@ -107,7 +99,7 @@ impl HistoryCell for UserHistoryCell {
let mut lines: Vec<Line<'static>> = Vec::new();
// Wrap the content first, then prefix each wrapped line with the marker.
let wrap_width = width.saturating_sub(LIVE_PREFIX_COLS); // account for the ▌ prefix and trailing space
let wrap_width = width.saturating_sub(1); // account for the ▌ prefix
let wrapped = textwrap::wrap(
&self.message,
textwrap::Options::new(wrap_width as usize)
@@ -115,7 +107,7 @@ impl HistoryCell for UserHistoryCell {
);
for line in wrapped {
lines.push(vec![" ".cyan().dim(), line.to_string().dim()].into());
lines.push(vec!["".cyan().dim(), line.to_string().dim()].into());
}
lines
}
@@ -437,16 +429,9 @@ impl ExecCell {
if let Some(output) = call.output.as_ref()
&& output.exit_code != 0
{
let out = output_lines(
Some(output),
OutputLinesParams {
only_err: false,
include_angle_pipe: false,
include_prefix: false,
},
)
.into_iter()
.join("\n");
let out = output_lines(Some(output), false, false, false)
.into_iter()
.join("\n");
if !out.trim().is_empty() {
// Wrap the output.
for line in out.lines() {
@@ -645,25 +630,25 @@ pub(crate) fn new_session_info(
]));
lines.push(Line::from("".dim()));
lines.push(Line::from(
" To get started, describe a task or try one of these commands:".dim(),
" To get started, describe a task or try one of these commands:".dim(),
));
lines.push(Line::from("".dim()));
if !has_agents_md {
lines.push(Line::from(vec![
" /init".bold(),
" /init".bold(),
format!(" - {}", SlashCommand::Init.description()).dim(),
]));
}
lines.push(Line::from(vec![
" /status".bold(),
" /status".bold(),
format!(" - {}", SlashCommand::Status.description()).dim(),
]));
lines.push(Line::from(vec![
" /approvals".bold(),
" /approvals".bold(),
format!(" - {}", SlashCommand::Approvals.description()).dim(),
]));
lines.push(Line::from(vec![
" /model".bold(),
" /model".bold(),
format!(" - {}", SlashCommand::Model.description()).dim(),
]));
PlainHistoryCell { lines }
@@ -1068,13 +1053,9 @@ pub(crate) fn new_mcp_tools_output(
PlainHistoryCell { lines }
}
pub(crate) fn new_info_event(message: String, hint: Option<String>) -> PlainHistoryCell {
let mut line = vec!["> ".into(), message.into()];
if let Some(hint) = hint {
line.push(" ".into());
line.push(hint.dark_gray());
}
let lines: Vec<Line<'static>> = vec![line.into()];
pub(crate) fn new_info_event(message: String) -> PlainHistoryCell {
let lines: Vec<Line<'static>> =
vec![vec![padded_emoji("💾").green(), " ".into(), message.into()].into()];
PlainHistoryCell { lines }
}
@@ -1082,7 +1063,8 @@ pub(crate) fn new_error_event(message: String) -> PlainHistoryCell {
// Use a hair space (U+200A) to create a subtle, near-invisible separation
// before the text. VS16 is intentionally omitted to keep spacing tighter
// in terminals like Ghostty.
let lines: Vec<Line<'static>> = vec![vec![format!("{message}").red()].into()];
let lines: Vec<Line<'static>> =
vec![vec![padded_emoji("🖐").red().bold(), " ".into(), message.into()].into()];
PlainHistoryCell { lines }
}
@@ -1186,11 +1168,9 @@ pub(crate) fn new_patch_apply_failure(stderr: String) -> PlainHistoryCell {
stderr,
formatted_output: String::new(),
}),
OutputLinesParams {
only_err: true,
include_angle_pipe: true,
include_prefix: true,
},
true,
true,
true,
));
}
@@ -1271,18 +1251,12 @@ pub(crate) fn new_reasoning_summary_block(
vec![Box::new(new_reasoning_block(full_reasoning_buffer, config))]
}
struct OutputLinesParams {
fn output_lines(
output: Option<&CommandOutput>,
only_err: bool,
include_angle_pipe: bool,
include_prefix: bool,
}
fn output_lines(output: Option<&CommandOutput>, params: OutputLinesParams) -> Vec<Line<'static>> {
let OutputLinesParams {
only_err,
include_angle_pipe,
include_prefix,
} = params;
) -> Vec<Line<'static>> {
let CommandOutput {
exit_code,
stdout,
@@ -1786,7 +1760,7 @@ mod tests {
message: msg.to_string(),
};
// Small width to force wrapping more clearly. Effective wrap width is width-2 due to the ▌ prefix and trailing space.
// Small width to force wrapping more clearly. Effective wrap width is width-1 due to the ▌ prefix.
let width: u16 = 12;
let lines = cell.display_lines(width);
let rendered = render_lines(&lines).join("\n");

View File

@@ -11,7 +11,7 @@ use codex_core::RolloutRecorder;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use codex_core::config::SWIFTFOX_MEDIUM_MODEL;
use codex_core::config::GPT5_HIGH_MODEL;
use codex_core::config::find_codex_home;
use codex_core::config::load_config_as_toml_with_cli_overrides;
use codex_core::config::persist_model_selection;
@@ -31,6 +31,7 @@ mod app;
mod app_backtrack;
mod app_event;
mod app_event_sender;
mod backtrack_helpers;
mod bottom_pane;
mod chatwidget;
mod citation_regex;
@@ -60,7 +61,6 @@ mod status_indicator_widget;
mod streaming;
mod text_formatting;
mod tui;
mod ui_consts;
mod user_approval_widget;
mod version;
mod wrapping;
@@ -122,7 +122,6 @@ pub async fn run_main(
let overrides = ConfigOverrides {
model,
review_model: None,
approval_policy,
sandbox_mode,
cwd,
@@ -369,9 +368,9 @@ async fn run_ratatui_app(
&cli,
&config,
active_profile.as_deref(),
internal_storage.swiftfox_model_prompt_seen,
internal_storage.gpt_5_high_model_prompt_seen,
) {
internal_storage.swiftfox_model_prompt_seen = true;
internal_storage.gpt_5_high_model_prompt_seen = true;
if let Err(e) = internal_storage.persist().await {
error!("Failed to persist internal storage: {e:?}");
}
@@ -380,13 +379,12 @@ async fn run_ratatui_app(
let switch_to_new_model = upgrade_decision == ModelUpgradeDecision::Switch;
if switch_to_new_model {
config.model = SWIFTFOX_MEDIUM_MODEL.to_owned();
config.model_reasoning_effort = None;
config.model = GPT5_HIGH_MODEL.to_owned();
if let Err(e) = persist_model_selection(
&config.codex_home,
active_profile.as_deref(),
&config.model,
config.model_reasoning_effort,
None,
)
.await
{
@@ -513,9 +511,8 @@ fn should_show_model_rollout_prompt(
cli: &Cli,
config: &Config,
active_profile: Option<&str>,
swiftfox_model_prompt_seen: bool,
gpt_5_high_model_prompt_seen: bool,
) -> bool {
let login_status = get_login_status(config);
// TODO(jif) drop.
let debug_high_enabled = std::env::var("DEBUG_HIGH")
.map(|v| v.eq_ignore_ascii_case("1"))
@@ -524,9 +521,8 @@ fn should_show_model_rollout_prompt(
active_profile.is_none()
&& debug_high_enabled
&& cli.model.is_none()
&& !swiftfox_model_prompt_seen
&& !gpt_5_high_model_prompt_seen
&& config.model_provider.requires_openai_auth
&& matches!(login_status, LoginStatus::AuthMode(AuthMode::ChatGPT))
&& !cli.oss
}
@@ -534,12 +530,6 @@ fn should_show_model_rollout_prompt(
mod tests {
use super::*;
use clap::Parser;
use codex_core::auth::AuthDotJson;
use codex_core::auth::get_auth_file;
use codex_core::auth::login_with_api_key;
use codex_core::auth::write_auth_json;
use codex_core::token_data::IdTokenInfo;
use codex_core::token_data::TokenData;
use std::sync::Once;
fn enable_debug_high_env() {
@@ -555,56 +545,14 @@ mod tests {
fn make_config() -> Config {
enable_debug_high_env();
// Create a unique CODEX_HOME per test to isolate auth.json writes.
let mut codex_home = std::env::temp_dir();
let unique_suffix = format!(
"codex_tui_test_{}_{}",
std::process::id(),
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos()
);
codex_home.push(unique_suffix);
std::fs::create_dir_all(&codex_home).expect("create unique CODEX_HOME");
Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
codex_home,
std::env::temp_dir(),
)
.expect("load default config")
}
/// Test helper to write an `auth.json` with the requested auth mode into the
/// provided CODEX_HOME directory. This ensures `get_login_status()` reads the
/// intended mode deterministically.
fn set_auth_method(codex_home: &std::path::Path, mode: AuthMode) {
match mode {
AuthMode::ApiKey => {
login_with_api_key(codex_home, "sk-test-key").expect("write api key auth.json");
}
AuthMode::ChatGPT => {
// Minimal valid JWT payload: header.payload.signature (all base64url, no padding)
const FAKE_JWT: &str = "eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.e30.c2ln"; // {"alg":"none","typ":"JWT"}.{}."sig"
let mut id_info = IdTokenInfo::default();
id_info.raw_jwt = FAKE_JWT.to_string();
let auth = AuthDotJson {
openai_api_key: None,
tokens: Some(TokenData {
id_token: id_info,
access_token: "access-token".to_string(),
refresh_token: "refresh-token".to_string(),
account_id: None,
}),
last_refresh: None,
};
let file = get_auth_file(codex_home);
write_auth_json(&file, &auth).expect("write chatgpt auth.json");
}
}
}
#[test]
fn shows_login_when_not_authenticated() {
let cfg = make_config();
@@ -618,39 +566,27 @@ mod tests {
fn shows_model_rollout_prompt_for_default_model() {
let cli = Cli::parse_from(["codex"]);
let cfg = make_config();
set_auth_method(&cfg.codex_home, AuthMode::ChatGPT);
assert!(should_show_model_rollout_prompt(&cli, &cfg, None, false,));
}
#[test]
fn hides_model_rollout_prompt_when_api_auth_mode() {
let cli = Cli::parse_from(["codex"]);
let cfg = make_config();
set_auth_method(&cfg.codex_home, AuthMode::ApiKey);
assert!(!should_show_model_rollout_prompt(&cli, &cfg, None, false,));
assert!(should_show_model_rollout_prompt(&cli, &cfg, None, false));
}
#[test]
fn hides_model_rollout_prompt_when_marked_seen() {
let cli = Cli::parse_from(["codex"]);
let cfg = make_config();
set_auth_method(&cfg.codex_home, AuthMode::ChatGPT);
assert!(!should_show_model_rollout_prompt(&cli, &cfg, None, true,));
assert!(!should_show_model_rollout_prompt(&cli, &cfg, None, true));
}
#[test]
fn hides_model_rollout_prompt_when_cli_overrides_model() {
let cli = Cli::parse_from(["codex", "--model", "gpt-4.1"]);
let cfg = make_config();
set_auth_method(&cfg.codex_home, AuthMode::ChatGPT);
assert!(!should_show_model_rollout_prompt(&cli, &cfg, None, false,));
assert!(!should_show_model_rollout_prompt(&cli, &cfg, None, false));
}
#[test]
fn hides_model_rollout_prompt_when_profile_active() {
let cli = Cli::parse_from(["codex"]);
let cfg = make_config();
set_auth_method(&cfg.codex_home, AuthMode::ChatGPT);
assert!(!should_show_model_rollout_prompt(
&cli,
&cfg,

View File

@@ -1,7 +1,7 @@
use crate::tui::FrameRequester;
use crate::tui::Tui;
use crate::tui::TuiEvent;
use codex_core::config::SWIFTFOX_MEDIUM_MODEL;
use codex_core::config::GPT5_HIGH_MODEL;
use color_eyre::eyre::Result;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
@@ -82,15 +82,12 @@ impl WidgetRef for &ModelUpgradePopup {
Clear.render(area, buf);
let mut lines: Vec<Line> = vec![
String::new().into(),
format!(" Codex is now powered by {SWIFTFOX_MEDIUM_MODEL}, a new model that is")
.into(),
Line::from(vec![
" ".into(),
"faster, a better collaborator, ".bold(),
"and ".into(),
"more steerable.".bold(),
"> ".into(),
format!("Try {GPT5_HIGH_MODEL} as your default model").bold(),
]),
format!(" {GPT5_HIGH_MODEL} is our latest model tuned for coding workflows.").into(),
" Switch now or keep your current default you can change models any time.".into(),
"".into(),
];
@@ -109,7 +106,7 @@ impl WidgetRef for &ModelUpgradePopup {
lines.push(create_option(
0,
ModelUpgradeOption::TryNewModel,
&format!("Yes, switch me to {SWIFTFOX_MEDIUM_MODEL}"),
&format!("Yes, switch me to {GPT5_HIGH_MODEL}"),
));
lines.push(create_option(
1,

View File

@@ -1,8 +1,6 @@
use std::io::Result;
use std::sync::Arc;
use std::time::Duration;
use crate::history_cell::HistoryCell;
use crate::render::line_utils::push_owned_lines;
use crate::tui;
use crate::tui::TuiEvent;
@@ -17,7 +15,6 @@ use ratatui::style::Styled;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::text::Text;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
@@ -27,8 +24,8 @@ pub(crate) enum Overlay {
}
impl Overlay {
pub(crate) fn new_transcript(cells: Vec<Arc<dyn HistoryCell>>) -> Self {
Self::Transcript(TranscriptOverlay::new(cells))
pub(crate) fn new_transcript(lines: Vec<Line<'static>>) -> Self {
Self::Transcript(TranscriptOverlay::new(lines))
}
pub(crate) fn new_static_with_title(lines: Vec<Line<'static>>, title: String) -> Self {
@@ -76,24 +73,21 @@ fn render_key_hints(area: Rect, buf: &mut Buffer, pairs: &[(&str, &str)]) {
/// Generic widget for rendering a pager view.
struct PagerView {
texts: Vec<Text<'static>>,
lines: Vec<Line<'static>>,
scroll_offset: usize,
title: String,
wrap_cache: Option<WrapCache>,
last_content_height: Option<usize>,
/// If set, on next render ensure this chunk is visible.
pending_scroll_chunk: Option<usize>,
}
impl PagerView {
fn new(texts: Vec<Text<'static>>, title: String, scroll_offset: usize) -> Self {
fn new(lines: Vec<Line<'static>>, title: String, scroll_offset: usize) -> Self {
Self {
texts,
lines,
scroll_offset,
title,
wrap_cache: None,
last_content_height: None,
pending_scroll_chunk: None,
}
}
@@ -102,14 +96,6 @@ impl PagerView {
let content_area = self.scroll_area(area);
self.update_last_content_height(content_area.height);
self.ensure_wrapped(content_area.width);
// If there is a pending request to scroll a specific chunk into view,
// satisfy it now that wrapping is up to date for this width.
if let (Some(idx), Some(cache)) =
(self.pending_scroll_chunk.take(), self.wrap_cache.as_ref())
&& let Some(range) = cache.chunk_ranges.get(idx).cloned()
{
self.ensure_range_visible(range, content_area.height as usize, cache.wrapped.len());
}
// Compute page bounds without holding an immutable borrow on cache while mutating self
let wrapped_len = self
.wrap_cache
@@ -122,12 +108,40 @@ impl PagerView {
let start = self.scroll_offset;
let end = (start + content_area.height as usize).min(wrapped_len);
let wrapped = self.cached();
let (wrapped, _src_idx) = self.cached();
let page = &wrapped[start..end];
self.render_content_page_prepared(content_area, buf, page);
self.render_bottom_bar(area, content_area, buf, wrapped);
}
fn render_with_highlight(
&mut self,
area: Rect,
buf: &mut Buffer,
highlight: Option<(usize, usize)>,
) {
self.render_header(area, buf);
let content_area = self.scroll_area(area);
self.update_last_content_height(content_area.height);
self.ensure_wrapped(content_area.width);
// Compute page bounds first to avoid borrow conflicts
let wrapped_len = self
.wrap_cache
.as_ref()
.map(|c| c.wrapped.len())
.unwrap_or(0);
self.scroll_offset = self
.scroll_offset
.min(wrapped_len.saturating_sub(content_area.height as usize));
let start = self.scroll_offset;
let end = (start + content_area.height as usize).min(wrapped_len);
let (wrapped, src_idx) = self.cached();
let page = self.page_with_optional_highlight(wrapped, src_idx, start, end, highlight);
self.render_content_page_prepared(content_area, buf, &page);
self.render_bottom_bar(area, content_area, buf, wrapped);
}
fn render_header(&self, area: Rect, buf: &mut Buffer) {
Span::from("/ ".repeat(area.width as usize / 2))
.dim()
@@ -256,8 +270,7 @@ impl PagerView {
struct WrapCache {
width: u16,
wrapped: Vec<Line<'static>>,
/// For each input Text chunk, the half-open range of wrapped lines produced.
chunk_ranges: Vec<std::ops::Range<usize>>,
src_idx: Vec<usize>,
base_len: usize,
}
@@ -265,39 +278,74 @@ impl PagerView {
fn ensure_wrapped(&mut self, width: u16) {
let width = width.max(1);
let needs = match self.wrap_cache {
Some(ref c) => c.width != width || c.base_len != self.texts.len(),
Some(ref c) => c.width != width || c.base_len != self.lines.len(),
None => true,
};
if !needs {
return;
}
let mut wrapped: Vec<Line<'static>> = Vec::new();
let mut chunk_ranges: Vec<std::ops::Range<usize>> = Vec::with_capacity(self.texts.len());
for text in &self.texts {
let start = wrapped.len();
for line in &text.lines {
let ws = crate::wrapping::word_wrap_line(line, width as usize);
push_owned_lines(&ws, &mut wrapped);
}
let end = wrapped.len();
chunk_ranges.push(start..end);
let mut src_idx: Vec<usize> = Vec::new();
for (i, line) in self.lines.iter().enumerate() {
let ws = crate::wrapping::word_wrap_line(line, width as usize);
src_idx.extend(std::iter::repeat_n(i, ws.len()));
push_owned_lines(&ws, &mut wrapped);
}
self.wrap_cache = Some(WrapCache {
width,
wrapped,
chunk_ranges,
base_len: self.texts.len(),
src_idx,
base_len: self.lines.len(),
});
}
fn cached(&self) -> &[Line<'static>] {
fn cached(&self) -> (&[Line<'static>], &[usize]) {
if let Some(cache) = self.wrap_cache.as_ref() {
&cache.wrapped
(&cache.wrapped, &cache.src_idx)
} else {
&[]
(&[], &[])
}
}
fn page_with_optional_highlight<'a>(
&self,
wrapped: &'a [Line<'static>],
src_idx: &[usize],
start: usize,
end: usize,
highlight: Option<(usize, usize)>,
) -> std::borrow::Cow<'a, [Line<'static>]> {
use ratatui::style::Modifier;
let (hi_start, hi_end) = match highlight {
Some(r) => r,
None => return std::borrow::Cow::Borrowed(&wrapped[start..end]),
};
let mut out: Vec<Line<'static>> = Vec::with_capacity(end - start);
let mut bold_done = false;
for (row, src_line) in wrapped
.iter()
.enumerate()
.skip(start)
.take(end.saturating_sub(start))
{
let mut line = src_line.clone();
if let Some(src) = src_idx.get(row).copied()
&& src >= hi_start
&& src < hi_end
{
for (i, s) in line.spans.iter_mut().enumerate() {
s.style.add_modifier |= Modifier::REVERSED;
if !bold_done && i == 0 {
s.style.add_modifier |= Modifier::BOLD;
bold_done = true;
}
}
}
out.push(line);
}
std::borrow::Cow::Owned(out)
}
fn is_scrolled_to_bottom(&self) -> bool {
if self.scroll_offset == usize::MAX {
return true;
@@ -315,108 +363,38 @@ impl PagerView {
let max_scroll = cache.wrapped.len().saturating_sub(visible);
self.scroll_offset >= max_scroll
}
/// Request that the given text chunk index be scrolled into view on next render.
fn scroll_chunk_into_view(&mut self, chunk_index: usize) {
self.pending_scroll_chunk = Some(chunk_index);
}
fn ensure_range_visible(
&mut self,
range: std::ops::Range<usize>,
viewport_height: usize,
total_wrapped: usize,
) {
if viewport_height == 0 || total_wrapped == 0 {
return;
}
let first = range.start.min(total_wrapped.saturating_sub(1));
let last = range
.end
.saturating_sub(1)
.min(total_wrapped.saturating_sub(1));
let current_top = self.scroll_offset.min(total_wrapped.saturating_sub(1));
let current_bottom = current_top.saturating_add(viewport_height.saturating_sub(1));
if first < current_top {
self.scroll_offset = first;
} else if last > current_bottom {
// Scroll just enough so that 'last' is visible at the bottom
self.scroll_offset = last.saturating_sub(viewport_height.saturating_sub(1));
}
}
}
pub(crate) struct TranscriptOverlay {
view: PagerView,
cells: Vec<Arc<dyn HistoryCell>>,
highlight_cell: Option<usize>,
highlight_range: Option<(usize, usize)>,
is_done: bool,
}
impl TranscriptOverlay {
pub(crate) fn new(transcript_cells: Vec<Arc<dyn HistoryCell>>) -> Self {
pub(crate) fn new(transcript_lines: Vec<Line<'static>>) -> Self {
Self {
view: PagerView::new(
Self::render_cells_to_texts(&transcript_cells, None),
transcript_lines,
"T R A N S C R I P T".to_string(),
usize::MAX,
),
cells: transcript_cells,
highlight_cell: None,
highlight_range: None,
is_done: false,
}
}
fn render_cells_to_texts(
cells: &[Arc<dyn HistoryCell>],
highlight_cell: Option<usize>,
) -> Vec<Text<'static>> {
let mut texts: Vec<Text<'static>> = Vec::new();
let mut first = true;
for (idx, cell) in cells.iter().enumerate() {
let mut lines: Vec<Line<'static>> = Vec::new();
if !cell.is_stream_continuation() && !first {
lines.push(Line::from(""));
}
let cell_lines = if Some(idx) == highlight_cell {
cell.transcript_lines()
.into_iter()
.map(|l| l.reversed())
.collect()
} else {
cell.transcript_lines()
};
lines.extend(cell_lines);
texts.push(Text::from(lines));
first = false;
}
texts
}
pub(crate) fn insert_cell(&mut self, cell: Arc<dyn HistoryCell>) {
pub(crate) fn insert_lines(&mut self, lines: Vec<Line<'static>>) {
let follow_bottom = self.view.is_scrolled_to_bottom();
// Append as a new Text chunk (with a separating blank if needed)
let mut lines: Vec<Line<'static>> = Vec::new();
if !cell.is_stream_continuation() && !self.cells.is_empty() {
lines.push(Line::from(""));
}
lines.extend(cell.transcript_lines());
self.view.texts.push(Text::from(lines));
self.cells.push(cell);
self.view.lines.extend(lines);
self.view.wrap_cache = None;
if follow_bottom {
self.view.scroll_offset = usize::MAX;
}
}
pub(crate) fn set_highlight_cell(&mut self, cell: Option<usize>) {
self.highlight_cell = cell;
self.view.wrap_cache = None;
self.view.texts = Self::render_cells_to_texts(&self.cells, self.highlight_cell);
if let Some(idx) = self.highlight_cell {
self.view.scroll_chunk_into_view(idx);
}
pub(crate) fn set_highlight_range(&mut self, range: Option<(usize, usize)>) {
self.highlight_range = range;
}
fn render_hints(&self, area: Rect, buf: &mut Buffer) {
@@ -424,7 +402,9 @@ impl TranscriptOverlay {
let line2 = Rect::new(area.x, area.y.saturating_add(1), area.width, 1);
render_key_hints(line1, buf, PAGER_KEY_HINTS);
let mut pairs: Vec<(&str, &str)> = vec![("q", "quit"), ("Esc", "edit prev")];
if self.highlight_cell.is_some() {
if let Some((start, end)) = self.highlight_range
&& end > start
{
pairs.push(("", "edit message"));
}
render_key_hints(line2, buf, &pairs);
@@ -434,7 +414,8 @@ impl TranscriptOverlay {
let top_h = area.height.saturating_sub(3);
let top = Rect::new(area.x, area.y, area.width, top_h);
let bottom = Rect::new(area.x, area.y + top_h, area.width, 3);
self.view.render(top, buf);
self.view
.render_with_highlight(top, buf, self.highlight_range);
self.render_hints(bottom, buf);
}
}
@@ -477,6 +458,9 @@ impl TranscriptOverlay {
pub(crate) fn is_done(&self) -> bool {
self.is_done
}
pub(crate) fn set_scroll_offset(&mut self, offset: usize) {
self.view.scroll_offset = offset;
}
}
pub(crate) struct StaticOverlay {
@@ -487,7 +471,7 @@ pub(crate) struct StaticOverlay {
impl StaticOverlay {
pub(crate) fn with_title(lines: Vec<Line<'static>>, title: String) -> Self {
Self {
view: PagerView::new(vec![Text::from(lines)], title, 0),
view: PagerView::new(lines, title, 0),
is_done: false,
}
}
@@ -550,26 +534,9 @@ mod tests {
use ratatui::Terminal;
use ratatui::backend::TestBackend;
#[derive(Debug)]
struct TestCell {
lines: Vec<Line<'static>>,
}
impl crate::history_cell::HistoryCell for TestCell {
fn display_lines(&self, _width: u16) -> Vec<Line<'static>> {
self.lines.clone()
}
fn transcript_lines(&self) -> Vec<Line<'static>> {
self.lines.clone()
}
}
#[test]
fn edit_prev_hint_is_visible() {
let mut overlay = TranscriptOverlay::new(vec![Arc::new(TestCell {
lines: vec![Line::from("hello")],
})]);
let mut overlay = TranscriptOverlay::new(vec![Line::from("hello")]);
// Render into a small buffer and assert the backtrack hint is present
let area = Rect::new(0, 0, 40, 10);
@@ -594,15 +561,9 @@ mod tests {
fn transcript_overlay_snapshot_basic() {
// Prepare a transcript overlay with a few lines
let mut overlay = TranscriptOverlay::new(vec![
Arc::new(TestCell {
lines: vec![Line::from("alpha")],
}),
Arc::new(TestCell {
lines: vec![Line::from("beta")],
}),
Arc::new(TestCell {
lines: vec![Line::from("gamma")],
}),
Line::from("alpha"),
Line::from("beta"),
Line::from("gamma"),
]);
let mut term = Terminal::new(TestBackend::new(40, 10)).expect("term");
term.draw(|f| overlay.render(f.area(), f.buffer_mut()))
@@ -612,15 +573,8 @@ mod tests {
#[test]
fn transcript_overlay_keeps_scroll_pinned_at_bottom() {
let mut overlay = TranscriptOverlay::new(
(0..20)
.map(|i| {
Arc::new(TestCell {
lines: vec![Line::from(format!("line{i}"))],
}) as Arc<dyn HistoryCell>
})
.collect(),
);
let mut overlay =
TranscriptOverlay::new((0..20).map(|i| Line::from(format!("line{i}"))).collect());
let mut term = Terminal::new(TestBackend::new(40, 12)).expect("term");
term.draw(|f| overlay.render(f.area(), f.buffer_mut()))
.expect("draw");
@@ -630,33 +584,22 @@ mod tests {
"expected initial render to leave view at bottom"
);
overlay.insert_cell(Arc::new(TestCell {
lines: vec!["tail".into()],
}));
overlay.insert_lines(vec!["tail".into()]);
assert_eq!(overlay.view.scroll_offset, usize::MAX);
}
#[test]
fn transcript_overlay_preserves_manual_scroll_position() {
let mut overlay = TranscriptOverlay::new(
(0..20)
.map(|i| {
Arc::new(TestCell {
lines: vec![Line::from(format!("line{i}"))],
}) as Arc<dyn HistoryCell>
})
.collect(),
);
let mut overlay =
TranscriptOverlay::new((0..20).map(|i| Line::from(format!("line{i}"))).collect());
let mut term = Terminal::new(TestBackend::new(40, 12)).expect("term");
term.draw(|f| overlay.render(f.area(), f.buffer_mut()))
.expect("draw");
overlay.view.scroll_offset = 0;
overlay.insert_cell(Arc::new(TestCell {
lines: vec!["tail".into()],
}));
overlay.insert_lines(vec!["tail".into()]);
assert_eq!(overlay.view.scroll_offset, 0);
}
@@ -677,21 +620,17 @@ mod tests {
#[test]
fn pager_wrap_cache_reuses_for_same_width_and_rebuilds_on_change() {
let long = "This is a long line that should wrap multiple times to ensure non-empty wrapped output.";
let mut pv = PagerView::new(
vec![Text::from(vec![long.into()]), Text::from(vec![long.into()])],
"T".to_string(),
0,
);
let mut pv = PagerView::new(vec![long.into(), long.into()], "T".to_string(), 0);
// Build cache at width 24
pv.ensure_wrapped(24);
let w1 = pv.cached();
let (w1, _) = pv.cached();
assert!(!w1.is_empty(), "expected wrapped output to be non-empty");
let ptr1 = w1.as_ptr();
// Re-run with same width: cache should be reused (pointer stability heuristic)
pv.ensure_wrapped(24);
let w2 = pv.cached();
let (w2, _) = pv.cached();
let ptr2 = w2.as_ptr();
assert_eq!(ptr1, ptr2, "cache should not rebuild for unchanged width");
@@ -699,7 +638,7 @@ mod tests {
// Drop immutable borrow before mutating
let prev_len = w2.len();
pv.ensure_wrapped(36);
let w3 = pv.cached();
let (w3, _) = pv.cached();
assert_ne!(
prev_len,
w3.len(),
@@ -710,16 +649,15 @@ mod tests {
#[test]
fn pager_wrap_cache_invalidates_on_append() {
let long = "Another long line for wrapping behavior verification.";
let mut pv = PagerView::new(vec![Text::from(vec![long.into()])], "T".to_string(), 0);
let mut pv = PagerView::new(vec![long.into()], "T".to_string(), 0);
pv.ensure_wrapped(28);
let w1 = pv.cached();
let (w1, _) = pv.cached();
let len1 = w1.len();
// Append new lines should cause ensure_wrapped to rebuild due to len change
pv.texts.push(Text::from(vec![long.into()]));
pv.texts.push(Text::from(vec![long.into()]));
pv.lines.extend([long.into(), long.into()]);
pv.ensure_wrapped(28);
let w2 = pv.cached();
let (w2, _) = pv.cached();
assert!(
w2.len() >= len1,
"wrapped length should grow or stay same after append"

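The PagerView wrap cache in this version records, for every wrapped row, the index of the source line it came from (src_idx); a highlight expressed as a half-open range of source-line indices can then be applied row by row when rendering the page. A small self-contained sketch of that bookkeeping, with word wrapping simplified to fixed-width chunking and all names illustrative rather than the crate's API:

```rust
/// Sketch of the wrap cache: wrap each source line and remember, for every
/// wrapped row, which source line produced it (src_idx).
fn wrap_with_src_idx(lines: &[&str], width: usize) -> (Vec<String>, Vec<usize>) {
    let mut wrapped = Vec::new();
    let mut src_idx = Vec::new();
    for (i, line) in lines.iter().enumerate() {
        // Simplified wrapping: split into fixed-width character chunks.
        let chars: Vec<char> = line.chars().collect();
        let chunks = if chars.is_empty() { 1 } else { chars.len().div_ceil(width) };
        for c in 0..chunks {
            let start = c * width;
            let end = (start + width).min(chars.len());
            wrapped.push(chars[start..end].iter().collect());
            src_idx.push(i);
        }
    }
    (wrapped, src_idx)
}

fn main() {
    let lines = ["user", "a fairly long message that wraps", ""];
    let (wrapped, src_idx) = wrap_with_src_idx(&lines, 10);
    // Highlight source lines [0, 2): every wrapped row whose src_idx falls in
    // that range would be rendered reversed in the overlay.
    let highlight = 0..2;
    for (row, text) in wrapped.iter().enumerate() {
        let marked = highlight.contains(&src_idx[row]);
        println!("{} {}", if marked { ">" } else { " " }, text);
    }
}
```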
View File

@@ -2,7 +2,7 @@
source: tui/src/history_cell.rs
expression: rendered
---
one two
three four
five six
seven
▌one two
▌three four
▌five six
▌seven

View File

@@ -4,10 +4,10 @@ expression: term.backend()
---
"/ T R A N S C R I P T / / / / / / / / / "
"alpha "
" "
"beta "
" "
"gamma "
"~ "
"~ "
"───────────────────────────────── 100% ─"
" ↑/↓ scroll PgUp/PgDn page Home/End "
" q quit Esc edit prev "

View File

@@ -2,5 +2,5 @@
source: tui/src/status_indicator_widget.rs
expression: terminal.backend()
---
" Working (0s • Esc "
" Working (0s • Esc t"
" "

View File

@@ -2,11 +2,11 @@
source: tui/src/status_indicator_widget.rs
expression: terminal.backend()
---
" Working (0s • Esc to interrupt) "
" "
" Working (0s • Esc to interrupt) "
" ↳ first "
" ↳ second "
" ⌥↑ edit "
" "
" "
" "
" "

View File

@@ -2,5 +2,5 @@
source: tui/src/status_indicator_widget.rs
expression: terminal.backend()
---
" Working (0s • Esc to interrupt) "
" Working (0s • Esc to interrupt) "
" "

View File

@@ -17,7 +17,6 @@ use crate::app_event_sender::AppEventSender;
use crate::key_hint;
use crate::shimmer::shimmer_spans;
use crate::tui::FrameRequester;
use crate::ui_consts::LIVE_PREFIX_COLS;
pub(crate) struct StatusIndicatorWidget {
/// Animated header text (defaults to "Working").
@@ -33,7 +32,7 @@ pub(crate) struct StatusIndicatorWidget {
}
// Format elapsed seconds into a compact human-friendly form used by the status line.
// Examples: 0s, 59s, 1m 00s, 59m 59s, 1h 00m 00s, 2h 03m 09s
// Examples: 0s, 59s, 1m00s, 59m59s, 1h00m00s, 2h03m09s
fn fmt_elapsed_compact(elapsed_secs: u64) -> String {
if elapsed_secs < 60 {
return format!("{elapsed_secs}s");
@@ -41,12 +40,12 @@ fn fmt_elapsed_compact(elapsed_secs: u64) -> String {
if elapsed_secs < 3600 {
let minutes = elapsed_secs / 60;
let seconds = elapsed_secs % 60;
return format!("{minutes}m {seconds:02}s");
return format!("{minutes}m{seconds:02}s");
}
let hours = elapsed_secs / 3600;
let minutes = (elapsed_secs % 3600) / 60;
let seconds = elapsed_secs % 60;
format!("{hours}h {minutes:02}m {seconds:02}s")
format!("{hours}h{minutes:02}m{seconds:02}s")
}
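// For example, with the compact (no-space) variant above, 3723 seconds
// (1 hour, 2 minutes, 3 seconds) renders as:
//
//     assert_eq!(fmt_elapsed_compact(3723), "1h02m03s");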
impl StatusIndicatorWidget {
@@ -64,13 +63,10 @@ impl StatusIndicatorWidget {
}
pub fn desired_height(&self, width: u16) -> u16 {
// Status line + optional blank line + wrapped queued messages (up to 3 lines per message)
// Status line + wrapped queued messages (up to 3 lines per message)
// + optional ellipsis line per truncated message + 1 spacer line
let inner_width = width.max(1) as usize;
let mut total: u16 = 1; // status line
if !self.queued_messages.is_empty() {
total = total.saturating_add(1); // blank line between status and queued messages
}
let text_width = inner_width.saturating_sub(3); // account for " ↳ " prefix
if text_width > 0 {
for q in &self.queued_messages {
@@ -160,7 +156,7 @@ impl WidgetRef for StatusIndicatorWidget {
let pretty_elapsed = fmt_elapsed_compact(elapsed);
// Plain rendering: no borders or padding so the live cell is visually indistinguishable from terminal scrollback.
let mut spans = vec![" ".repeat(LIVE_PREFIX_COLS as usize).into()];
let mut spans = vec![" ".into()];
spans.extend(shimmer_spans(&self.header));
spans.extend(vec![
" ".into(),
@@ -172,9 +168,6 @@ impl WidgetRef for StatusIndicatorWidget {
// Build lines: status, then queued messages, then spacer.
let mut lines: Vec<Line<'static>> = Vec::new();
lines.push(Line::from(spans));
if !self.queued_messages.is_empty() {
lines.push(Line::from(""));
}
// Wrap queued messages using textwrap and show up to the first 3 lines per message.
let text_width = area.width.saturating_sub(3); // " ↳ " prefix
for q in &self.queued_messages {
@@ -216,13 +209,13 @@ mod tests {
assert_eq!(fmt_elapsed_compact(0), "0s");
assert_eq!(fmt_elapsed_compact(1), "1s");
assert_eq!(fmt_elapsed_compact(59), "59s");
assert_eq!(fmt_elapsed_compact(60), "1m 00s");
assert_eq!(fmt_elapsed_compact(61), "1m 01s");
assert_eq!(fmt_elapsed_compact(3 * 60 + 5), "3m 05s");
assert_eq!(fmt_elapsed_compact(59 * 60 + 59), "59m 59s");
assert_eq!(fmt_elapsed_compact(3600), "1h 00m 00s");
assert_eq!(fmt_elapsed_compact(3600 + 60 + 1), "1h 01m 01s");
assert_eq!(fmt_elapsed_compact(25 * 3600 + 2 * 60 + 3), "25h 02m 03s");
assert_eq!(fmt_elapsed_compact(60), "1m00s");
assert_eq!(fmt_elapsed_compact(61), "1m01s");
assert_eq!(fmt_elapsed_compact(3 * 60 + 5), "3m05s");
assert_eq!(fmt_elapsed_compact(59 * 60 + 59), "59m59s");
assert_eq!(fmt_elapsed_compact(3600), "1h00m00s");
assert_eq!(fmt_elapsed_compact(3600 + 60 + 1), "1h01m01s");
assert_eq!(fmt_elapsed_compact(25 * 3600 + 2 * 60 + 3), "25h02m03s");
}
#[test]

View File

@@ -1,10 +0,0 @@
//! Shared UI constants for layout and alignment within the TUI.
/// Width (in terminal columns) reserved for the left gutter/prefix used by
/// live cells and aligned widgets.
///
/// Semantics:
/// - Chat composer reserves this many columns for the left border + padding.
/// - Status indicator lines begin with this many spaces for alignment.
/// - User history lines account for this many columns (e.g., "▌ ") when wrapping.
pub(crate) const LIVE_PREFIX_COLS: u16 = 2;

View File

@@ -54,22 +54,22 @@ Send a `tools/list` request and you will see that there are two tools available:
Property | Type | Description
-------------------|----------|----------------------------------------------------------------------------------------------------------
**`prompt`** (required) | string | The initial user prompt to start the Codex conversation.
`approval-policy` | string | Approval policy for shell commands generated by the model: `untrusted`, `on-failure`, `never`.
`base-instructions` | string | The set of instructions to use instead of the default ones.
`config` | object | Individual [config settings](https://github.com/openai/codex/blob/main/docs/config.md#config) that will override what is in `$CODEX_HOME/config.toml`.
`cwd` | string | Working directory for the session. If relative, resolved against the server process's current directory.
`include-plan-tool` | boolean | Whether to include the plan tool in the conversation.
`model` | string | Optional override for the model name (e.g. `o3`, `o4-mini`).
`profile` | string | Configuration profile from `config.toml` to specify default options.
`sandbox` | string | Sandbox mode: `read-only`, `workspace-write`, or `danger-full-access`.
approval-policy | string | Approval policy for shell commands generated by the model: `untrusted`, `on-failure`, `never`.
base-instructions | string | The set of instructions to use instead of the default ones.
config | object | Individual [config settings](https://github.com/openai/codex/blob/main/docs/config.md#config) that will override what is in CODEX_HOME/config.toml.
cwd | string | Working directory for the session. If relative, resolved against the server process's current directory.
include-plan-tool | boolean | Whether to include the plan tool in the conversation.
model | string | Optional override for the model name (e.g. "o3", "o4-mini").
profile | string | Configuration profile from config.toml to specify default options.
**prompt** (required) | string | The initial user prompt to start the Codex conversation.
sandbox | string | Sandbox mode: `read-only`, `workspace-write`, or `danger-full-access`.
**`codex-reply`** - Continue a Codex session by providing the conversation id and prompt. The `codex-reply` tool takes the following properties:
**`codex-reply`** - Continue a Codex session by providing the session id and prompt. The `codex-reply` tool takes the following properties:
Property | Type | Description
-----------|--------|---------------------------------------------------------------
**`prompt`** (required) | string | The next user prompt to continue the Codex conversation.
**`conversationId`** (required) | string | The id of the conversation to continue.
**prompt** (required) | string | The next user prompt to continue the Codex conversation.
**conversationId** (required) | string | The id of the conversation to continue.
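For illustration, a `tools/call` request for the `codex` tool is ordinary MCP JSON-RPC. The sketch below assumes `serde_json` is available; the id, prompt text, and overrides are placeholders, and per the table above only `prompt` is required.

```rust
use serde_json::json;

fn main() {
    // Placeholder request: only `prompt` is required; `sandbox` and
    // `approval-policy` are optional overrides described in the table above.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "codex",
            "arguments": {
                "prompt": "Write a unit test for the elapsed-time formatter.",
                "sandbox": "read-only",
                "approval-policy": "never"
            }
        }
    });
    println!("{}", serde_json::to_string_pretty(&request).expect("valid JSON"));
}
```

A `codex-reply` call takes the same shape, with `prompt` and `conversationId` as its arguments.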
### Trying it Out
> [!TIP]