Compare commits


1 Commit

Author: Dylan Hurd | SHA1: 72d8bde988 | Message: [apply-patch] Handle multiple context lines | Date: 2025-08-25 15:09:37 -07:00
41 changed files with 493 additions and 1458 deletions

.gitignore vendored
View File

@@ -81,4 +81,3 @@ CHANGELOG.ignore.md
# nix related
.direnv
.envrc
scripts/releases/

View File

@@ -11,7 +11,6 @@ In the codex-rs folder where the rust code lives:
Before finalizing a change to `codex-rs`, run `just fmt` (in `codex-rs` directory) to format the code and `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
When running interactively, ask the user before running these commands to finalize.
## TUI style conventions

codex-rs/Cargo.lock generated
View File

@@ -1001,7 +1001,6 @@ dependencies = [
"tui-markdown",
"unicode-segmentation",
"unicode-width 0.1.14",
"url",
"uuid",
"vt100",
]

View File

@@ -63,22 +63,6 @@ codex completion zsh
codex completion fish
```
### Custom Prompts
Save frequently used prompts as Markdown files and reuse them quickly from the slash menu.
- Location: Put files in `$CODEX_HOME/prompts/` (defaults to `~/.codex/prompts/`).
- File type: Only Markdown files with the `.md` extension are recognized.
- Name: The filename without the `.md` extension becomes the slash entry. For a file named `my-prompt.md`, type `/my-prompt`.
- Content: The file contents are sent as your message when you select the item in the slash popup and press Enter.
- How to use:
- Start a new session (Codex loads custom prompts on session start).
- In the composer, type `/` to open the slash popup and begin typing your prompt name.
- Use Up/Down to select it. Press Enter to submit its contents, or Tab to autocomplete the name.
- Notes:
- Files with names that collide with builtin commands (e.g. `/init`) are ignored and won't appear.
- New or changed files are discovered on session start. If you add a new prompt while Codex is running, start a new session to pick it up.
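As a rough sketch of the naming rule above (illustrative only; the helper below is hypothetical and not part of Codex):

```rust
use std::path::Path;

/// Hypothetical helper: derive the slash entry from a prompt file path.
/// Only `.md` files qualify; `~/.codex/prompts/my-prompt.md` becomes `/my-prompt`.
fn slash_entry_name(path: &Path) -> Option<String> {
    let is_md = path
        .extension()
        .and_then(|ext| ext.to_str())
        .is_some_and(|ext| ext.eq_ignore_ascii_case("md"));
    if !is_md {
        return None;
    }
    path.file_stem()
        .and_then(|stem| stem.to_str())
        .map(|stem| stem.to_string())
}

fn main() {
    assert_eq!(
        slash_entry_name(Path::new("my-prompt.md")).as_deref(),
        Some("my-prompt")
    );
    // Non-Markdown files are not picked up.
    assert_eq!(slash_entry_name(Path::new("notes.txt")), None);
}
```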
### Experimenting with the Codex Sandbox
To test what happens when a command is run under the sandbox provided by Codex, we provide the following subcommands in Codex CLI:

View File

@@ -532,32 +532,51 @@ fn compute_replacements(
let mut line_index: usize = 0;
for chunk in chunks {
// If a chunk has a `change_context`, we use seek_sequence to find it, then
// adjust our `line_index` to continue from there.
if let Some(ctx_line) = &chunk.change_context {
if let Some(idx) = seek_sequence::seek_sequence(
original_lines,
std::slice::from_ref(ctx_line),
line_index,
false,
) {
line_index = idx + 1;
} else {
return Err(ApplyPatchError::ComputeReplacements(format!(
"Failed to find context '{}' in {}",
ctx_line,
path.display()
)));
// If a chunk has context lines, we use seek_sequence to find each in order,
// then adjust our `line_index` to continue from there.
if !chunk.context_lines.is_empty() {
let total = chunk.context_lines.len();
for (i, ctx_line) in chunk.context_lines.iter().enumerate() {
if let Some(idx) = seek_sequence::seek_sequence(
original_lines,
std::slice::from_ref(ctx_line),
line_index,
false,
) {
line_index = idx + 1;
} else {
return Err(ApplyPatchError::ComputeReplacements(format!(
"Failed to find context {}/{}: '{}' in {}",
i + 1,
total,
ctx_line,
path.display()
)));
}
}
}
if chunk.old_lines.is_empty() {
// Pure addition (no old lines). We'll add them at the end or just
// before the final empty line if one exists.
let insertion_idx = if original_lines.last().is_some_and(|s| s.is_empty()) {
original_lines.len() - 1
// Pure addition (no old lines).
// Prefer to insert at the matched context anchor if one exists and
// the hunk is not explicitly marked as end-of-file.
let insertion_idx = if chunk.is_end_of_file {
if original_lines.last().is_some_and(|s| s.is_empty()) {
original_lines.len() - 1
} else {
original_lines.len()
}
} else if !chunk.context_lines.is_empty() {
// Insert immediately after the last matched context line.
line_index
} else {
original_lines.len()
// No context provided: fall back to appending at the end (before
// the trailing empty line if present).
if original_lines.last().is_some_and(|s| s.is_empty()) {
original_lines.len() - 1
} else {
original_lines.len()
}
};
replacements.push((insertion_idx, 0, chunk.new_lines.clone()));
continue;
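For readability, here is a minimal standalone sketch of the insertion-point choice above for pure-addition chunks (the function name is made up; `line_index` is assumed to already point just past the last matched context line):

```rust
/// Illustrative only: pick where a pure-addition chunk lands.
fn choose_insertion_idx(
    is_end_of_file: bool,
    has_context: bool,
    line_index: usize,
    original_lines: &[String],
) -> usize {
    // Appending keeps a trailing empty line (from a final newline) at the end.
    let append_at_end = if original_lines.last().is_some_and(|s| s.is_empty()) {
        original_lines.len() - 1
    } else {
        original_lines.len()
    };
    if is_end_of_file {
        append_at_end
    } else if has_context {
        // Insert immediately after the matched context anchor.
        line_index
    } else {
        append_at_end
    }
}

fn main() {
    let original: Vec<String> = vec![
        "class BaseClass:".to_string(),
        "    def method():".to_string(),
    ];
    // With "class BaseClass:" matched at index 0, line_index is 1, so the new
    // lines are inserted right after the class header.
    assert_eq!(choose_insertion_idx(false, true, 1, &original), 1);
    // Without context, the chunk is appended at the end of the file.
    assert_eq!(choose_insertion_idx(false, false, 0, &original), 2);
}
```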
@@ -1270,6 +1289,57 @@ g
);
}
#[test]
fn test_insert_addition_after_single_context_anchor() {
let dir = tempdir().unwrap();
let path = dir.path().join("single_ctx.txt");
fs::write(&path, "class BaseClass:\n def method():\nline1\nline2\n").unwrap();
let patch = wrap_patch(&format!(
r#"*** Update File: {}
@@ class BaseClass:
+INSERTED
"#,
path.display()
));
let mut stdout = Vec::new();
let mut stderr = Vec::new();
apply_patch(&patch, &mut stdout, &mut stderr).unwrap();
let contents = fs::read_to_string(path).unwrap();
assert_eq!(
contents,
"class BaseClass:\nINSERTED\n def method():\nline1\nline2\n"
);
}
#[test]
fn test_insert_addition_after_multi_context_anchor() {
let dir = tempdir().unwrap();
let path = dir.path().join("multi_ctx.txt");
fs::write(&path, "class BaseClass:\n def method():\nline1\nline2\n").unwrap();
let patch = wrap_patch(&format!(
r#"*** Update File: {}
@@ class BaseClass:
@@ def method():
+INSERTED
"#,
path.display()
));
let mut stdout = Vec::new();
let mut stderr = Vec::new();
apply_patch(&patch, &mut stdout, &mut stderr).unwrap();
let contents = fs::read_to_string(path).unwrap();
assert_eq!(
contents,
"class BaseClass:\n def method():\nINSERTED\nline1\nline2\n"
);
}
#[test]
fn test_apply_patch_should_resolve_absolute_paths_in_cwd() {
let session_dir = tempdir().unwrap();

View File

@@ -69,7 +69,7 @@ pub enum Hunk {
path: PathBuf,
move_path: Option<PathBuf>,
/// Chunks should be in order, i.e. the `change_context` of one chunk
/// Chunks should be in order, i.e. the first context line of one chunk
/// should occur later in the file than the previous chunk.
chunks: Vec<UpdateFileChunk>,
},
@@ -89,12 +89,13 @@ use Hunk::*;
#[derive(Debug, PartialEq, Clone)]
pub struct UpdateFileChunk {
/// A single line of context used to narrow down the position of the chunk
/// (this is usually a class, method, or function definition.)
pub change_context: Option<String>,
/// Context lines used to narrow down the position of the chunk.
/// Each entry is searched sequentially to progressively restrict the
/// search to the desired region (e.g. class → method).
pub context_lines: Vec<String>,
/// A contiguous block of lines that should be replaced with `new_lines`.
/// `old_lines` must occur strictly after `change_context`.
/// `old_lines` must occur strictly after the context.
pub old_lines: Vec<String>,
pub new_lines: Vec<String>,
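To make the new field concrete, a chunk that narrows from a class header down to a method and swaps a single line would carry values along these lines (illustrative only, using the `UpdateFileChunk` struct shown above):

```rust
let chunk = UpdateFileChunk {
    context_lines: vec![
        "class BaseClass:".to_string(),
        "    def method():".to_string(),
    ],
    old_lines: vec!["        return False".to_string()],
    new_lines: vec!["        return True".to_string()],
    is_end_of_file: false,
};
```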
@@ -344,32 +345,38 @@ fn parse_update_file_chunk(
line_number,
});
}
// If we see an explicit context marker @@ or @@ <context>, consume it; otherwise, optionally
// allow treating the chunk as starting directly with diff lines.
let (change_context, start_index) = if lines[0] == EMPTY_CHANGE_CONTEXT_MARKER {
(None, 1)
} else if let Some(context) = lines[0].strip_prefix(CHANGE_CONTEXT_MARKER) {
(Some(context.to_string()), 1)
} else {
if !allow_missing_context {
return Err(InvalidHunkError {
message: format!(
"Expected update hunk to start with a @@ context marker, got: '{}'",
lines[0]
),
line_number,
});
let mut context_lines = Vec::new();
let mut start_index = 0;
let mut saw_context_marker = false;
while start_index < lines.len() {
if lines[start_index] == EMPTY_CHANGE_CONTEXT_MARKER {
saw_context_marker = true;
start_index += 1;
} else if let Some(context) = lines[start_index].strip_prefix(CHANGE_CONTEXT_MARKER) {
saw_context_marker = true;
context_lines.push(context.to_string());
start_index += 1;
} else {
break;
}
(None, 0)
};
}
if !saw_context_marker && !allow_missing_context {
return Err(InvalidHunkError {
message: format!(
"Expected update hunk to start with a @@ context marker, got: '{}'",
lines[0]
),
line_number,
});
}
if start_index >= lines.len() {
return Err(InvalidHunkError {
message: "Update hunk does not contain any lines".to_string(),
line_number: line_number + 1,
line_number: line_number + start_index,
});
}
let mut chunk = UpdateFileChunk {
change_context,
context_lines,
old_lines: Vec::new(),
new_lines: Vec::new(),
is_end_of_file: false,
@@ -381,7 +388,7 @@ fn parse_update_file_chunk(
if parsed_lines == 0 {
return Err(InvalidHunkError {
message: "Update hunk does not contain any lines".to_string(),
line_number: line_number + 1,
line_number: line_number + start_index,
});
}
chunk.is_end_of_file = true;
@@ -411,7 +418,7 @@ fn parse_update_file_chunk(
message: format!(
"Unexpected line found in update hunk: '{line_contents}'. Every line should start with ' ' (context line), '+' (added line), or '-' (removed line)"
),
line_number: line_number + 1,
line_number: line_number + start_index,
});
}
// Assume this is the start of the next hunk.
@@ -491,7 +498,7 @@ fn test_parse_patch() {
path: PathBuf::from("path/update.py"),
move_path: Some(PathBuf::from("path/update2.py")),
chunks: vec![UpdateFileChunk {
change_context: Some("def f():".to_string()),
context_lines: vec!["def f():".to_string()],
old_lines: vec![" pass".to_string()],
new_lines: vec![" return 123".to_string()],
is_end_of_file: false
@@ -518,7 +525,7 @@ fn test_parse_patch() {
path: PathBuf::from("file.py"),
move_path: None,
chunks: vec![UpdateFileChunk {
change_context: None,
context_lines: Vec::new(),
old_lines: vec![],
new_lines: vec!["line".to_string()],
is_end_of_file: false
@@ -548,7 +555,7 @@ fn test_parse_patch() {
path: PathBuf::from("file2.py"),
move_path: None,
chunks: vec![UpdateFileChunk {
change_context: None,
context_lines: Vec::new(),
old_lines: vec!["import foo".to_string()],
new_lines: vec!["import foo".to_string(), "bar".to_string()],
is_end_of_file: false,
@@ -568,7 +575,7 @@ fn test_parse_patch_lenient() {
path: PathBuf::from("file2.py"),
move_path: None,
chunks: vec![UpdateFileChunk {
change_context: None,
context_lines: Vec::new(),
old_lines: vec!["import foo".to_string()],
new_lines: vec!["import foo".to_string(), "bar".to_string()],
is_end_of_file: false,
@@ -701,7 +708,7 @@ fn test_update_file_chunk() {
),
Ok((
(UpdateFileChunk {
change_context: Some("change_context".to_string()),
context_lines: vec!["change_context".to_string()],
old_lines: vec![
"".to_string(),
"context".to_string(),
@@ -723,7 +730,7 @@ fn test_update_file_chunk() {
parse_update_file_chunk(&["@@", "+line", "*** End of File"], 123, false),
Ok((
(UpdateFileChunk {
change_context: None,
context_lines: Vec::new(),
old_lines: vec![],
new_lines: vec!["line".to_string()],
is_end_of_file: true
@@ -731,4 +738,29 @@ fn test_update_file_chunk() {
3
))
);
assert_eq!(
parse_update_file_chunk(
&[
"@@ class BaseClass",
"@@ def method()",
" context",
"-old",
"+new",
],
123,
false
),
Ok((
(UpdateFileChunk {
context_lines: vec![
"class BaseClass".to_string(),
" def method()".to_string()
],
old_lines: vec!["context".to_string(), "old".to_string()],
new_lines: vec!["context".to_string(), "new".to_string()],
is_end_of_file: false
}),
5
))
);
}

View File

@@ -1,5 +1,6 @@
use std::io::BufRead;
use std::path::Path;
use std::sync::OnceLock;
use std::time::Duration;
use bytes::Bytes;
@@ -7,6 +8,7 @@ use codex_login::AuthManager;
use codex_login::AuthMode;
use eventsource_stream::Eventsource;
use futures::prelude::*;
use regex_lite::Regex;
use reqwest::StatusCode;
use serde::Deserialize;
use serde::Serialize;
@@ -52,11 +54,8 @@ struct ErrorResponse {
#[derive(Debug, Deserialize)]
struct Error {
r#type: Option<String>,
code: Option<String>,
message: Option<String>,
// Optional fields available on "usage_limit_reached" and "usage_not_included" errors
plan_type: Option<String>,
resets_in_seconds: Option<u64>,
}
#[derive(Debug, Clone)]
@@ -143,12 +142,9 @@ impl ModelClient {
}
let auth_manager = self.auth_manager.clone();
let auth = auth_manager.as_ref().and_then(|m| m.auth());
let auth_mode = auth_manager
.as_ref()
.and_then(|m| m.auth())
.as_ref()
.map(|a| a.mode);
let auth_mode = auth.as_ref().map(|a| a.mode);
let store = prompt.store && auth_mode != Some(AuthMode::ChatGPT);
@@ -215,18 +211,15 @@ impl ModelClient {
let mut attempt = 0;
let max_retries = self.provider.request_max_retries();
trace!(
"POST to {}: {}",
self.provider.get_full_url(&auth),
serde_json::to_string(&payload)?
);
loop {
attempt += 1;
// Always fetch the latest auth in case a prior attempt refreshed the token.
let auth = auth_manager.as_ref().and_then(|m| m.auth());
trace!(
"POST to {}: {}",
self.provider.get_full_url(&auth),
serde_json::to_string(&payload)?
);
let mut req_builder = self
.provider
.create_request_builder(&self.client, &auth)
@@ -310,20 +303,19 @@ impl ModelClient {
if status == StatusCode::TOO_MANY_REQUESTS {
let body = res.json::<ErrorResponse>().await.ok();
if let Some(ErrorResponse { error }) = body {
if error.r#type.as_deref() == Some("usage_limit_reached") {
// Prefer the plan_type provided in the error message if present
// because it's more up to date than the one encoded in the auth
// token.
let plan_type = error
.plan_type
.or_else(|| auth.and_then(|a| a.get_plan_type()));
let resets_in_seconds = error.resets_in_seconds;
if let Some(ErrorResponse {
error:
Error {
r#type: Some(error_type),
..
},
}) = body
{
if error_type == "usage_limit_reached" {
return Err(CodexErr::UsageLimitReached(UsageLimitReachedError {
plan_type,
resets_in_seconds,
plan_type: auth.and_then(|a| a.get_plan_type()),
}));
} else if error.r#type.as_deref() == Some("usage_not_included") {
} else if error_type == "usage_not_included" {
return Err(CodexErr::UsageNotIncluded);
}
}
@@ -571,8 +563,9 @@ async fn process_sse<S>(
if let Some(error) = error {
match serde_json::from_value::<Error>(error.clone()) {
Ok(error) => {
let delay = try_parse_retry_after(&error);
let message = error.message.unwrap_or_default();
response_error = Some(CodexErr::Stream(message, None));
response_error = Some(CodexErr::Stream(message, delay));
}
Err(e) => {
debug!("failed to parse ErrorResponse: {e}");
@@ -660,6 +653,40 @@ async fn stream_from_fixture(
Ok(ResponseStream { rx_event })
}
fn rate_limit_regex() -> &'static Regex {
static RE: OnceLock<Regex> = OnceLock::new();
#[expect(clippy::unwrap_used)]
RE.get_or_init(|| Regex::new(r"Please try again in (\d+(?:\.\d+)?)(s|ms)").unwrap())
}
fn try_parse_retry_after(err: &Error) -> Option<Duration> {
if err.code != Some("rate_limit_exceeded".to_string()) {
return None;
}
// Parse the "Please try again in 1.898s" message format using a regex.
let re = rate_limit_regex();
if let Some(message) = &err.message
&& let Some(captures) = re.captures(message)
{
let seconds = captures.get(1);
let unit = captures.get(2);
if let (Some(value), Some(unit)) = (seconds, unit) {
let value = value.as_str().parse::<f64>().ok()?;
let unit = unit.as_str();
if unit == "s" {
return Some(Duration::from_secs_f64(value));
} else if unit == "ms" {
return Some(Duration::from_millis(value as u64));
}
}
}
None
}
#[cfg(test)]
mod tests {
use super::*;
@@ -880,7 +907,7 @@ mod tests {
msg,
"Rate limit reached for gpt-5 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."
);
assert_eq!(*delay, None);
assert_eq!(*delay, Some(Duration::from_secs_f64(11.054)));
}
other => panic!("unexpected second event: {other:?}"),
}
@@ -984,4 +1011,27 @@ mod tests {
);
}
}
#[test]
fn test_try_parse_retry_after() {
let err = Error {
r#type: None,
message: Some("Rate limit reached for gpt-5 in organization org- on tokens per min (TPM): Limit 1, Used 1, Requested 19304. Please try again in 28ms. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
code: Some("rate_limit_exceeded".to_string()),
};
let delay = try_parse_retry_after(&err);
assert_eq!(delay, Some(Duration::from_millis(28)));
}
#[test]
fn test_try_parse_retry_after_no_delay() {
let err = Error {
r#type: None,
message: Some("Rate limit reached for gpt-5 in organization <ORG> on tokens per min (TPM): Limit 30000, Used 6899, Requested 24050. Please try again in 1.898s. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
code: Some("rate_limit_exceeded".to_string()),
};
let delay = try_parse_retry_after(&err);
assert_eq!(delay, Some(Duration::from_secs_f64(1.898)));
}
}

View File

@@ -108,7 +108,6 @@ use crate::user_notification::UserNotification;
use crate::util::backoff;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::custom_prompts::CustomPrompt;
use codex_protocol::models::ContentItem;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::LocalShellAction;
@@ -1153,7 +1152,6 @@ async fn submission_loop(
if let Err(items) = sess.inject_input(items) {
// Derive a fresh TurnContext for this turn using the provided overrides.
let provider = turn_context.client.get_provider();
let auth_manager = turn_context.client.get_auth_manager();
// Derive a model family for the requested model; fall back to the session's.
let model_family = find_family_for_model(&model)
@@ -1168,7 +1166,7 @@ async fn submission_loop(
// Reuse the same provider and session id; auth defaults to env/API key.
let client = ModelClient::new(
Arc::new(per_turn_config),
auth_manager,
None,
provider,
effort,
summary,
@@ -1276,31 +1274,6 @@ async fn submission_loop(
warn!("failed to send McpListToolsResponse event: {e}");
}
}
Op::ListCustomPrompts => {
let tx_event = sess.tx_event.clone();
let sub_id = sub.id.clone();
// Discover prompts under the default prompts dir (includes content).
let custom_prompts: Vec<CustomPrompt> =
tokio::task::spawn_blocking(
|| match crate::custom_prompts::default_prompts_dir() {
Some(dir) => crate::custom_prompts::discover_prompts_in(&dir),
None => Vec::new(),
},
)
.await
.unwrap_or_default();
let event = Event {
id: sub_id,
msg: EventMsg::ListCustomPromptsResponse(
crate::protocol::ListCustomPromptsResponseEvent { custom_prompts },
),
};
if let Err(e) = tx_event.send(event).await {
warn!("failed to send ListCustomPromptsResponse event: {e}");
}
}
Op::Compact => {
// Create a summarization request as user input
const SUMMARIZATION_PROMPT: &str = include_str!("prompt_for_compact_command.md");

View File

@@ -1,95 +0,0 @@
use codex_protocol::custom_prompts::CustomPrompt;
use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;
/// Return the default prompts directory: `$CODEX_HOME/prompts`.
/// If `CODEX_HOME` cannot be resolved, returns `None`.
pub fn default_prompts_dir() -> Option<PathBuf> {
crate::config::find_codex_home()
.ok()
.map(|home| home.join("prompts"))
}
/// Discover prompt files in the given directory, returning entries sorted by name.
/// Non-files are ignored. If the directory does not exist or cannot be read, returns empty.
pub fn discover_prompts_in(dir: &Path) -> Vec<CustomPrompt> {
discover_prompts_in_excluding(dir, &HashSet::new())
}
/// Discover prompt files in the given directory, excluding any with names in `exclude`.
/// Returns entries sorted by name. Non-files are ignored. Missing/unreadable dir yields empty.
pub fn discover_prompts_in_excluding(dir: &Path, exclude: &HashSet<String>) -> Vec<CustomPrompt> {
let mut out: Vec<CustomPrompt> = Vec::new();
if let Ok(entries) = std::fs::read_dir(dir) {
for entry in entries.flatten() {
let path = entry.path();
if !path.is_file() {
continue;
}
// Only include Markdown files with a .md extension.
let is_md = path
.extension()
.and_then(|s| s.to_str())
.map(|ext| ext.eq_ignore_ascii_case("md"))
.unwrap_or(false);
if !is_md {
continue;
}
let Some(name) = path
.file_stem()
.and_then(|s| s.to_str())
.map(|s| s.to_string())
else {
continue;
};
if exclude.contains(&name) {
continue;
}
let content = std::fs::read_to_string(&path).unwrap_or_default();
out.push(CustomPrompt { name, content });
}
out.sort_by(|a, b| a.name.cmp(&b.name));
}
out
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use tempfile::tempdir;
#[test]
fn empty_when_dir_missing() {
let tmp = tempdir().expect("create TempDir");
let missing = tmp.path().join("nope");
let found = discover_prompts_in(&missing);
assert!(found.is_empty());
}
#[test]
fn discovers_and_sorts_files() {
let tmp = tempdir().expect("create TempDir");
let dir = tmp.path();
fs::write(dir.join("b.md"), b"b").unwrap();
fs::write(dir.join("a.md"), b"a").unwrap();
fs::create_dir(dir.join("subdir")).unwrap();
let found = discover_prompts_in(dir);
let names: Vec<String> = found.into_iter().map(|e| e.name).collect();
assert_eq!(names, vec!["a", "b"]);
}
#[test]
fn excludes_builtins() {
let tmp = tempdir().expect("create TempDir");
let dir = tmp.path();
fs::write(dir.join("init.md"), b"ignored").unwrap();
fs::write(dir.join("foo.md"), b"ok").unwrap();
let mut exclude = HashSet::new();
exclude.insert("init".to_string());
let found = discover_prompts_in_excluding(dir, &exclude);
let names: Vec<String> = found.into_iter().map(|e| e.name).collect();
assert_eq!(names, vec!["foo"]);
}
}

View File

@@ -128,70 +128,27 @@ pub enum CodexErr {
#[derive(Debug)]
pub struct UsageLimitReachedError {
pub plan_type: Option<String>,
pub resets_in_seconds: Option<u64>,
}
impl std::fmt::Display for UsageLimitReachedError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Base message differs slightly for legacy ChatGPT Plus plan users.
if let Some(plan_type) = &self.plan_type
&& plan_type == "plus"
{
write!(
f,
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again"
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), or wait for limits to reset (every 5h and every week.)."
)?;
if let Some(secs) = self.resets_in_seconds {
let reset_duration = format_reset_duration(secs);
write!(f, " in {reset_duration}.")?;
} else {
write!(f, " later.")?;
}
} else {
write!(f, "You've hit your usage limit.")?;
if let Some(secs) = self.resets_in_seconds {
let reset_duration = format_reset_duration(secs);
write!(f, " Try again in {reset_duration}.")?;
} else {
write!(f, " Try again later.")?;
}
write!(
f,
"You've hit your usage limit. Limits reset every 5h and every week."
)?;
}
Ok(())
}
}
fn format_reset_duration(total_secs: u64) -> String {
let days = total_secs / 86_400;
let hours = (total_secs % 86_400) / 3_600;
let minutes = (total_secs % 3_600) / 60;
let mut parts: Vec<String> = Vec::new();
if days > 0 {
let unit = if days == 1 { "day" } else { "days" };
parts.push(format!("{} {}", days, unit));
}
if hours > 0 {
let unit = if hours == 1 { "hour" } else { "hours" };
parts.push(format!("{} {}", hours, unit));
}
if minutes > 0 {
let unit = if minutes == 1 { "minute" } else { "minutes" };
parts.push(format!("{} {}", minutes, unit));
}
if parts.is_empty() {
return "less than a minute".to_string();
}
match parts.len() {
1 => parts[0].clone(),
2 => format!("{} {}", parts[0], parts[1]),
_ => format!("{} {} {}", parts[0], parts[1], parts[2]),
}
}
#[derive(Debug)]
pub struct EnvVarError {
/// Name of the environment variable that is missing.
@@ -224,8 +181,6 @@ impl CodexErr {
pub fn get_error_message_ui(e: &CodexErr) -> String {
match e {
CodexErr::Sandbox(SandboxErr::Denied(_, _, stderr)) => stderr.to_string(),
// Timeouts are not sandbox errors from a UX perspective; present them plainly
CodexErr::Sandbox(SandboxErr::Timeout) => "error: command timed out".to_string(),
_ => e.to_string(),
}
}
@@ -238,23 +193,19 @@ mod tests {
fn usage_limit_reached_error_formats_plus_plan() {
let err = UsageLimitReachedError {
plan_type: Some("plus".to_string()),
resets_in_seconds: None,
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again later."
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), or wait for limits to reset (every 5h and every week.)."
);
}
#[test]
fn usage_limit_reached_error_formats_default_when_none() {
let err = UsageLimitReachedError {
plan_type: None,
resets_in_seconds: None,
};
let err = UsageLimitReachedError { plan_type: None };
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again later."
"You've hit your usage limit. Limits reset every 5h and every week."
);
}
@@ -262,59 +213,10 @@ mod tests {
fn usage_limit_reached_error_formats_default_for_other_plans() {
let err = UsageLimitReachedError {
plan_type: Some("pro".to_string()),
resets_in_seconds: None,
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again later."
);
}
#[test]
fn usage_limit_reached_includes_minutes_when_available() {
let err = UsageLimitReachedError {
plan_type: None,
resets_in_seconds: Some(5 * 60),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in 5 minutes."
);
}
#[test]
fn usage_limit_reached_includes_hours_and_minutes() {
let err = UsageLimitReachedError {
plan_type: Some("plus".to_string()),
resets_in_seconds: Some(3 * 3600 + 32 * 60),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again in 3 hours 32 minutes."
);
}
#[test]
fn usage_limit_reached_includes_days_hours_minutes() {
let err = UsageLimitReachedError {
plan_type: None,
resets_in_seconds: Some(2 * 86_400 + 3 * 3600 + 5 * 60),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in 2 days 3 hours 5 minutes."
);
}
#[test]
fn usage_limit_reached_less_than_minute() {
let err = UsageLimitReachedError {
plan_type: None,
resets_in_seconds: Some(30),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in less than a minute."
"You've hit your usage limit. Limits reset every 5h and every week."
);
}
}

View File

@@ -17,7 +17,6 @@ pub mod config;
pub mod config_profile;
pub mod config_types;
mod conversation_history;
pub mod custom_prompts;
mod environment_context;
pub mod error;
pub mod exec;

View File

@@ -17,10 +17,6 @@ use crate::error::EnvVarError;
const DEFAULT_STREAM_IDLE_TIMEOUT_MS: u64 = 300_000;
const DEFAULT_STREAM_MAX_RETRIES: u64 = 5;
const DEFAULT_REQUEST_MAX_RETRIES: u64 = 4;
/// Hard cap for user-configured `stream_max_retries`.
const MAX_STREAM_MAX_RETRIES: u64 = 100;
/// Hard cap for user-configured `request_max_retries`.
const MAX_REQUEST_MAX_RETRIES: u64 = 100;
/// Wire protocol that the provider speaks. Most third-party services only
/// implement the classic OpenAI Chat Completions JSON schema, whereas OpenAI
@@ -211,14 +207,12 @@ impl ModelProviderInfo {
pub fn request_max_retries(&self) -> u64 {
self.request_max_retries
.unwrap_or(DEFAULT_REQUEST_MAX_RETRIES)
.min(MAX_REQUEST_MAX_RETRIES)
}
/// Effective maximum number of stream reconnection attempts for this provider.
pub fn stream_max_retries(&self) -> u64 {
self.stream_max_retries
.unwrap_or(DEFAULT_STREAM_MAX_RETRIES)
.min(MAX_STREAM_MAX_RETRIES)
}
/// Effective idle timeout for streaming responses.

View File

@@ -533,9 +533,6 @@ impl EventProcessor for EventProcessorWithHumanOutput {
EventMsg::McpListToolsResponse(_) => {
// Currently ignored in exec output.
}
EventMsg::ListCustomPromptsResponse(_) => {
// Currently ignored in exec output.
}
EventMsg::TurnAborted(abort_reason) => match abort_reason.reason {
TurnAbortReason::Interrupted => {
ts_println!(self, "task interrupted");

View File

@@ -0,0 +1,25 @@
[
{
"type": "response.output_item.done",
"item": {
"type": "custom_tool_call",
"name": "apply_patch",
"input": "*** Begin Patch\n*** Update File: app.py\n@@ class BaseClass:\n@@ def method():\n- return False\n+ return True\n*** End Patch",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]

View File

@@ -106,3 +106,41 @@ async fn test_apply_patch_freeform_tool() -> anyhow::Result<()> {
);
Ok(())
}
#[cfg(not(target_os = "windows"))]
#[tokio::test]
async fn test_apply_patch_context() -> anyhow::Result<()> {
use crate::suite::common::run_e2e_exec_test;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
}
let tmp_cwd = tempdir().expect("failed to create temp dir");
run_e2e_exec_test(
tmp_cwd.path(),
vec![
include_str!("../fixtures/sse_apply_patch_freeform_add.json").to_string(),
include_str!("../fixtures/sse_apply_patch_context_update.json").to_string(),
include_str!("../fixtures/sse_response_completed.json").to_string(),
],
)
.await;
// Verify final file contents
let final_path = tmp_cwd.path().join("app.py");
let contents = std::fs::read_to_string(&final_path)
.unwrap_or_else(|e| panic!("failed reading {}: {e}", final_path.display()));
assert_eq!(
contents,
r#"class BaseClass:
def method():
return True
"#
);
Ok(())
}

View File

@@ -264,7 +264,6 @@ async fn run_codex_tool_session_inner(
| EventMsg::McpToolCallBegin(_)
| EventMsg::McpToolCallEnd(_)
| EventMsg::McpListToolsResponse(_)
| EventMsg::ListCustomPromptsResponse(_)
| EventMsg::ExecCommandBegin(_)
| EventMsg::ExecCommandOutputDelta(_)
| EventMsg::ExecCommandEnd(_)

View File

@@ -1,8 +0,0 @@
use serde::Deserialize;
use serde::Serialize;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CustomPrompt {
pub name: String,
pub content: String,
}

View File

@@ -1,5 +1,4 @@
pub mod config_types;
pub mod custom_prompts;
pub mod mcp_protocol;
pub mod message_history;
pub mod models;

View File

@@ -10,7 +10,6 @@ use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;
use crate::custom_prompts::CustomPrompt;
use mcp_types::CallToolResult;
use mcp_types::Tool as McpTool;
use serde::Deserialize;
@@ -147,9 +146,6 @@ pub enum Op {
/// Reply is delivered via `EventMsg::McpListToolsResponse`.
ListMcpTools,
/// Request the list of available custom prompts.
ListCustomPrompts,
/// Request the agent to summarize the current conversation context.
/// The agent will use its existing context (either conversation history or previous response id)
/// to generate a summary which will be returned as an AgentMessage event.
@@ -476,9 +472,6 @@ pub enum EventMsg {
/// List of MCP tools available to the agent.
McpListToolsResponse(McpListToolsResponseEvent),
/// List of custom prompts available to the agent.
ListCustomPromptsResponse(ListCustomPromptsResponseEvent),
PlanUpdate(UpdatePlanArgs),
TurnAborted(TurnAbortedEvent),
@@ -808,12 +801,6 @@ pub struct McpListToolsResponseEvent {
pub tools: std::collections::HashMap<String, McpTool>,
}
/// Response payload for `Op::ListCustomPrompts`.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ListCustomPromptsResponseEvent {
pub custom_prompts: Vec<CustomPrompt>,
}
#[derive(Debug, Default, Clone, Deserialize, Serialize)]
pub struct SessionConfiguredEvent {
/// Unique id for this session.

View File

@@ -40,10 +40,7 @@ codex-login = { path = "../login" }
codex-ollama = { path = "../ollama" }
codex-protocol = { path = "../protocol" }
color-eyre = "0.6.3"
crossterm = { version = "0.28.1", features = [
"bracketed-paste",
"event-stream",
] }
crossterm = { version = "0.28.1", features = ["bracketed-paste", "event-stream"] }
diffy = "0.4.2"
image = { version = "^0.25.6", default-features = false, features = [
"jpeg",
@@ -85,7 +82,6 @@ tui-input = "0.14.0"
tui-markdown = "0.3.3"
unicode-segmentation = "1.12.0"
unicode-width = "0.1"
url = "2"
uuid = "1"
[target.'cfg(unix)'.dependencies]

View File

@@ -276,6 +276,22 @@ impl App {
async fn handle_key_event(&mut self, tui: &mut tui::Tui, key_event: KeyEvent) {
match key_event {
KeyEvent {
code: KeyCode::Char('c'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
..
} => {
self.chat_widget.on_ctrl_c();
}
KeyEvent {
code: KeyCode::Char('d'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
..
} if self.chat_widget.composer_is_empty() => {
self.app_event_tx.send(AppEvent::ExitRequest);
}
KeyEvent {
code: KeyCode::Char('t'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
@@ -287,21 +303,13 @@ impl App {
self.transcript_overlay = Some(TranscriptApp::new(self.transcript_lines.clone()));
tui.frame_requester().schedule_frame();
}
// Esc primes/advances backtracking only in normal (not working) mode
// with an empty composer. In any other state, forward Esc so the
// active UI (e.g. status indicator, modals, popups) handles it.
// Esc primes/advances backtracking when composer is empty.
KeyEvent {
code: KeyCode::Esc,
kind: KeyEventKind::Press | KeyEventKind::Repeat,
..
} => {
if self.chat_widget.is_normal_backtrack_mode()
&& self.chat_widget.composer_is_empty()
{
self.handle_backtrack_esc_key(tui);
} else {
self.chat_widget.handle_key_event(key_event);
}
self.handle_backtrack_esc_key(tui);
}
// Enter confirms backtrack when primed + count > 0. Otherwise pass to widget.
KeyEvent {

View File

@@ -207,9 +207,9 @@ impl App {
overlay.handle_event(tui, event)?;
if overlay.is_done {
self.close_transcript_overlay(tui);
tui.frame_requester().schedule_frame();
}
}
tui.frame_requester().schedule_frame();
Ok(())
}

View File

@@ -1,7 +1,6 @@
use codex_core::protocol::TokenUsage;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use crossterm::event::KeyModifiers;
use ratatui::buffer::Buffer;
use ratatui::layout::Constraint;
@@ -22,18 +21,14 @@ use ratatui::widgets::StatefulWidgetRef;
use ratatui::widgets::WidgetRef;
use super::chat_composer_history::ChatComposerHistory;
use super::command_popup::CommandItem;
use super::command_popup::CommandPopup;
use super::file_search_popup::FileSearchPopup;
use crate::slash_command::SlashCommand;
use codex_protocol::custom_prompts::CustomPrompt;
use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
use crate::bottom_pane::textarea::TextArea;
use crate::bottom_pane::textarea::TextAreaState;
use crate::clipboard_paste::normalize_pasted_path;
use crate::clipboard_paste::pasted_image_format;
use codex_file_search::FileMatch;
use std::cell::RefCell;
use std::collections::HashMap;
@@ -102,7 +97,6 @@ pub(crate) struct ChatComposer {
// Buffer to accumulate characters during a detected non-bracketed paste burst.
paste_burst_buffer: String,
in_paste_burst_mode: bool,
custom_prompts: Vec<CustomPrompt>,
}
/// Popup state at most one can be visible at any time.
@@ -142,7 +136,6 @@ impl ChatComposer {
paste_burst_until: None,
paste_burst_buffer: String::new(),
in_paste_burst_mode: false,
custom_prompts: Vec::new(),
}
}
@@ -227,8 +220,6 @@ impl ChatComposer {
let placeholder = format!("[Pasted Content {char_count} chars]");
self.textarea.insert_element(&placeholder);
self.pending_pastes.push((placeholder, pasted));
} else if self.handle_paste_image_path(pasted.clone()) {
self.textarea.insert_str(" ");
} else {
self.textarea.insert_str(&pasted);
}
@@ -241,25 +232,6 @@ impl ChatComposer {
true
}
pub fn handle_paste_image_path(&mut self, pasted: String) -> bool {
let Some(path_buf) = normalize_pasted_path(&pasted) else {
return false;
};
match image::image_dimensions(&path_buf) {
Ok((w, h)) => {
tracing::info!("OK: {pasted}");
let format_label = pasted_image_format(&path_buf).label();
self.attach_image(path_buf, w, h, format_label);
true
}
Err(err) => {
tracing::info!("ERR: {err}");
false
}
}
}
/// Replace the entire composer content with `text` and reset cursor.
pub(crate) fn set_text_content(&mut self, text: String) {
self.textarea.set_text(&text);
@@ -335,11 +307,6 @@ impl ChatComposer {
result
}
/// Return true if either the slash-command popup or the file-search popup is active.
pub(crate) fn popup_active(&self) -> bool {
!matches!(self.active_popup, ActivePopup::None)
}
/// Handle key event when the slash-command popup is visible.
fn handle_key_event_with_slash_popup(&mut self, key_event: KeyEvent) -> (InputResult, bool) {
let ActivePopup::Command(popup) = &mut self.active_popup else {
@@ -360,37 +327,19 @@ impl ChatComposer {
popup.move_down();
(InputResult::None, true)
}
KeyEvent {
code: KeyCode::Esc, ..
} => {
// Dismiss the slash popup; keep the current input untouched.
self.active_popup = ActivePopup::None;
(InputResult::None, true)
}
KeyEvent {
code: KeyCode::Tab, ..
} => {
if let Some(sel) = popup.selected_item() {
if let Some(cmd) = popup.selected_command() {
let first_line = self.textarea.text().lines().next().unwrap_or("");
match sel {
CommandItem::Builtin(cmd) => {
let starts_with_cmd = first_line
.trim_start()
.starts_with(&format!("/{}", cmd.command()));
if !starts_with_cmd {
self.textarea.set_text(&format!("/{} ", cmd.command()));
}
}
CommandItem::Prompt(idx) => {
if let Some(name) = popup.prompt_name(idx) {
let starts_with_cmd =
first_line.trim_start().starts_with(&format!("/{name}"));
if !starts_with_cmd {
self.textarea.set_text(&format!("/{name} "));
}
}
}
let starts_with_cmd = first_line
.trim_start()
.starts_with(&format!("/{}", cmd.command()));
if !starts_with_cmd {
self.textarea.set_text(&format!("/{} ", cmd.command()));
self.textarea.set_cursor(self.textarea.text().len());
}
// After completing the command, move cursor to the end.
if !self.textarea.text().is_empty() {
@@ -405,30 +354,16 @@ impl ChatComposer {
modifiers: KeyModifiers::NONE,
..
} => {
if let Some(sel) = popup.selected_item() {
if let Some(cmd) = popup.selected_command() {
// Clear textarea so no residual text remains.
self.textarea.set_text("");
// Capture any needed data from popup before clearing it.
let prompt_content = match sel {
CommandItem::Prompt(idx) => {
popup.prompt_content(idx).map(|s| s.to_string())
}
_ => None,
};
// Hide popup since an action has been dispatched.
let result = (InputResult::Command(*cmd), true);
// Hide popup since the command has been dispatched.
self.active_popup = ActivePopup::None;
match sel {
CommandItem::Builtin(cmd) => {
return (InputResult::Command(cmd), true);
}
CommandItem::Prompt(_) => {
if let Some(contents) = prompt_content {
return (InputResult::Submitted(contents), true);
}
return (InputResult::None, true);
}
}
return result;
}
// Fallback to default newline handling if no command selected.
self.handle_key_event_without_popup(key_event)
@@ -687,15 +622,6 @@ impl ChatComposer {
/// Handle key event when no popup is visible.
fn handle_key_event_without_popup(&mut self, key_event: KeyEvent) -> (InputResult, bool) {
match key_event {
KeyEvent {
code: KeyCode::Char('d'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
..
} if self.is_empty() => {
self.app_event_tx.send(AppEvent::ExitRequest);
(InputResult::None, true)
}
// -------------------------------------------------------------
// History navigation (Up / Down) only when the composer is not
// empty or when the cursor is at the correct position, to avoid
@@ -792,6 +718,13 @@ impl ChatComposer {
}
self.pending_pastes.clear();
// Strip image placeholders from the submitted text; images are retrieved via take_recent_submission_images()
for img in &self.attached_images {
if text.contains(&img.placeholder) {
text = text.replace(&img.placeholder, "");
}
}
text = text.trim().to_string();
if !text.is_empty() {
self.history.record_local_submission(&text);
@@ -1117,7 +1050,7 @@ impl ChatComposer {
}
_ => {
if input_starts_with_slash {
let mut command_popup = CommandPopup::new(self.custom_prompts.clone());
let mut command_popup = CommandPopup::new();
command_popup.on_composer_text_change(first_line.to_string());
self.active_popup = ActivePopup::Command(command_popup);
}
@@ -1125,13 +1058,6 @@ impl ChatComposer {
}
}
pub(crate) fn set_custom_prompts(&mut self, prompts: Vec<CustomPrompt>) {
self.custom_prompts = prompts.clone();
if let ActivePopup::Command(popup) = &mut self.active_popup {
popup.set_prompts(prompts);
}
}
/// Synchronize `self.file_search_popup` with the current text in the textarea.
/// Note this is only called when self.active_popup is NOT Command.
fn sync_file_search_popup(&mut self) {
@@ -1298,10 +1224,7 @@ impl WidgetRef for ChatComposer {
#[cfg(test)]
mod tests {
use super::*;
use image::ImageBuffer;
use image::Rgba;
use std::path::PathBuf;
use tempfile::tempdir;
use crate::app_event::AppEvent;
use crate::bottom_pane::AppEventSender;
@@ -1884,7 +1807,7 @@ mod tests {
let (result, _) =
composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));
match result {
InputResult::Submitted(text) => assert_eq!(text, "[image 32x16 PNG] hi"),
InputResult::Submitted(text) => assert_eq!(text, "hi"),
_ => panic!("expected Submitted"),
}
let imgs = composer.take_recent_submission_images();
@@ -1902,7 +1825,7 @@ mod tests {
let (result, _) =
composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));
match result {
InputResult::Submitted(text) => assert_eq!(text, "[image 10x5 PNG]"),
InputResult::Submitted(text) => assert!(text.is_empty()),
_ => panic!("expected Submitted"),
}
let imgs = composer.take_recent_submission_images();
@@ -1978,53 +1901,4 @@ mod tests {
"one image mapping remains"
);
}
#[test]
fn pasting_filepath_attaches_image() {
let tmp = tempdir().expect("create TempDir");
let tmp_path: PathBuf = tmp.path().join("codex_tui_test_paste_image.png");
let img: ImageBuffer<Rgba<u8>, Vec<u8>> =
ImageBuffer::from_fn(3, 2, |_x, _y| Rgba([1, 2, 3, 255]));
img.save(&tmp_path).expect("failed to write temp png");
let (tx, _rx) = unbounded_channel::<AppEvent>();
let sender = AppEventSender::new(tx);
let mut composer =
ChatComposer::new(true, sender, false, "Ask Codex to do anything".to_string());
let needs_redraw = composer.handle_paste(tmp_path.to_string_lossy().to_string());
assert!(needs_redraw);
assert!(composer.textarea.text().starts_with("[image 3x2 PNG] "));
let imgs = composer.take_recent_submission_images();
assert_eq!(imgs, vec![tmp_path.clone()]);
}
#[test]
fn selecting_custom_prompt_submits_file_contents() {
let prompt_text = "Hello from saved prompt";
let (tx, _rx) = unbounded_channel::<AppEvent>();
let sender = AppEventSender::new(tx);
let mut composer =
ChatComposer::new(true, sender, false, "Ask Codex to do anything".to_string());
// Inject prompts as if received via event.
composer.set_custom_prompts(vec![CustomPrompt {
name: "my-prompt".to_string(),
content: prompt_text.to_string(),
}]);
// Type the prompt name to focus it in the slash popup and press Enter.
for ch in ['/', 'm', 'y', '-', 'p', 'r', 'o', 'm', 'p', 't'] {
let _ = composer.handle_key_event(KeyEvent::new(KeyCode::Char(ch), KeyModifiers::NONE));
}
let (result, _needs_redraw) =
composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));
match result {
InputResult::Submitted(s) => assert_eq!(s, prompt_text),
_ => panic!("expected Submitted with prompt contents"),
}
}
}

View File

@@ -9,58 +9,22 @@ use super::selection_popup_common::render_rows;
use crate::slash_command::SlashCommand;
use crate::slash_command::built_in_slash_commands;
use codex_common::fuzzy_match::fuzzy_match;
use codex_protocol::custom_prompts::CustomPrompt;
/// A selectable item in the popup: either a built-in command or a user prompt.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum CommandItem {
Builtin(SlashCommand),
// Index into `prompts`
Prompt(usize),
}
pub(crate) struct CommandPopup {
command_filter: String,
builtins: Vec<(&'static str, SlashCommand)>,
prompts: Vec<CustomPrompt>,
all_commands: Vec<(&'static str, SlashCommand)>,
state: ScrollState,
}
impl CommandPopup {
pub(crate) fn new(mut prompts: Vec<CustomPrompt>) -> Self {
let builtins = built_in_slash_commands();
// Exclude prompts that collide with builtin command names and sort by name.
let exclude: std::collections::HashSet<String> =
builtins.iter().map(|(n, _)| (*n).to_string()).collect();
prompts.retain(|p| !exclude.contains(&p.name));
prompts.sort_by(|a, b| a.name.cmp(&b.name));
pub(crate) fn new() -> Self {
Self {
command_filter: String::new(),
builtins,
prompts,
all_commands: built_in_slash_commands(),
state: ScrollState::new(),
}
}
pub(crate) fn set_prompts(&mut self, mut prompts: Vec<CustomPrompt>) {
let exclude: std::collections::HashSet<String> = self
.builtins
.iter()
.map(|(n, _)| (*n).to_string())
.collect();
prompts.retain(|p| !exclude.contains(&p.name));
prompts.sort_by(|a, b| a.name.cmp(&b.name));
self.prompts = prompts;
}
pub(crate) fn prompt_name(&self, idx: usize) -> Option<&str> {
self.prompts.get(idx).map(|p| p.name.as_str())
}
pub(crate) fn prompt_content(&self, idx: usize) -> Option<&str> {
self.prompts.get(idx).map(|p| p.content.as_str())
}
/// Update the filter string based on the current composer text. The text
/// passed in is expected to start with a leading '/'. Everything after the
/// *first* '/' on the *first* line becomes the active filter that is used
@@ -86,7 +50,7 @@ impl CommandPopup {
}
// Reset or clamp selected index based on new filtered list.
let matches_len = self.filtered_items().len();
let matches_len = self.filtered_commands().len();
self.state.clamp_selection(matches_len);
self.state
.ensure_visible(matches_len, MAX_POPUP_ROWS.min(matches_len));
@@ -95,73 +59,56 @@ impl CommandPopup {
/// Determine the preferred height of the popup. This is the number of
/// rows required to show at most MAX_POPUP_ROWS commands.
pub(crate) fn calculate_required_height(&self) -> u16 {
self.filtered_items().len().clamp(1, MAX_POPUP_ROWS) as u16
self.filtered_commands().len().clamp(1, MAX_POPUP_ROWS) as u16
}
/// Compute fuzzy-filtered matches paired with optional highlight indices and score.
/// Sorted by ascending score, then by command name for stability.
fn filtered(&self) -> Vec<(CommandItem, Option<Vec<usize>>, i32)> {
fn filtered(&self) -> Vec<(&SlashCommand, Option<Vec<usize>>, i32)> {
let filter = self.command_filter.trim();
let mut out: Vec<(CommandItem, Option<Vec<usize>>, i32)> = Vec::new();
let mut out: Vec<(&SlashCommand, Option<Vec<usize>>, i32)> = Vec::new();
if filter.is_empty() {
// Built-ins first, in presentation order.
for (_, cmd) in self.builtins.iter() {
out.push((CommandItem::Builtin(*cmd), None, 0));
}
// Then prompts, already sorted by name.
for idx in 0..self.prompts.len() {
out.push((CommandItem::Prompt(idx), None, 0));
for (_, cmd) in self.all_commands.iter() {
out.push((cmd, None, 0));
}
// Keep the original presentation order when no filter is applied.
return out;
}
for (_, cmd) in self.builtins.iter() {
if let Some((indices, score)) = fuzzy_match(cmd.command(), filter) {
out.push((CommandItem::Builtin(*cmd), Some(indices), score));
} else {
for (_, cmd) in self.all_commands.iter() {
if let Some((indices, score)) = fuzzy_match(cmd.command(), filter) {
out.push((cmd, Some(indices), score));
}
}
}
for (idx, p) in self.prompts.iter().enumerate() {
if let Some((indices, score)) = fuzzy_match(&p.name, filter) {
out.push((CommandItem::Prompt(idx), Some(indices), score));
}
}
// When filtering, sort by ascending score and then by name for stability.
out.sort_by(|a, b| {
let an = match a.0 {
CommandItem::Builtin(c) => c.command(),
CommandItem::Prompt(i) => &self.prompts[i].name,
};
let bn = match b.0 {
CommandItem::Builtin(c) => c.command(),
CommandItem::Prompt(i) => &self.prompts[i].name,
};
a.2.cmp(&b.2).then_with(|| an.cmp(bn))
});
// When filtering, sort by ascending score and then by command for stability.
out.sort_by(|a, b| a.2.cmp(&b.2).then_with(|| a.0.command().cmp(b.0.command())));
out
}
fn filtered_items(&self) -> Vec<CommandItem> {
fn filtered_commands(&self) -> Vec<&SlashCommand> {
self.filtered().into_iter().map(|(c, _, _)| c).collect()
}
/// Move the selection cursor one step up.
pub(crate) fn move_up(&mut self) {
let len = self.filtered_items().len();
let matches = self.filtered_commands();
let len = matches.len();
self.state.move_up_wrap(len);
self.state.ensure_visible(len, MAX_POPUP_ROWS.min(len));
}
/// Move the selection cursor one step down.
pub(crate) fn move_down(&mut self) {
let matches_len = self.filtered_items().len();
let matches = self.filtered_commands();
let matches_len = matches.len();
self.state.move_down_wrap(matches_len);
self.state
.ensure_visible(matches_len, MAX_POPUP_ROWS.min(matches_len));
}
/// Return currently selected command, if any.
pub(crate) fn selected_item(&self) -> Option<CommandItem> {
let matches = self.filtered_items();
pub(crate) fn selected_command(&self) -> Option<&SlashCommand> {
let matches = self.filtered_commands();
self.state
.selected_idx
.and_then(|idx| matches.get(idx).copied())
@@ -176,19 +123,11 @@ impl WidgetRef for CommandPopup {
} else {
matches
.into_iter()
.map(|(item, indices, _)| match item {
CommandItem::Builtin(cmd) => GenericDisplayRow {
name: format!("/{}", cmd.command()),
match_indices: indices.map(|v| v.into_iter().map(|i| i + 1).collect()),
is_current: false,
description: Some(cmd.description().to_string()),
},
CommandItem::Prompt(i) => GenericDisplayRow {
name: format!("/{}", self.prompts[i].name),
match_indices: indices.map(|v| v.into_iter().map(|i| i + 1).collect()),
is_current: false,
description: Some("send saved prompt".to_string()),
},
.map(|(cmd, indices, _)| GenericDisplayRow {
name: format!("/{}", cmd.command()),
match_indices: indices.map(|v| v.into_iter().map(|i| i + 1).collect()),
is_current: false,
description: Some(cmd.description().to_string()),
})
.collect()
};
@@ -202,79 +141,31 @@ mod tests {
#[test]
fn filter_includes_init_when_typing_prefix() {
let mut popup = CommandPopup::new(Vec::new());
let mut popup = CommandPopup::new();
// Simulate the composer line starting with '/in' so the popup filters
// matching commands by prefix.
popup.on_composer_text_change("/in".to_string());
// Access the filtered list via the selected command and ensure that
// one of the matches is the new "init" command.
let matches = popup.filtered_items();
let has_init = matches.iter().any(|item| match item {
CommandItem::Builtin(cmd) => cmd.command() == "init",
CommandItem::Prompt(_) => false,
});
let matches = popup.filtered_commands();
assert!(
has_init,
matches.iter().any(|cmd| cmd.command() == "init"),
"expected '/init' to appear among filtered commands"
);
}
#[test]
fn selecting_init_by_exact_match() {
let mut popup = CommandPopup::new(Vec::new());
let mut popup = CommandPopup::new();
popup.on_composer_text_change("/init".to_string());
// When an exact match exists, the selected command should be that
// command by default.
let selected = popup.selected_item();
let selected = popup.selected_command();
match selected {
Some(CommandItem::Builtin(cmd)) => assert_eq!(cmd.command(), "init"),
Some(CommandItem::Prompt(_)) => panic!("unexpected prompt selected for '/init'"),
Some(cmd) => assert_eq!(cmd.command(), "init"),
None => panic!("expected a selected command for exact match"),
}
}
#[test]
fn prompt_discovery_lists_custom_prompts() {
let prompts = vec![
CustomPrompt {
name: "foo".to_string(),
content: "hello from foo".to_string(),
},
CustomPrompt {
name: "bar".to_string(),
content: "hello from bar".to_string(),
},
];
let popup = CommandPopup::new(prompts);
let items = popup.filtered_items();
let mut prompt_names: Vec<String> = items
.into_iter()
.filter_map(|it| match it {
CommandItem::Prompt(i) => popup.prompt_name(i).map(|s| s.to_string()),
_ => None,
})
.collect();
prompt_names.sort();
assert_eq!(prompt_names, vec!["bar".to_string(), "foo".to_string()]);
}
#[test]
fn prompt_name_collision_with_builtin_is_ignored() {
// Create a prompt named like a builtin (e.g. "init").
let popup = CommandPopup::new(vec![CustomPrompt {
name: "init".to_string(),
content: "should be ignored".to_string(),
}]);
let items = popup.filtered_items();
let has_collision_prompt = items.into_iter().any(|it| match it {
CommandItem::Prompt(i) => popup.prompt_name(i) == Some("init"),
_ => false,
});
assert!(
!has_collision_prompt,
"prompt with builtin name should be ignored"
);
}
}

View File

@@ -34,7 +34,6 @@ pub(crate) enum CancellationEvent {
pub(crate) use chat_composer::ChatComposer;
pub(crate) use chat_composer::InputResult;
use codex_protocol::custom_prompts::CustomPrompt;
use crate::status_indicator_widget::StatusIndicatorWidget;
use approval_modal_view::ApprovalModalView;
@@ -73,7 +72,7 @@ pub(crate) struct BottomPaneParams {
}
impl BottomPane {
const BOTTOM_PAD_LINES: u16 = 1;
const BOTTOM_PAD_LINES: u16 = 2;
pub fn new(params: BottomPaneParams) -> Self {
let enhanced_keys_supported = params.enhanced_keys_supported;
Self {
@@ -330,12 +329,6 @@ impl BottomPane {
self.request_redraw();
}
/// Update custom prompts available for the slash popup.
pub(crate) fn set_custom_prompts(&mut self, prompts: Vec<CustomPrompt>) {
self.composer.set_custom_prompts(prompts);
self.request_redraw();
}
pub(crate) fn composer_is_empty(&self) -> bool {
self.composer.is_empty()
}
@@ -344,13 +337,6 @@ impl BottomPane {
self.is_task_running
}
/// Return true when the pane is in the regular composer state without any
/// overlays or popups and not running a task. This is the safe context to
/// use Esc-Esc for backtracking from the main view.
pub(crate) fn is_normal_backtrack_mode(&self) -> bool {
!self.is_task_running && self.active_view.is_none() && !self.composer.popup_active()
}
/// Update the *context-window remaining* indicator in the composer. This
/// is forwarded directly to the underlying `ChatComposer`.
pub(crate) fn set_token_usage(
@@ -655,15 +641,28 @@ mod tests {
"expected Working header on top row: {top:?}"
);
// Last row should be blank padding; the row above should generally contain composer content.
// Next row (spacer) is blank, and bottom two rows are blank padding
let mut spacer = String::new();
let mut r_last = String::new();
let mut r_last2 = String::new();
for x in 0..area.width {
// Spacer row immediately below the status header lives at y=2.
spacer.push(buf[(x, 2)].symbol().chars().next().unwrap_or(' '));
r_last.push(buf[(x, height - 1)].symbol().chars().next().unwrap_or(' '));
r_last2.push(buf[(x, height - 2)].symbol().chars().next().unwrap_or(' '));
}
assert!(
spacer.trim().is_empty(),
"expected spacer line blank: {spacer:?}"
);
assert!(
r_last.trim().is_empty(),
"expected last row blank: {r_last:?}"
);
assert!(
r_last2.trim().is_empty(),
"expected second-to-last row blank: {r_last2:?}"
);
}
#[test]

View File

@@ -19,7 +19,6 @@ use codex_core::protocol::ExecApprovalRequestEvent;
use codex_core::protocol::ExecCommandBeginEvent;
use codex_core::protocol::ExecCommandEndEvent;
use codex_core::protocol::InputItem;
use codex_core::protocol::ListCustomPromptsResponseEvent;
use codex_core::protocol::McpListToolsResponseEvent;
use codex_core::protocol::McpToolCallBeginEvent;
use codex_core::protocol::McpToolCallEndEvent;
@@ -154,8 +153,6 @@ impl ChatWidget {
event,
self.show_welcome_banner,
));
// Ask codex-core to enumerate custom prompts for this session.
self.submit_op(Op::ListCustomPrompts);
if let Some(user_message) = self.initial_user_message.take() {
self.submit_user_message(user_message);
}
@@ -247,9 +244,6 @@ impl ChatWidget {
}
fn on_error(&mut self, message: String) {
// Before emitting the error message, finalize the active exec as failed
// so spinners are replaced with a red ✗ marker.
self.finalize_active_exec_cell_as_failed();
self.add_to_history(history_cell::new_error_event(message));
self.bottom_pane.set_task_running(false);
self.running_commands.clear();
@@ -540,7 +534,17 @@ impl ChatWidget {
ev.result,
));
}
fn interrupt_running_task(&mut self) {
if self.bottom_pane.is_task_running() {
self.active_exec_cell = None;
self.running_commands.clear();
self.bottom_pane.clear_ctrl_c_quit_hint();
self.submit_op(Op::Interrupt);
self.bottom_pane.set_task_running(false);
self.stream.clear_all();
self.request_redraw();
}
}
fn layout_areas(&self, area: Rect) -> [Rect; 2] {
Layout::vertical([
Constraint::Max(
@@ -653,57 +657,48 @@ impl ChatWidget {
}
pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) {
match key_event {
KeyEvent {
code: KeyCode::Char('c'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
..
} => {
self.on_ctrl_c();
return;
}
other if other.kind == KeyEventKind::Press => {
self.bottom_pane.clear_ctrl_c_quit_hint();
}
_ => {}
if key_event.kind == KeyEventKind::Press {
self.bottom_pane.clear_ctrl_c_quit_hint();
}
match key_event {
// Alt+Up: Edit the most recent queued user message (if any).
if matches!(
key_event,
KeyEvent {
code: KeyCode::Up,
modifiers: KeyModifiers::ALT,
kind: KeyEventKind::Press,
..
} if !self.queued_user_messages.is_empty() => {
// Prefer the most recently queued item.
if let Some(user_message) = self.queued_user_messages.pop_back() {
self.bottom_pane.set_composer_text(user_message.text);
}
) && !self.queued_user_messages.is_empty()
{
// Prefer the most recently queued item.
if let Some(user_message) = self.queued_user_messages.pop_back() {
self.bottom_pane.set_composer_text(user_message.text);
self.refresh_queued_user_messages();
self.request_redraw();
}
return;
}
match self.bottom_pane.handle_key_event(key_event) {
InputResult::Submitted(text) => {
// If a task is running, queue the user input to be sent after the turn completes.
let user_message = UserMessage {
text,
image_paths: self.bottom_pane.take_recent_submission_images(),
};
if self.bottom_pane.is_task_running() {
self.queued_user_messages.push_back(user_message);
self.refresh_queued_user_messages();
self.request_redraw();
} else {
self.submit_user_message(user_message);
}
}
_ => {
match self.bottom_pane.handle_key_event(key_event) {
InputResult::Submitted(text) => {
// If a task is running, queue the user input to be sent after the turn completes.
let user_message = UserMessage {
text,
image_paths: self.bottom_pane.take_recent_submission_images(),
};
if self.bottom_pane.is_task_running() {
self.queued_user_messages.push_back(user_message);
self.refresh_queued_user_messages();
} else {
self.submit_user_message(user_message);
}
}
InputResult::Command(cmd) => {
self.dispatch_command(cmd);
}
InputResult::None => {}
}
InputResult::Command(cmd) => {
self.dispatch_command(cmd);
}
InputResult::None => {}
}
}
@@ -935,7 +930,6 @@ impl ChatWidget {
EventMsg::WebSearchBegin(ev) => self.on_web_search_begin(ev),
EventMsg::GetHistoryEntryResponse(ev) => self.on_get_history_entry_response(ev),
EventMsg::McpListToolsResponse(ev) => self.on_list_mcp_tools(ev),
EventMsg::ListCustomPromptsResponse(ev) => self.on_list_custom_prompts(ev),
EventMsg::ShutdownComplete => self.on_shutdown_complete(),
EventMsg::TurnDiff(TurnDiffEvent { unified_diff }) => self.on_turn_diff(unified_diff),
EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
@@ -954,16 +948,6 @@ impl ChatWidget {
self.frame_requester.schedule_frame();
}
/// Mark the active exec cell as failed (✗) and flush it into history.
fn finalize_active_exec_cell_as_failed(&mut self) {
if let Some(cell) = self.active_exec_cell.take() {
let cell = cell.into_failed();
// Insert finalized exec into history and keep grouping consistent.
self.add_to_history(cell);
self.last_history_was_exec = true;
}
}
// If idle and there are queued inputs, submit exactly one to start the next turn.
fn maybe_send_next_queued_input(&mut self) {
if self.bottom_pane.is_task_running() {
@@ -1118,15 +1102,22 @@ impl ChatWidget {
}
/// Handle Ctrl-C key press.
fn on_ctrl_c(&mut self) {
if self.bottom_pane.on_ctrl_c() == CancellationEvent::Ignored {
if self.bottom_pane.is_task_running() {
self.submit_op(Op::Interrupt);
} else if self.bottom_pane.ctrl_c_quit_hint_visible() {
self.submit_op(Op::Shutdown);
} else {
self.bottom_pane.show_ctrl_c_quit_hint();
}
/// Returns CancellationEvent::Handled if the event was consumed by the UI, or
/// CancellationEvent::Ignored if the caller should handle it (e.g. exit).
pub(crate) fn on_ctrl_c(&mut self) -> CancellationEvent {
match self.bottom_pane.on_ctrl_c() {
CancellationEvent::Handled => return CancellationEvent::Handled,
CancellationEvent::Ignored => {}
}
if self.bottom_pane.is_task_running() {
self.interrupt_running_task();
CancellationEvent::Ignored
} else if self.bottom_pane.ctrl_c_quit_hint_visible() {
self.submit_op(Op::Shutdown);
CancellationEvent::Handled
} else {
self.bottom_pane.show_ctrl_c_quit_hint();
CancellationEvent::Ignored
}
}
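// Illustrative sketch only, not part of this change: the Ctrl-C flow as seen by
// a caller while the widget is idle. `make_chatwidget_manual` is the helper the
// crate's unit tests use; whether the bottom pane reports Ignored for the first
// press is an assumption drawn from the branch structure above.
#[cfg(test)]
fn ctrl_c_flow_sketch() {
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
    // First Ctrl-C with no task running and no hint yet: the quit hint is
    // armed and the event is reported as not consumed.
    assert!(matches!(chat.on_ctrl_c(), CancellationEvent::Ignored));
    // Second Ctrl-C while the hint is visible: Op::Shutdown is submitted and
    // the event is reported as handled.
    assert!(matches!(chat.on_ctrl_c(), CancellationEvent::Handled));
}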
@@ -1134,13 +1125,6 @@ impl ChatWidget {
self.bottom_pane.composer_is_empty()
}
/// True when the UI is in the regular composer state with no running task,
/// no modal overlay (e.g. approvals or status indicator), and no composer popups.
/// In this state Esc-Esc backtracking is enabled.
pub(crate) fn is_normal_backtrack_mode(&self) -> bool {
self.bottom_pane.is_normal_backtrack_mode()
}
pub(crate) fn insert_str(&mut self, text: &str) {
self.bottom_pane.insert_str(text);
}
@@ -1165,13 +1149,6 @@ impl ChatWidget {
self.add_to_history(history_cell::new_mcp_tools_output(&self.config, ev.tools));
}
fn on_list_custom_prompts(&mut self, ev: ListCustomPromptsResponseEvent) {
let len = ev.custom_prompts.len();
debug!("received {} custom prompts", len);
// Forward to bottom pane so the slash popup can show them now.
self.bottom_pane.set_custom_prompts(ev.custom_prompts);
}
/// Programmatically submit a user text message as if typed in the
/// composer. The text will be added to conversation history and sent to
/// the agent.

View File

@@ -1,6 +1,5 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 728
expression: terminal.backend()
---
"? Codex wants to run echo hello world "
@@ -11,3 +10,4 @@ expression: terminal.backend()
"▌ Yes Always No No, provide feedback "
"▌ Approve and run the command "
" "
" "

View File

@@ -1,6 +1,6 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 763
assertion_line: 690
expression: terminal.backend()
---
"The model wants to apply changes "
@@ -11,3 +11,4 @@ expression: terminal.backend()
"▌ Yes No No, provide feedback "
"▌ Approve and apply the changes "
" "
" "

View File

@@ -1,6 +1,5 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 779
expression: terminal.backend()
---
"▌ Ask Codex to do anything "

View File

@@ -1,8 +1,7 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 779
expression: terminal.backend()
---
" "
"▌ Ask Codex to do anything "
" "
" "

View File

@@ -1,6 +1,5 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 807
expression: terminal.backend()
---
"▌ Ask Codex to do anything "

View File

@@ -1,6 +1,5 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 807
expression: terminal.backend()
---
" "

View File

@@ -1,6 +0,0 @@
---
source: tui/src/chatwidget/tests.rs
expression: exec_blob
---
>_
✗ ⌨  sleep 1

View File

@@ -1,6 +1,6 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 878
assertion_line: 806
expression: terminal.backend()
---
" "
@@ -9,3 +9,4 @@ expression: terminal.backend()
"▌ Ask Codex to do anything "
" ⏎ send Ctrl+J newline Ctrl+T transcript Ctrl+C quit "
" "
" "

View File

@@ -1,6 +1,5 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 851
expression: terminal.backend()
---
"? Codex wants to run echo 'hello world' "
@@ -11,3 +10,4 @@ expression: terminal.backend()
"▌ Yes Always No No, provide feedback "
"▌ Approve and run the command "
" "
" "

View File

@@ -18,14 +18,11 @@ use codex_core::protocol::ExecApprovalRequestEvent;
use codex_core::protocol::ExecCommandBeginEvent;
use codex_core::protocol::ExecCommandEndEvent;
use codex_core::protocol::FileChange;
use codex_core::protocol::InputItem;
use codex_core::protocol::ListCustomPromptsResponseEvent;
use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::StreamErrorEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_login::CodexAuth;
use codex_protocol::custom_prompts::CustomPrompt;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyModifiers;
@@ -217,48 +214,6 @@ fn lines_to_single_string(lines: &[ratatui::text::Line<'static>]) -> String {
s
}
#[test]
fn selecting_custom_prompt_sends_user_message() {
let (mut chat, _rx, mut op_rx) = make_chatwidget_manual();
// Provide a custom prompt via protocol event, as core would do.
let prompt_text = "Hello from saved prompt".to_string();
chat.handle_codex_event(Event {
id: "sub-prompts".into(),
msg: EventMsg::ListCustomPromptsResponse(ListCustomPromptsResponseEvent {
custom_prompts: vec![CustomPrompt {
name: "my-prompt".to_string(),
content: prompt_text.clone(),
}],
}),
});
// Type the prompt name to focus it in the slash popup and press Enter.
for ch in ['/', 'm', 'y', '-', 'p', 'r', 'o', 'm', 'p', 't'] {
chat.handle_key_event(KeyEvent::new(KeyCode::Char(ch), KeyModifiers::NONE));
}
chat.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));
// Expect a UserInput op to be sent containing the prompt's content.
let mut found = false;
while let Ok(op) = op_rx.try_recv() {
if let Op::UserInput { items } = op {
let texts: Vec<String> = items
.into_iter()
.filter_map(|it| match it {
InputItem::Text { text } => Some(text),
_ => None,
})
.collect();
if texts.iter().any(|t| t == &prompt_text) {
found = true;
break;
}
}
}
assert!(found, "expected UserInput op containing prompt content");
}
fn open_fixture(name: &str) -> std::fs::File {
// 1) Prefer fixtures within this crate
{
@@ -415,48 +370,6 @@ fn exec_history_cell_shows_working_then_failed() {
);
}
// Snapshot test: interrupting a running exec finalizes the active cell with a red ✗
// marker (replacing the spinner) and flushes it into history.
#[test]
fn interrupt_exec_marks_failed_snapshot() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
// Begin a long-running command so we have an active exec cell with a spinner.
chat.handle_codex_event(Event {
id: "call-int".into(),
msg: EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
call_id: "call-int".into(),
command: vec!["bash".into(), "-lc".into(), "sleep 1".into()],
cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
parsed_cmd: vec![
codex_core::parse_command::ParsedCommand::Unknown {
cmd: "sleep 1".into(),
}
.into(),
],
}),
});
// Simulate the task being aborted (as if ESC was pressed), which should
// cause the active exec cell to be finalized as failed and flushed.
chat.handle_codex_event(Event {
id: "call-int".into(),
msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
}),
});
let cells = drain_insert_history(&mut rx);
assert!(
!cells.is_empty(),
"expected finalized exec cell to be inserted into history"
);
// The first inserted cell should be the finalized exec; snapshot its text.
let exec_blob = lines_to_single_string(&cells[0]);
assert_snapshot!("interrupt_exec_marks_failed", exec_blob);
}
#[test]
fn exec_history_extends_previous_when_consecutive() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
@@ -1252,7 +1165,7 @@ fn stream_error_is_rendered_to_history() {
let cells = drain_insert_history(&mut rx);
assert!(!cells.is_empty(), "expected a history cell for StreamError");
let blob = lines_to_single_string(cells.last().unwrap());
assert!(blob.contains(" "));
assert!(blob.contains(""));
assert!(blob.contains("stream error:"));
assert!(blob.contains("idle timeout waiting for SSE"));
}

View File

@@ -1,4 +1,3 @@
use std::path::Path;
use std::path::PathBuf;
use tempfile::Builder;
@@ -25,16 +24,12 @@ impl std::error::Error for PasteImageError {}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EncodedImageFormat {
Png,
Jpeg,
Other,
}
impl EncodedImageFormat {
pub fn label(self) -> &'static str {
match self {
EncodedImageFormat::Png => "PNG",
EncodedImageFormat::Jpeg => "JPEG",
EncodedImageFormat::Other => "IMG",
}
}
}
@@ -100,185 +95,3 @@ pub fn paste_image_to_temp_png() -> Result<(PathBuf, PastedImageInfo), PasteImag
.map_err(|e| PasteImageError::IoError(e.error.to_string()))?;
Ok((path, info))
}
/// Normalize pasted text that may represent a filesystem path.
///
/// Supports:
/// - `file://` URLs (converted to local paths)
/// - Windows/UNC paths
/// - shell-escaped single paths (via `shlex`)
pub fn normalize_pasted_path(pasted: &str) -> Option<PathBuf> {
let pasted = pasted.trim();
// file:// URL → filesystem path
if let Ok(url) = url::Url::parse(pasted)
&& url.scheme() == "file"
{
return url.to_file_path().ok();
}
// TODO: We'll improve the implementation/unit tests over time, as appropriate.
// Possibly use typed-path: https://github.com/openai/codex/pull/2567/commits/3cc92b78e0a1f94e857cf4674d3a9db918ed352e
//
// Detect unquoted Windows paths and bypass POSIX shlex which
// treats backslashes as escapes (e.g., C:\Users\Alice\file.png).
// Also handles UNC paths (\\server\share\path).
let looks_like_windows_path = {
// Drive letter path: C:\ or C:/
let drive = pasted
.chars()
.next()
.map(|c| c.is_ascii_alphabetic())
.unwrap_or(false)
&& pasted.get(1..2) == Some(":")
&& pasted
.get(2..3)
.map(|s| s == "\\" || s == "/")
.unwrap_or(false);
// UNC path: \\server\share
let unc = pasted.starts_with("\\\\");
drive || unc
};
if looks_like_windows_path {
return Some(PathBuf::from(pasted));
}
// shell-escaped single path → unescaped
let parts: Vec<String> = shlex::Shlex::new(pasted).collect();
if parts.len() == 1 {
return parts.into_iter().next().map(PathBuf::from);
}
None
}
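// Illustrative sketch only, not part of this change: the three input shapes
// normalize_pasted_path accepts (mirrors the unit tests further down; the
// file:// case assumes a Unix-style filesystem).
#[cfg(test)]
fn normalize_pasted_path_sketch() {
    use std::path::PathBuf;
    // file:// URL → local path
    assert_eq!(
        normalize_pasted_path("file:///tmp/example.png"),
        Some(PathBuf::from("/tmp/example.png"))
    );
    // Unquoted Windows drive-letter path, backslashes preserved verbatim
    assert_eq!(
        normalize_pasted_path(r"C:\Users\Alice\file.png"),
        Some(PathBuf::from(r"C:\Users\Alice\file.png"))
    );
    // Shell-escaped single path → unescaped via shlex
    assert_eq!(
        normalize_pasted_path("/home/user/My\\ File.png"),
        Some(PathBuf::from("/home/user/My File.png"))
    );
    // More than one token after shell splitting is not treated as a path
    assert_eq!(normalize_pasted_path("a.png b.png"), None);
}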
/// Infer an image format for the provided path based on its extension.
pub fn pasted_image_format(path: &Path) -> EncodedImageFormat {
match path
.extension()
.and_then(|e| e.to_str())
.map(|s| s.to_ascii_lowercase())
.as_deref()
{
Some("png") => EncodedImageFormat::Png,
Some("jpg") | Some("jpeg") => EncodedImageFormat::Jpeg,
_ => EncodedImageFormat::Other,
}
}
#[cfg(test)]
mod pasted_paths_tests {
use super::*;
#[cfg(not(windows))]
#[test]
fn normalize_file_url() {
let input = "file:///tmp/example.png";
let result = normalize_pasted_path(input).expect("should parse file URL");
assert_eq!(result, PathBuf::from("/tmp/example.png"));
}
#[test]
fn normalize_file_url_windows() {
let input = r"C:\Temp\example.png";
let result = normalize_pasted_path(input).expect("should parse file URL");
assert_eq!(result, PathBuf::from(r"C:\Temp\example.png"));
}
#[test]
fn normalize_shell_escaped_single_path() {
let input = "/home/user/My\\ File.png";
let result = normalize_pasted_path(input).expect("should unescape shell-escaped path");
assert_eq!(result, PathBuf::from("/home/user/My File.png"));
}
#[test]
fn normalize_simple_quoted_path_fallback() {
let input = "\"/home/user/My File.png\"";
let result = normalize_pasted_path(input).expect("should trim simple quotes");
assert_eq!(result, PathBuf::from("/home/user/My File.png"));
}
#[test]
fn normalize_single_quoted_unix_path() {
let input = "'/home/user/My File.png'";
let result = normalize_pasted_path(input).expect("should trim single quotes via shlex");
assert_eq!(result, PathBuf::from("/home/user/My File.png"));
}
#[test]
fn normalize_multiple_tokens_returns_none() {
// Two tokens after shell splitting → not a single path
let input = "/home/user/a\\ b.png /home/user/c.png";
let result = normalize_pasted_path(input);
assert!(result.is_none());
}
#[test]
fn pasted_image_format_png_jpeg_unknown() {
assert_eq!(
pasted_image_format(Path::new("/a/b/c.PNG")),
EncodedImageFormat::Png
);
assert_eq!(
pasted_image_format(Path::new("/a/b/c.jpg")),
EncodedImageFormat::Jpeg
);
assert_eq!(
pasted_image_format(Path::new("/a/b/c.JPEG")),
EncodedImageFormat::Jpeg
);
assert_eq!(
pasted_image_format(Path::new("/a/b/c")),
EncodedImageFormat::Other
);
assert_eq!(
pasted_image_format(Path::new("/a/b/c.webp")),
EncodedImageFormat::Other
);
}
#[test]
fn normalize_single_quoted_windows_path() {
let input = r"'C:\\Users\\Alice\\My File.jpeg'";
let result =
normalize_pasted_path(input).expect("should trim single quotes on windows path");
assert_eq!(result, PathBuf::from(r"C:\\Users\\Alice\\My File.jpeg"));
}
#[test]
fn normalize_unquoted_windows_path_with_spaces() {
let input = r"C:\\Users\\Alice\\My Pictures\\example image.png";
let result = normalize_pasted_path(input).expect("should accept unquoted windows path");
assert_eq!(
result,
PathBuf::from(r"C:\\Users\\Alice\\My Pictures\\example image.png")
);
}
#[test]
fn normalize_unc_windows_path() {
let input = r"\\\\server\\share\\folder\\file.jpg";
let result = normalize_pasted_path(input).expect("should accept UNC windows path");
assert_eq!(
result,
PathBuf::from(r"\\\\server\\share\\folder\\file.jpg")
);
}
#[test]
fn pasted_image_format_with_windows_style_paths() {
assert_eq!(
pasted_image_format(Path::new(r"C:\\a\\b\\c.PNG")),
EncodedImageFormat::Png
);
assert_eq!(
pasted_image_format(Path::new(r"C:\\a\\b\\c.jpeg")),
EncodedImageFormat::Jpeg
);
assert_eq!(
pasted_image_format(Path::new(r"C:\\a\\b\\noext")),
EncodedImageFormat::Other
);
}
}

View File

@@ -175,26 +175,6 @@ impl WidgetRef for &ExecCell {
}
}
impl ExecCell {
/// Convert an active exec cell into a failed, completed exec cell.
/// Replaces the spinner with a red ✗ and records the elapsed duration (zero if the cell never started).
pub(crate) fn into_failed(mut self) -> ExecCell {
let elapsed = self
.start_time
.map(|st| st.elapsed())
.unwrap_or_else(|| Duration::from_millis(0));
self.start_time = None;
self.duration = Some(elapsed);
self.output = Some(CommandOutput {
exit_code: 1,
stdout: String::new(),
stderr: String::new(),
formatted_output: String::new(),
});
self
}
}
#[derive(Debug)]
struct CompletedMcpToolCallWithImageOutput {
_image: DynamicImage,
@@ -230,20 +210,6 @@ fn pretty_provider_name(id: &str) -> String {
title_case(id)
}
}
/// Return the emoji followed by a hair space (U+200A) and a normal space.
/// This creates a reasonable gap across different terminals,
/// in particular Terminal.app and iTerm, which render too tightly with just a single normal space.
///
/// Improvements here could be to condition this behavior on terminal,
/// or possibly on emoji.
fn padded_emoji(emoji: &str) -> String {
format!("{emoji}\u{200A} ")
}
/// Convenience function over `padded_emoji()`.
fn padded_emoji_with(emoji: &str, text: impl AsRef<str>) -> String {
format!("{}{}", padded_emoji(emoji), text.as_ref())
}
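// Illustrative sketch only, not part of this change: the exact strings the two
// helpers above produce (emoji, then U+200A hair space, then a regular space).
#[cfg(test)]
fn padded_emoji_sketch() {
    assert_eq!(padded_emoji("🌐"), "🌐\u{200A} ");
    assert_eq!(padded_emoji_with("📖", "src/main.rs"), "📖\u{200A} src/main.rs");
}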
pub(crate) fn new_session_info(
config: &Config,
@@ -382,22 +348,22 @@ fn new_parsed_command(
for parsed in parsed_commands.iter() {
let text = match parsed {
ParsedCommand::Read { name, .. } => padded_emoji_with("📖", name),
ParsedCommand::Read { name, .. } => format!("📖 {name}"),
ParsedCommand::ListFiles { cmd, path } => match path {
Some(p) => padded_emoji_with("📂", p),
None => padded_emoji_with("📂", cmd),
Some(p) => format!("📂 {p}"),
None => format!("📂 {cmd}"),
},
ParsedCommand::Search { query, path, cmd } => match (query, path) {
(Some(q), Some(p)) => padded_emoji_with("🔎", format!("{q} in {p}")),
(Some(q), None) => padded_emoji_with("🔎", q),
(None, Some(p)) => padded_emoji_with("🔎", p),
(None, None) => padded_emoji_with("🔎", cmd),
(Some(q), Some(p)) => format!("🔎 {q} in {p}"),
(Some(q), None) => format!("🔎 {q}"),
(None, Some(p)) => format!("🔎 {p}"),
(None, None) => format!("🔎 {cmd}"),
},
ParsedCommand::Format { .. } => padded_emoji_with("✨", "Formatting"),
ParsedCommand::Test { cmd } => padded_emoji_with("🧪", cmd),
ParsedCommand::Lint { cmd, .. } => padded_emoji_with("🧹", cmd),
ParsedCommand::Unknown { cmd } => padded_emoji_with("⌨", cmd),
ParsedCommand::Noop { cmd } => padded_emoji_with("🔄", cmd),
ParsedCommand::Format { .. } => "✨ Formatting".to_string(),
ParsedCommand::Test { cmd } => format!("🧪 {cmd}"),
ParsedCommand::Lint { cmd, .. } => format!("🧹 {cmd}"),
ParsedCommand::Unknown { cmd } => format!("⌨ {cmd}"),
ParsedCommand::Noop { cmd } => format!("🔄 {cmd}"),
};
// Prefix: two spaces, marker, space. Continuations align under the text block.
for (j, line_text) in text.lines().enumerate() {
@@ -483,10 +449,8 @@ pub(crate) fn new_active_mcp_tool_call(invocation: McpInvocation) -> PlainHistor
}
pub(crate) fn new_web_search_call(query: String) -> PlainHistoryCell {
let lines: Vec<Line<'static>> = vec![
Line::from(""),
Line::from(vec![padded_emoji("🌐").into(), query.into()]),
];
let lines: Vec<Line<'static>> =
vec![Line::from(""), Line::from(vec!["🌐 ".into(), query.into()])];
PlainHistoryCell { lines }
}
@@ -630,10 +594,7 @@ pub(crate) fn new_status_output(
};
// 📂 Workspace
lines.push(Line::from(vec![
padded_emoji("📂").into(),
"Workspace".bold(),
]));
lines.push(Line::from(vec!["📂 ".into(), "Workspace".bold()]));
// Path (home-relative, e.g., ~/code/project)
let cwd_str = match relativize_to_home(&config.cwd) {
Some(rel) if !rel.as_os_str().is_empty() => {
@@ -714,10 +675,7 @@ pub(crate) fn new_status_output(
if let Ok(auth) = try_read_auth_json(&auth_file)
&& let Some(tokens) = auth.tokens.clone()
{
lines.push(Line::from(vec![
padded_emoji("👤").into(),
"Account".bold(),
]));
lines.push(Line::from(vec!["👤 ".into(), "Account".bold()]));
lines.push(Line::from(" • Signed in with ChatGPT"));
let info = tokens.id_token;
@@ -744,7 +702,7 @@ pub(crate) fn new_status_output(
}
// 🧠 Model
lines.push(Line::from(vec![padded_emoji("🧠").into(), "Model".bold()]));
lines.push(Line::from(vec!["🧠 ".into(), "Model".bold()]));
lines.push(Line::from(vec![
" • Name: ".into(),
config.model.clone().into(),
@@ -895,21 +853,13 @@ pub(crate) fn new_mcp_tools_output(
}
pub(crate) fn new_error_event(message: String) -> PlainHistoryCell {
// Use a hair space (U+200A) to create a subtle, near-invisible separation
// before the text. VS16 is intentionally omitted to keep spacing tighter
// in terminals like Ghostty.
let lines: Vec<Line<'static>> = vec![
"".into(),
vec![padded_emoji("🖐").red().bold(), message.into()].into(),
];
let lines: Vec<Line<'static>> = vec!["".into(), vec!["🖐 ".red().bold(), message.into()].into()];
PlainHistoryCell { lines }
}
pub(crate) fn new_stream_error_event(message: String) -> PlainHistoryCell {
let lines: Vec<Line<'static>> = vec![
vec![padded_emoji("⚠").magenta().bold(), message.dim()].into(),
"".into(),
];
let lines: Vec<Line<'static>> =
vec![vec!["⚠ ".magenta().bold(), message.dim()].into(), "".into()];
PlainHistoryCell { lines }
}

View File

@@ -123,10 +123,10 @@ impl WidgetRef for StatusIndicatorWidget {
for (i, piece) in wrapped.iter().take(3).enumerate() {
let prefix = if i == 0 { "" } else { " " };
let content = format!("{prefix}{piece}");
lines.push(Line::from(content.dim().italic()));
lines.push(Line::from(content.dim()));
}
if wrapped.len() > 3 {
lines.push(Line::from("".dim().italic()));
lines.push(Line::from("".dim()));
}
}
if !self.queued_messages.is_empty() {

View File

@@ -3698,7 +3698,7 @@
{"ts":"2025-08-09T15:53:04.318Z","dir":"to_tui","kind":"app_event","variant":"RequestRedraw"}
{"ts":"2025-08-09T15:53:04.320Z","dir":"to_tui","kind":"app_event","variant":"Redraw"}
{"ts":"2025-08-09T15:53:04.353Z","dir":"to_tui","kind":"codex_event","payload":{"id":"1","msg":{"type":"exec_command_output_delta","call_id":"call_KOxVodT3X5ci7LJmudvcovhW","stream":"stderr","chunk":[32,32,32,32,70,105,110,105,115,104,101,100,32,96,100,101,118,96,32,112,114,111,102,105,108,101,32,91,117,110,111,112,116,105,109,105,122,101,100,32,43,32,100,101,98,117,103,105,110,102,111,93,32,116,97,114,103,101,116,40,115,41,32,105,110,32,53,56,46,50,49,115,10]}}}
{"ts":"2025-08-09T15:53:04.389Z","dir":"to_tui","kind":"codex_event","payload":{"id":"1","msg":{"type":"exec_command_end","call_id":"call_KOxVodT3X5ci7LJmudvcovhW","stdout":"","stderr":"error: command timed out","exit_code":-1,"duration":{"secs":0,"nanos":0}}}}
{"ts":"2025-08-09T15:53:04.389Z","dir":"to_tui","kind":"codex_event","payload":{"id":"1","msg":{"type":"exec_command_end","call_id":"call_KOxVodT3X5ci7LJmudvcovhW","stdout":"","stderr":"sandbox error: command timed out","exit_code":-1,"duration":{"secs":0,"nanos":0}}}}
{"ts":"2025-08-09T15:53:04.389Z","dir":"to_tui","kind":"codex_event","payload":{"id":"1","msg":{"type":"token_count","input_tokens":14859,"cached_input_tokens":14625,"output_tokens":117,"reasoning_output_tokens":64,"total_tokens":14976}}}
{"ts":"2025-08-09T15:53:04.389Z","dir":"to_tui","kind":"codex_event","payload":{"id":"1","msg":{"type":"turn_diff","unified_diff":"diff --git a/codex-rs/core/tests/common/lib.rs b/codex-rs/core/tests/common/lib.rs\nindex a0bb4e69e27ae82c5f70d2f4cd079c5cea3ae4f7..18bae310be9cfb81ca73e136be05148ba0510cc5\n--- a/codex-rs/core/tests/common/lib.rs\n+++ b/codex-rs/core/tests/common/lib.rs\n@@ -90,14 +90,11 @@\n where\n F: FnMut(&codex_core::protocol::EventMsg) -> bool,\n {\n+ use tokio::time::Duration;\n use tokio::time::timeout;\n loop {\n-<<<<<<< HEAD\n // Allow a bit more time to accommodate async startup work (e.g. config IO, tool discovery)\n- let ev = timeout(Duration::from_secs(5), codex.next_event())\n-=======\n- let ev = timeout(wait_time, codex.next_event())\n->>>>>>> origin/main\n+ let ev = timeout(wait_time.max(Duration::from_secs(5)), codex.next_event())\n .await\n .expect(\"timeout waiting for event\")\n .expect(\"stream ended unexpectedly\");\n"}}}
{"ts":"2025-08-09T15:53:04.389Z","dir":"to_tui","kind":"insert_history","lines":3}

View File

@@ -1,321 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Simple stderr logger
header() { echo "==> $*" >&2; }
# Generate summarized release notes using Codex CLI based on PR dump.
# Can also generate just the dump via --dump-only.
usage() {
cat <<'USAGE'
Usage: scripts/release_gen.sh [--dump-only] [-q|--quiet] [owner/repo] <from_tag> <to_tag> [version]
Examples:
scripts/release_gen.sh openai/codex v0.23.0 v0.24.0
scripts/release_gen.sh v0.23.0 v0.24.0 # auto-detect repo from git remote
scripts/release_gen.sh v0.23.0 v0.24.0 0.24.0 # auto-detect with explicit version
scripts/release_gen.sh --dump-only v0.23.0 v0.24.0 # only generate releases/release_dump_<ver>.txt
scripts/release_gen.sh -q v0.23.0 v0.24.0 # quiet Codex call with progress dots
Notes:
- Requires: gh and jq for dump generation; codex CLI for note generation.
- If release_dump_<ver>.txt does not exist, it will be created automatically.
- Then runs codex to generate <ver>.txt based on the dump (unless --dump-only).
- If you omit tags, the script lists the last 20 releases for the repo.
USAGE
}
# Parse flags (currently: --dump-only, --quiet)
DUMP_ONLY=0
QUIET=0
ARGS=()
for arg in "$@"; do
case "$arg" in
--dump-only)
DUMP_ONLY=1
;;
-q|--quiet)
QUIET=1
;;
-h|--help)
usage
exit 0
;;
*)
ARGS+=("$arg")
;;
esac
done
# Reset positional args safely under set -u, even if ARGS is empty
if ((${#ARGS[@]})); then
set -- "${ARGS[@]}"
else
set --
fi
if [[ ${1:-} == "-h" || ${1:-} == "--help" ]]; then
usage
exit 1
fi
# Resolve repo: allow optional first arg; otherwise detect from git remote
detect_repo() {
local remote
remote=$(git remote get-url origin 2>/dev/null || git remote get-url upstream 2>/dev/null || true)
if [[ -z "$remote" ]]; then
echo ""; return 1
fi
# Normalize and extract owner/repo from SSH or HTTPS/HTTP URL
local path="$remote"
# Strip protocols and user@
path="${path#git@}"
path="${path#ssh://}"
path="${path#https://}"
path="${path#http://}"
path="${path#*@}"
# If contains github.com:, take after ':'; else after 'github.com/' if present
if [[ "$path" == *":"* ]]; then
path="${path#*:}"
fi
if [[ "$path" == *github.com/* ]]; then
path="${path#*github.com/}"
fi
# Trim leading slashes
path="${path#/}"
# Drop trailing .git
path="${path%.git}"
# Ensure only owner/repo
echo "$path" | awk -F/ '{print $1"/"$2}'
}
if [[ ${1:-} == */* ]]; then
REPO="$1"; shift
else
REPO="$(detect_repo || true)"
if [[ -z "$REPO" ]]; then
echo "Error: failed to auto-detect repository from git remote. Provide [owner/repo] explicitly." >&2
exit 1
fi
fi
# Show a recent releases list if tags are missing
show_recent_releases_and_exit() {
local repo="$1"
echo "" >&2
echo "Please pass a source/target release." >&2
echo "" >&2
echo "e.g.: ./scripts/release_gen.sh rust-v0.23.0 rust-v0.24.0" >&2
echo "" >&2
header "Recent releases for $repo:"
echo "" >&2
local list
list=$(gh release list --repo "$repo" --limit 20 2>/dev/null || true)
if [[ -z "$list" ]]; then
echo "Error: unable to fetch releases for $repo" >&2
exit 1
fi
# Print only the tag (first column) as bullets to stderr
printf '%s\n' "$list" | awk '{print "- " $1}' >&2
exit 1
}
if [[ $# -lt 2 ]]; then
show_recent_releases_and_exit "$REPO"
fi
FROM_TAG="$1"
TO_TAG="$2"
VER="${3:-$TO_TAG}"
VER="${VER#v}"
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
RELEASES_DIR="$SCRIPT_DIR/releases"
DUMP_FILE="$RELEASES_DIR/release_dump_$VER.txt"
GEN_FILE="$RELEASES_DIR/$VER.txt"
# Ensure releases directory exists (under scripts/)
mkdir -p "$RELEASES_DIR"
abspath() {
local p="$1"
if command -v realpath >/dev/null 2>&1; then
realpath "$p"
else
python3 -c 'import os,sys;print(os.path.abspath(sys.argv[1]))' "$p" 2>/dev/null || echo "$(pwd)/$p"
fi
}
# ========== Dump generation logic (ported from release_dump_util.sh) ==========
header() { echo "==> $*" >&2; }
# Get an ISO 8601 datetime for a tag. Prefer release publish date; fallback to tag/commit date.
get_tag_datetime_iso() {
local repo="$1" tag="$2"
# Try release by tag
local ts
ts=$(gh release view "$tag" --repo "$repo" --json publishedAt --jq '.publishedAt' 2>/dev/null || true)
if [[ -n "$ts" && "$ts" != "null" ]]; then
echo "$ts"; return 0
fi
# Fallback: tag ref -> (annotated tag ->) commit -> date
local ref obj_type obj_url commit_sha commit
ref=$(gh api "repos/$repo/git/ref/tags/$tag")
obj_type=$(jq -r '.object.type' <<<"$ref")
obj_url=$(jq -r '.object.url' <<<"$ref")
if [[ "$obj_type" == "tag" ]]; then
local tag_obj
tag_obj=$(gh api "$obj_url")
commit_sha=$(jq -r '.object.sha' <<<"$tag_obj")
else
commit_sha=$(jq -r '.object.sha' <<<"$ref")
fi
commit=$(gh api "repos/$repo/commits/$commit_sha")
jq -r '.commit.committer.date' <<<"$commit"
}
collect_prs_within_range() {
local repo="$1" from_iso="$2" to_iso="$3"
gh pr list --repo "$repo" --state merged --limit 1000 \
--json number,title,mergedAt,author,body | \
jq -c --arg from "$from_iso" --arg to "$to_iso" \
'[ .[]
| select(.mergedAt != null and .mergedAt >= $from and .mergedAt <= $to)
| {
number: .number,
title: .title,
merged_at: .mergedAt,
author: (.author.login // "-"),
body: (.body // "")
}
] | sort_by(.merged_at) | reverse | .[]'
}
format_related_issues() {
# shellcheck disable=SC2016
sed 's/\r//g' | \
grep -Eio '(close|closed|closes|fix|fixed|fixes|resolve|resolved|resolves)[[:space:]:]+([[:alnum:]_.-]+\/[[:alnum:]_.-]+)?#[0-9]+' || true | \
grep -Eo '#[0-9]+' | tr -d '#' | sort -n -u | sed 's/^/#/' | paste -sd ', ' -
}
generate_dump() {
local repo="$1" from_tag="$2" to_tag="$3" out_file="$4"
command -v gh >/dev/null 2>&1 || { echo "Error: gh (GitHub CLI) is required" >&2; exit 1; }
command -v jq >/dev/null 2>&1 || { echo "Error: jq is required" >&2; exit 1; }
header "Resolving tag dates ($from_tag -> $to_tag)"
local from_iso to_iso
from_iso=$(get_tag_datetime_iso "$repo" "$from_tag")
to_iso=$(get_tag_datetime_iso "$repo" "$to_tag")
if [[ -z "$from_iso" || -z "$to_iso" ]]; then
echo "Error: failed to resolve tag dates. from=$from_tag ($from_iso) to=$to_tag ($to_iso)" >&2
exit 1
fi
header "Collecting merged PRs via gh pr list"
local tmpdir sorted
tmpdir=$(mktemp -d)
sorted="$tmpdir/prs.sorted.ndjson"
collect_prs_within_range "$repo" "$from_iso" "$to_iso" > "$sorted"
local count
count=$(wc -l < "$sorted" | tr -d ' ')
header "Writing $out_file (Total PRs: $count)"
{
echo "Repository: $repo"
echo "Range: $from_tag ($from_iso) -> $to_tag ($to_iso)"
echo "Generated: $(date -u +%Y-%m-%dT%H:%M:%SZ)"
echo "Total PRs: $count"
echo ""
} > "$out_file"
if [[ "$count" -eq 0 ]]; then
return 0
fi
while IFS= read -r line; do
local title number merged_at author body issues
title=$(jq -r '.title' <<<"$line")
number=$(jq -r '.number' <<<"$line")
merged_at=$(jq -r '.merged_at' <<<"$line")
author=$(jq -r '.author' <<<"$line")
body=$(jq -r '.body' <<<"$line")
issues=$(printf '%s' "$body" | format_related_issues || true)
[[ -z "$issues" ]] && issues="-"
{
echo "PR #$number: $title"
echo "Merged: $merged_at | Author: $author"
echo "Related issues: $issues"
echo ""
# Skip verbose descriptions for Dependabot PRs
if [[ "$author" != "app/dependabot" && "$author" != "dependabot[bot]" && ! "$author" =~ [Dd]ependabot ]]; then
echo "Description:"
# Limit descriptions to 2000 characters; add ellipses if truncated
local max=2000
if (( ${#body} > max )); then
printf '%s\n' "${body:0:max}..."
else
printf '%s\n' "$body"
fi
echo ""
fi
echo "-----"
echo ""
} >> "$out_file"
done < "$sorted"
header "Done -> $out_file"
}
# ========== Orchestrate dump + optional codex generation ==========
# Create dump if missing
if [[ ! -f "$DUMP_FILE" ]]; then
header "Dump not found: $DUMP_FILE. Generating..."
generate_dump "$REPO" "$FROM_TAG" "$TO_TAG" "$DUMP_FILE"
else
header "Using existing dump: $DUMP_FILE"
fi
if (( DUMP_ONLY )); then
# Dump-only mode: no stdout output
exit 0
fi
# Now run codex to generate notes
command -v codex >/dev/null 2>&1 || { echo "Error: codex CLI is required for generation. Use --dump-only to skip." >&2; exit 1; }
DUMP_PATH="$(abspath "$DUMP_FILE")"
PROMPT="`cat ${DUMP_PATH}`\n\n---\n\nPlease generate a summarized release note based on the list of PRs above. Then, write a file called $GEN_FILE with your suggested release notes. It should follow this structure (+ the style/tone/brevity in this example):\n\n\"- Highlights:\n - New commands and controls: support /mcp in TUI (#2430) and a slash command /approvals to control approvals (#2474).\n - Reasoning controls: change reasoning effort and model at runtime (#2435) /model; add “minimal” effort for GPT5 models (#2326).\n - Auth improvements: show login options when not signed in with ChatGPT (#2440) and autorefresh ChatGPT auth token (#2484).\n - UI/UX polish: Ghostty Ctrlb/Ctrlf fallback (#2427), Ctrl+H as backspace (#2412), cursor position tweak after tab completion (#2442), color/accessibility updates (#2401, #2421).\n - Distribution/infra: zip archived binaries added to releases (#2438) and DotSlash entry for Windows x86_64 (#2361); upgraded to Rust 1.89 (#2465, #2467).\n- Full list of merged PRs:\n\n - #2352 tui: skip identical consecutive entries in local composer history\n - #2355 fix: introduce codex-protocol crate\n...\"\n\nMake sure you limit the highlights to, at most, 5 bullet points."
header "Calling codex to generate $GEN_FILE"
if (( QUIET )); then
# Quiet mode: run Codex silently and show progress dots
(
set +x 2>/dev/null || true
codex exec --sandbox workspace-write "$PROMPT" >/dev/null 2>&1
) &
CODEX_PID=$!
while :; do
kill -0 "$CODEX_PID" 2>/dev/null || break
printf "." >&2
sleep 1
done
wait "$CODEX_PID" || true
CODEX_STATUS=$?
echo "" >&2
else
# Normal mode: stream Codex output to stderr as before
codex exec --sandbox workspace-write "$PROMPT" 1>&2
fi
if [[ -f "$GEN_FILE" ]]; then
# On success, output only the generated release notes to stdout
cat "$GEN_FILE"
else
echo "Warning: $GEN_FILE not created. Check codex output." >&2
exit 1
fi