41 KiB
PR #1594: Add streaming to exec and tui
- URL: https://github.com/openai/codex/pull/1594
- Author: aibrahim-oai
- Created: 2025-07-16 22:58:30 UTC
- Updated: 2025-07-17 05:26:42 UTC
- Changes: +149/-32, Files changed: 5, Commits: 22
Description
Added support for streaming in tui
Added support for streaming in exec
https://github.com/user-attachments/assets/4215892e-d940-452c-a1d0-416ed0cf14eb
Full Diff
diff --git a/codex-rs/exec/src/event_processor.rs b/codex-rs/exec/src/event_processor.rs
index 2a7c4c621b..5ab09994b1 100644
--- a/codex-rs/exec/src/event_processor.rs
+++ b/codex-rs/exec/src/event_processor.rs
@@ -23,6 +23,7 @@ use owo_colors::OwoColorize;
use owo_colors::Style;
use shlex::try_join;
use std::collections::HashMap;
+use std::io::Write;
use std::time::Instant;
/// This should be configurable. When used in CI, users may not want to impose
@@ -52,10 +53,12 @@ pub(crate) struct EventProcessor {
/// Whether to include `AgentReasoning` events in the output.
show_agent_reasoning: bool,
+ answer_started: bool,
+ reasoning_started: bool,
}
impl EventProcessor {
- pub(crate) fn create_with_ansi(with_ansi: bool, show_agent_reasoning: bool) -> Self {
+ pub(crate) fn create_with_ansi(with_ansi: bool, config: &Config) -> Self {
let call_id_to_command = HashMap::new();
let call_id_to_patch = HashMap::new();
let call_id_to_tool_call = HashMap::new();
@@ -72,7 +75,9 @@ impl EventProcessor {
green: Style::new().green(),
cyan: Style::new().cyan(),
call_id_to_tool_call,
- show_agent_reasoning,
+ show_agent_reasoning: !config.hide_agent_reasoning,
+ answer_started: false,
+ reasoning_started: false,
}
} else {
Self {
@@ -86,7 +91,9 @@ impl EventProcessor {
green: Style::new(),
cyan: Style::new(),
call_id_to_tool_call,
- show_agent_reasoning,
+ show_agent_reasoning: !config.hide_agent_reasoning,
+ answer_started: false,
+ reasoning_started: false,
}
}
}
@@ -186,18 +193,45 @@ impl EventProcessor {
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if !self.answer_started {
+ ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta));
+ self.answer_started = true;
+ }
+ print!("{delta}");
+ #[allow(clippy::expect_used)]
+ std::io::stdout().flush().expect("could not flush stdout");
}
- EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
+ if !self.show_agent_reasoning {
+ return;
+ }
+ if !self.reasoning_started {
+ ts_println!(
+ self,
+ "{}\n",
+ "thinking".style(self.italic).style(self.magenta),
+ );
+ self.reasoning_started = true;
+ }
+ print!("{delta}");
+ #[allow(clippy::expect_used)]
+ std::io::stdout().flush().expect("could not flush stdout");
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
- ts_println!(
- self,
- "{}\n{message}",
- "codex".style(self.bold).style(self.magenta)
- );
+ // if answer_started is false, this means we haven't received any
+ // delta. Thus, we need to print the message as a new answer.
+ if !self.answer_started {
+ ts_println!(
+ self,
+ "{}\n{}",
+ "codex".style(self.italic).style(self.magenta),
+ message,
+ );
+ } else {
+ println!();
+ self.answer_started = false;
+ }
}
EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
call_id,
@@ -351,7 +385,7 @@ impl EventProcessor {
);
// Pretty-print the patch summary with colored diff markers so
- // it’s easy to scan in the terminal output.
+ // it's easy to scan in the terminal output.
for (path, change) in changes.iter() {
match change {
FileChange::Add { content } => {
@@ -449,12 +483,17 @@ impl EventProcessor {
}
EventMsg::AgentReasoning(agent_reasoning_event) => {
if self.show_agent_reasoning {
- ts_println!(
- self,
- "{}\n{}",
- "thinking".style(self.italic).style(self.magenta),
- agent_reasoning_event.text
- );
+ if !self.reasoning_started {
+ ts_println!(
+ self,
+ "{}\n{}",
+ "codex".style(self.italic).style(self.magenta),
+ agent_reasoning_event.text,
+ );
+ } else {
+ println!();
+ self.reasoning_started = false;
+ }
}
}
EventMsg::SessionConfigured(session_configured_event) => {
diff --git a/codex-rs/exec/src/lib.rs b/codex-rs/exec/src/lib.rs
index 44dddd4d0f..afefed1a93 100644
--- a/codex-rs/exec/src/lib.rs
+++ b/codex-rs/exec/src/lib.rs
@@ -115,8 +115,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
};
let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides)?;
- let mut event_processor =
- EventProcessor::create_with_ansi(stdout_with_ansi, !config.hide_agent_reasoning);
+ let mut event_processor = EventProcessor::create_with_ansi(stdout_with_ansi, &config);
// Print the effective configuration and prompt so users can see what Codex
// is using.
event_processor.print_config_summary(&config, &prompt);
diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs
index 33297ad372..883250400d 100644
--- a/codex-rs/tui/src/app.rs
+++ b/codex-rs/tui/src/app.rs
@@ -297,6 +297,8 @@ impl<'a> App<'a> {
}
fn draw_next_frame(&mut self, terminal: &mut tui::Tui) -> Result<()> {
+ // TODO: add a throttle to avoid redrawing too often
+
match &mut self.app_state {
AppState::Chat { widget } => {
terminal.draw(|frame| frame.render_widget_ref(&**widget, frame.area()))?;
diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs
index 28014c6e40..860439ffb6 100644
--- a/codex-rs/tui/src/chatwidget.rs
+++ b/codex-rs/tui/src/chatwidget.rs
@@ -51,6 +51,8 @@ pub(crate) struct ChatWidget<'a> {
config: Config,
initial_user_message: Option<UserMessage>,
token_usage: TokenUsage,
+ reasoning_buffer: String,
+ answer_buffer: String,
}
#[derive(Clone, Copy, Eq, PartialEq)]
@@ -137,6 +139,8 @@ impl ChatWidget<'_> {
initial_images,
),
token_usage: TokenUsage::default(),
+ reasoning_buffer: String::new(),
+ answer_buffer: String::new(),
}
}
@@ -242,16 +246,51 @@ impl ChatWidget<'_> {
self.request_redraw();
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
+ // if the answer buffer is empty, this means we haven't received any
+ // delta. Thus, we need to print the message as a new answer.
+ if self.answer_buffer.is_empty() {
+ self.conversation_history
+ .add_agent_message(&self.config, message);
+ } else {
+ self.conversation_history
+ .replace_prev_agent_message(&self.config, message);
+ }
+ self.answer_buffer.clear();
+ self.request_redraw();
+ }
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if self.answer_buffer.is_empty() {
+ self.conversation_history
+ .add_agent_message(&self.config, "".to_string());
+ }
+ self.answer_buffer.push_str(&delta.clone());
self.conversation_history
- .add_agent_message(&self.config, message);
+ .replace_prev_agent_message(&self.config, self.answer_buffer.clone());
+ self.request_redraw();
+ }
+ EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
+ if self.reasoning_buffer.is_empty() {
+ self.conversation_history
+ .add_agent_reasoning(&self.config, "".to_string());
+ }
+ self.reasoning_buffer.push_str(&delta.clone());
+ self.conversation_history
+ .replace_prev_agent_reasoning(&self.config, self.reasoning_buffer.clone());
self.request_redraw();
}
EventMsg::AgentReasoning(AgentReasoningEvent { text }) => {
- if !self.config.hide_agent_reasoning {
+ // if the reasoning buffer is empty, this means we haven't received any
+ // delta. Thus, we need to print the message as a new reasoning.
+ if self.reasoning_buffer.is_empty() {
self.conversation_history
- .add_agent_reasoning(&self.config, text);
- self.request_redraw();
+ .add_agent_reasoning(&self.config, "".to_string());
+ } else {
+ // else, we rerender one last time.
+ self.conversation_history
+ .replace_prev_agent_reasoning(&self.config, text);
}
+ self.reasoning_buffer.clear();
+ self.request_redraw();
}
EventMsg::TaskStarted => {
self.bottom_pane.clear_ctrl_c_quit_hint();
@@ -377,12 +416,6 @@ impl ChatWidget<'_> {
self.bottom_pane
.on_history_entry_response(log_id, offset, entry.map(|e| e.text));
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the TUI
- }
- EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the TUI
- }
event => {
self.conversation_history
.add_background_event(format!("{event:?}"));
diff --git a/codex-rs/tui/src/conversation_history_widget.rs b/codex-rs/tui/src/conversation_history_widget.rs
index c0e5031d70..01a8dc6834 100644
--- a/codex-rs/tui/src/conversation_history_widget.rs
+++ b/codex-rs/tui/src/conversation_history_widget.rs
@@ -202,6 +202,14 @@ impl ConversationHistoryWidget {
self.add_to_history(HistoryCell::new_agent_reasoning(config, text));
}
+ pub fn replace_prev_agent_reasoning(&mut self, config: &Config, text: String) {
+ self.replace_last_agent_reasoning(config, text);
+ }
+
+ pub fn replace_prev_agent_message(&mut self, config: &Config, text: String) {
+ self.replace_last_agent_message(config, text);
+ }
+
pub fn add_background_event(&mut self, message: String) {
self.add_to_history(HistoryCell::new_background_event(message));
}
@@ -249,6 +257,42 @@ impl ConversationHistoryWidget {
});
}
+ pub fn replace_last_agent_reasoning(&mut self, config: &Config, text: String) {
+ if let Some(idx) = self
+ .entries
+ .iter()
+ .rposition(|entry| matches!(entry.cell, HistoryCell::AgentReasoning { .. }))
+ {
+ let width = self.cached_width.get();
+ let entry = &mut self.entries[idx];
+ entry.cell = HistoryCell::new_agent_reasoning(config, text);
+ let height = if width > 0 {
+ entry.cell.height(width)
+ } else {
+ 0
+ };
+ entry.line_count.set(height);
+ }
+ }
+
+ pub fn replace_last_agent_message(&mut self, config: &Config, text: String) {
+ if let Some(idx) = self
+ .entries
+ .iter()
+ .rposition(|entry| matches!(entry.cell, HistoryCell::AgentMessage { .. }))
+ {
+ let width = self.cached_width.get();
+ let entry = &mut self.entries[idx];
+ entry.cell = HistoryCell::new_agent_message(config, text);
+ let height = if width > 0 {
+ entry.cell.height(width)
+ } else {
+ 0
+ };
+ entry.line_count.set(height);
+ }
+ }
+
pub fn record_completed_exec_command(
&mut self,
call_id: String,
@@ -454,7 +498,7 @@ impl WidgetRef for ConversationHistoryWidget {
{
// Choose a thumb color that stands out only when this pane has focus so that the
- // user’s attention is naturally drawn to the active viewport. When unfocused we show
+ // user's attention is naturally drawn to the active viewport. When unfocused we show
// a low-contrast thumb so the scrollbar fades into the background without becoming
// invisible.
let thumb_style = if self.has_input_focus {
Review Comments
codex-rs/exec/src/event_processor.rs
- Created: 2025-07-16 23:05:23 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2211783428
@@ -186,18 +194,31 @@ impl EventProcessor {
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if !self.answer_started {
+ ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta),);
ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta));
- Created: 2025-07-16 23:06:41 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2211785115
@@ -447,15 +468,12 @@ impl EventProcessor {
EventMsg::ApplyPatchApprovalRequest(_) => {
// Should we exit?
}
- EventMsg::AgentReasoning(agent_reasoning_event) => {
- if self.show_agent_reasoning {
- ts_println!(
Shouldn't this still happen?
- Created: 2025-07-16 23:08:08 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2211787407
@@ -52,10 +52,14 @@ pub(crate) struct EventProcessor {
/// Whether to include `AgentReasoning` events in the output.
show_agent_reasoning: bool,
+ /// answer started
What happens if you use `codex exec` with chat completions (without streaming)?
- Created: 2025-07-16 23:08:44 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2211788149
@@ -186,18 +194,31 @@ impl EventProcessor {
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if !self.answer_started {
+ ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta),);
+
+ self.answer_started = true;
+ }
+ print!("{delta}");
}
- EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
+ if !self.show_agent_reasoning {
+ return;
+ }
+ if !self.reasoning_started {
+ ts_println!(
+ self,
+ "{}\n",
+ "thinking".style(self.italic).style(self.magenta),
+ );
+ self.reasoning_started = true;
+ }
+ print!("{delta}");
}
- EventMsg::AgentMessage(AgentMessageEvent { message }) => {
- ts_println!(
Shouldn't this still happen?
- Created: 2025-07-16 23:43:57 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2211839861
@@ -186,18 +194,38 @@ impl EventProcessor {
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if !self.answer_started {
+ ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta),);
+ self.answer_started = true;
+ }
+ print!("{delta}");
+ use std::io::Write;
please pull this to the top
- Created: 2025-07-16 23:48:11 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2211843337
@@ -186,18 +194,38 @@ impl EventProcessor {
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if !self.answer_started {
+ ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta),);
+ self.answer_started = true;
+ }
+ print!("{delta}");
+ use std::io::Write;
+ if let Err(e) = std::io::stdout().flush() {
+ eprintln!("Failed to flush stdout: {e}");
+ }
}
- EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
+ if !self.show_agent_reasoning {
+ return;
+ }
+ if !self.reasoning_started {
+ ts_println!(
+ self,
+ "{}\n",
+ "thinking".style(self.italic).style(self.magenta),
+ );
+ self.reasoning_started = true;
+ }
+ print!("{delta}");
+ use std::io::Write;
+ if let Err(e) = std::io::stdout().flush() {
I guess flushing on every `print!` is the right thing to do...
- Created: 2025-07-17 03:19:02 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212061865
@@ -186,18 +195,49 @@ impl EventProcessor {
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if !self.answer_started {
+ ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta));
+ self.answer_started = true;
+ }
+ print!("{delta}");
+ if let Err(e) = std::io::stdout().flush() {
+ eprintln!("Failed to flush stdout: {e}");
+ }
}
- EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
+ if !self.show_agent_reasoning {
+ return;
+ }
+ if !self.reasoning_started {
+ ts_println!(
+ self,
+ "{}\n",
+ "thinking".style(self.italic).style(self.magenta),
+ );
+ self.reasoning_started = true;
+ }
+ print!("{delta}");
+ use std::io::Write;
+ if let Err(e) = std::io::stdout().flush() {
+ eprintln!("Failed to flush stdout: {e}");
+ }
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
- ts_println!(
- self,
- "{}\n{message}",
- "codex".style(self.bold).style(self.magenta)
- );
+ // if answer_started is false, this means we haven't received any
+ // delta. Thus, we need to print the message as a new answer.
+ if !self.answer_started {
+ ts_println!(
+ self,
+ "{}\n{}",
+ "codex".style(self.italic).style(self.magenta),
+ message,
+ );
+ return;
Instead of `return`, can you do `else`? Early returns are slightly harder to reason about.
- Created: 2025-07-17 03:19:21 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212062069
@@ -186,18 +195,49 @@ impl EventProcessor {
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if !self.answer_started {
+ ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta));
+ self.answer_started = true;
+ }
+ print!("{delta}");
+ if let Err(e) = std::io::stdout().flush() {
+ eprintln!("Failed to flush stdout: {e}");
+ }
}
- EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
+ if !self.show_agent_reasoning {
+ return;
+ }
+ if !self.reasoning_started {
+ ts_println!(
+ self,
+ "{}\n",
+ "thinking".style(self.italic).style(self.magenta),
+ );
+ self.reasoning_started = true;
+ }
+ print!("{delta}");
+ use std::io::Write;
+ if let Err(e) = std::io::stdout().flush() {
+ eprintln!("Failed to flush stdout: {e}");
+ }
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
- ts_println!(
- self,
- "{}\n{message}",
- "codex".style(self.bold).style(self.magenta)
- );
+ // if answer_started is false, this means we haven't received any
+ // delta. Thus, we need to print the message as a new answer.
+ if !self.answer_started {
+ ts_println!(
+ self,
+ "{}\n{}",
+ "codex".style(self.italic).style(self.magenta),
+ message,
+ );
+ return;
+ }
+ println!();
+ // reset answer_started to false so we can print the next message as a new answer.
remove comment?
- Created: 2025-07-17 03:21:39 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212063812
@@ -448,14 +488,23 @@ impl EventProcessor {
// Should we exit?
}
EventMsg::AgentReasoning(agent_reasoning_event) => {
- if self.show_agent_reasoning {
+ if !self.show_agent_reasoning {
Again, I would generally avoid `return` if `if else` is an option.
This is a large `match` statement and it's arguably cleaner if there are no `return` statements, so it's easier to reason about new code added after the `match`.
- Created: 2025-07-17 03:22:02 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212064109
@@ -448,14 +488,23 @@ impl EventProcessor {
// Should we exit?
}
EventMsg::AgentReasoning(agent_reasoning_event) => {
- if self.show_agent_reasoning {
+ if !self.show_agent_reasoning {
+ return;
+ }
+ // if reasoning_started is false, this means we haven't received any
I would suggest removing, or at least abridging, this comment.
- Created: 2025-07-17 03:22:08 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212064178
@@ -448,14 +488,23 @@ impl EventProcessor {
// Should we exit?
}
EventMsg::AgentReasoning(agent_reasoning_event) => {
- if self.show_agent_reasoning {
+ if !self.show_agent_reasoning {
+ return;
+ }
+ // if reasoning_started is false, this means we haven't received any
+ // delta. Thus, we need to print the message as a new answer.
+ if !self.reasoning_started {
ts_println!(
self,
"{}\n{}",
- "thinking".style(self.italic).style(self.magenta),
- agent_reasoning_event.text
+ "codex".style(self.italic).style(self.magenta),
+ agent_reasoning_event.text,
);
+ return;
}
+ // reset reasoning_started to false so we can print the next message as a new answer.
here too
- Created: 2025-07-17 04:45:35 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212242915
@@ -52,10 +53,14 @@ pub(crate) struct EventProcessor {
/// Whether to include `AgentReasoning` events in the output.
show_agent_reasoning: bool,
+ /// answer started
redundant comment?
- Created: 2025-07-17 04:45:40 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212242997
@@ -52,10 +53,14 @@ pub(crate) struct EventProcessor {
/// Whether to include `AgentReasoning` events in the output.
show_agent_reasoning: bool,
+ /// answer started
+ answer_started: bool,
+ /// reasoning started
redundant comment?
- Created: 2025-07-17 04:46:51 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212244085
@@ -186,18 +195,47 @@ impl EventProcessor {
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if !self.answer_started {
+ ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta));
+ self.answer_started = true;
+ }
+ print!("{delta}");
+ if let Err(e) = std::io::stdout().flush() {
+ eprintln!("Failed to flush stdout: {e}");
+ }
Honestly, maybe we should just do:
std::io::stdout().flush().expect("could not flush stdout");
- Created: 2025-07-17 04:47:30 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212244616
@@ -186,18 +194,38 @@ impl EventProcessor {
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
- EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if !self.answer_started {
+ ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta),);
+ self.answer_started = true;
+ }
+ print!("{delta}");
+ use std::io::Write;
+ if let Err(e) = std::io::stdout().flush() {
+ eprintln!("Failed to flush stdout: {e}");
+ }
}
- EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta: _ }) => {
- // TODO: think how we want to support this in the CLI
+ EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
+ if !self.show_agent_reasoning {
+ return;
+ }
+ if !self.reasoning_started {
+ ts_println!(
+ self,
+ "{}\n",
+ "thinking".style(self.italic).style(self.magenta),
+ );
+ self.reasoning_started = true;
+ }
+ print!("{delta}");
+ use std::io::Write;
+ if let Err(e) = std::io::stdout().flush() {
Same as above about using `expect()`, though you will have to suppress the clippy warning in both cases.
codex-rs/tui/src/app.rs
- Created: 2025-07-17 04:55:51 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212255717
@@ -297,6 +303,12 @@ impl<'a> App<'a> {
}
fn draw_next_frame(&mut self, terminal: &mut tui::Tui) -> Result<()> {
+ // skip if the last redraw was less than 100ms ago
This does not seem quite right to me.
If I understand correctly, this means that if we have:
- 0ms: redraw request
- 10ms: `AppEvent` that changes a render (like a `KeyEvent`)
- 50ms: redraw event
This means that the user will not see the result of the event at the 10ms mark until another redraw comes in, which could be at some indefinite point in the future? The app could be stuck looking like it is hanging, no?
Admittedly, debounce logic is usually tricky to get right.
For example, we probably don't want to debounce redraws that are in response to the user typing, since that will feel laggy?
Taking a step back: what is the case we are trying to solve for? Is it actually a problem today if the stream of tokens comes back very quickly? Does the UI lock up?
codex-rs/tui/src/chatwidget.rs
- Created: 2025-07-16 23:16:02 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2211798072
@@ -398,7 +426,10 @@ impl ChatWidget<'_> {
}
fn request_redraw(&mut self) {
- self.app_event_tx.send(AppEvent::Redraw);
+ if Instant::now().duration_since(self.last_redraw_time) > Duration::from_millis(100) {
If we are going to add logic to control the render rate, we should be doing this at a higher level in the TUI so it applies globally, no?
- Created: 2025-07-17 03:22:32 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212064465
@@ -242,16 +250,51 @@ impl ChatWidget<'_> {
self.request_redraw();
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
+ // if the answer buffer is empty, this means we haven't received any
+ // delta. Thus, we need to print the message as a new answer.
+ if self.answer_buffer.is_empty() {
again, can you just use if/else?
- Created: 2025-07-17 03:23:02 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212064830
@@ -242,16 +250,51 @@ impl ChatWidget<'_> {
self.request_redraw();
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
+ // if the answer buffer is empty, this means we haven't received any
+ // delta. Thus, we need to print the message as a new answer.
+ if self.answer_buffer.is_empty() {
+ self.conversation_history
+ .add_agent_message(&self.config, message);
+ return;
+ }
+ // else, we rerender one last time.
+ self.conversation_history
+ .replace_prev_agent_message(&self.config, message);
+ self.answer_buffer.clear();
+ self.request_redraw();
+ }
+ EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
+ if self.answer_buffer.is_empty() {
+ self.conversation_history
+ .add_agent_message(&self.config, "".to_string());
+ }
+ self.answer_buffer.push_str(&delta.clone());
self.conversation_history
- .add_agent_message(&self.config, message);
+ .replace_prev_agent_message(&self.config, self.answer_buffer.clone());
+ self.request_redraw();
+ }
+ EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
+ if self.reasoning_buffer.is_empty() {
+ self.conversation_history
+ .add_agent_reasoning(&self.config, "".to_string());
+ }
+ self.reasoning_buffer.push_str(&delta.clone());
+ self.conversation_history
+ .replace_prev_agent_reasoning(&self.config, self.reasoning_buffer.clone());
self.request_redraw();
}
EventMsg::AgentReasoning(AgentReasoningEvent { text }) => {
- if !self.config.hide_agent_reasoning {
+ // if the reasoning buffer is empty, this means we haven't received any
+ // delta. Thus, we need to print the message as a new reasoning.
+ if self.reasoning_buffer.is_empty() {
self.conversation_history
- .add_agent_reasoning(&self.config, text);
- self.request_redraw();
+ .add_agent_reasoning(&self.config, "".to_string());
}
+ // else, we rerender one last time.
this is not an exclusive else, so this comment feels a bit misleading
- Created: 2025-07-17 03:23:39 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212065334
@@ -398,7 +426,10 @@ impl ChatWidget<'_> {
}
fn request_redraw(&mut self) {
- self.app_event_tx.send(AppEvent::Redraw);
+ if Instant::now().duration_since(self.last_redraw_time) > Duration::from_millis(100) {
Right, but in general, it's possible the top-level event loop gets too many `AppEvent::Redraw` requests, so why not do the throttling there?
codex-rs/tui/src/conversation_history_widget.rs
- Created: 2025-07-16 23:13:52 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2211793101
@@ -249,6 +257,35 @@ impl ConversationHistoryWidget {
});
}
+ pub fn replace_last_agent_reasoning(&mut self, config: &Config, text: String) {
+ if let Some(idx) = self
+ .entries
+ .iter()
+ .rposition(|entry| matches!(entry.cell, HistoryCell::AgentReasoning { .. }))
+ {
+ let width = self.cached_width.get();
+ let entry = &mut self.entries[idx];
+ entry.cell = HistoryCell::new_agent_reasoning(config, text);
+ if width > 0 {
+ entry.line_count.set(entry.cell.height(width));
For here and the other function, if `width` is somehow `0`, should we do `line_count.set(0)`?
- Created: 2025-07-17 03:23:53 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212065545
@@ -249,6 +253,39 @@ impl ConversationHistoryWidget {
});
}
+ pub fn replace_last_agent_reasoning(&mut self, config: &Config, text: String) {
+ if let Some(idx) = self
+ .entries
+ .iter()
+ .rposition(|entry| matches!(entry.cell, HistoryCell::AgentReasoning { .. }))
+ {
+ let width = self.cached_width.get();
+ let entry = &mut self.entries[idx];
+ entry.cell = HistoryCell::new_agent_reasoning(config, text);
+ entry.line_count.set(if width > 0 {
+ entry.cell.height(width)
+ } else {
+ 0
+ });
+ }
+ }
+ pub fn replace_last_agent_message(&mut self, config: &Config, text: String) {
blank line before new fn decl?
- Created: 2025-07-17 03:24:57 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212066410
@@ -249,6 +253,39 @@ impl ConversationHistoryWidget {
});
}
+ pub fn replace_last_agent_reasoning(&mut self, config: &Config, text: String) {
+ if let Some(idx) = self
+ .entries
+ .iter()
+ .rposition(|entry| matches!(entry.cell, HistoryCell::AgentReasoning { .. }))
+ {
+ let width = self.cached_width.get();
+ let entry = &mut self.entries[idx];
+ entry.cell = HistoryCell::new_agent_reasoning(config, text);
+ entry.line_count.set(if width > 0 {
+ entry.cell.height(width)
+ } else {
+ 0
+ });
I would consider the following, but up to you:
let height = if width > 0 { entry.cell.height(width) } else { 0 }; entry.line_count.set(height);
- Created: 2025-07-17 03:25:13 UTC | Link: https://github.com/openai/codex/pull/1594#discussion_r2212066584
@@ -249,6 +253,39 @@ impl ConversationHistoryWidget {
});
}
+ pub fn replace_last_agent_reasoning(&mut self, config: &Config, text: String) {
+ if let Some(idx) = self
+ .entries
+ .iter()
+ .rposition(|entry| matches!(entry.cell, HistoryCell::AgentReasoning { .. }))
+ {
+ let width = self.cached_width.get();
+ let entry = &mut self.entries[idx];
+ entry.cell = HistoryCell::new_agent_reasoning(config, text);
+ entry.line_count.set(if width > 0 {
+ entry.cell.height(width)
+ } else {
+ 0
+ });
+ }
+ }
+ pub fn replace_last_agent_message(&mut self, config: &Config, text: String) {
+ if let Some(idx) = self
+ .entries
+ .iter()
+ .rposition(|entry| matches!(entry.cell, HistoryCell::AgentMessage { .. }))
+ {
+ let width = self.cached_width.get();
+ let entry = &mut self.entries[idx];
+ entry.cell = HistoryCell::new_agent_message(config, text);
+ entry.line_count.set(if width > 0 {
same here