Fix streaming bugs: add a `streaming` config toggle and repair delta rendering in the CLI and TUI

This commit is contained in:
Ahmed Ibrahim
2025-07-14 23:09:52 -07:00
parent 9487ae4ce7
commit 5f16fe8dda
6 changed files with 553 additions and 47 deletions

View File

@@ -131,6 +131,13 @@ pub struct Config {
/// request using the Responses API.
pub model_reasoning_summary: ReasoningSummary,
/// Whether to surface live streaming delta events in front-ends. When `true`
/// (default) Codex will forward `AgentMessageDelta` / `AgentReasoningDelta`
/// events and UIs may show a typing indicator. When `false` Codex UIs should
/// ignore delta events and rely solely on the final aggregated
/// `AgentMessage`/`AgentReasoning` items (legacy behaviour).
pub streaming_enabled: bool,
/// When set to `true`, overrides the default heuristic and forces
/// `model_supports_reasoning_summaries()` to return `true`.
pub model_supports_reasoning_summaries: bool,
@@ -321,6 +328,13 @@ pub struct ConfigToml {
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: Option<String>,
/// Whether to surface live streaming delta events in front-ends. When `true`
/// (default) Codex will forward `AgentMessageDelta` / `AgentReasoningDelta`
/// events and UIs may show a typing indicator. When `false` Codex UIs should
/// ignore delta events and rely solely on the final aggregated
/// `AgentMessage`/`AgentReasoning` items (legacy behaviour).
pub streaming: Option<bool>,
}
impl ConfigToml {
@@ -486,6 +500,7 @@ impl Config {
.or(cfg.model_reasoning_summary)
.unwrap_or_default(),
streaming_enabled: cfg.streaming.unwrap_or(true),
model_supports_reasoning_summaries: cfg
.model_supports_reasoning_summaries
.unwrap_or(false),
@@ -798,6 +813,7 @@ disable_response_storage = true
hide_agent_reasoning: false,
model_reasoning_effort: ReasoningEffort::High,
model_reasoning_summary: ReasoningSummary::Detailed,
streaming_enabled: true,
model_supports_reasoning_summaries: false,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
},
@@ -844,6 +860,7 @@ disable_response_storage = true
hide_agent_reasoning: false,
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
streaming_enabled: true,
model_supports_reasoning_summaries: false,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
};
@@ -905,6 +922,7 @@ disable_response_storage = true
hide_agent_reasoning: false,
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
streaming_enabled: true,
model_supports_reasoning_summaries: false,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
};

View File

@@ -228,3 +228,62 @@ pub enum ReasoningSummary {
/// Option to disable reasoning summaries.
None,
}
/// Base config deserialized from ~/.codex/config.toml.
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ConfigToml {
    /// Optional override of model selection.
    pub model: Option<String>,
    /// Provider to use from the model_providers map.
    pub model_provider: Option<String>,
    /// Size of the context window for the model, in tokens.
    pub model_context_window: Option<u64>,
    /// Maximum number of output tokens.
    pub model_max_output_tokens: Option<u64>,
    /// Default approval policy for executing commands.
    pub approval_policy: Option<crate::protocol::AskForApproval>,
    // Non-`Option` field: `#[serde(default)]` is required so a missing key
    // falls back to `ShellEnvironmentPolicyToml::default()`.
    #[serde(default)]
    pub shell_environment_policy: crate::config_types::ShellEnvironmentPolicyToml,
    /// Sandbox mode to use.
    pub sandbox_mode: Option<crate::config_types::SandboxMode>,
    /// Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`.
    pub sandbox_workspace_write: Option<crate::config_types::SandboxWorkplaceWrite>,
    /// Disable server-side response storage.
    pub disable_response_storage: Option<bool>,
    /// Optional external command to spawn for end-user notifications.
    /// (No `#[serde(default)]` needed: missing `Option` fields deserialize
    /// to `None` by default.)
    pub notify: Option<Vec<String>>,
    /// System instructions.
    pub instructions: Option<String>,
    /// Definition for MCP servers that Codex can reach out to for tool calls.
    #[serde(default)]
    pub mcp_servers: HashMap<String, crate::config_types::McpServerConfig>,
    /// User-defined provider entries that extend/override the built-in list.
    #[serde(default)]
    pub model_providers: HashMap<String, crate::model_provider_info::ModelProviderInfo>,
    /// Maximum number of bytes to include from an AGENTS.md project doc file.
    pub project_doc_max_bytes: Option<usize>,
    /// Profile to use from the `profiles` map.
    pub profile: Option<String>,
    /// Named profiles to facilitate switching between different configurations.
    #[serde(default)]
    pub profiles: HashMap<String, crate::config_profile::ConfigProfile>,
    /// Settings that govern if and what will be written to `~/.codex/history.jsonl`.
    pub history: Option<crate::config_types::History>,
    /// Optional URI-based file opener.
    pub file_opener: Option<crate::config_types::UriBasedFileOpener>,
    /// Collection of settings that are specific to the TUI.
    pub tui: Option<crate::config_types::Tui>,
    /// When set to `true`, `AgentReasoning` events will be hidden from the UI/output.
    pub hide_agent_reasoning: Option<bool>,
    /// Reasoning effort to request from the model — TODO confirm exact
    /// semantics against the `Config::model_reasoning_effort` consumer.
    pub model_reasoning_effort: Option<crate::config_types::ReasoningEffort>,
    /// Reasoning summary style to request from the model — TODO confirm exact
    /// semantics against the `Config::model_reasoning_summary` consumer.
    pub model_reasoning_summary: Option<crate::config_types::ReasoningSummary>,
    /// Override to force-enable reasoning summaries for the configured model.
    pub model_supports_reasoning_summaries: Option<bool>,
    /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
    pub chatgpt_base_url: Option<String>,
    /// Global toggle to enable/disable streaming (delta) output.
    /// Defaults to `true` downstream (`Config` uses `cfg.streaming.unwrap_or(true)`).
    pub streaming: Option<bool>,
}

View File

@@ -18,10 +18,11 @@ use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::SessionConfiguredEvent;
use codex_core::protocol::TokenUsage;
use owo_colors::OwoColorize;
use std::io::{self, Write};
use owo_colors::Style;
use shlex::try_join;
use std::collections::HashMap;
use std::io::Write;
use std::io::{self};
use std::time::Instant;
/// This should be configurable. When used in CI, users may not want to impose
@@ -51,10 +52,20 @@ pub(crate) struct EventProcessor {
/// Whether to include `AgentReasoning` events in the output.
show_agent_reasoning: bool,
/// Whether to surface streaming deltas (true = print deltas + suppress final message).
streaming_enabled: bool,
/// Internal: have we already printed the `codex` header for the current streaming turn?
printed_agent_header: bool,
/// Internal: have we already printed the `thinking` header for current streaming turn?
printed_reasoning_header: bool,
}
impl EventProcessor {
pub(crate) fn create_with_ansi(with_ansi: bool, show_agent_reasoning: bool) -> Self {
pub(crate) fn create_with_ansi(
with_ansi: bool,
show_agent_reasoning: bool,
streaming_enabled: bool,
) -> Self {
let call_id_to_command = HashMap::new();
let call_id_to_patch = HashMap::new();
let call_id_to_tool_call = HashMap::new();
@@ -72,6 +83,9 @@ impl EventProcessor {
cyan: Style::new().cyan(),
call_id_to_tool_call,
show_agent_reasoning,
streaming_enabled,
printed_agent_header: false,
printed_reasoning_header: false,
}
} else {
Self {
@@ -86,6 +100,9 @@ impl EventProcessor {
cyan: Style::new(),
call_id_to_tool_call,
show_agent_reasoning,
streaming_enabled,
printed_agent_header: false,
printed_reasoning_header: false,
}
}
}
@@ -180,21 +197,46 @@ impl EventProcessor {
ts_println!(self, "{}", message.style(self.dimmed));
}
EventMsg::TaskStarted | EventMsg::TaskComplete(_) => {
// Reset streaming headers at start/end boundaries.
if matches!(msg, EventMsg::TaskStarted) {
self.printed_agent_header = false;
self.printed_reasoning_header = false;
}
// Ignore.
}
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
ts_println!(self, "tokens used: {total_tokens}");
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
ts_println!(
self,
"{}\n{message}",
"codex".style(self.bold).style(self.magenta)
);
if self.streaming_enabled {
// Suppress full message when streaming; final markdown not printed in CLI.
// If no deltas were seen, fall back to printing now.
if !self.printed_agent_header {
ts_println!(
self,
"{}\n{message}",
"codex".style(self.bold).style(self.magenta)
);
}
} else {
ts_println!(
self,
"{}\n{message}",
"codex".style(self.bold).style(self.magenta)
);
}
}
EventMsg::AgentMessageDelta(AgentMessageEvent { message }) => {
print!("{message}");
let _ = io::stdout().flush();
if !self.streaming_enabled {
// streaming disabled, ignore
} else {
if !self.printed_agent_header {
ts_println!(self, "{}", "codex".style(self.bold).style(self.magenta));
self.printed_agent_header = true;
}
print!("{message}");
let _ = io::stdout().flush();
}
}
EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
call_id,
@@ -348,7 +390,7 @@ impl EventProcessor {
);
// Pretty-print the patch summary with colored diff markers so
// its easy to scan in the terminal output.
// it's easy to scan in the terminal output.
for (path, change) in changes.iter() {
match change {
FileChange::Add { content } => {
@@ -446,16 +488,35 @@ impl EventProcessor {
}
EventMsg::AgentReasoning(agent_reasoning_event) => {
if self.show_agent_reasoning {
ts_println!(
self,
"{}\n{}",
"thinking".style(self.italic).style(self.magenta),
agent_reasoning_event.text
);
if self.streaming_enabled {
if !self.printed_reasoning_header {
ts_println!(
self,
"{}\n{}",
"thinking".style(self.italic).style(self.magenta),
agent_reasoning_event.text
);
}
} else {
ts_println!(
self,
"{}\n{}",
"thinking".style(self.italic).style(self.magenta),
agent_reasoning_event.text
);
}
}
}
EventMsg::AgentReasoningDelta(agent_reasoning_event) => {
if self.show_agent_reasoning {
if self.show_agent_reasoning && self.streaming_enabled {
if !self.printed_reasoning_header {
ts_println!(
self,
"{}",
"thinking".style(self.italic).style(self.magenta)
);
self.printed_reasoning_header = true;
}
print!("{}", agent_reasoning_event.text);
let _ = io::stdout().flush();
}

View File

@@ -115,8 +115,11 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
};
let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides)?;
let mut event_processor =
EventProcessor::create_with_ansi(stdout_with_ansi, !config.hide_agent_reasoning);
let mut event_processor = EventProcessor::create_with_ansi(
stdout_with_ansi,
!config.hide_agent_reasoning,
config.streaming_enabled,
);
// Print the effective configuration and prompt so users can see what Codex
// is using.
event_processor.print_config_summary(&config, &prompt);

View File

@@ -40,6 +40,15 @@ use crate::history_cell::PatchEventType;
use crate::user_approval_widget::ApprovalRequest;
use codex_file_search::FileMatch;
/// Bookkeeping for a live streaming cell. We track the `sub_id` to know when
/// a new turn has started (and thus when to start a new cell) and accumulate
/// the full text so we can re-render markdown cleanly when the turn ends.
#[derive(Default)]
struct StreamingBuf {
    // Submission id of the turn currently streaming into this cell;
    // `None` when no stream is in flight.
    sub_id: Option<String>,
    // Full text accumulated from all deltas seen so far in the current turn.
    text: String,
}
pub(crate) struct ChatWidget<'a> {
app_event_tx: AppEventSender,
codex_op_tx: UnboundedSender<Op>,
@@ -49,6 +58,10 @@ pub(crate) struct ChatWidget<'a> {
config: Config,
initial_user_message: Option<UserMessage>,
token_usage: TokenUsage,
/// Accumulates assistant streaming text for the *current* turn.
streaming_agent: StreamingBuf,
/// Accumulates reasoning streaming text for the *current* turn.
streaming_reasoning: StreamingBuf,
}
#[derive(Clone, Copy, Eq, PartialEq)]
@@ -135,6 +148,8 @@ impl ChatWidget<'_> {
initial_images,
),
token_usage: TokenUsage::default(),
streaming_agent: StreamingBuf::default(),
streaming_reasoning: StreamingBuf::default(),
}
}
@@ -220,6 +235,8 @@ impl ChatWidget<'_> {
pub(crate) fn handle_codex_event(&mut self, event: Event) {
let Event { id, msg } = event;
// We need a copy of `id` for streaming bookkeeping because it is moved into some match arms.
let event_id = id.clone();
match msg {
EventMsg::SessionConfigured(event) => {
// Record session information at the top of the conversation.
@@ -240,27 +257,118 @@ impl ChatWidget<'_> {
self.request_redraw();
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
self.conversation_history
.add_agent_message(&self.config, message);
if self.config.streaming_enabled {
// Final full assistant message. If we have an in-flight streaming cell for this id, replace it.
let same_turn = self
.streaming_agent
.sub_id
.as_ref()
.map(|s| s == &event_id)
.unwrap_or(false);
if same_turn {
self.conversation_history
.replace_last_agent_message(&self.config, message.clone());
self.streaming_agent.sub_id = None;
self.streaming_agent.text.clear();
} else {
// Streaming enabled but we never saw deltas just render normally.
self.finalize_streams_if_new_turn(&event_id);
self.conversation_history
.add_agent_message(&self.config, message.clone());
}
} else {
// Streaming disabled -> always render final message, ignore any deltas.
self.conversation_history
.add_agent_message(&self.config, message.clone());
}
self.request_redraw();
}
EventMsg::AgentMessageDelta(AgentMessageEvent { message }) => {
self
.conversation_history
.append_agent_message_delta(&self.config, message);
// Streaming Assistant text.
if !self.config.streaming_enabled {
// Ignore when streaming disabled.
return;
}
// Start a new cell if this delta belongs to a new turn.
let is_new_stream = self
.streaming_agent
.sub_id
.as_ref()
.map(|s| s != &event_id)
.unwrap_or(true);
if is_new_stream {
// Finalise any in-flight stream from the prior turn.
self.finalize_streams_if_new_turn(&event_id);
// Start a header-only streaming cell so we don't parse partial markdown.
self.conversation_history
.add_agent_message(&self.config, String::new());
self.streaming_agent.sub_id = Some(event_id.clone());
self.streaming_agent.text.clear();
self.streaming_agent.text.push_str(&message);
// Append the first chunk into the new streaming cell.
self.conversation_history
.append_agent_message_delta(&self.config, message);
} else {
self.streaming_agent.text.push_str(&message);
self.conversation_history
.append_agent_message_delta(&self.config, message);
}
self.request_redraw();
}
EventMsg::AgentReasoning(AgentReasoningEvent { text }) => {
if !self.config.hide_agent_reasoning {
self.conversation_history
.add_agent_reasoning(&self.config, text);
if self.config.streaming_enabled {
// Final full reasoning summary. Replace streaming cell if same turn.
let same_turn = self
.streaming_reasoning
.sub_id
.as_ref()
.map(|s| s == &event_id)
.unwrap_or(false);
if same_turn {
self.conversation_history
.replace_last_agent_reasoning(&self.config, text.clone());
self.streaming_reasoning.sub_id = None;
self.streaming_reasoning.text.clear();
} else {
self.finalize_streams_if_new_turn(&event_id);
self.conversation_history
.add_agent_reasoning(&self.config, text.clone());
}
} else {
self.conversation_history
.add_agent_reasoning(&self.config, text.clone());
}
self.request_redraw();
}
}
EventMsg::AgentReasoningDelta(AgentReasoningEvent { text }) => {
if !self.config.hide_agent_reasoning {
self.conversation_history
.append_agent_reasoning_delta(&self.config, text);
if !self.config.streaming_enabled {
// Ignore when streaming disabled.
return;
}
let is_new_stream = self
.streaming_reasoning
.sub_id
.as_ref()
.map(|s| s != &event_id)
.unwrap_or(true);
if is_new_stream {
self.finalize_streams_if_new_turn(&event_id);
// Start header-only streaming cell.
self.conversation_history
.add_agent_reasoning(&self.config, String::new());
self.streaming_reasoning.sub_id = Some(event_id.clone());
self.streaming_reasoning.text.clear();
self.streaming_reasoning.text.push_str(&text);
self.conversation_history
.append_agent_reasoning_delta(&self.config, text);
} else {
self.streaming_reasoning.text.push_str(&text);
self.conversation_history
.append_agent_reasoning_delta(&self.config, text);
}
self.request_redraw();
}
}
@@ -272,6 +380,8 @@ impl ChatWidget<'_> {
EventMsg::TaskComplete(TaskCompleteEvent {
last_agent_message: _,
}) => {
// Turn has ended ensure no lingering streaming cells remain un-finalised.
self.finalize_streams();
self.bottom_pane.set_task_running(false);
self.request_redraw();
}
@@ -451,6 +561,42 @@ impl ChatWidget<'_> {
tracing::error!("failed to submit op: {e}");
}
}
/// Finalise (render) streaming buffers when we detect a new turn id.
fn finalize_streams_if_new_turn(&mut self, new_id: &str) {
// If the incoming id differs from the current stream id(s) we must flush.
let agent_changed = self
.streaming_agent
.sub_id
.as_ref()
.map(|s| s != new_id)
.unwrap_or(false);
let reasoning_changed = self
.streaming_reasoning
.sub_id
.as_ref()
.map(|s| s != new_id)
.unwrap_or(false);
if agent_changed || reasoning_changed {
self.finalize_streams();
}
}
/// Re-render any in-flight streaming cells with full markdown and clear buffers.
fn finalize_streams(&mut self) {
    // Taking `sub_id` both tests whether a stream was in flight and marks
    // it as finished in a single step.
    if self.streaming_agent.sub_id.take().is_some() {
        let text = std::mem::take(&mut self.streaming_agent.text);
        self.conversation_history
            .replace_last_agent_message(&self.config, text);
    }
    // Same treatment for the reasoning stream.
    if self.streaming_reasoning.sub_id.take().is_some() {
        let text = std::mem::take(&mut self.streaming_reasoning.text);
        self.conversation_history
            .replace_last_agent_reasoning(&self.config, text);
    }
}
}
impl WidgetRef for &ChatWidget<'_> {

View File

@@ -3,13 +3,13 @@ use crate::history_cell::CommandOutput;
use crate::history_cell::HistoryCell;
use crate::history_cell::PatchEventType;
use codex_core::config::Config;
use crate::markdown::append_markdown;
use codex_core::protocol::FileChange;
use codex_core::protocol::SessionConfiguredEvent;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use ratatui::prelude::*;
use ratatui::style::Style;
use ratatui::text::Span;
use ratatui::widgets::*;
use serde_json::Value as JsonValue;
use std::cell::Cell as StdCell;
@@ -34,6 +34,11 @@ pub struct ConversationHistoryWidget {
/// The height of the viewport last time render_ref() was called
last_viewport_height: StdCell<usize>,
has_input_focus: bool,
/// Scratch buffer used while incrementally streaming an agent message.
/// We accumulate the full text so we can re-render markdown cleanly when the turn finishes.
streaming_agent_message_buf: String,
/// Scratch buffer used while incrementally streaming agent reasoning.
streaming_agent_reasoning_buf: String,
}
impl ConversationHistoryWidget {
@@ -45,6 +50,8 @@ impl ConversationHistoryWidget {
num_rendered_lines: StdCell::new(0),
last_viewport_height: StdCell::new(0),
has_input_focus: false,
streaming_agent_message_buf: String::new(),
streaming_agent_reasoning_buf: String::new(),
}
}
@@ -196,23 +203,124 @@ impl ConversationHistoryWidget {
}
pub fn add_agent_message(&mut self, config: &Config, message: String) {
    // A brand-new message supersedes any in-flight stream, so rebuild the
    // streaming scratch buffer from this text (reusing its allocation).
    self.streaming_agent_message_buf.clone_from(&message);
    self.add_to_history(HistoryCell::new_agent_message(config, message));
}
pub fn add_agent_reasoning(&mut self, config: &Config, text: String) {
    // Seed the reasoning scratch buffer with this text (reusing its allocation).
    self.streaming_agent_reasoning_buf.clone_from(&text);
    self.add_to_history(HistoryCell::new_agent_reasoning(config, text));
}
pub fn append_agent_message_delta(&mut self, config: &Config, text: String) {
/// Append incremental assistant text without *forcing* a newline between chunks.
///
/// The earlier implementation re-ran the markdown renderer on each delta.
/// Because `tui_markdown` always produces whole `Line` values, every chunk
/// showed up on its own row ("stairstepping" output). Instead we take a
/// lightweight approach during streaming: extend the last visible line with
/// plain text and only honour explicit `\n` boundaries from the model. When
/// the turn completes the caller should invoke `replace_last_agent_message()`
/// to re-render the full accumulated markdown so styling (code blocks, bold,
/// links, etc.) is correct.
pub fn append_agent_message_delta(&mut self, _config: &Config, text: String) {
if text.is_empty() {
return;
}
// Accumulate in scratch buffer so we can re-render later.
self.streaming_agent_message_buf.push_str(&text);
// If the newly received chunk contains a newline we re-render the entire
// accumulated buffer using the markdown renderer so formatting (bold,
// code blocks, links) becomes visible incrementally. This is cheaper and
// less visually noisy than re-rendering on every token while still
// giving the user feedback at natural boundaries.
if text.contains('\n') {
// Rebuild the most recent AgentMessage entry from scratch.
let mut found_idx: Option<usize> = None;
for i in (0..self.entries.len()).rev() {
if matches!(self.entries[i].cell, HistoryCell::AgentMessage { .. }) {
found_idx = Some(i);
break;
}
}
if let Some(idx) = found_idx {
let width = self.cached_width.get();
// Rebuild cell then borrow entry once.
let rebuilt = HistoryCell::new_agent_message(
_config,
self.streaming_agent_message_buf.clone(),
);
let entry = &mut self.entries[idx];
entry.cell = rebuilt;
// Drop the trailing blank added by new_agent_message so we can continue streaming.
if let HistoryCell::AgentMessage { view } = &mut entry.cell {
if let Some(last) = view.lines.last() {
if last.spans.len() == 1 && last.spans[0].content.is_empty() {
view.lines.pop();
}
}
if width > 0 {
entry.line_count.set(view.height(width));
}
}
return;
}
}
if let Some(entry) = self.entries.last_mut() {
if let HistoryCell::AgentMessage { view } = &mut entry.cell {
if let Some(last) = view.lines.last() {
if last.spans.len() == 1 && last.spans[0].content.is_empty() {
view.lines.pop();
// Ensure there is *at least* one line available for content after the header.
// new_agent_message() with an empty string produces a header line and one blank line.
// We keep that blank as our first append target.
if view.lines.len() < 2 {
view.lines.push(Line::from(""));
}
// Trim *at most one* trailing separator line that we added for spacing.
// Preserve user-intended blank lines (paragraph breaks) so we do not collapse newlines.
if view.lines.len() > 1 {
if let Some(last) = view.lines.last() {
if last.spans.len() == 1 && last.spans[0].content.is_empty() {
view.lines.pop();
}
}
}
append_markdown(&text, &mut view.lines, config);
view.lines.push(Line::from(""));
// Append respecting embedded newlines from the chunk.
let mut first_part = true;
for part in text.split_inclusive('\n') {
let has_newline = part.ends_with('\n');
let content = part.trim_end_matches('\n');
if first_part {
if let Some(last_line) = view.lines.last_mut() {
last_line.spans.push(Span::raw(content.to_string()));
} else {
view.lines.push(Line::from(content.to_string()));
}
first_part = false;
} else {
// Option 1: If this is a new line, and content starts with a space, trim it.
let trimmed_content = if content.starts_with(' ')
&& matches!(view.lines.last(), Some(l) if l.spans.is_empty())
{
content.trim_start()
} else {
content
};
view.lines.push(Line::from(trimmed_content.to_string()));
}
if has_newline {
// honour explicit newline: start a fresh empty line (content target)
view.lines.push(Line::from(""));
}
}
// DO NOT push the cell separator yet; we'll add it on finalisation.
let width = self.cached_width.get();
if width > 0 {
entry.line_count.set(view.height(width));
@@ -220,20 +328,89 @@ impl ConversationHistoryWidget {
return;
}
}
// Fallback: create new entry
self.add_agent_message(config, text);
// Fallback: no existing AgentMessage start a new one.
// Start a streaming cell with an *empty* body so we don't parse partial markdown.
self.add_agent_message(_config, String::new());
self.append_agent_message_delta(_config, text);
}
pub fn append_agent_reasoning_delta(&mut self, config: &Config, text: String) {
if let Some(entry) = self.entries.last_mut() {
if let HistoryCell::AgentReasoning { view } = &mut entry.cell {
if let Some(last) = view.lines.last() {
if last.spans.len() == 1 && last.spans[0].content.is_empty() {
view.lines.pop();
/// Append incremental reasoning text (same semantics as `append_agent_message_delta`).
pub fn append_agent_reasoning_delta(&mut self, _config: &Config, text: String) {
if text.is_empty() {
return;
}
self.streaming_agent_reasoning_buf.push_str(&text);
// Re-render incrementally at newline boundaries.
if text.contains('\n') {
let mut found_idx: Option<usize> = None;
for i in (0..self.entries.len()).rev() {
if matches!(self.entries[i].cell, HistoryCell::AgentReasoning { .. }) {
found_idx = Some(i);
break;
}
}
if let Some(idx) = found_idx {
let width = self.cached_width.get();
let rebuilt = HistoryCell::new_agent_reasoning(
_config,
self.streaming_agent_reasoning_buf.clone(),
);
let entry = &mut self.entries[idx];
entry.cell = rebuilt;
if let HistoryCell::AgentReasoning { view } = &mut entry.cell {
if let Some(last) = view.lines.last() {
if last.spans.len() == 1 && last.spans[0].content.is_empty() {
view.lines.pop();
}
}
if width > 0 {
entry.line_count.set(view.height(width));
}
}
append_markdown(&text, &mut view.lines, config);
view.lines.push(Line::from(""));
return;
}
}
if let Some(entry) = self.entries.last_mut() {
if let HistoryCell::AgentReasoning { view } = &mut entry.cell {
if view.lines.len() < 2 {
view.lines.push(Line::from(""));
}
if view.lines.len() > 1 {
if let Some(last) = view.lines.last() {
if last.spans.len() == 1 && last.spans[0].content.is_empty() {
view.lines.pop();
}
}
}
let mut first_part = true;
for part in text.split_inclusive('\n') {
let has_newline = part.ends_with('\n');
let content = part.trim_end_matches('\n');
if first_part {
if let Some(last_line) = view.lines.last_mut() {
last_line.spans.push(Span::raw(content.to_string()));
} else {
view.lines.push(Line::from(content.to_string()));
}
first_part = false;
} else {
let trimmed_content = if content.starts_with(' ')
&& matches!(view.lines.last(), Some(l) if l.spans.is_empty())
{
content.trim_start()
} else {
content
};
view.lines.push(Line::from(trimmed_content.to_string()));
}
if has_newline {
view.lines.push(Line::from(""));
}
}
// no separator until finalisation
let width = self.cached_width.get();
if width > 0 {
entry.line_count.set(view.height(width));
@@ -241,7 +418,49 @@ impl ConversationHistoryWidget {
return;
}
}
self.add_agent_reasoning(config, text);
self.add_agent_reasoning(_config, String::new());
self.append_agent_reasoning_delta(_config, text);
}
/// Replace the most recent AgentMessage cell with the fully accumulated `text`.
/// This should be called once the turn is complete so we can render proper markdown.
pub fn replace_last_agent_message(&mut self, config: &Config, text: String) {
    self.streaming_agent_message_buf.clear();
    // Scan from the back for the newest AgentMessage entry.
    let found = self
        .entries
        .iter()
        .rposition(|e| matches!(e.cell, HistoryCell::AgentMessage { .. }));
    match found {
        Some(idx) => {
            let entry = &mut self.entries[idx];
            entry.cell = HistoryCell::new_agent_message(config, text);
            let width = self.cached_width.get();
            if width > 0 {
                entry.line_count.set(entry.cell.height(width));
            }
        }
        // No existing AgentMessage (shouldn't happen) — append a new one.
        None => self.add_agent_message(config, text),
    }
}
/// Replace the most recent AgentReasoning cell with the fully accumulated `text`.
pub fn replace_last_agent_reasoning(&mut self, config: &Config, text: String) {
    self.streaming_agent_reasoning_buf.clear();
    // Scan from the back for the newest AgentReasoning entry.
    let found = self
        .entries
        .iter()
        .rposition(|e| matches!(e.cell, HistoryCell::AgentReasoning { .. }));
    match found {
        Some(idx) => {
            let entry = &mut self.entries[idx];
            entry.cell = HistoryCell::new_agent_reasoning(config, text);
            let width = self.cached_width.get();
            if width > 0 {
                entry.line_count.set(entry.cell.height(width));
            }
        }
        // No existing AgentReasoning — append a fresh cell instead.
        None => self.add_agent_reasoning(config, text),
    }
}
pub fn add_background_event(&mut self, message: String) {
@@ -496,7 +715,7 @@ impl WidgetRef for ConversationHistoryWidget {
{
// Choose a thumb color that stands out only when this pane has focus so that the
// users attention is naturally drawn to the active viewport. When unfocused we show
// user's attention is naturally drawn to the active viewport. When unfocused we show
// a low-contrast thumb so the scrollbar fades into the background without becoming
// invisible.
let thumb_style = if self.has_input_focus {