Merge task 31-display-context-remaining-percentage into integration branch (ours)

This commit is contained in:
Rai (Michael Pokorny)
2025-06-24 20:53:02 -07:00
7 changed files with 182 additions and 9 deletions

View File

@@ -23,10 +23,14 @@ Enhance the codex-rs TUI by adding a status indicator that displays the percenta
## Implementation
**How it was implemented**
- Extend the session state in `tui/src/app.rs` or relevant module to track token usage and context limit.
- After each send/receive event, recalculate `remaining = (limit - used) * 100 / limit`.
- Render the indicator via the status bar widget (`tui/src/status_indicator_widget.rs`), appending `"{remaining}% context left"`.
- Add tests in `tui/tests/` that simulate message additions and assert the status rendering shows correct percentages at key usage points.
- Added a `history_items: Vec<ResponseItem>` field to `ChatWidget` to accumulate the raw sequence of messages and function calls.
- Created a new module `tui/src/context.rs` mirroring the JS heuristics:
- `approximate_tokens_used(&[ResponseItem])`: counts characters in text and function-call items, divides by 4 and rounds up.
- `max_tokens_for_model(&str)`: uses a registry of known model limits and heuristic fallbacks (32k, 16k, 8k, 4k, default 128k).
- `calculate_context_percent_remaining(&[ResponseItem], &str)`: computes `(remaining / max) * 100`.
- Updated `ChatWidget::replay_items` and `ChatWidget::handle_codex_event` to push each incoming `ResponseItem` into `history_items`.
- Modified `ChatComposer::render_ref` to query `calculate_context_percent_remaining`, format and display "<N>% context left" after the input area, coloring it green/yellow/red per thresholds (>40%, 2540%, ≤25%).
- Added unit tests in `tui/tests/context_percent.rs` covering token counting, model heuristics, percent rounding, and boundary conditions.
## Notes

View File

@@ -2,8 +2,7 @@ use crossterm::event::KeyEvent;
use ratatui::buffer::Buffer;
use ratatui::layout::Alignment;
use ratatui::layout::Rect;
use ratatui::style::Style;
use ratatui::style::Stylize;
use ratatui::style::{Color, Style, Stylize};
use ratatui::text::Line;
use ratatui::widgets::BorderType;
use ratatui::widgets::Borders;
@@ -38,6 +37,8 @@ pub(crate) struct ChatComposer<'a> {
history: ChatComposerHistory,
/// Maximum number of visible lines in the chat input composer.
max_rows: usize,
/// Last computed context-left percentage
context_left_percent: f64,
}
impl ChatComposer<'_> {
@@ -52,6 +53,7 @@ impl ChatComposer<'_> {
app_event_tx,
history: ChatComposerHistory::new(),
max_rows,
context_left_percent: 100.0,
};
this.update_border(has_input_focus);
this
@@ -80,6 +82,11 @@ impl ChatComposer<'_> {
self.update_border(has_focus);
}
/// Update the context-left percentage for display.
pub fn set_context_left(&mut self, pct: f64) {
self.context_left_percent = pct;
}
/// Handle a key event coming from the main UI.
pub fn handle_key_event(&mut self, key_event: KeyEvent) -> (InputResult, bool) {
let result = match self.command_popup {
@@ -313,7 +320,9 @@ impl ChatComposer<'_> {
0
};
rows as u16 + BORDER_LINES + num_popup_rows
// Include an extra row for the context-left indicator when not in popup mode
let context_row = if self.command_popup.is_none() { 1 } else { 0 };
rows as u16 + BORDER_LINES + num_popup_rows + context_row
}
fn update_border(&mut self, has_focus: bool) {
@@ -375,5 +384,18 @@ impl WidgetRef for &ChatComposer<'_> {
} else {
self.textarea.render(area, buf);
}
// Render context-left indicator when not displaying a popup
if self.command_popup.is_none() {
let pct = self.context_left_percent.round();
let text = format!("{:.0}% context left", pct);
let color = if pct > 40.0 {
Color::Green
} else if pct > 25.0 {
Color::Yellow
} else {
Color::Red
};
buf.set_string(area.x + 1, area.y + area.height - 1, text, Style::default().fg(color));
}
}
}

View File

@@ -147,6 +147,11 @@ impl BottomPane<'_> {
}
}
/// Update the context-left percentage displayed in the composer.
pub fn set_context_percent(&mut self, pct: f64) {
self.composer.set_context_left(pct);
}
/// Launch interactive mount-add dialog (host, container, [mode]).
pub fn push_mount_add_interactive(&mut self) {
let view = MountAddView::new(self.app_event_tx.clone());

View File

@@ -18,6 +18,9 @@ use codex_core::protocol::McpToolCallEndEvent;
use codex_core::protocol::Op;
use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::ReasoningItemReasoningSummary;
use codex_core::ResponseItem;
use codex_core::ContentItem;
use crossterm::event::KeyEvent;
use ratatui::buffer::Buffer;
use ratatui::layout::Constraint;
@@ -34,10 +37,10 @@ use crate::app_event_sender::AppEventSender;
use crate::bottom_pane::BottomPane;
use crate::bottom_pane::BottomPaneParams;
use crate::bottom_pane::InputResult;
use crate::context::calculate_context_percent_remaining;
use crate::conversation_history_widget::ConversationHistoryWidget;
use crate::history_cell::PatchEventType;
use crate::user_approval_widget::ApprovalRequest;
use codex_core::{ContentItem, ReasoningItemReasoningSummary, ResponseItem};
pub(crate) struct ChatWidget<'a> {
app_event_tx: AppEventSender,
@@ -47,6 +50,8 @@ pub(crate) struct ChatWidget<'a> {
input_focus: InputFocus,
config: Config,
initial_user_message: Option<UserMessage>,
/// raw ResponseItem stream for context-left calculation
history_items: Vec<ResponseItem>,
}
#[derive(Clone, Copy, Eq, PartialEq)]
@@ -133,6 +138,7 @@ impl ChatWidget<'_> {
initial_prompt.unwrap_or_default(),
initial_images,
),
history_items: Vec::new(),
}
}
@@ -211,6 +217,8 @@ impl ChatWidget<'_> {
/// Replay a previous session transcript into the conversation history.
pub fn replay_items(&mut self, items: Vec<ResponseItem>) {
// record raw items for context-left calculation
self.history_items.extend(items.iter().cloned());
for item in items {
match item {
ResponseItem::Message { role, content } => {
@@ -246,6 +254,9 @@ impl ChatWidget<'_> {
}
}
self.conversation_history.scroll_to_bottom();
// update context-left after replay
let pct = calculate_context_percent_remaining(&self.history_items, &self.config.model);
self.bottom_pane.set_context_percent(pct);
}
pub(crate) fn handle_codex_event(&mut self, event: Event) {
@@ -271,7 +282,12 @@ impl ChatWidget<'_> {
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
self.conversation_history
.add_agent_message(&self.config, message);
.add_agent_message(&self.config, message.clone());
// record raw item for context-left calculation
self.history_items.push(ResponseItem::Message {
role: "assistant".to_string(),
content: vec![ContentItem::OutputText { text: message.clone() }],
});
self.request_redraw();
}
EventMsg::AgentReasoning(AgentReasoningEvent { text }) => {
@@ -289,11 +305,17 @@ impl ChatWidget<'_> {
last_agent_message: _,
}) => {
self.bottom_pane.set_task_running(false);
// update context-left after turn completes
let pct = calculate_context_percent_remaining(&self.history_items, &self.config.model);
self.bottom_pane.set_context_percent(pct);
self.request_redraw();
}
EventMsg::Error(ErrorEvent { message }) => {
self.conversation_history.add_error(message);
self.bottom_pane.set_task_running(false);
// update context-left after error
let pct = calculate_context_percent_remaining(&self.history_items, &self.config.model);
self.bottom_pane.set_context_percent(pct);
}
EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
command,

View File

@@ -0,0 +1,65 @@
//! Utilities for computing approximate token usage and remaining context percentage
//! in the TUI, mirroring the JS heuristics in `calculateContextPercentRemaining`.
use codex_core::ResponseItem;
use codex_core::ContentItem;
/// Roughly estimate number of model tokens represented by the given response items.
/// Counts characters in text and function-call items, divides by 4 and rounds up.
pub fn approximate_tokens_used(items: &[ResponseItem]) -> usize {
let mut char_count = 0;
for item in items {
match item {
ResponseItem::Message { role, content } if role.eq_ignore_ascii_case("user")
|| role.eq_ignore_ascii_case("assistant") =>
{
for ci in content {
match ci {
ContentItem::InputText { text }
| ContentItem::OutputText { text } => char_count += text.len(),
_ => {}
}
}
}
ResponseItem::FunctionCall { name, arguments, .. } => {
char_count += name.len();
char_count += arguments.len();
}
ResponseItem::FunctionCallOutput { output, .. } => {
char_count += output.content.len();
}
_ => {}
}
}
(char_count + 3) / 4
}
/// Return the model's max context size in tokens, using known limits or heuristics.
pub fn max_tokens_for_model(model: &str) -> usize {
// Known OpenAI model limits
match model {
// 4k context models
m if m.eq_ignore_ascii_case("gpt-3.5-turbo") => 4096,
m if m.eq_ignore_ascii_case("gpt-4o") => 8192,
// 8k context
m if m.to_lowercase().contains("8k") => 8192,
// 16k context
m if m.to_lowercase().contains("16k") => 16384,
// 32k context
m if m.to_lowercase().contains("32k") => 32768,
// Fallback default
_ => 131072,
}
}
/// Compute the percentage of tokens remaining in context for a given model.
/// Returns a floating-point percent (0.0100.0).
pub fn calculate_context_percent_remaining(
items: &[ResponseItem],
model: &str,
) -> f64 {
let used = approximate_tokens_used(items);
let max = max_tokens_for_model(model);
let remaining = max.saturating_sub(used);
(remaining as f64) / (max as f64) * 100.0
}

View File

@@ -42,6 +42,7 @@ mod mouse_capture;
mod scroll_event_helper;
mod slash_command;
mod status_indicator_widget;
mod context;
mod text_block;
mod text_formatting;
mod tui;

View File

@@ -0,0 +1,54 @@
use codex_core::{ContentItem, ResponseItem};
use codex_tui::context::{approximate_tokens_used, calculate_context_percent_remaining, max_tokens_for_model};
#[test]
fn test_approximate_tokens_used_texts() {
// 4 chars -> 1 token
let items = vec![ResponseItem::Message {
role: "user".into(),
content: vec![ContentItem::InputText { text: "abcd".into() }],
}];
assert_eq!(approximate_tokens_used(&items), 1);
// 7 chars -> 2 tokens
let items = vec![ResponseItem::Message {
role: "assistant".into(),
content: vec![ContentItem::OutputText { text: "example".into() }],
}];
assert_eq!(approximate_tokens_used(&items), 2);
}
#[test]
fn test_approximate_tokens_used_function_calls() {
// name.len=2 + args.len=7 -> 9 chars -> ceil(9/4)=3
let items = vec![ResponseItem::FunctionCall {
name: "fn".into(),
arguments: "{\"a\":1}".into(),
call_id: "id".into(),
}];
assert_eq!(approximate_tokens_used(&items), 3);
}
#[test]
fn test_max_tokens_for_model_heuristics() {
assert_eq!(max_tokens_for_model("model-32k"), 32768);
assert_eq!(max_tokens_for_model("MY-16K-model"), 16384);
assert_eq!(max_tokens_for_model("foo-8k-bar"), 8192);
assert_eq!(max_tokens_for_model("unknown-model"), 131072);
}
#[test]
fn test_calculate_context_percent_remaining() {
// if used=0, remaining=max -> 100%
let items: Vec<ResponseItem> = vec![];
let pct = calculate_context_percent_remaining(&items, "foo");
assert!((pct - 100.0).abs() < 1e-6);
// used=1 of max=4k -> ~0.0249%
let items = vec![ResponseItem::Message {
role: "user".into(),
content: vec![ContentItem::InputText { text: "a".into() }],
}];
let pct = calculate_context_percent_remaining(&items, "4k-model");
assert!(pct < 100.0 && pct > 99.0);
}