feat: add context percent to status line (#17637)

Co-authored-by: Codex <noreply@openai.com>

Author: jif-oai
Date: 2026-04-14 14:27:24 +01:00
Committed by: GitHub
Parent: 34a9ca083e
Commit: e6947f85f6
5 changed files with 89 additions and 9 deletions

View File

@@ -8,14 +8,14 @@ expression: "render_lines(&view, 72)"
 Type to search
 >
-[x] model-name             Current model name
-[x] current-dir            Current working directory
-[x] git-branch             Current Git branch (omitted when unavaila
-[ ] model-with-reasoning   Current model name with reasoning level
-[ ] project-root           Project root directory (omitted when unav
-[ ] context-remaining      Percentage of context window remaining (o
-[ ] context-used           Percentage of context window used (omitte
-[ ] five-hour-limit        Remaining usage on 5-hour usage limit (om
+[x] model-name             Current model name
+[x] current-dir            Current working directory
+[x] git-branch             Current Git branch (omitted when unavail…
+[ ] model-with-reasoning   Current model name with reasoning level
+[ ] project-root           Project root directory (omitted when una…
+[ ] context-remaining      Percentage of context window remaining (…
+[ ] context-remaining-...  Percentage of context window remaining (
+[ ] context-used           Percentage of context window used (omitt
 gpt-5-codex · ~/codex-rs · jif/statusline-preview
 Use ↑↓ to navigate, ←→ to move, space to select, enter to confirm, esc
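The preview line at the bottom of the picker ("gpt-5-codex · ~/codex-rs · jif/statusline-preview") joins the enabled items with a ` · ` separator and silently drops items that resolve to nothing. A minimal sketch of that joining step, assuming each item renders to an `Option<String>` (`join_segments` is a hypothetical helper; the real rendering lives in `ChatWidget`):

```rust
// Hypothetical sketch: join rendered status line segments with " · ",
// skipping items that resolved to None (e.g. no Git branch available).
fn join_segments(segments: Vec<Option<String>>) -> String {
    segments
        .into_iter()
        .flatten()
        .collect::<Vec<String>>()
        .join(" · ")
}

fn main() {
    let footer = join_segments(vec![
        Some("gpt-5-codex".to_string()),
        None, // e.g. git-branch omitted when unavailable
        Some("~/codex-rs".to_string()),
    ]);
    assert_eq!(footer, "gpt-5-codex · ~/codex-rs");
}
```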

View File

@@ -66,6 +66,10 @@ pub(crate) enum StatusLineItem {
     /// Percentage of context window remaining.
     ContextRemaining,
+    /// Percentage of context window remaining, under the explicit
+    /// `context-remaining-percent` id.
+    #[strum(to_string = "context-remaining-percent")]
+    ContextRemainingPercent,
     /// Percentage of context window used.
     ///
     /// Also accepts the legacy `context-usage` config value.
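The `#[strum(to_string = "context-remaining-percent")]` attribute pins the variant's string id, which the strum-derived `Display` and `FromStr` implementations then share. A self-contained sketch of the pattern with a toy enum; the kebab-case default on the real `StatusLineItem` is an assumption here (requires the `strum` crate with its `derive` feature):

```rust
use strum::{Display, EnumString};

// Toy stand-in for StatusLineItem demonstrating the strum id round trip.
#[derive(Debug, PartialEq, Display, EnumString)]
#[strum(serialize_all = "kebab-case")] // assumed default for unannotated variants
enum Item {
    ContextRemaining, // serializes as "context-remaining"
    #[strum(to_string = "context-remaining-percent")] // explicit id override
    ContextRemainingPercent,
}

fn main() {
    // Display and FromStr agree on the same ids, so config values round-trip.
    assert_eq!(Item::ContextRemaining.to_string(), "context-remaining");
    assert_eq!(Item::ContextRemainingPercent.to_string(), "context-remaining-percent");
    assert_eq!("context-remaining-percent".parse(), Ok(Item::ContextRemainingPercent));
}
```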
@@ -115,6 +119,9 @@ impl StatusLineItem {
             StatusLineItem::ContextRemaining => {
                 "Percentage of context window remaining (omitted when unknown)"
             }
+            StatusLineItem::ContextRemainingPercent => {
+                "Percentage of context window remaining (omitted when unknown)"
+            }
             StatusLineItem::ContextUsed => {
                 "Percentage of context window used (omitted when unknown)"
             }
@@ -325,6 +332,18 @@ mod tests {
         );
     }
 
+    #[test]
+    fn context_remaining_percent_is_separate_selectable_id() {
+        assert_eq!(
+            StatusLineItem::ContextRemainingPercent.to_string(),
+            "context-remaining-percent"
+        );
+        assert_eq!(
+            "context-remaining-percent".parse::<StatusLineItem>(),
+            Ok(StatusLineItem::ContextRemainingPercent)
+        );
+    }
+
     #[test]
     fn preview_uses_runtime_values() {
         let preview_data = StatusLinePreviewData::from_iter([

View File

@@ -0,0 +1,9 @@
+---
+source: tui/src/chatwidget/tests/status_and_layout.rs
+expression: normalized_backend_snapshot(terminal.backend())
+---
+" "
+" "
+" Ask Codex to do anything "
+" "
+" gpt-5.4 xhigh fast · Context 100% left · /tmp/project "

View File

@@ -450,7 +450,7 @@ impl ChatWidget {
                     Some(format!("{} used", format_tokens_compact(total)))
                 }
             }
-            StatusLineItem::ContextRemaining => self
+            StatusLineItem::ContextRemaining | StatusLineItem::ContextRemainingPercent => self
                 .status_line_context_remaining_percent()
                 .map(|remaining| format!("Context {remaining}% left")),
             StatusLineItem::ContextUsed => self
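Routing both ids through a single or-pattern arm keeps `context-remaining-percent` a pure alias of `context-remaining`: the two variants cannot drift apart in rendering. A reduced sketch of that arm with toy types (the real arm maps over `status_line_context_remaining_percent()`):

```rust
// Toy stand-ins demonstrating the or-pattern alias used in the real match.
#[derive(Debug)]
enum Item {
    ContextRemaining,
    ContextRemainingPercent,
}

fn render(item: &Item, remaining: Option<u64>) -> Option<String> {
    match item {
        // One arm serves both variants, so they always render identically.
        Item::ContextRemaining | Item::ContextRemainingPercent => {
            remaining.map(|r| format!("Context {r}% left"))
        }
    }
}

fn main() {
    assert_eq!(
        render(&Item::ContextRemainingPercent, Some(100)),
        Some("Context 100% left".to_string())
    );
    assert_eq!(render(&Item::ContextRemaining, None), None); // omitted when unknown
}
```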

View File

@@ -914,6 +914,24 @@ async fn status_line_legacy_context_usage_renders_context_used_percent() {
     );
 }
 
+#[tokio::test]
+async fn status_line_context_remaining_percent_renders_labeled_percent() {
+    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(/*model_override*/ None).await;
+    chat.thread_id = Some(ThreadId::new());
+    chat.config.tui_status_line = Some(vec!["context-remaining-percent".to_string()]);
+    chat.refresh_status_line();
+
+    assert_eq!(
+        status_line_text(&chat),
+        Some("Context 100% left".to_string())
+    );
+    assert!(
+        drain_insert_history(&mut rx).is_empty(),
+        "context-remaining-percent should be accepted as a valid status line item"
+    );
+}
+
 #[tokio::test]
 async fn status_line_branch_state_resets_when_git_branch_disabled() {
     let (mut chat, _rx, _op_rx) = make_chatwidget_manual(/*model_override*/ None).await;
@@ -1181,6 +1199,40 @@ async fn status_line_model_with_reasoning_fast_footer_snapshot() {
     );
 }
 
+#[tokio::test]
+async fn status_line_model_with_reasoning_context_remaining_percent_footer_snapshot() {
+    use ratatui::Terminal;
+    use ratatui::backend::TestBackend;
+
+    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.4")).await;
+    set_fast_mode_test_catalog(&mut chat);
+    assert!(get_available_model(&chat, "gpt-5.4").supports_fast_mode());
+    chat.show_welcome_banner = false;
+    chat.config.cwd = test_project_path().abs();
+    chat.config.tui_status_line = Some(vec![
+        "model-with-reasoning".to_string(),
+        "context-remaining-percent".to_string(),
+        "current-dir".to_string(),
+    ]);
+    chat.set_reasoning_effort(Some(ReasoningEffortConfig::XHigh));
+    chat.set_service_tier(Some(ServiceTier::Fast));
+    set_chatgpt_auth(&mut chat);
+    // Reapply the fast-mode catalog after auth setup (it may reset the catalog).
+    set_fast_mode_test_catalog(&mut chat);
+    assert!(get_available_model(&chat, "gpt-5.4").supports_fast_mode());
+    chat.refresh_status_line();
+
+    let width = 80;
+    let height = chat.desired_height(width);
+    let mut terminal = Terminal::new(TestBackend::new(width, height)).expect("create terminal");
+    terminal
+        .draw(|f| chat.render(f.area(), f.buffer_mut()))
+        .expect("draw model-with-reasoning footer");
+
+    assert_chatwidget_snapshot!(
+        "status_line_model_with_reasoning_context_remaining_percent_footer",
+        normalized_backend_snapshot(terminal.backend())
+    );
+}
 
 #[tokio::test]
 async fn runtime_metrics_websocket_timing_logs_and_final_separator_sums_totals() {
     let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(/*model_override*/ None).await;
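The snapshot tests above exercise the whole widget, but the underlying ratatui pattern is compact: render into an in-memory `TestBackend`, then read the buffer back as text. A reduced sketch with a plain `Paragraph` standing in for `ChatWidget`; `render_footer_to_text` is a made-up helper, and the real tests additionally normalize the buffer via `normalized_backend_snapshot`:

```rust
use ratatui::Terminal;
use ratatui::backend::TestBackend;
use ratatui::widgets::Paragraph;

// Hypothetical helper: draw a one-line footer into an in-memory terminal
// and return the rendered cells as a plain string.
fn render_footer_to_text(footer: &str, width: u16) -> String {
    let mut terminal = Terminal::new(TestBackend::new(width, 1)).expect("create terminal");
    terminal
        .draw(|f| f.render_widget(Paragraph::new(footer), f.area()))
        .expect("draw footer");
    terminal
        .backend()
        .buffer()
        .content
        .iter()
        .map(|cell| cell.symbol())
        .collect()
}

fn main() {
    let line = render_footer_to_text("gpt-5.4 xhigh fast · Context 100% left", 80);
    assert!(line.starts_with("gpt-5.4 xhigh fast · Context 100% left"));
}
```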