feat: support multiple rate limits (#11260)

Added multi-limit support end-to-end by carrying limit_name in
rate-limit snapshots and handling multiple buckets instead of only
codex.
Extended /usage client parsing to consume additional_rate_limits.
Updated TUI /status and in-memory state to store and render per-limit
snapshots.
Extended app-server rate-limit read response: kept rate_limits and added
rate_limits_by_name.
Adjusted usage-limit error messaging for non-default codex limit buckets.
This commit is contained in:
xl-openai
2026-02-10 20:09:31 -08:00
committed by GitHub
parent 641d5268fa
commit fdd0cd1de9
36 changed files with 1435 additions and 169 deletions

View File

@@ -1509,6 +1509,8 @@ async fn token_count_includes_rate_limits_snapshot() {
json!({
"info": null,
"rate_limits": {
"limit_id": "codex",
"limit_name": null,
"primary": {
"used_percent": 12.5,
"window_minutes": 10,
@@ -1558,6 +1560,8 @@ async fn token_count_includes_rate_limits_snapshot() {
"model_context_window": 258400
},
"rate_limits": {
"limit_id": "codex",
"limit_name": null,
"primary": {
"used_percent": 12.5,
"window_minutes": 10,
@@ -1630,6 +1634,8 @@ async fn usage_limit_error_emits_rate_limit_event() -> anyhow::Result<()> {
let codex = codex_fixture.codex.clone();
let expected_limits = json!({
"limit_id": "codex",
"limit_name": null,
"primary": {
"used_percent": 100.0,
"window_minutes": 15,

View File

@@ -486,6 +486,8 @@ async fn responses_websocket_usage_limit_error_emits_rate_limit_event() {
json!({
"info": null,
"rate_limits": {
"limit_id": "codex",
"limit_name": null,
"primary": {
"used_percent": 100.0,
"window_minutes": 15,