mirror of
https://github.com/openai/codex.git
synced 2026-04-26 15:45:02 +00:00
feat: support multiple rate limits (#11260)
Added multi-limit support end-to-end by carrying limit_name in rate-limit snapshots and handling multiple buckets instead of only codex. Extended /usage client parsing to consume additional_rate_limits. Updated TUI /status and in-memory state to store/render per-limit snapshots. Extended the app-server rate-limit read response: kept rate_limits and added rate_limits_by_name. Adjusted usage-limit error messaging for non-default codex limit buckets.
This commit is contained in:
@@ -1509,6 +1509,8 @@ async fn token_count_includes_rate_limits_snapshot() {
|
||||
json!({
|
||||
"info": null,
|
||||
"rate_limits": {
|
||||
"limit_id": "codex",
|
||||
"limit_name": null,
|
||||
"primary": {
|
||||
"used_percent": 12.5,
|
||||
"window_minutes": 10,
|
||||
@@ -1558,6 +1560,8 @@ async fn token_count_includes_rate_limits_snapshot() {
|
||||
"model_context_window": 258400
|
||||
},
|
||||
"rate_limits": {
|
||||
"limit_id": "codex",
|
||||
"limit_name": null,
|
||||
"primary": {
|
||||
"used_percent": 12.5,
|
||||
"window_minutes": 10,
|
||||
@@ -1630,6 +1634,8 @@ async fn usage_limit_error_emits_rate_limit_event() -> anyhow::Result<()> {
|
||||
let codex = codex_fixture.codex.clone();
|
||||
|
||||
let expected_limits = json!({
|
||||
"limit_id": "codex",
|
||||
"limit_name": null,
|
||||
"primary": {
|
||||
"used_percent": 100.0,
|
||||
"window_minutes": 15,
|
||||
|
||||
@@ -486,6 +486,8 @@ async fn responses_websocket_usage_limit_error_emits_rate_limit_event() {
|
||||
json!({
|
||||
"info": null,
|
||||
"rate_limits": {
|
||||
"limit_id": "codex",
|
||||
"limit_name": null,
|
||||
"primary": {
|
||||
"used_percent": 100.0,
|
||||
"window_minutes": 15,
|
||||
|
||||
Reference in New Issue
Block a user