change model cap to server overload (#11388)

# External (non-OpenAI) Pull Request Requirements

Before opening this Pull Request, please read the dedicated
"Contributing" markdown file or your PR may be closed:
https://github.com/openai/codex/blob/main/docs/contributing.md

If your PR conforms to our contribution guidelines, replace this text
with a detailed, high-quality description of your changes.

Include a link to a bug report or enhancement request.
This commit is contained in:
willwang-openai
2026-02-11 17:16:27 -08:00
committed by GitHub
parent d3b078c282
commit 3f1b41689a
30 changed files with 190 additions and 1048 deletions

View File

@@ -4096,7 +4096,7 @@ async fn model_picker_hides_show_in_picker_false_models_from_cache() {
}
#[tokio::test]
async fn model_cap_error_does_not_switch_models() {
async fn server_overloaded_error_does_not_switch_models() {
let (mut chat, mut rx, mut op_rx) = make_chatwidget_manual(Some("boomslang")).await;
chat.set_model("boomslang");
while rx.try_recv().is_ok() {}
@@ -4105,11 +4105,8 @@ async fn model_cap_error_does_not_switch_models() {
chat.handle_codex_event(Event {
id: "err-1".to_string(),
msg: EventMsg::Error(ErrorEvent {
message: "model cap".to_string(),
codex_error_info: Some(CodexErrorInfo::ModelCap {
model: "boomslang".to_string(),
reset_after_seconds: Some(120),
}),
message: "server overloaded".to_string(),
codex_error_info: Some(CodexErrorInfo::ServerOverloaded),
}),
});
@@ -4117,7 +4114,7 @@ async fn model_cap_error_does_not_switch_models() {
if let AppEvent::UpdateModel(model) = event {
assert_eq!(
model, "boomslang",
"did not expect model switch on model-cap error"
"did not expect model switch on server-overloaded error"
);
}
}
@@ -4126,7 +4123,7 @@ async fn model_cap_error_does_not_switch_models() {
if let Op::OverrideTurnContext { model, .. } = event {
assert!(
model.is_none(),
"did not expect OverrideTurnContext model update on model-cap error"
"did not expect OverrideTurnContext model update on server-overloaded error"
);
}
}