Get model on session configured (#9191)

- Don't try to precompute model unless you know it from `config`
- Block `/model` on session configured
- Queue messages until session configured
- Show "loading" in status until session configured
This commit is contained in:
Ahmed Ibrahim
2026-01-14 10:20:41 -08:00
committed by GitHub
parent 02f67bace8
commit 8e937fbba9
8 changed files with 306 additions and 67 deletions

View File

@@ -355,7 +355,7 @@ async fn helpers_are_available_and_do_not_panic() {
models_manager: thread_manager.get_models_manager(),
feedback: codex_feedback::CodexFeedback::new(),
is_first_run: true,
model: resolved_model,
model: Some(resolved_model),
};
let mut w = ChatWidget::new(init, thread_manager);
// Basic construction sanity.
@@ -400,7 +400,7 @@ async fn make_chatwidget_manual(
active_cell: None,
active_cell_revision: 0,
config: cfg,
model: resolved_model.clone(),
model: Some(resolved_model.clone()),
auth_manager: auth_manager.clone(),
models_manager: Arc::new(ModelsManager::new(codex_home, auth_manager)),
session_header: SessionHeader::new(resolved_model),
@@ -1057,6 +1057,7 @@ async fn alt_up_edits_most_recent_queued_message() {
#[tokio::test]
async fn enqueueing_history_prompt_multiple_times_is_stable() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;
chat.thread_id = Some(ThreadId::new());
// Submit an initial prompt to seed history.
chat.bottom_pane.set_composer_text("repeat me".to_string());
@@ -1083,6 +1084,7 @@ async fn enqueueing_history_prompt_multiple_times_is_stable() {
#[tokio::test]
async fn streaming_final_answer_keeps_task_running_state() {
let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(None).await;
chat.thread_id = Some(ThreadId::new());
chat.on_task_started();
chat.on_agent_message_delta("Final answer line\n".to_string());
@@ -2060,6 +2062,7 @@ async fn experimental_features_toggle_saves_on_exit() {
#[tokio::test]
async fn model_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5-codex")).await;
chat.thread_id = Some(ThreadId::new());
chat.open_model_popup();
let popup = render_bottom_popup(&chat, 80);
@@ -2069,6 +2072,7 @@ async fn model_selection_popup_snapshot() {
#[tokio::test]
async fn model_picker_hides_show_in_picker_false_models_from_cache() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("test-visible-model")).await;
chat.thread_id = Some(ThreadId::new());
let preset = |slug: &str, show_in_picker: bool| ModelPreset {
id: slug.to_string(),
model: slug.to_string(),
@@ -2342,6 +2346,7 @@ async fn feedback_upload_consent_popup_snapshot() {
#[tokio::test]
async fn reasoning_popup_escape_returns_to_model_popup() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.thread_id = Some(ThreadId::new());
chat.open_model_popup();
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
@@ -3888,6 +3893,7 @@ printf 'fenced within fenced\n'
#[tokio::test]
async fn chatwidget_tall() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;
chat.thread_id = Some(ThreadId::new());
chat.handle_codex_event(Event {
id: "t1".into(),
msg: EventMsg::TurnStarted(TurnStartedEvent {
@@ -3913,6 +3919,7 @@ async fn chatwidget_tall() {
#[tokio::test]
async fn review_queues_user_messages_snapshot() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await;
chat.thread_id = Some(ThreadId::new());
chat.handle_codex_event(Event {
id: "review-1".into(),