chore: migrate from Config::load_from_base_config_with_overrides to ConfigBuilder (#8276)

https://github.com/openai/codex/pull/8235 introduced `ConfigBuilder` and
this PR updates all non-test call sites to use it instead of
`Config::load_from_base_config_with_overrides()`.

This is important because `load_from_base_config_with_overrides()` uses
an empty `ConfigRequirements`, which is a reasonable default for testing
because it keeps tests from being influenced by the host's settings.
The method is now guarded by `#[cfg(test)]` so it cannot be used by
business logic.

Because `ConfigBuilder::build()` is `async`, many of the test methods
had to be migrated to be `async`, as well. On the bright side, this made
it possible to eliminate a bunch of `block_on_future()` stuff.
This commit is contained in:
Michael Bolin
2025-12-18 16:12:52 -08:00
committed by GitHub
parent 2d9826098e
commit 3d4ced3ff5
42 changed files with 1081 additions and 1176 deletions

View File

@@ -2134,8 +2134,8 @@ mod tests {
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
fn make_test_app() -> App {
let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender();
async fn make_test_app() -> App {
let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender().await;
let config = chat_widget.config_ref().clone();
let current_model = chat_widget.get_model_family().get_model_slug().to_string();
let server = Arc::new(ConversationManager::with_models_provider(
@@ -2173,12 +2173,12 @@ mod tests {
}
}
fn make_test_app_with_channels() -> (
async fn make_test_app_with_channels() -> (
App,
tokio::sync::mpsc::UnboundedReceiver<AppEvent>,
tokio::sync::mpsc::UnboundedReceiver<Op>,
) {
let (chat_widget, app_event_tx, rx, op_rx) = make_chatwidget_manual_with_sender();
let (chat_widget, app_event_tx, rx, op_rx) = make_chatwidget_manual_with_sender().await;
let config = chat_widget.config_ref().clone();
let current_model = chat_widget.get_model_family().get_model_slug().to_string();
let server = Arc::new(ConversationManager::with_models_provider(
@@ -2224,8 +2224,8 @@ mod tests {
codex_core::openai_models::model_presets::all_model_presets().clone()
}
#[test]
fn model_migration_prompt_only_shows_for_deprecated_models() {
#[tokio::test]
async fn model_migration_prompt_only_shows_for_deprecated_models() {
let seen = BTreeMap::new();
assert!(should_show_model_migration_prompt(
"gpt-5",
@@ -2259,8 +2259,8 @@ mod tests {
));
}
#[test]
fn model_migration_prompt_respects_hide_flag_and_self_target() {
#[tokio::test]
async fn model_migration_prompt_respects_hide_flag_and_self_target() {
let mut seen = BTreeMap::new();
seen.insert("gpt-5".to_string(), "gpt-5.1".to_string());
assert!(!should_show_model_migration_prompt(
@@ -2277,9 +2277,9 @@ mod tests {
));
}
#[test]
fn update_reasoning_effort_updates_config() {
let mut app = make_test_app();
#[tokio::test]
async fn update_reasoning_effort_updates_config() {
let mut app = make_test_app().await;
app.config.model_reasoning_effort = Some(ReasoningEffortConfig::Medium);
app.chat_widget
.set_reasoning_effort(Some(ReasoningEffortConfig::Medium));
@@ -2296,9 +2296,9 @@ mod tests {
);
}
#[test]
fn backtrack_selection_with_duplicate_history_targets_unique_turn() {
let mut app = make_test_app();
#[tokio::test]
async fn backtrack_selection_with_duplicate_history_targets_unique_turn() {
let mut app = make_test_app().await;
let user_cell = |text: &str| -> Arc<dyn HistoryCell> {
Arc::new(UserHistoryCell {
@@ -2363,12 +2363,12 @@ mod tests {
assert_eq!(prefill, "follow-up (edited)");
}
#[test]
fn transcript_selection_moves_with_scroll() {
#[tokio::test]
async fn transcript_selection_moves_with_scroll() {
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
let mut app = make_test_app();
let mut app = make_test_app().await;
app.transcript_total_lines = 3;
let area = Rect {
@@ -2427,7 +2427,7 @@ mod tests {
#[tokio::test]
async fn new_session_requests_shutdown_for_previous_conversation() {
let (mut app, mut app_event_rx, mut op_rx) = make_test_app_with_channels();
let (mut app, mut app_event_rx, mut op_rx) = make_test_app_with_channels().await;
let conversation_id = ConversationId::new();
let event = SessionConfiguredEvent {
@@ -2461,13 +2461,13 @@ mod tests {
}
}
#[test]
fn session_summary_skip_zero_usage() {
#[tokio::test]
async fn session_summary_skip_zero_usage() {
assert!(session_summary(TokenUsage::default(), None).is_none());
}
#[test]
fn render_lines_to_ansi_pads_user_rows_to_full_width() {
#[tokio::test]
async fn render_lines_to_ansi_pads_user_rows_to_full_width() {
let line: Line<'static> = Line::from("hi");
let lines = vec![line];
let line_meta = vec![TranscriptLineMeta::CellLine {
@@ -2482,8 +2482,8 @@ mod tests {
assert!(rendered[0].contains("hi"));
}
#[test]
fn session_summary_includes_resume_hint() {
#[tokio::test]
async fn session_summary_includes_resume_hint() {
let usage = TokenUsage {
input_tokens: 10,
output_tokens: 2,