Ahmed Ibrahim
2025-12-15 20:38:19 -08:00
parent 2b9b689f9e
commit ce023c0341

@@ -16,7 +16,6 @@ use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecCommandSource;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_core::version::VERSION_FILENAME;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
@@ -42,17 +41,14 @@ use core_test_support::skip_if_no_network;
use core_test_support::skip_if_sandbox;
use core_test_support::wait_for_event;
use core_test_support::wait_for_event_match;
use pretty_assertions::assert_eq;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::Duration;
use tokio::time::Instant;
use tokio::time::sleep;
use wiremock::BodyPrintLimit;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path_regex;
const REMOTE_MODEL_SLUG: &str = "codex-test";
@@ -304,52 +300,102 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
}
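// Descriptive note (added): refreshing from the /models endpoint should keep the
// built-in presets (e.g. gpt-5.1-codex-max) available alongside the fetched remote models.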
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn remote_models_invalid_payload_emits_error() -> Result<()> {
async fn remote_models_preserve_builtin_presets() -> Result<()> {
skip_if_no_network!(Ok(()));
skip_if_sandbox!(Ok(()));
let server = MockServer::builder()
.body_print_limit(BodyPrintLimit::Limited(80_000))
.start()
.await;
let server = MockServer::start().await;
let remote_model = test_remote_model("remote-alpha", ModelVisibility::List, 0);
let models_mock = mount_models_once(
&server,
ModelsResponse {
models: vec![remote_model.clone()],
etag: String::new(),
},
)
.await;
Mock::given(method("GET"))
.and(path_regex(".*/models$"))
.respond_with(
ResponseTemplate::new(200)
.insert_header("content-type", "application/json")
.set_body_json(json!({
"models": "invalid",
"etag": "etag",
})),
)
.up_to_n_times(1)
.mount(&server)
.await;
let codex_home = TempDir::new()?;
let mut config = load_default_config_for_test(&codex_home);
config.features.enable(Feature::RemoteModels);
let RemoteModelsHarness { codex, .. } = build_remote_models_harness(&server, |config| {
let version_file = config.codex_home.join(VERSION_FILENAME);
std::fs::write(
&version_file,
r#"{"latest_version":"0.0.1","last_checked_at":"2025-01-01T00:00:00Z","dismissed_version":null}"#,
)
.expect("write version.json");
config.features.enable(Feature::RemoteModels);
config.model = Some("gpt-5.1".to_string());
})
.await?;
let error_event = wait_for_event(&codex, |msg| matches!(msg, EventMsg::Error(_))).await;
let EventMsg::Error(error_event) = error_event else {
unreachable!();
let auth = CodexAuth::from_api_key("dummy");
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let manager = ModelsManager::with_provider(
codex_core::auth::AuthManager::from_auth_for_testing(auth),
provider,
);
manager
.refresh_available_models(&config)
.await
.expect("refresh succeeds");
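// After a successful refresh, the listing should contain the fetched remote model
// alongside the built-in presets.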
let available = manager.list_models(&config).await;
let remote = available
.iter()
.find(|model| model.model == "remote-alpha")
.expect("remote model should be listed");
let mut expected_remote: ModelPreset = remote_model.into();
expected_remote.is_default = true;
assert_eq!(*remote, expected_remote);
assert!(
error_event
.message
.contains("failed to refresh available models"),
"unexpected error message: {}",
error_event.message
available
.iter()
.any(|model| model.model == "gpt-5.1-codex-max"),
"builtin presets should remain available after refresh"
);
assert_eq!(
models_mock.requests().len(),
1,
"expected a single /models request"
);
Ok(())
}
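// Descriptive note (added): a remote model marked `ModelVisibility::Hide` must not appear
// in the picker list, and the default selection should stay on the built-in model.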
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn remote_models_hide_picker_only_models() -> Result<()> {
skip_if_no_network!(Ok(()));
skip_if_sandbox!(Ok(()));
let server = MockServer::start().await;
let remote_model = test_remote_model("codex-auto-balanced", ModelVisibility::Hide, 0);
mount_models_once(
&server,
ModelsResponse {
models: vec![remote_model],
etag: String::new(),
},
)
.await;
let codex_home = TempDir::new()?;
let mut config = load_default_config_for_test(&codex_home);
config.features.enable(Feature::RemoteModels);
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let manager = ModelsManager::with_provider(
codex_core::auth::AuthManager::from_auth_for_testing(auth),
provider,
);
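// With no explicit model requested, the manager should resolve to the built-in default
// rather than the hidden remote model.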
let selected = manager.get_model(&None, &config).await;
assert_eq!(selected, "gpt-5.1-codex-max");
let available = manager.list_models(&config).await;
assert!(
available
.iter()
.all(|model| model.model != "codex-auto-balanced"),
"hidden models should not appear in the picker list"
);
Ok(())
@@ -419,3 +465,32 @@ where
conversation_manager,
})
}
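// Descriptive note (added): minimal ModelInfo fixture for these tests; only the slug,
// visibility, and priority vary between calls.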
fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) -> ModelInfo {
ModelInfo {
slug: slug.to_string(),
display_name: format!("{slug} display"),
description: Some(format!("{slug} description")),
default_reasoning_level: ReasoningEffort::Medium,
supported_reasoning_levels: vec![ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: ReasoningEffort::Medium.to_string(),
}],
shell_type: ConfigShellToolType::ShellCommand,
visibility,
minimal_client_version: ClientVersion(0, 1, 0),
supported_in_api: true,
priority,
upgrade: None,
base_instructions: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
}
}