test(core): annotate custom model literals

Friel
2026-03-28 11:48:18 -07:00
parent a68b1bd423
commit 9118d71ec7
5 changed files with 61 additions and 46 deletions
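The change is mechanical throughout: every bare literal argument (None, false, 0, ...) in the touched tests gains an inline /*parameter_name*/ block comment, so call sites read without a trip to the signature. A minimal sketch of the style, using a hypothetical helper rather than anything from this codebase:

// Hypothetical function, named only to illustrate the annotation style.
fn start_proxy(require_auth: bool, max_retries: u32, audit_tag: Option<&str>) {
    let _ = (require_auth, max_retries, audit_tag);
}

fn main() {
    // Before: bare literals force readers to consult the signature.
    start_proxy(false, 3, None);

    // After: each literal names the parameter it binds to.
    start_proxy(/*require_auth*/ false, /*max_retries*/ 3, /*audit_tag*/ None);
}

Unlike named arguments, these comments are not compiler-checked, so they help only as long as they stay in sync with the signature.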

View File

@@ -248,17 +248,17 @@ async fn interrupting_regular_turn_waiting_on_startup_prewarm_emits_turn_aborted
 fn test_model_client_session() -> crate::client::ModelClientSession {
     crate::client::ModelClient::new(
-        None,
+        /*auth_manager*/ None,
         ThreadId::try_from("00000000-0000-4000-8000-000000000001")
            .expect("test thread id should be valid"),
         crate::model_provider_info::ModelProviderInfo::create_openai_provider(
-            /* base_url */ None,
+            /*base_url*/ None,
         ),
         codex_protocol::protocol::SessionSource::Exec,
-        None,
-        false,
-        false,
-        None,
+        /*model_verbosity*/ None,
+        /*enable_request_compression*/ false,
+        /*include_timing_metrics*/ false,
+        /*beta_features_header*/ None,
     )
     .new_session()
 }
@@ -314,7 +314,7 @@ fn make_connector(id: &str, name: &str) -> AppInfo {
 #[test]
 fn assistant_message_stream_parsers_can_be_seeded_from_output_item_added_text() {
-    let mut parsers = AssistantMessageStreamParsers::new(false);
+    let mut parsers = AssistantMessageStreamParsers::new(/*plan_mode*/ false);
     let item_id = "msg-1";
     let seeded = parsers.seed_item_text(item_id, "hello <oai-mem-citation>doc");
@@ -331,7 +331,7 @@ fn assistant_message_stream_parsers_can_be_seeded_from_output_item_added_text()
 #[test]
 fn assistant_message_stream_parsers_seed_buffered_prefix_stays_out_of_finish_tail() {
-    let mut parsers = AssistantMessageStreamParsers::new(false);
+    let mut parsers = AssistantMessageStreamParsers::new(/*plan_mode*/ false);
     let item_id = "msg-1";
     let seeded = parsers.seed_item_text(item_id, "hello <oai-mem-");
@@ -348,7 +348,7 @@ fn assistant_message_stream_parsers_seed_buffered_prefix_stays_out_of_finish_tail() {
 #[test]
 fn assistant_message_stream_parsers_seed_plan_parser_across_added_and_delta_boundaries() {
-    let mut parsers = AssistantMessageStreamParsers::new(true);
+    let mut parsers = AssistantMessageStreamParsers::new(/*plan_mode*/ true);
     let item_id = "msg-1";
     let seeded = parsers.seed_item_text(item_id, "Intro\n<proposed");
@@ -450,7 +450,7 @@ fn validated_network_policy_amendment_host_rejects_mismatch() {
 async fn start_managed_network_proxy_applies_execpolicy_network_rules() -> anyhow::Result<()> {
     let spec = crate::config::NetworkProxySpec::from_config_and_constraints(
         NetworkProxyConfig::default(),
-        None,
+        /*requirements*/ None,
         &SandboxPolicy::new_workspace_write_policy(),
     )?;
     let mut exec_policy = Policy::empty();
@@ -458,16 +458,16 @@ async fn start_managed_network_proxy_applies_execpolicy_network_rules() -> anyhow::Result<()> {
         "example.com",
         NetworkRuleProtocol::Https,
         Decision::Allow,
-        None,
+        /*justification*/ None,
     )?;
     let (started_proxy, _) = Session::start_managed_network_proxy(
         &spec,
         &exec_policy,
         &SandboxPolicy::new_workspace_write_policy(),
-        None,
-        None,
-        false,
+        /*network_policy_decider*/ None,
+        /*blocked_request_observer*/ None,
+        /*managed_network_requirements_enabled*/ false,
         crate::config::NetworkProxyAuditMetadata::default(),
     )
     .await?;
@@ -509,9 +509,9 @@ async fn start_managed_network_proxy_ignores_invalid_execpolicy_network_rules()
         &spec,
         &exec_policy,
         &SandboxPolicy::new_workspace_write_policy(),
-        None,
-        None,
-        false,
+        /*network_policy_decider*/ None,
+        /*blocked_request_observer*/ None,
+        /*managed_network_requirements_enabled*/ false,
         crate::config::NetworkProxyAuditMetadata::default(),
     )
     .await?;

View File

@@ -425,12 +425,15 @@ async fn guardian_subagent_does_not_inherit_parent_exec_policy_rules() {
     let models_manager = Arc::new(ModelsManager::new(
         config.codex_home.clone(),
         auth_manager.clone(),
-        None,
+        /*model_catalog*/ None,
         HashMap::new(),
         CollaborationModesConfig::default(),
     ));
     let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.clone()));
-    let skills_manager = Arc::new(SkillsManager::new(config.codex_home.clone(), true));
+    let skills_manager = Arc::new(SkillsManager::new(
+        config.codex_home.clone(),
+        /*bundled_skills_enabled*/ true,
+    ));
     let mcp_manager = Arc::new(McpManager::new(Arc::clone(&plugins_manager)));
     let skills_watcher = Arc::new(SkillsWatcher::noop());

View File

@@ -176,7 +176,7 @@ async fn get_model_info_uses_custom_catalog() {
         .build()
         .await
         .expect("load default test config");
-    let mut overlay = remote_model("gpt-overlay", "Overlay", 0);
+    let mut overlay = remote_model("gpt-overlay", "Overlay", /*priority*/ 0);
     overlay.supports_image_detail_original = true;
     let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
@@ -210,7 +210,7 @@ async fn get_model_info_matches_namespaced_suffix() {
         .build()
         .await
         .expect("load default test config");
-    let mut remote = remote_model("gpt-image", "Image", 0);
+    let mut remote = remote_model("gpt-image", "Image", /*priority*/ 0);
     remote.supports_image_detail_original = true;
     let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
     let manager = ModelsManager::new(
@@ -266,8 +266,8 @@ async fn get_model_info_rejects_multi_segment_namespace_suffix_matching() {
 async fn refresh_available_models_sorts_by_priority() {
     let server = MockServer::start().await;
     let remote_models = vec![
-        remote_model("priority-low", "Low", 1),
-        remote_model("priority-high", "High", 0),
+        remote_model("priority-low", "Low", /*priority*/ 1),
+        remote_model("priority-high", "High", /*priority*/ 0),
     ];
     let models_mock = mount_models_once(
         &server,
@@ -317,7 +317,7 @@ async fn refresh_available_models_sorts_by_priority() {
 #[tokio::test]
 async fn refresh_available_models_uses_cache_when_fresh() {
     let server = MockServer::start().await;
-    let remote_models = vec![remote_model("cached", "Cached", 5)];
+    let remote_models = vec![remote_model("cached", "Cached", /*priority*/ 5)];
     let models_mock = mount_models_once(
         &server,
         ModelsResponse {
@@ -358,7 +358,7 @@ async fn refresh_available_models_uses_cache_when_fresh() {
 #[tokio::test]
 async fn refresh_available_models_refetches_when_cache_stale() {
     let server = MockServer::start().await;
-    let initial_models = vec![remote_model("stale", "Stale", 1)];
+    let initial_models = vec![remote_model("stale", "Stale", /*priority*/ 1)];
     let initial_mock = mount_models_once(
         &server,
         ModelsResponse {
@@ -391,7 +391,7 @@ async fn refresh_available_models_refetches_when_cache_stale() {
         .await
         .expect("cache manipulation succeeds");
-    let updated_models = vec![remote_model("fresh", "Fresh", 9)];
+    let updated_models = vec![remote_model("fresh", "Fresh", /*priority*/ 9)];
     server.reset().await;
     let refreshed_mock = mount_models_once(
         &server,
@@ -421,7 +421,7 @@ async fn refresh_available_models_refetches_when_cache_stale() {
 #[tokio::test]
 async fn refresh_available_models_refetches_when_version_mismatch() {
     let server = MockServer::start().await;
-    let initial_models = vec![remote_model("old", "Old", 1)];
+    let initial_models = vec![remote_model("old", "Old", /*priority*/ 1)];
     let initial_mock = mount_models_once(
         &server,
         ModelsResponse {
@@ -454,7 +454,7 @@ async fn refresh_available_models_refetches_when_version_mismatch() {
         .await
         .expect("cache mutation succeeds");
-    let updated_models = vec![remote_model("new", "New", 2)];
+    let updated_models = vec![remote_model("new", "New", /*priority*/ 2)];
     server.reset().await;
     let refreshed_mock = mount_models_once(
         &server,
@@ -484,7 +484,11 @@ async fn refresh_available_models_refetches_when_version_mismatch() {
 #[tokio::test]
 async fn refresh_available_models_drops_removed_remote_models() {
     let server = MockServer::start().await;
-    let initial_models = vec![remote_model("remote-old", "Remote Old", 1)];
+    let initial_models = vec![remote_model(
+        "remote-old",
+        "Remote Old",
+        /*priority*/ 1,
+    )];
     let initial_mock = mount_models_once(
         &server,
         ModelsResponse {
@@ -510,7 +514,11 @@ async fn refresh_available_models_drops_removed_remote_models() {
         .expect("initial refresh succeeds");
     server.reset().await;
-    let refreshed_models = vec![remote_model("remote-new", "Remote New", 1)];
+    let refreshed_models = vec![remote_model(
+        "remote-new",
+        "Remote New",
+        /*priority*/ 1,
+    )];
     let refreshed_mock = mount_models_once(
         &server,
         ModelsResponse {
@@ -554,7 +562,7 @@ async fn refresh_available_models_skips_network_without_chatgpt_auth() {
     let models_mock = mount_models_once(
         &server,
         ModelsResponse {
-            models: vec![remote_model(dynamic_slug, "No Auth", 1)],
+            models: vec![remote_model(dynamic_slug, "No Auth", /*priority*/ 1)],
         },
     )
     .await;
@@ -699,8 +707,10 @@ fn build_available_models_picks_default_after_hiding_hidden_models() {
         provider,
     );
-    let hidden_model = remote_model_with_visibility("hidden", "Hidden", 0, "hide");
-    let visible_model = remote_model_with_visibility("visible", "Visible", 1, "list");
+    let hidden_model =
+        remote_model_with_visibility("hidden", "Hidden", /*priority*/ 0, "hide");
+    let visible_model =
+        remote_model_with_visibility("visible", "Visible", /*priority*/ 1, "list");
     let expected_hidden = ModelPreset::from(hidden_model.clone());
     let mut expected_visible = ModelPreset::from(visible_model.clone());
@@ -735,7 +745,7 @@ async fn get_model_info_uses_custom_alias_metadata_and_request_model() {
         codex_home.path().to_path_buf(),
         auth_manager,
         Some(ModelsResponse {
-            models: vec![remote_model("gpt-5.4", "GPT 5.4", 0)],
+            models: vec![remote_model("gpt-5.4", "GPT 5.4", /*priority*/ 0)],
         }),
         HashMap::from([(alias.clone(), custom_model)]),
         CollaborationModesConfig::default(),
@@ -776,7 +786,7 @@ async fn get_model_info_prefers_custom_alias_context_over_global_config() {
         codex_home.path().to_path_buf(),
         auth_manager,
         Some(ModelsResponse {
-            models: vec![remote_model("gpt-5.4", "GPT 5.4", 0)],
+            models: vec![remote_model("gpt-5.4", "GPT 5.4", /*priority*/ 0)],
         }),
         HashMap::from([(alias.clone(), custom_model)]),
         CollaborationModesConfig::default(),
@@ -813,8 +823,8 @@ async fn get_model_info_prefers_active_config_alias_over_startup_snapshot() {
         auth_manager,
         Some(ModelsResponse {
             models: vec![
-                remote_model("gpt-5.4", "GPT 5.4", 0),
-                remote_model("gpt-5.4-updated", "GPT 5.4 Updated", 1),
+                remote_model("gpt-5.4", "GPT 5.4", /*priority*/ 0),
+                remote_model("gpt-5.4-updated", "GPT 5.4 Updated", /*priority*/ 1),
             ],
         }),
         HashMap::from([(
@@ -855,7 +865,9 @@ fn build_available_models_includes_custom_aliases() {
         },
     )]);
-    let available = manager.build_available_models(vec![remote_model("gpt-5.4", "GPT 5.4", 0)]);
+    let available = manager.build_available_models(vec![remote_model(
+        "gpt-5.4", "GPT 5.4", /*priority*/ 0,
+    )]);
     let alias = available
         .iter()
         .find(|preset| preset.model == "gpt-5.4 1m")
@@ -885,8 +897,8 @@ fn build_available_models_lists_custom_aliases_before_remote_models() {
     )]);
     let available = manager.build_available_models(vec![
-        remote_model("gpt-5.4", "GPT 5.4", 0),
-        remote_model("gpt-5.3", "GPT 5.3", 1),
+        remote_model("gpt-5.4", "GPT 5.4", /*priority*/ 0),
+        remote_model("gpt-5.3", "GPT 5.3", /*priority*/ 1),
     ]);
     assert_eq!(

View File

@@ -295,7 +295,7 @@ async fn new_uses_configured_openai_provider_for_model_refresh() {
         &config,
         auth_manager,
         SessionSource::Exec,
-        None,
+        /*model_catalog*/ None,
         HashMap::new(),
         CollaborationModesConfig::default(),
         Arc::new(codex_exec_server::EnvironmentManager::new(

View File

@@ -19,7 +19,7 @@ async fn offline_model_info_without_tool_output_override() {
     let manager = ModelsManager::new(
         config.codex_home.clone(),
         auth_manager,
-        None,
+        /*model_catalog*/ None,
         HashMap::new(),
         CollaborationModesConfig::default(),
     );
@@ -28,7 +28,7 @@ async fn offline_model_info_without_tool_output_override() {
     assert_eq!(
         model_info.truncation_policy,
-        TruncationPolicyConfig::bytes(10_000)
+        TruncationPolicyConfig::bytes(/*limit*/ 10_000)
     );
 }
@@ -43,7 +43,7 @@ async fn offline_model_info_with_tool_output_override() {
     let manager = ModelsManager::new(
         config.codex_home.clone(),
         auth_manager,
-        None,
+        /*model_catalog*/ None,
         HashMap::new(),
         CollaborationModesConfig::default(),
     );
@@ -52,7 +52,7 @@ async fn offline_model_info_with_tool_output_override() {
     assert_eq!(
         model_info.truncation_policy,
-        TruncationPolicyConfig::tokens(123)
+        TruncationPolicyConfig::tokens(/*limit*/ 123)
     );
 }
@@ -97,7 +97,7 @@ async fn custom_model_alias_applies_request_model_and_context_overrides() {
         default_verbosity: None,
         supports_search_tool: false,
         apply_patch_tool_type: None,
-        truncation_policy: TruncationPolicyConfig::bytes(10_000),
+        truncation_policy: TruncationPolicyConfig::bytes(/*limit*/ 10_000),
         supports_parallel_tool_calls: false,
         supports_image_detail_original: false,
         context_window: Some(272_000),