feat(core): support custom model aliases in config.toml

Preserve custom model alias support on the current origin/main base and collapse the branch back to a single commit for easier future restacks.
This commit is contained in:
Friel
2026-03-14 13:31:40 -07:00
parent 65f631c3d6
commit d6f8e3aeeb
28 changed files with 732 additions and 139 deletions

View File

@@ -486,6 +486,8 @@ impl TestCodexBuilder {
&config,
codex_core::test_support::auth_manager_from_auth(auth.clone()),
SessionSource::Exec,
config.model_catalog.clone(),
config.custom_models.clone(),
CollaborationModesConfig::default(),
Arc::clone(&environment_manager),
)

View File

@@ -825,6 +825,8 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
&config,
auth_manager,
SessionSource::Exec,
config.model_catalog.clone(),
config.custom_models.clone(),
CollaborationModesConfig {
default_mode_request_user_input: config
.features

View File

@@ -2,8 +2,11 @@ use codex_core::CodexAuth;
use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
use codex_core::models_manager::manager::ModelsManager;
use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::openai_models::WebSearchToolType;
use codex_protocol::openai_models::default_input_modalities;
use core_test_support::load_default_config_for_test;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use tempfile::TempDir;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -16,7 +19,8 @@ async fn offline_model_info_without_tool_output_override() {
let manager = ModelsManager::new(
config.codex_home.clone(),
auth_manager,
/*model_catalog*/ None,
None,
HashMap::new(),
CollaborationModesConfig::default(),
);
@@ -24,7 +28,7 @@ async fn offline_model_info_without_tool_output_override() {
assert_eq!(
model_info.truncation_policy,
TruncationPolicyConfig::bytes(/*limit*/ 10_000)
TruncationPolicyConfig::bytes(10_000)
);
}
@@ -39,7 +43,8 @@ async fn offline_model_info_with_tool_output_override() {
let manager = ModelsManager::new(
config.codex_home.clone(),
auth_manager,
/*model_catalog*/ None,
None,
HashMap::new(),
CollaborationModesConfig::default(),
);
@@ -47,6 +52,72 @@ async fn offline_model_info_with_tool_output_override() {
assert_eq!(
model_info.truncation_policy,
TruncationPolicyConfig::tokens(/*limit*/ 123)
TruncationPolicyConfig::tokens(123)
);
}
// Verifies that a user-defined alias in `config.custom_models` resolves against the
// catalog entry for its base model and applies the alias's overrides: the returned
// `ModelInfo` keeps the alias as its slug, routes requests to the base model via
// `request_model`, and takes the context-window / auto-compact limits from the alias.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn custom_model_alias_applies_request_model_and_context_overrides() {
    let codex_home = TempDir::new().expect("create temp dir");
    let mut config = load_default_config_for_test(&codex_home).await;
    // Register the alias "gpt-5.4 1m" pointing at base model "gpt-5.4" with
    // enlarged context-window and auto-compact overrides.
    config.custom_models.insert(
        "gpt-5.4 1m".to_string(),
        codex_core::config::CustomModelConfig {
            model: "gpt-5.4".to_string(),
            model_context_window: Some(1_000_000),
            model_auto_compact_token_limit: Some(900_000),
        },
    );
    let auth_manager = codex_core::test_support::auth_manager_from_auth(
        CodexAuth::create_dummy_chatgpt_auth_for_testing(),
    );
    // Build the manager with an inline one-entry catalog containing only the base
    // model; the alias itself is supplied separately via `config.custom_models`.
    let manager = ModelsManager::new(
        config.codex_home.clone(),
        auth_manager,
        Some(codex_protocol::openai_models::ModelsResponse {
            models: vec![codex_protocol::openai_models::ModelInfo {
                slug: "gpt-5.4".to_string(),
                request_model: None,
                display_name: "GPT-5.4".to_string(),
                description: Some("desc".to_string()),
                default_reasoning_level: None,
                supported_reasoning_levels: Vec::new(),
                shell_type: codex_protocol::openai_models::ConfigShellToolType::ShellCommand,
                visibility: codex_protocol::openai_models::ModelVisibility::List,
                supported_in_api: true,
                priority: 1,
                availability_nux: None,
                upgrade: None,
                base_instructions: "base".to_string(),
                model_messages: None,
                supports_reasoning_summaries: false,
                default_reasoning_summary: codex_protocol::config_types::ReasoningSummary::Auto,
                support_verbosity: false,
                default_verbosity: None,
                supports_search_tool: false,
                apply_patch_tool_type: None,
                truncation_policy: TruncationPolicyConfig::bytes(10_000),
                supports_parallel_tool_calls: false,
                supports_image_detail_original: false,
                // Base catalog values that the alias is expected to override below.
                context_window: Some(272_000),
                auto_compact_token_limit: None,
                effective_context_window_percent: 95,
                experimental_supported_tools: Vec::new(),
                input_modalities: default_input_modalities(),
                web_search_tool_type: WebSearchToolType::Text,
                prefer_websockets: false,
                used_fallback_model_metadata: false,
            }],
        }),
        config.custom_models.clone(),
        CollaborationModesConfig::default(),
    );
    // Look up the alias slug (not the base slug) and check the merged result.
    let model_info = manager.get_model_info("gpt-5.4 1m", &config).await;
    // The alias remains the public-facing slug…
    assert_eq!(model_info.slug, "gpt-5.4 1m");
    // …while the wire-level request model is redirected to the base slug,
    assert_eq!(model_info.request_model.as_deref(), Some("gpt-5.4"));
    // and both limits come from the alias overrides, not the catalog entry.
    assert_eq!(model_info.context_window, Some(1_000_000));
    assert_eq!(model_info.auto_compact_token_limit, Some(900_000));
}

View File

@@ -68,6 +68,7 @@ fn test_model_info(
) -> ModelInfo {
ModelInfo {
slug: slug.to_string(),
request_model: None,
display_name: display_name.to_string(),
description: Some(description.to_string()),
default_reasoning_level: Some(ReasoningEffort::Medium),
@@ -884,6 +885,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
let base_model = ModelInfo {
slug: large_model_slug.to_string(),
request_model: None,
display_name: "Larger Model".to_string(),
description: Some("larger context window model".to_string()),
default_reasoning_level: Some(ReasoningEffort::Medium),

View File

@@ -317,6 +317,7 @@ struct ModelsCache {
fn test_remote_model(slug: &str, priority: i32) -> ModelInfo {
ModelInfo {
slug: slug.to_string(),
request_model: None,
display_name: "Remote Test".to_string(),
description: Some("remote model".to_string()),
default_reasoning_level: Some(ReasoningEffort::Medium),

View File

@@ -633,6 +633,7 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
let friendly_personality_message = "Friendly variant";
let remote_model = ModelInfo {
slug: remote_slug.to_string(),
request_model: None,
display_name: "Remote default personality test".to_string(),
description: Some("Remote model with default personality template".to_string()),
default_reasoning_level: Some(ReasoningEffort::Medium),
@@ -749,6 +750,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
let remote_pragmatic_message = "Pragmatic from remote template";
let remote_model = ModelInfo {
slug: remote_slug.to_string(),
request_model: None,
display_name: "Remote personality test".to_string(),
description: Some("Remote model with personality template".to_string()),
default_reasoning_level: Some(ReasoningEffort::Medium),

View File

@@ -7,6 +7,7 @@ use anyhow::Result;
use codex_core::CodexAuth;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::config::CustomModelConfig;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_protocol::config_types::ReasoningSummary;
@@ -268,6 +269,74 @@ async fn namespaced_model_slug_uses_catalog_metadata_without_fallback_warning()
Ok(())
}
// End-to-end wire test: when the session is configured with a custom model alias,
// the alias is what the session reports locally, but the HTTP request body sent to
// the model endpoint carries the *base* model slug the alias maps to.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn custom_model_alias_sends_base_model_slug() -> Result<()> {
    // These macros early-return Ok(()) when the environment can't run network tests.
    skip_if_no_network!(Ok(()));
    skip_if_sandbox!(Ok(()));
    let server = MockServer::start().await;
    let alias = "gpt-5.4 1m";
    let base_model = "gpt-5.4";
    // Mount a single canned SSE exchange so the turn can complete; the mock also
    // records the request so we can inspect the outgoing body afterwards.
    let response_mock = mount_sse_once(
        &server,
        sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
    )
    .await;
    // Select the alias as the session model, and register both the alias mapping
    // and a one-entry catalog containing only the base model.
    let mut builder = test_codex()
        .with_model(alias)
        .with_auth(CodexAuth::from_api_key("Test API Key"))
        .with_config(move |config| {
            config.custom_models.insert(
                alias.to_string(),
                CustomModelConfig {
                    model: base_model.to_string(),
                    model_context_window: Some(1_000_000),
                    model_auto_compact_token_limit: Some(900_000),
                },
            );
            config.model_catalog = Some(ModelsResponse {
                models: vec![test_remote_model(base_model, ModelVisibility::List, 1)],
            });
        });
    let TestCodex {
        codex,
        cwd,
        config,
        session_configured,
        ..
    } = builder.build(&server).await?;
    // Locally the session still identifies itself by the alias.
    assert_eq!(session_configured.model, alias);
    // Run one user turn addressed to the alias.
    codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "check custom alias model routing".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: config.permissions.approval_policy.value(),
            sandbox_policy: config.permissions.sandbox_policy.get().clone(),
            model: alias.to_string(),
            effort: None,
            summary: None,
            service_tier: None,
            collaboration_mode: None,
            personality: None,
        })
        .await?;
    wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
    // Inspect the single captured request: the wire payload must name the base
    // model, proving the alias was translated before hitting the provider.
    let body = response_mock.single_request().body_json();
    assert_eq!(body["model"].as_str(), Some(base_model));
    Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
skip_if_no_network!(Ok(()));
@@ -280,6 +349,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
let remote_model = ModelInfo {
slug: REMOTE_MODEL_SLUG.to_string(),
request_model: None,
display_name: "Remote Test".to_string(),
description: Some("A remote model that requires the test shell".to_string()),
default_reasoning_level: Some(ReasoningEffort::Medium),
@@ -524,6 +594,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
let remote_base = "Use the remote base instructions only.";
let remote_model = ModelInfo {
slug: model.to_string(),
request_model: None,
display_name: "Parallel Remote".to_string(),
description: Some("A remote model with custom instructions".to_string()),
default_reasoning_level: Some(ReasoningEffort::Medium),
@@ -1004,6 +1075,7 @@ fn test_remote_model_with_policy(
) -> ModelInfo {
ModelInfo {
slug: slug.to_string(),
request_model: None,
display_name: format!("{slug} display"),
description: Some(format!("{slug} description")),
default_reasoning_level: Some(ReasoningEffort::Medium),

View File

@@ -394,6 +394,7 @@ async fn stdio_image_responses_are_sanitized_for_text_only_model() -> anyhow::Re
ModelsResponse {
models: vec![ModelInfo {
slug: text_only_model_slug.to_string(),
request_model: None,
display_name: "RMCP Text Only".to_string(),
description: Some("Test model without image input support".to_string()),
default_reasoning_level: None,

View File

@@ -56,6 +56,7 @@ fn test_model_info(
) -> ModelInfo {
ModelInfo {
slug: slug.to_string(),
request_model: None,
display_name: display_name.to_string(),
description: Some(description.to_string()),
default_reasoning_level: Some(default_reasoning_level),

View File

@@ -1335,6 +1335,7 @@ async fn view_image_tool_returns_unsupported_message_for_text_only_model() -> an
let model_slug = "text-only-view-image-test-model";
let text_only_model = ModelInfo {
slug: model_slug.to_string(),
request_model: None,
display_name: "Text-only view_image test model".to_string(),
description: Some("Remote model for view_image unsupported-path coverage".to_string()),
default_reasoning_level: Some(ReasoningEffort::Medium),