Fix app-server write_models_cache to treat models with a lower priority number as higher priority. (#8844)

Rank models with p0 higher than p1. This shouldn't result in any
behavioral changes, just a reordering.
Ahmed Ibrahim authored 2026-01-07 11:22:13 -08:00; committed by pap
parent 520f0ceb4d
commit 74bd722ac0
2 changed files with 47 additions and 48 deletions
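
For context, the new convention makes priority an ascending rank. A minimal sketch of what the commit title means, with a simplified stand-in type (the real ModelInfo produced by preset_to_info has more fields; the priorities below just mirror the test's new expected order):

// Illustrative only: simplified stand-in for the cached model entries.
#[derive(Debug)]
struct ModelEntry {
    slug: &'static str,
    priority: i32, // lower number = higher priority = earlier in the list
}

fn main() {
    let mut models = vec![
        ModelEntry { slug: "gpt-5.2", priority: 3 },
        ModelEntry { slug: "gpt-5.2-codex", priority: 0 },
        ModelEntry { slug: "gpt-5.1-codex-max", priority: 1 },
        ModelEntry { slug: "gpt-5.1-codex-mini", priority: 2 },
    ];
    // Ascending sort: p0 ranks above p1, as the commit title says.
    models.sort_by_key(|m| m.priority);
    assert_eq!(models[0].slug, "gpt-5.2-codex");
}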


@@ -40,7 +40,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
     }
 }
-// todo(aibrahim): fix the priorities to be the opposite here.
 /// Write a models_cache.json file to the codex home directory.
 /// This prevents ModelsManager from making network requests to refresh models.
 /// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.
@@ -51,14 +50,14 @@ pub fn write_models_cache(codex_home: &Path) -> std::io::Result<()> {
         .iter()
         .filter(|preset| preset.show_in_picker)
         .collect();
-    // Convert presets to ModelInfo, assigning priorities (higher = earlier in list)
-    // Priority is used for sorting, so first model gets highest priority
+    // Convert presets to ModelInfo, assigning priorities (lower = earlier in list).
+    // Priority is used for sorting, so the first model gets the lowest priority.
     let models: Vec<ModelInfo> = presets
         .iter()
         .enumerate()
         .map(|(idx, preset)| {
-            // Higher priority = earlier in list, so reverse the index
-            let priority = (presets.len() - idx) as i32;
+            // Lower priority = earlier in list.
+            let priority = idx as i32;
             preset_to_info(preset, priority)
         })
         .collect();

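With this change the enumerate index doubles as the priority, so an ascending sort by priority reproduces the order of the preset list itself. A hedged sketch of that pattern (free-standing; the helper name and tuple shape here are assumptions, not the codex types):

// Assumed helper for illustration: maps list position directly to priority.
fn assign_priorities(slugs: &[&str]) -> Vec<(String, i32)> {
    slugs
        .iter()
        .enumerate()
        .map(|(idx, slug)| (slug.to_string(), idx as i32)) // lower = earlier
        .collect()
}

fn main() {
    let ranked = assign_priorities(&["gpt-5.2-codex", "gpt-5.1-codex-max"]);
    assert_eq!(ranked[0], ("gpt-5.2-codex".to_string(), 0));
    assert_eq!(ranked[1], ("gpt-5.1-codex-max".to_string(), 1));
}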

@@ -48,29 +48,23 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
     let expected_models = vec![
         Model {
-            id: "gpt-5.2".to_string(),
-            model: "gpt-5.2".to_string(),
-            display_name: "gpt-5.2".to_string(),
-            description:
-                "Latest frontier model with improvements across knowledge, reasoning and coding"
-                    .to_string(),
+            id: "gpt-5.2-codex".to_string(),
+            model: "gpt-5.2-codex".to_string(),
+            display_name: "gpt-5.2-codex".to_string(),
+            description: "Latest frontier agentic coding model.".to_string(),
             supported_reasoning_efforts: vec![
                 ReasoningEffortOption {
                     reasoning_effort: ReasoningEffort::Low,
-                    description: "Balances speed with some reasoning; useful for straightforward \
-                                  queries and short explanations"
-                        .to_string(),
+                    description: "Fast responses with lighter reasoning".to_string(),
                 },
                 ReasoningEffortOption {
                     reasoning_effort: ReasoningEffort::Medium,
-                    description: "Provides a solid balance of reasoning depth and latency for \
-                                  general-purpose tasks"
+                    description: "Balances speed and reasoning depth for everyday tasks"
                         .to_string(),
                 },
                 ReasoningEffortOption {
                     reasoning_effort: ReasoningEffort::High,
-                    description: "Maximizes reasoning depth for complex or ambiguous problems"
-                        .to_string(),
+                    description: "Greater reasoning depth for complex problems".to_string(),
                 },
                 ReasoningEffortOption {
                     reasoning_effort: ReasoningEffort::XHigh,
@@ -80,25 +74,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
             default_reasoning_effort: ReasoningEffort::Medium,
             is_default: true,
         },
-        Model {
-            id: "gpt-5.1-codex-mini".to_string(),
-            model: "gpt-5.1-codex-mini".to_string(),
-            display_name: "gpt-5.1-codex-mini".to_string(),
-            description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
-            supported_reasoning_efforts: vec![
-                ReasoningEffortOption {
-                    reasoning_effort: ReasoningEffort::Medium,
-                    description: "Dynamically adjusts reasoning based on the task".to_string(),
-                },
-                ReasoningEffortOption {
-                    reasoning_effort: ReasoningEffort::High,
-                    description: "Maximizes reasoning depth for complex or ambiguous problems"
-                        .to_string(),
-                },
-            ],
-            default_reasoning_effort: ReasoningEffort::Medium,
-            is_default: false,
-        },
         Model {
             id: "gpt-5.1-codex-max".to_string(),
             model: "gpt-5.1-codex-max".to_string(),
@@ -127,23 +102,48 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
             is_default: false,
         },
         Model {
-            id: "gpt-5.2-codex".to_string(),
-            model: "gpt-5.2-codex".to_string(),
-            display_name: "gpt-5.2-codex".to_string(),
-            description: "Latest frontier agentic coding model.".to_string(),
+            id: "gpt-5.1-codex-mini".to_string(),
+            model: "gpt-5.1-codex-mini".to_string(),
+            display_name: "gpt-5.1-codex-mini".to_string(),
+            description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
+            supported_reasoning_efforts: vec![
+                ReasoningEffortOption {
+                    reasoning_effort: ReasoningEffort::Medium,
+                    description: "Dynamically adjusts reasoning based on the task".to_string(),
+                },
+                ReasoningEffortOption {
+                    reasoning_effort: ReasoningEffort::High,
+                    description: "Maximizes reasoning depth for complex or ambiguous problems"
+                        .to_string(),
+                },
+            ],
+            default_reasoning_effort: ReasoningEffort::Medium,
+            is_default: false,
+        },
+        Model {
+            id: "gpt-5.2".to_string(),
+            model: "gpt-5.2".to_string(),
+            display_name: "gpt-5.2".to_string(),
+            description:
+                "Latest frontier model with improvements across knowledge, reasoning and coding"
+                    .to_string(),
             supported_reasoning_efforts: vec![
                 ReasoningEffortOption {
                     reasoning_effort: ReasoningEffort::Low,
-                    description: "Fast responses with lighter reasoning".to_string(),
+                    description: "Balances speed with some reasoning; useful for straightforward \
+                                  queries and short explanations"
+                        .to_string(),
                 },
                 ReasoningEffortOption {
                     reasoning_effort: ReasoningEffort::Medium,
-                    description: "Balances speed and reasoning depth for everyday tasks"
+                    description: "Provides a solid balance of reasoning depth and latency for \
+                                  general-purpose tasks"
                         .to_string(),
                 },
                 ReasoningEffortOption {
                     reasoning_effort: ReasoningEffort::High,
-                    description: "Greater reasoning depth for complex problems".to_string(),
+                    description: "Maximizes reasoning depth for complex or ambiguous problems"
+                        .to_string(),
                 },
                 ReasoningEffortOption {
                     reasoning_effort: ReasoningEffort::XHigh,
@@ -187,7 +187,7 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::<ModelListResponse>(first_response)?;
     assert_eq!(first_items.len(), 1);
-    assert_eq!(first_items[0].id, "gpt-5.2");
+    assert_eq!(first_items[0].id, "gpt-5.2-codex");
     let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
     let second_request = mcp
@@ -209,7 +209,7 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::<ModelListResponse>(second_response)?;
     assert_eq!(second_items.len(), 1);
-    assert_eq!(second_items[0].id, "gpt-5.1-codex-mini");
+    assert_eq!(second_items[0].id, "gpt-5.1-codex-max");
     let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;
     let third_request = mcp
@@ -231,7 +231,7 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::<ModelListResponse>(third_response)?;
     assert_eq!(third_items.len(), 1);
-    assert_eq!(third_items[0].id, "gpt-5.1-codex-max");
+    assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
     let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;
     let fourth_request = mcp
@@ -253,7 +253,7 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::<ModelListResponse>(fourth_response)?;
     assert_eq!(fourth_items.len(), 1);
-    assert_eq!(fourth_items[0].id, "gpt-5.2-codex");
+    assert_eq!(fourth_items[0].id, "gpt-5.2");
     assert!(fourth_cursor.is_none());
     Ok(())
 }