Remove preference ranking, as we don't fetch models dynamically

This commit is contained in:
pap
2025-07-28 23:21:49 +01:00
parent b294004ea9
commit 9db5c7af9e
3 changed files with 10 additions and 43 deletions

View File

@@ -73,43 +73,12 @@ pub(crate) fn get_model_info(name: &str) -> Option<ModelInfo> {
/// Return a curated list of commonly-used OpenAI model names for selection UIs.
pub fn get_all_model_names() -> Vec<&'static str> {
    // Ordered by family: codex, reasoning (o3 / o4-mini), GPT-4.1, GPT-4o.
    const MODELS: [&str; 5] = [
        "codex-mini-latest", // codex models
        "o3",                // reasoning models
        "o4-mini",
        "gpt-4.1", // GPT-4.1 family
        "gpt-4o",  // GPT-4o family (canonical plus a few snapshots)
    ];
    MODELS.to_vec()
}
/// Sort order by preference groups, then alphabetically.
///
/// Returns a bucket index where lower is more preferred; names that match
/// no known family fall into the last bucket (5). Callers break ties
/// alphabetically (see `compare_models_by_preference`).
pub fn preference_rank(name: &str) -> u8 {
    // Order matters: "codex-mini-latest" must be matched before the broader
    // prefix checks below, and "o4-mini" before the "o3" prefix group.
    if name == "codex-mini-latest" {
        return 0;
    }
    if name.starts_with("o4-mini") {
        return 1;
    }
    // An exact-match check alongside starts_with is redundant: starts_with
    // already accepts the exact string.
    if name.starts_with("o3") {
        return 2;
    }
    if name.starts_with("gpt-4.1") {
        return 3;
    }
    // "gpt4-o" handles an alternate spelling of the GPT-4o family,
    // just in case.
    if name == "gpt4-o" || name.starts_with("gpt-4o") {
        return 4;
    }
    5
}
/// Compare two model names by preference ranking.
///
/// Equal ranks fall back to alphabetical order so the result is a total
/// ordering suitable for `sort_by`.
pub fn compare_models_by_preference(a: &str, b: &str) -> std::cmp::Ordering {
    use std::cmp::Ordering;

    let by_rank = preference_rank(a).cmp(&preference_rank(b));
    if by_rank == Ordering::Equal {
        a.cmp(b)
    } else {
        by_rank
    }
}

View File

@@ -21,7 +21,6 @@ use crate::app_event_sender::AppEventSender;
use super::BottomPane;
use super::BottomPaneView;
use codex_core::openai_model_info::compare_models_by_preference;
/// Simple dropdown to select a model.
pub(crate) struct ModelSelectionView {
@@ -54,16 +53,15 @@ impl ModelSelectionView {
/// sort preference, and search filter.
fn build_display_rows(&self) -> Vec<DisplayRow> {
// Determine candidate list excluding the current model (it is always pinned first).
let mut others: Vec<&str> = self
let others: Vec<&str> = self
.options
.iter()
.map(|s| s.as_str())
.filter(|m| *m != self.current_model)
.collect();
// If not searching, apply preference sort; otherwise, we'll score by fuzzy match.
// If not searching, maintain provided ordering; otherwise, we'll score by fuzzy match.
if self.query.is_empty() {
others.sort_by(|a, b| compare_models_by_preference(a, b));
let mut rows: Vec<DisplayRow> = Vec::new();
// Pinned current model always first.
rows.push(DisplayRow::Model {
@@ -99,11 +97,10 @@ impl ModelSelectionView {
matches.push((name.to_string(), indices, score));
}
}
// Sort by score (ascending => better). If equal, fall back to preference then alphabetical.
// Sort by score (ascending => better). If equal, fall back to alphabetical and match tightness.
matches.sort_by(|(a_name, a_idx, a_score), (b_name, b_idx, b_score)| {
a_score
.cmp(b_score)
.then_with(|| compare_models_by_preference(a_name, b_name))
.then_with(|| a_name.cmp(b_name))
.then_with(|| a_idx.len().cmp(&b_idx.len()))
});
@@ -415,8 +412,7 @@ impl<'a> BottomPaneView<'a> for ModelSelectionView {
unique.push(m);
}
}
// Sort by preference for stable display when query is empty.
unique.sort_by(|a, b| compare_models_by_preference(a, b));
// Preserve provided ordering without applying preference ranking.
self.options = unique;
// Clamp selection to available rows.

View File

@@ -504,14 +504,16 @@ impl ChatWidget<'_> {
let config_path = self.config.codex_home.join("config.toml");
if let Ok(contents) = std::fs::read_to_string(&config_path) {
if let Ok(cfg) = toml::from_str::<ConfigToml>(&contents) {
if let Some(m) = cfg.model {
options.push(m);
}
let mut config_models: Vec<String> = Vec::new();
if let Some(m) = cfg.model { config_models.push(m); }
for (_name, profile) in cfg.profiles.into_iter() {
if let Some(m) = profile.model {
options.push(m);
config_models.push(m);
}
}
// Keep it simple and deterministic: alphabetical ordering for config models.
config_models.sort();
options.extend(config_models);
}
}