Compare commits

...

1 Commit

Author: Owen Lin
SHA1: 8e8659d4e4
Message: fix(app-server): filter out codex-auto-* models and ensure gpt-5.2-codex is default
Date: 2026-01-13 16:07:22 -08:00
15 changed files with 185 additions and 68 deletions

View File

@@ -875,6 +875,10 @@ pub struct ModelListParams {
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
+ /// Determines whether to include codex-auto-* models in the response.
+ /// Defaults to false.
+ pub include_codex_auto_models: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
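The new field is optional and additive: clients that never set it keep the old behavior. As a rough sketch (field values are illustrative, not taken from the diff), the two request flavors look like this:

    // Omitting the flag (None) is treated the same as Some(false) by the
    // server, so codex-auto-* models stay filtered out.
    let legacy_request = ModelListParams {
        cursor: None,
        limit: Some(50),
        include_codex_auto_models: None,
    };
    // Explicit opt-in: codex-auto-* models are included in the response.
    let opted_in_request = ModelListParams {
        cursor: None,
        limit: Some(50),
        include_codex_auto_models: Some(true),
    };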

View File

@@ -2250,10 +2250,16 @@ impl CodexMessageProcessor {
request_id: RequestId,
params: ModelListParams,
) {
- let ModelListParams { limit, cursor } = params;
+ let ModelListParams {
+ limit,
+ cursor,
+ include_codex_auto_models,
+ } = params;
+ let include_codex_auto_models = include_codex_auto_models == Some(true);
let mut config = (*config).clone();
config.features.enable(Feature::RemoteModels);
- let models = supported_models(thread_manager, &config).await;
+ let models = supported_models(thread_manager, &config, include_codex_auto_models).await;
let total = models.len();
if total == 0 {
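The `include_codex_auto_models == Some(true)` comparison collapses the optional flag into an opt-in bool: only an explicit `Some(true)` enables it. A minimal equivalence sketch (standalone, nothing here is from the diff):

    // Equivalent to the comparison used in the handler above.
    fn resolve(flag: Option<bool>) -> bool {
        flag.unwrap_or(false)
    }

    // resolve(None) == false        // field omitted: auto models filtered
    // resolve(Some(false)) == false // explicit opt-out: still filtered
    // resolve(Some(true)) == true   // explicit opt-in: included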

View File

@@ -7,9 +7,13 @@ use codex_core::config::Config;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffortPreset;
- pub async fn supported_models(thread_manager: Arc<ThreadManager>, config: &Config) -> Vec<Model> {
+ pub async fn supported_models(
+ thread_manager: Arc<ThreadManager>,
+ config: &Config,
+ include_codex_auto_models: bool,
+ ) -> Vec<Model> {
thread_manager
- .list_models(config)
+ .list_models(config, include_codex_auto_models)
.await
.into_iter()
.filter(|preset| preset.show_in_picker)

View File

@@ -32,6 +32,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
.send_list_models_request(ModelListParams {
limit: Some(100),
cursor: None,
+ include_codex_auto_models: None,
})
.await?;
@@ -172,6 +173,7 @@ async fn list_models_pagination_works() -> Result<()> {
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: None,
+ include_codex_auto_models: None,
})
.await?;
@@ -194,6 +196,7 @@ async fn list_models_pagination_works() -> Result<()> {
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(next_cursor.clone()),
+ include_codex_auto_models: None,
})
.await?;
@@ -216,6 +219,7 @@ async fn list_models_pagination_works() -> Result<()> {
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(third_cursor.clone()),
+ include_codex_auto_models: None,
})
.await?;
@@ -238,6 +242,7 @@ async fn list_models_pagination_works() -> Result<()> {
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(fourth_cursor.clone()),
+ include_codex_auto_models: None,
})
.await?;
@@ -270,6 +275,7 @@ async fn list_models_rejects_invalid_cursor() -> Result<()> {
.send_list_models_request(ModelListParams {
limit: None,
cursor: Some("invalid".to_string()),
+ include_codex_auto_models: None,
})
.await?;
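Every existing call site must now name the new field because Rust struct literals are exhaustive. If `ModelListParams` implemented `Default` (an assumption; the diff does not show such an impl), the tests could have avoided this churn:

    // Hypothetical, assuming a Default impl existed on ModelListParams.
    let params = ModelListParams {
        limit: Some(1),
        cursor: None,
        ..Default::default()
    };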

File diff suppressed because one or more lines are too long

View File

@@ -14,6 +14,8 @@ use tokio::sync::RwLock;
use tokio::sync::TryLockError;
use tokio::time::timeout;
use tracing::error;
+ use tracing::info;
+ use tracing::warn;
use super::cache;
use super::cache::ModelsCache;
@@ -35,6 +37,7 @@ const MODELS_REFRESH_TIMEOUT: Duration = Duration::from_secs(5);
const OPENAI_DEFAULT_API_MODEL: &str = "gpt-5.1-codex-max";
const OPENAI_DEFAULT_CHATGPT_MODEL: &str = "gpt-5.2-codex";
const CODEX_AUTO_BALANCED_MODEL: &str = "codex-auto-balanced";
+ const CODEX_AUTO_PREFIX: &str = "codex-auto-";
/// Coordinates remote model discovery plus cached metadata on disk.
#[derive(Debug)]
@@ -122,17 +125,25 @@ impl ModelsManager {
Ok(())
}
- pub async fn list_models(&self, config: &Config) -> Vec<ModelPreset> {
+ pub async fn list_models(
+ &self,
+ config: &Config,
+ include_codex_auto_models: bool,
+ ) -> Vec<ModelPreset> {
if let Err(err) = self.refresh_available_models_with_cache(config).await {
error!("failed to refresh available models: {err}");
}
let remote_models = self.remote_models(config).await;
- self.build_available_models(remote_models)
+ self.build_available_models(remote_models, include_codex_auto_models)
}
- pub fn try_list_models(&self, config: &Config) -> Result<Vec<ModelPreset>, TryLockError> {
+ pub fn try_list_models(
+ &self,
+ config: &Config,
+ include_codex_auto_models: bool,
+ ) -> Result<Vec<ModelPreset>, TryLockError> {
let remote_models = self.try_get_remote_models(config)?;
- Ok(self.build_available_models(remote_models))
+ Ok(self.build_available_models(remote_models, include_codex_auto_models))
}
/// Look up the requested model metadata while applying remote metadata overrides.
@@ -162,7 +173,7 @@ impl ModelsManager {
let remote_models = self.remote_models(config).await;
if auth_mode == Some(AuthMode::ChatGPT) {
let has_auto_balanced = self
- .build_available_models(remote_models)
+ .build_available_models(remote_models, true)
.iter()
.any(|model| model.model == CODEX_AUTO_BALANCED_MODEL && model.show_in_picker);
if has_auto_balanced {
@@ -249,7 +260,11 @@ impl ModelsManager {
}
/// Merge remote model metadata into picker-ready presets, preserving existing entries.
- fn build_available_models(&self, mut remote_models: Vec<ModelInfo>) -> Vec<ModelPreset> {
+ fn build_available_models(
+ &self,
+ mut remote_models: Vec<ModelInfo>,
+ include_codex_auto_models: bool,
+ ) -> Vec<ModelPreset> {
remote_models.sort_by(|a, b| a.priority.cmp(&b.priority));
let remote_presets: Vec<ModelPreset> = remote_models.into_iter().map(Into::into).collect();
@@ -257,21 +272,79 @@ impl ModelsManager {
let mut merged_presets = Self::merge_presets(remote_presets, existing_presets);
merged_presets = self.filter_visible_models(merged_presets);
- let has_default = merged_presets.iter().any(|preset| preset.is_default);
- if !has_default {
- if let Some(default) = merged_presets
- .iter_mut()
- .find(|preset| preset.show_in_picker)
- {
- default.is_default = true;
- } else if let Some(default) = merged_presets.first_mut() {
- default.is_default = true;
- }
- }
+ if !include_codex_auto_models {
+ merged_presets.retain(|preset| !preset.model.starts_with(CODEX_AUTO_PREFIX));
+ }
+ self.apply_default_model(&mut merged_presets);
merged_presets
}
+ fn preferred_default_model(&self, models: &[ModelPreset]) -> Option<&'static str> {
+ let chatgpt_mode = self.auth_manager.get_auth_mode() == Some(AuthMode::ChatGPT);
+ if !chatgpt_mode {
+ return None;
+ }
+ let has_auto_balanced = models
+ .iter()
+ .any(|model| model.model == CODEX_AUTO_BALANCED_MODEL);
+ if has_auto_balanced {
+ return None;
+ }
+ Some(OPENAI_DEFAULT_CHATGPT_MODEL)
+ }
+ fn apply_default_model(&self, models: &mut Vec<ModelPreset>) {
+ if models.is_empty() {
+ return;
+ }
+ let preferred_default = self.preferred_default_model(models);
+ let default_index = if let Some(preferred_default) = preferred_default {
+ if let Some(index) = models
+ .iter()
+ .position(|model| model.model == preferred_default)
+ {
+ if index != 0 {
+ let preferred = models.remove(index);
+ models.insert(0, preferred);
+ }
+ Some(0)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+ let default_index = default_index.unwrap_or_else(|| {
+ let default_count = models.iter().filter(|model| model.is_default).count();
+ if default_count == 1 {
+ models
+ .iter()
+ .position(|model| model.is_default)
+ .unwrap_or(0)
+ } else {
+ if default_count == 0 {
+ info!("no default model found; setting the first model as default");
+ } else {
+ warn!(
+ "multiple default models found ({default_count}); setting the first model as default",
+ );
+ }
+ 0
+ }
+ });
+ for model in models.iter_mut() {
+ model.is_default = false;
+ }
+ if let Some(model) = models.get_mut(default_index) {
+ model.is_default = true;
+ }
+ }
fn filter_visible_models(&self, models: Vec<ModelPreset>) -> Vec<ModelPreset> {
let chatgpt_mode = self.auth_manager.get_auth_mode() == Some(AuthMode::ChatGPT);
models
@@ -440,7 +513,7 @@ mod tests {
let cached_remote = manager.remote_models(&config).await;
assert_eq!(cached_remote, remote_models);
- let available = manager.list_models(&config).await;
+ let available = manager.list_models(&config, true).await;
let high_idx = available
.iter()
.position(|model| model.model == "priority-high")
@@ -453,10 +526,12 @@ mod tests {
high_idx < low_idx,
"higher priority should be listed before lower priority"
);
- assert!(
- available[high_idx].is_default,
- "highest priority should be default"
- );
+ let default_model = available
+ .iter()
+ .find(|model| model.is_default)
+ .expect("default model should be present");
+ assert_eq!(default_model.model, OPENAI_DEFAULT_CHATGPT_MODEL);
+ assert!(!available[high_idx].is_default);
+ assert!(!available[low_idx].is_default);
assert_eq!(
models_mock.requests().len(),
@@ -641,7 +716,7 @@ mod tests {
.expect("second refresh succeeds");
let available = manager
- .try_list_models(&config)
+ .try_list_models(&config, true)
.expect("models should be available");
assert!(
available.iter().any(|preset| preset.model == "remote-new"),
@@ -676,11 +751,11 @@ mod tests {
let hidden_model = remote_model_with_visibility("hidden", "Hidden", 0, "hide");
let visible_model = remote_model_with_visibility("visible", "Visible", 1, "list");
- let expected_hidden = ModelPreset::from(hidden_model.clone());
- let mut expected_visible = ModelPreset::from(visible_model.clone());
- expected_visible.is_default = true;
+ let mut expected_hidden = ModelPreset::from(hidden_model.clone());
+ expected_hidden.is_default = true;
+ let expected_visible = ModelPreset::from(visible_model.clone());
- let available = manager.build_available_models(vec![hidden_model, visible_model]);
+ let available = manager.build_available_models(vec![hidden_model, visible_model], true);
assert_eq!(available, vec![expected_hidden, expected_visible]);
}
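Condensed, the new tail end of build_available_models plus apply_default_model behaves as sketched below. This is a simplified stand-in, not the real implementation: `Preset` mimics only the fields used here, and the real code additionally moves the preferred default to the front of the list.

    struct Preset {
        model: String,
        is_default: bool,
    }

    fn finalize(mut models: Vec<Preset>, include_codex_auto: bool, chatgpt_mode: bool) -> Vec<Preset> {
        // 1. Unless the caller opted in, drop every codex-auto-* preset.
        if !include_codex_auto {
            models.retain(|p| !p.model.starts_with("codex-auto-"));
        }
        if models.is_empty() {
            return models;
        }
        // 2. In ChatGPT mode, prefer gpt-5.2-codex unless codex-auto-balanced
        //    is present (in that case the is_default flags decide).
        let prefer_fixed_default = chatgpt_mode
            && !models.iter().any(|p| p.model == "codex-auto-balanced");
        let default_index = if prefer_fixed_default {
            models.iter().position(|p| p.model == "gpt-5.2-codex")
        } else {
            None
        };
        // 3. Fall back to a uniquely flagged default; zero or multiple flags
        //    degrade to index 0 (the real code logs via info!/warn!).
        let default_index = default_index.unwrap_or_else(|| {
            let flagged: Vec<usize> = models
                .iter()
                .enumerate()
                .filter_map(|(i, p)| p.is_default.then_some(i))
                .collect();
            if flagged.len() == 1 { flagged[0] } else { 0 }
        });
        // 4. Clear all flags, then set exactly one default.
        for p in models.iter_mut() {
            p.is_default = false;
        }
        models[default_index].is_default = true;
        models
    }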

View File

@@ -138,8 +138,15 @@ impl ThreadManager {
self.state.models_manager.clone()
}
- pub async fn list_models(&self, config: &Config) -> Vec<ModelPreset> {
- self.state.models_manager.list_models(config).await
+ pub async fn list_models(
+ &self,
+ config: &Config,
+ include_codex_auto_models: bool,
+ ) -> Vec<ModelPreset> {
+ self.state
+ .models_manager
+ .list_models(config, include_codex_auto_models)
+ .await
}
pub async fn list_thread_ids(&self) -> Vec<ThreadId> {

View File

@@ -17,7 +17,7 @@ async fn list_models_returns_api_key_models() -> Result<()> {
CodexAuth::from_api_key("sk-test"),
built_in_model_providers()["openai"].clone(),
);
- let models = manager.list_models(&config).await;
+ let models = manager.list_models(&config, true).await;
let expected_models = expected_models_for_api_key();
assert_eq!(expected_models, models);
@@ -33,7 +33,7 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {
CodexAuth::create_dummy_chatgpt_auth_for_testing(),
built_in_model_providers()["openai"].clone(),
);
- let models = manager.list_models(&config).await;
+ let models = manager.list_models(&config, true).await;
let expected_models = expected_models_for_chatgpt();
assert_eq!(expected_models, models);

View File

@@ -428,14 +428,19 @@ async fn remote_models_preserve_builtin_presets() -> Result<()> {
.await
.expect("refresh succeeds");
- let available = manager.list_models(&config).await;
+ let available = manager.list_models(&config, true).await;
let remote = available
.iter()
.find(|model| model.model == "remote-alpha")
.expect("remote model should be listed");
let mut expected_remote: ModelPreset = remote_model.into();
- expected_remote.is_default = true;
+ expected_remote.is_default = false;
assert_eq!(*remote, expected_remote);
+ let default_model = available
+ .iter()
+ .find(|model| model.is_default)
+ .expect("default model should be present");
+ assert_eq!(default_model.model, "gpt-5.2-codex");
assert!(
available
.iter()
@@ -553,7 +558,7 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
let selected = manager.get_model(&None, &config).await;
assert_eq!(selected, "gpt-5.2-codex");
- let available = manager.list_models(&config).await;
+ let available = manager.list_models(&config, true).await;
let hidden = available
.iter()
.find(|model| model.model == "codex-auto-balanced")
@@ -571,7 +576,7 @@ async fn wait_for_model_available(
let deadline = Instant::now() + Duration::from_secs(2);
loop {
if let Some(model) = {
- let guard = manager.list_models(config).await;
+ let guard = manager.list_models(config, true).await;
guard.iter().find(|model| model.model == slug).cloned()
} {
return model;
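The reworked assertions check which model carries the default flag rather than assuming its position. A reusable check in the same spirit could be a hypothetical helper like:

    // Hypothetical test helper, not part of the diff: after
    // apply_default_model runs, exactly one preset should be flagged.
    fn assert_single_default(models: &[ModelPreset]) {
        let count = models.iter().filter(|m| m.is_default).count();
        assert_eq!(count, 1, "expected exactly one default model, found {count}");
    }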

View File

@@ -189,7 +189,7 @@ async fn handle_model_migration_prompt_if_needed(
app_event_tx: &AppEventSender,
models_manager: Arc<ModelsManager>,
) -> Option<AppExitInfo> {
- let available_models = models_manager.list_models(config).await;
+ let available_models = models_manager.list_models(config, true).await;
let upgrade = available_models
.iter()
.find(|preset| preset.model == model)

View File

@@ -2379,7 +2379,10 @@ impl ChatWidget {
}
fn lower_cost_preset(&self) -> Option<ModelPreset> {
- let models = self.models_manager.try_list_models(&self.config).ok()?;
+ let models = self
+ .models_manager
+ .try_list_models(&self.config, true)
+ .ok()?;
models
.iter()
.find(|preset| preset.show_in_picker && preset.model == NUDGE_MODEL_SLUG)
@@ -2485,16 +2488,18 @@ impl ChatWidget {
/// Open a popup to choose a quick auto model. Selecting "All models"
/// opens the full picker with every available preset.
pub(crate) fn open_model_popup(&mut self) {
- let presets: Vec<ModelPreset> = match self.models_manager.try_list_models(&self.config) {
- Ok(models) => models,
- Err(_) => {
- self.add_info_message(
- "Models are being updated; please try /model again in a moment.".to_string(),
- None,
- );
- return;
- }
- };
+ let presets: Vec<ModelPreset> =
+ match self.models_manager.try_list_models(&self.config, true) {
+ Ok(models) => models,
+ Err(_) => {
+ self.add_info_message(
+ "Models are being updated; please try /model again in a moment."
+ .to_string(),
+ None,
+ );
+ return;
+ }
+ };
self.open_model_popup_with_presets(presets);
}
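Note that the TUI call sites pass `true`, so the /model picker keeps listing codex-auto models; only callers that pass `false`, such as the app-server's list handler by default, get the filtered view. The surrounding lock handling is non-blocking: try_list_models returns Err(TryLockError) while a background refresh holds the models lock, as in this sketch (`open_picker` and `show_info` are hypothetical stand-ins):

    match models_manager.try_list_models(&config, true) {
        Ok(presets) => open_picker(presets), // hypothetical
        Err(_) => show_info("Models are being updated; please try /model again in a moment."),
    }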

View File

@@ -984,7 +984,7 @@ fn active_blob(chat: &ChatWidget) -> String {
fn get_available_model(chat: &ChatWidget, model: &str) -> ModelPreset {
let models = chat
.models_manager
- .try_list_models(&chat.config)
+ .try_list_models(&chat.config, true)
.expect("models lock available");
models
.iter()

View File

@@ -221,7 +221,7 @@ async fn handle_model_migration_prompt_if_needed(
app_event_tx: &AppEventSender,
models_manager: Arc<ModelsManager>,
) -> Option<AppExitInfo> {
- let available_models = models_manager.list_models(config).await;
+ let available_models = models_manager.list_models(config, true).await;
let upgrade = available_models
.iter()
.find(|preset| preset.model == model)

View File

@@ -2175,7 +2175,10 @@ impl ChatWidget {
}
fn lower_cost_preset(&self) -> Option<ModelPreset> {
- let models = self.models_manager.try_list_models(&self.config).ok()?;
+ let models = self
+ .models_manager
+ .try_list_models(&self.config, true)
+ .ok()?;
models
.iter()
.find(|preset| preset.show_in_picker && preset.model == NUDGE_MODEL_SLUG)
@@ -2281,16 +2284,18 @@ impl ChatWidget {
/// Open a popup to choose a quick auto model. Selecting "All models"
/// opens the full picker with every available preset.
pub(crate) fn open_model_popup(&mut self) {
- let presets: Vec<ModelPreset> = match self.models_manager.try_list_models(&self.config) {
- Ok(models) => models,
- Err(_) => {
- self.add_info_message(
- "Models are being updated; please try /model again in a moment.".to_string(),
- None,
- );
- return;
- }
- };
+ let presets: Vec<ModelPreset> =
+ match self.models_manager.try_list_models(&self.config, true) {
+ Ok(models) => models,
+ Err(_) => {
+ self.add_info_message(
+ "Models are being updated; please try /model again in a moment."
+ .to_string(),
+ None,
+ );
+ return;
+ }
+ };
self.open_model_popup_with_presets(presets);
}

View File

@@ -935,7 +935,7 @@ fn active_blob(chat: &ChatWidget) -> String {
fn get_available_model(chat: &ChatWidget, model: &str) -> ModelPreset {
let models = chat
.models_manager
- .try_list_models(&chat.config)
+ .try_list_models(&chat.config, true)
.expect("models lock available");
models
.iter()