Mirror of https://github.com/openai/codex.git (synced 2026-05-01 01:47:18 +00:00)
fix(app-server): filter out codex-auto-* models and ensure gpt-5.2-codex is default
@@ -2250,10 +2250,16 @@ impl CodexMessageProcessor {
         request_id: RequestId,
         params: ModelListParams,
     ) {
-        let ModelListParams { limit, cursor } = params;
+        let ModelListParams {
+            limit,
+            cursor,
+            include_codex_auto_models,
+        } = params;
+        let include_codex_auto_models = include_codex_auto_models == Some(true);
         let mut config = (*config).clone();
         config.features.enable(Feature::RemoteModels);
-        let models = supported_models(thread_manager, &config).await;
+        let models = supported_models(thread_manager, &config, include_codex_auto_models).await;

         let total = models.len();

         if total == 0 {
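
The handler treats the new include_codex_auto_models field as strictly opt-in: only Some(true) enables the codex-auto-* entries, while an omitted field (None) and Some(false) both behave like the old request shape. A minimal stand-alone sketch of that coercion (not the app-server code itself):

    // Stand-alone sketch (not the app-server code) of how the optional flag
    // collapses to a strict bool: only Some(true) opts in, while None and
    // Some(false) both keep the codex-auto-* models hidden.
    fn main() {
        for flag in [None, Some(false), Some(true)] {
            let include_codex_auto_models = flag == Some(true);
            println!("{flag:?} -> include codex-auto models: {include_codex_auto_models}");
        }
    }
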
@@ -7,9 +7,13 @@ use codex_core::config::Config;
 use codex_protocol::openai_models::ModelPreset;
 use codex_protocol::openai_models::ReasoningEffortPreset;

-pub async fn supported_models(thread_manager: Arc<ThreadManager>, config: &Config) -> Vec<Model> {
+pub async fn supported_models(
+    thread_manager: Arc<ThreadManager>,
+    config: &Config,
+    include_codex_auto_models: bool,
+) -> Vec<Model> {
     thread_manager
-        .list_models(config)
+        .list_models(config, include_codex_auto_models)
         .await
         .into_iter()
         .filter(|preset| preset.show_in_picker)
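
This signature change only threads the flag through to ThreadManager::list_models; the actual exclusion of codex-auto-* presets happens inside that method and is not part of this diff. A rough, hypothetical illustration of the intended behavior (string IDs, the prefix check, and the example model "codex-auto-balanced" are assumptions, not the real implementation):

    // Hypothetical illustration only: assumes models are identified by string
    // IDs and that "codex-auto-" is the prefix hidden unless the caller opts in.
    fn visible_models<'a>(ids: &[&'a str], include_codex_auto_models: bool) -> Vec<&'a str> {
        ids.iter()
            .copied()
            .filter(|id| include_codex_auto_models || !id.starts_with("codex-auto-"))
            .collect()
    }

    fn main() {
        // "codex-auto-balanced" is a made-up ID for the example.
        let ids = ["gpt-5.2-codex", "codex-auto-balanced"];
        assert_eq!(visible_models(&ids, false), vec!["gpt-5.2-codex"]); // default: filtered out
        assert_eq!(visible_models(&ids, true), ids.to_vec()); // opt-in: everything listed
    }
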
@@ -32,6 +32,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(100),
             cursor: None,
+            include_codex_auto_models: None,
         })
         .await?;
@@ -172,6 +173,7 @@ async fn list_models_pagination_works() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(1),
             cursor: None,
+            include_codex_auto_models: None,
         })
         .await?;
@@ -194,6 +196,7 @@ async fn list_models_pagination_works() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(1),
             cursor: Some(next_cursor.clone()),
+            include_codex_auto_models: None,
         })
         .await?;
@@ -216,6 +219,7 @@ async fn list_models_pagination_works() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(1),
             cursor: Some(third_cursor.clone()),
+            include_codex_auto_models: None,
         })
         .await?;
@@ -238,6 +242,7 @@ async fn list_models_pagination_works() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(1),
             cursor: Some(fourth_cursor.clone()),
+            include_codex_auto_models: None,
         })
         .await?;
@@ -270,6 +275,7 @@ async fn list_models_rejects_invalid_cursor() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: None,
             cursor: Some("invalid".to_string()),
+            include_codex_auto_models: None,
         })
         .await?;
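
All updated tests pass include_codex_auto_models: None, so they keep exercising the default, filtered listing; a client has to send true explicitly to see the codex-auto-* models. A hedged sketch of how such an optional field might stay off the wire when omitted (the stand-in struct, serde attributes, and JSON shape are assumptions, not the actual protocol definitions):

    // Stand-in mirror of ModelListParams for illustration only; field types and
    // serde attributes are assumed, not taken from the app-server protocol.
    use serde::Serialize;

    #[derive(Serialize)]
    struct ModelListParamsSketch {
        #[serde(skip_serializing_if = "Option::is_none")]
        limit: Option<u32>,
        #[serde(skip_serializing_if = "Option::is_none")]
        cursor: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        include_codex_auto_models: Option<bool>,
    }

    fn main() -> serde_json::Result<()> {
        // Old-style request: the new field is simply absent from the payload.
        let legacy = ModelListParamsSketch {
            limit: Some(1),
            cursor: None,
            include_codex_auto_models: None,
        };
        println!("{}", serde_json::to_string(&legacy)?); // {"limit":1}

        // Opt-in request: codex-auto-* models are included in the listing.
        let opt_in = ModelListParamsSketch {
            limit: Some(1),
            cursor: None,
            include_codex_auto_models: Some(true),
        };
        println!("{}", serde_json::to_string(&opt_in)?); // {"limit":1,"include_codex_auto_models":true}
        Ok(())
    }
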