Update models.json (#18586)

- Replace the active models-manager catalog with the deleted core
catalog contents.
- Replace stale hardcoded test model slugs with the current bundled
model slugs (a hypothetical catalog shape is sketched below).
- Keep this as a stacked change on top of the cleanup PR.
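
For orientation, models.json is the bundled model catalog that these slugs must resolve against. Its actual schema is not visible in this diff, so the snippet below is only a minimal sketch under assumed names: the `ModelCatalog`/`ModelEntry` types and the `slug`/`display_name` fields are hypothetical, and it shows how such a catalog could be deserialized and checked with serde, not how the crate actually does it.

use serde::Deserialize;

// Hypothetical types: the real models.json schema is not shown in this diff.
#[derive(Debug, Deserialize)]
struct ModelEntry {
    slug: String,         // e.g. "gpt-5.3-codex", matching the slugs used in the tests below
    display_name: String, // assumed field, for illustration only
}

#[derive(Debug, Deserialize)]
struct ModelCatalog {
    models: Vec<ModelEntry>,
}

fn main() -> Result<(), serde_json::Error> {
    // Assumed example payload; real entries and fields may differ.
    let raw = r#"{"models": [{"slug": "gpt-5.3-codex", "display_name": "GPT-5.3 Codex"}]}"#;
    let catalog: ModelCatalog = serde_json::from_str(raw)?;

    // A hardcoded test slug is only valid if the bundled catalog actually contains it.
    assert!(catalog.models.iter().any(|m| m.slug == "gpt-5.3-codex"));
    Ok(())
}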
Ahmed Ibrahim
2026-04-20 10:27:01 -07:00
committed by GitHub
parent 5d5d610740
commit 316cf0e90b
63 changed files with 540 additions and 1016 deletions

@@ -894,7 +894,7 @@ async fn turn_start_accepts_collaboration_mode_override_v2() -> Result<()> {
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             ..Default::default()
         })
         .await?;
@@ -977,7 +977,7 @@ async fn turn_start_uses_thread_feature_overrides_for_collaboration_mode_instruc
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             config: Some(HashMap::from([(
                 "features.default_mode_request_user_input".to_string(),
                 json!(true),
@@ -1268,7 +1268,7 @@ async fn turn_start_uses_migrated_pragmatic_personality_without_override_v2() ->
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             ..Default::default()
         })
         .await?;
@@ -2033,7 +2033,7 @@ async fn turn_start_emits_spawn_agent_item_with_model_metadata_v2() -> Result<()
     const CHILD_PROMPT: &str = "child: do work";
     const PARENT_PROMPT: &str = "spawn a child and continue";
     const SPAWN_CALL_ID: &str = "spawn-call-1";
-    const REQUESTED_MODEL: &str = "gpt-5.1";
+    const REQUESTED_MODEL: &str = "gpt-5.2";
     const REQUESTED_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::Low;

     let server = responses::start_mock_server().await;
@@ -2088,7 +2088,7 @@ async fn turn_start_emits_spawn_agent_item_with_model_metadata_v2() -> Result<()
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             ..Default::default()
         })
         .await?;
@@ -2227,9 +2227,9 @@ async fn turn_start_emits_spawn_agent_item_with_effective_role_model_metadata_v2
     const CHILD_PROMPT: &str = "child: do work";
     const PARENT_PROMPT: &str = "spawn a child and continue";
     const SPAWN_CALL_ID: &str = "spawn-call-1";
-    const REQUESTED_MODEL: &str = "gpt-5.1";
+    const REQUESTED_MODEL: &str = "gpt-5.2";
     const REQUESTED_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::Low;
-    const ROLE_MODEL: &str = "gpt-5.1-codex-max";
+    const ROLE_MODEL: &str = "gpt-5.4";
     const ROLE_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::High;

     let server = responses::start_mock_server().await;
@@ -2302,7 +2302,7 @@ config_file = "./custom-role.toml"
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             ..Default::default()
         })
         .await?;