Mirror of https://github.com/openai/codex.git, synced 2026-04-30 01:16:54 +00:00
Update models.json (#18586)
- Replace the active models-manager catalog with the deleted core catalog contents.
- Replace stale hardcoded test model slugs with current bundled model slugs.
- Keep this as a stacked change on top of the cleanup PR.
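Every hunk below follows the same pattern: a test either hardcodes a model slug in a const or passes an explicit override through ThreadStartParams, and the commit swaps the stale slug for a current bundled one. A minimal sketch of that pattern, reusing the McpProcess/ThreadStartParams API visible in the diff (the helper name is made up for illustration):

    // Sketch only: the types and the request method appear in the diff below;
    // the slug mirrors the new bundled model this commit switches the tests to.
    const REQUESTED_MODEL: &str = "gpt-5.4"; // was "gpt-5.1-codex-max"

    async fn start_thread_with_override(mcp: &mut McpProcess) -> Result<()> {
        let _req_id = mcp
            .send_thread_start_request(ThreadStartParams {
                model: Some(REQUESTED_MODEL.to_string()),
                ..Default::default()
            })
            .await?;
        // The caller then waits on the response and asserts the server reports
        // the same slug (see the assert_eq!(model, "gpt-5.4") hunk below).
        Ok(())
    }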
@@ -21,8 +21,8 @@ use tempfile::TempDir;
 use tokio::time::timeout;
 
 const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
-const REQUESTED_MODEL: &str = "gpt-5.1-codex-max";
-const SERVER_MODEL: &str = "gpt-5.2-codex";
+const REQUESTED_MODEL: &str = "gpt-5.4";
+const SERVER_MODEL: &str = "gpt-5.3-codex";
 
 #[tokio::test]
 async fn openai_model_header_mismatch_emits_model_rerouted_notification_v2() -> Result<()> {
@@ -125,7 +125,7 @@ stream_max_retries = 0
 async fn start_thread(mcp: &mut McpProcess) -> Result<String> {
     let req_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1".to_string()),
+            model: Some("gpt-5.2".to_string()),
             ..Default::default()
         })
         .await?;
@@ -131,7 +131,7 @@ async fn thread_resume_rejects_unmaterialized_thread() -> Result<()> {
     // Start a thread.
     let start_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1-codex-max".to_string()),
+            model: Some("gpt-5.4".to_string()),
             ..Default::default()
         })
         .await?;
@@ -207,7 +207,7 @@ async fn thread_resume_tracks_thread_initialized_analytics() -> Result<()> {
 
     let payload = wait_for_analytics_payload(&server, DEFAULT_READ_TIMEOUT).await?;
     let event = thread_initialized_event(&payload)?;
-    assert_basic_thread_initialized_event(event, &thread.id, "gpt-5.2-codex", "resumed");
+    assert_basic_thread_initialized_event(event, &thread.id, "gpt-5.3-codex", "resumed");
     Ok(())
 }
 
@@ -556,7 +556,7 @@ async fn thread_resume_prefers_persisted_git_metadata_for_local_threads() -> Res
         &config_toml,
         format!(
            r#"
-model = "gpt-5.2-codex"
+model = "gpt-5.3-codex"
 approval_policy = "never"
 sandbox_mode = "read-only"
 
@@ -922,7 +922,7 @@ async fn thread_resume_keeps_in_flight_turn_streaming() -> Result<()> {
 
     let start_id = primary
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1-codex-max".to_string()),
+            model: Some("gpt-5.4".to_string()),
             ..Default::default()
         })
         .await?;
@@ -1029,7 +1029,7 @@ async fn thread_resume_rejects_history_when_thread_is_running() -> Result<()> {
 
     let start_id = primary
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1-codex-max".to_string()),
+            model: Some("gpt-5.4".to_string()),
             ..Default::default()
         })
         .await?;
@@ -1145,7 +1145,7 @@ async fn thread_resume_rejects_mismatched_path_when_thread_is_running() -> Resul
 
     let start_id = primary
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1-codex-max".to_string()),
+            model: Some("gpt-5.4".to_string()),
             ..Default::default()
         })
         .await?;
@@ -1251,7 +1251,7 @@ async fn thread_resume_rejoins_running_thread_even_with_override_mismatch() -> R
 
     let start_id = primary
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1-codex-max".to_string()),
+            model: Some("gpt-5.4".to_string()),
             ..Default::default()
         })
         .await?;
@@ -1320,7 +1320,7 @@ async fn thread_resume_rejoins_running_thread_even_with_override_mismatch() -> R
         .await??;
     let ThreadResumeResponse { thread, model, .. } =
         to_response::<ThreadResumeResponse>(resume_resp)?;
-    assert_eq!(model, "gpt-5.1-codex-max");
+    assert_eq!(model, "gpt-5.4");
     // The running-thread resume response is queued onto the thread listener task.
     // If the in-flight turn completes before that queued command runs, the response
     // can legitimately observe the thread as idle.
@@ -1364,7 +1364,7 @@ async fn thread_resume_replays_pending_command_execution_request_approval() -> R
 
     let start_id = primary
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1-codex-max".to_string()),
+            model: Some("gpt-5.4".to_string()),
             ..Default::default()
         })
         .await?;
@@ -1501,7 +1501,7 @@ async fn thread_resume_replays_pending_file_change_request_approval() -> Result<
 
     let start_id = primary
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1-codex-max".to_string()),
+            model: Some("gpt-5.4".to_string()),
             cwd: Some(workspace.to_string_lossy().into_owned()),
             ..Default::default()
         })
@@ -1851,7 +1851,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
 
     let start_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1-codex-max".to_string()),
+            model: Some("gpt-5.4".to_string()),
             ..Default::default()
         })
         .await?;
@@ -1971,7 +1971,7 @@ async fn start_materialized_thread_and_restart(
 
     let start_id = first_mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1-codex-max".to_string()),
+            model: Some("gpt-5.4".to_string()),
             ..Default::default()
         })
         .await?;
@@ -2045,7 +2045,7 @@ async fn thread_resume_accepts_personality_override() -> Result<()> {
 
     let start_id = primary
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             ..Default::default()
         })
         .await?;
@@ -2083,7 +2083,7 @@ async fn thread_resume_accepts_personality_override() -> Result<()> {
     let resume_id = secondary
         .send_thread_resume_request(ThreadResumeParams {
             thread_id: thread.id,
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             personality: Some(Personality::Friendly),
             ..Default::default()
         })
@@ -2145,7 +2145,7 @@ fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io
         config_toml,
         format!(
            r#"
-model = "gpt-5.2-codex"
+model = "gpt-5.3-codex"
 approval_policy = "never"
 sandbox_mode = "read-only"
 
@@ -2182,7 +2182,7 @@ fn create_config_toml_with_chatgpt_base_url(
         config_toml,
         format!(
            r#"
-model = "gpt-5.2-codex"
+model = "gpt-5.3-codex"
 approval_policy = "never"
 sandbox_mode = "read-only"
 chatgpt_base_url = "{chatgpt_base_url}"
@@ -2213,7 +2213,7 @@ fn create_config_toml_with_required_broken_mcp(
         config_toml,
         format!(
            r#"
-model = "gpt-5.2-codex"
+model = "gpt-5.3-codex"
 approval_policy = "never"
 sandbox_mode = "read-only"
 

@@ -64,7 +64,7 @@ async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
     // Start a v2 thread with an explicit model override.
     let req_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1".to_string()),
+            model: Some("gpt-5.2".to_string()),
             ..Default::default()
         })
         .await?;
@@ -414,7 +414,7 @@ async fn thread_start_ephemeral_remains_pathless() -> Result<()> {
 
     let req_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.1".to_string()),
+            model: Some("gpt-5.2".to_string()),
             ephemeral: Some(true),
             ..Default::default()
         })
@@ -894,7 +894,7 @@ async fn turn_start_accepts_collaboration_mode_override_v2() -> Result<()> {
 
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             ..Default::default()
         })
         .await?;
@@ -977,7 +977,7 @@ async fn turn_start_uses_thread_feature_overrides_for_collaboration_mode_instruc
 
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             config: Some(HashMap::from([(
                 "features.default_mode_request_user_input".to_string(),
                 json!(true),
@@ -1268,7 +1268,7 @@ async fn turn_start_uses_migrated_pragmatic_personality_without_override_v2() ->
 
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             ..Default::default()
         })
         .await?;
@@ -2033,7 +2033,7 @@ async fn turn_start_emits_spawn_agent_item_with_model_metadata_v2() -> Result<()
     const CHILD_PROMPT: &str = "child: do work";
     const PARENT_PROMPT: &str = "spawn a child and continue";
     const SPAWN_CALL_ID: &str = "spawn-call-1";
-    const REQUESTED_MODEL: &str = "gpt-5.1";
+    const REQUESTED_MODEL: &str = "gpt-5.2";
     const REQUESTED_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::Low;
 
     let server = responses::start_mock_server().await;
@@ -2088,7 +2088,7 @@ async fn turn_start_emits_spawn_agent_item_with_model_metadata_v2() -> Result<()
 
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             ..Default::default()
         })
         .await?;
@@ -2227,9 +2227,9 @@ async fn turn_start_emits_spawn_agent_item_with_effective_role_model_metadata_v2
     const CHILD_PROMPT: &str = "child: do work";
     const PARENT_PROMPT: &str = "spawn a child and continue";
     const SPAWN_CALL_ID: &str = "spawn-call-1";
-    const REQUESTED_MODEL: &str = "gpt-5.1";
+    const REQUESTED_MODEL: &str = "gpt-5.2";
     const REQUESTED_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::Low;
-    const ROLE_MODEL: &str = "gpt-5.1-codex-max";
+    const ROLE_MODEL: &str = "gpt-5.4";
     const ROLE_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::High;
 
     let server = responses::start_mock_server().await;
@@ -2302,7 +2302,7 @@ config_file = "./custom-role.toml"
 
     let thread_req = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5.2-codex".to_string()),
+            model: Some("gpt-5.3-codex".to_string()),
             ..Default::default()
         })
         .await?;