Mirror of https://github.com/openai/codex.git, synced 2026-05-01 01:47:18 +00:00
Update models.json (#18586)
- Replace the active models-manager catalog with the deleted core catalog contents.
- Replace stale hardcoded test model slugs with current bundled model slugs.
- Keep this as a stacked change on top of the cleanup PR.
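For orientation, a minimal sketch of the two mechanical rules the diff below applies. current_slug and usable_context_window are hypothetical illustration helpers, not code from the repo:

// Hypothetical sketch, not repo code: the slug rewrite applied throughout the
// test suite. Most stale slugs collapse onto the current bundled "gpt-5.4";
// configured_verbosity_not_sent_for_models_without_support instead moves to
// "test-no-verbosity", which this simple map deliberately does not capture.
fn current_slug(stale: &str) -> &str {
    match stale {
        "gpt-5.1" | "gpt-5.1-codex" | "gpt-5.1-codex-max" => "gpt-5.4",
        other => other,
    }
}

// The snapshot's expected "model_context_window": 258400 is 95% of the
// 272_000-token window configured in the final hunk: 272_000 * 95 / 100.
fn usable_context_window(window: u64) -> u64 {
    window * 95 / 100
}

fn main() {
    assert_eq!(current_slug("gpt-5.1-codex-max"), "gpt-5.4");
    assert_eq!(usable_context_window(272_000), 258_400);
}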
@@ -537,7 +537,7 @@ async fn resume_replays_legacy_js_repl_image_rollout_shapes() {
     .await;

     let codex_home = Arc::new(TempDir::new().unwrap());
-    let mut builder = test_codex().with_model("gpt-5.1");
+    let mut builder = test_codex().with_model("gpt-5.4");
     let test = builder
         .resume(&server, codex_home, session_path.clone())
         .await
@@ -688,7 +688,7 @@ async fn resume_replays_image_tool_outputs_with_detail() {
     .await;

     let codex_home = Arc::new(TempDir::new().unwrap());
-    let mut builder = test_codex().with_model("gpt-5.1");
+    let mut builder = test_codex().with_model("gpt-5.4");
     let test = builder
         .resume(&server, codex_home, session_path.clone())
         .await
@@ -1493,7 +1493,7 @@ async fn includes_configured_effort_in_request() -> anyhow::Result<()> {
     )
     .await;
     let TestCodex { codex, .. } = test_codex()
-        .with_model("gpt-5.1-codex")
+        .with_model("gpt-5.4")
         .with_config(|config| {
             config.model_reasoning_effort = Some(ReasoningEffort::Medium);
         })
@@ -1538,10 +1538,7 @@ async fn includes_no_effort_in_request() -> anyhow::Result<()> {
         sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
     )
     .await;
-    let TestCodex { codex, .. } = test_codex()
-        .with_model("gpt-5.1-codex")
-        .build(&server)
-        .await?;
+    let TestCodex { codex, .. } = test_codex().with_model("gpt-5.4").build(&server).await?;

     codex
         .submit(Op::UserInput {
@@ -1582,7 +1579,7 @@ async fn includes_default_reasoning_effort_in_request_when_defined_by_model_info
         sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
     )
     .await;
-    let TestCodex { codex, .. } = test_codex().with_model("gpt-5.1").build(&server).await?;
+    let TestCodex { codex, .. } = test_codex().with_model("gpt-5.4").build(&server).await?;

     codex
         .submit(Op::UserInput {
@@ -1627,15 +1624,12 @@ async fn user_turn_collaboration_mode_overrides_model_and_effort() -> anyhow::Re
         config,
         session_configured,
         ..
-    } = test_codex()
-        .with_model("gpt-5.1-codex")
-        .build(&server)
-        .await?;
+    } = test_codex().with_model("gpt-5.4").build(&server).await?;

     let collaboration_mode = CollaborationMode {
         mode: ModeKind::Default,
         settings: Settings {
-            model: "gpt-5.1".to_string(),
+            model: "gpt-5.4".to_string(),
             reasoning_effort: Some(ReasoningEffort::High),
             developer_instructions: None,
         },
@@ -1668,7 +1662,7 @@ async fn user_turn_collaboration_mode_overrides_model_and_effort() -> anyhow::Re
     wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

     let request_body = resp_mock.single_request().body_json();
-    assert_eq!(request_body["model"].as_str(), Some("gpt-5.1"));
+    assert_eq!(request_body["model"].as_str(), Some("gpt-5.4"));
     assert_eq!(
         request_body
             .get("reasoning")
@@ -1742,8 +1736,8 @@ async fn user_turn_explicit_reasoning_summary_overrides_model_catalog_default()
     let model = model_catalog
         .models
         .iter_mut()
-        .find(|model| model.slug == "gpt-5.1")
-        .expect("gpt-5.1 exists in bundled models.json");
+        .find(|model| model.slug == "gpt-5.4")
+        .expect("gpt-5.4 exists in bundled models.json");
     model.supports_reasoning_summaries = true;
     model.default_reasoning_summary = ReasoningSummary::Detailed;

@@ -1753,7 +1747,7 @@ async fn user_turn_explicit_reasoning_summary_overrides_model_catalog_default()
         session_configured,
         ..
     } = test_codex()
-        .with_model("gpt-5.1")
+        .with_model("gpt-5.4")
         .with_config(move |config| {
             config.model_catalog = Some(model_catalog);
         })
@@ -1856,13 +1850,13 @@ async fn reasoning_summary_none_overrides_model_catalog_default() -> anyhow::Res
     let model = model_catalog
         .models
         .iter_mut()
-        .find(|model| model.slug == "gpt-5.1")
-        .expect("gpt-5.1 exists in bundled models.json");
+        .find(|model| model.slug == "gpt-5.4")
+        .expect("gpt-5.4 exists in bundled models.json");
     model.supports_reasoning_summaries = true;
     model.default_reasoning_summary = ReasoningSummary::Detailed;

     let TestCodex { codex, .. } = test_codex()
-        .with_model("gpt-5.1")
+        .with_model("gpt-5.4")
         .with_config(move |config| {
             config.model_reasoning_summary = Some(ReasoningSummary::None);
             config.model_catalog = Some(model_catalog);
@@ -1905,7 +1899,7 @@ async fn includes_default_verbosity_in_request() -> anyhow::Result<()> {
         sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
     )
     .await;
-    let TestCodex { codex, .. } = test_codex().with_model("gpt-5.1").build(&server).await?;
+    let TestCodex { codex, .. } = test_codex().with_model("gpt-5.4").build(&server).await?;

     codex
         .submit(Op::UserInput {
@@ -1946,7 +1940,7 @@ async fn configured_verbosity_not_sent_for_models_without_support() -> anyhow::R
     )
     .await;
     let TestCodex { codex, .. } = test_codex()
-        .with_model("gpt-5.1-codex")
+        .with_model("test-no-verbosity")
         .with_config(|config| {
             config.model_verbosity = Some(Verbosity::High);
         })
@@ -1991,7 +1985,7 @@ async fn configured_verbosity_is_sent() -> anyhow::Result<()> {
     )
     .await;
     let TestCodex { codex, .. } = test_codex()
-        .with_model("gpt-5.1")
+        .with_model("gpt-5.4")
         .with_config(|config| {
             config.model_verbosity = Some(Verbosity::High);
         })
@@ -2412,7 +2406,7 @@ async fn token_count_includes_rate_limits_snapshot() {
                 "reasoning_output_tokens": 0,
                 "total_tokens": 123
             },
-            // Default model is gpt-5.1-codex-max in tests → 95% usable context window
+            // Default model is gpt-5.4 in tests → 95% usable context window
             "model_context_window": 258400
         },
         "rate_limits": {
@@ -2577,7 +2571,7 @@ async fn context_window_error_sets_total_tokens_to_model_window() -> anyhow::Res

     let TestCodex { codex, .. } = test_codex()
         .with_config(|config| {
-            config.model = Some("gpt-5.1".to_string());
+            config.model = Some("gpt-5.4".to_string());
             config.model_context_window = Some(272_000);
         })
         .build(&server)