mirror of
https://github.com/openai/codex.git
synced 2026-04-29 00:55:38 +00:00
Use model catalog default for reasoning summary fallback (#12873)
## Summary

- Make `Config.model_reasoning_summary` optional, so an unset value means "use the model default".
- Resolve the optional config value to a concrete summary when building `TurnContext`.
- Add protocol support for `default_reasoning_summary` in model metadata.

## Validation

- `cargo test -p codex-core --lib client::tests -- --nocapture`

---------

Co-authored-by: Codex <noreply@openai.com>
This commit is contained in:
@@ -31,6 +31,7 @@ use codex_protocol::models::ReasoningItemContent;
|
||||
use codex_protocol::models::ReasoningItemReasoningSummary;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::models::WebSearchAction;
|
||||
use codex_protocol::openai_models::ModelsResponse;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use codex_protocol::protocol::Op;
|
||||
@@ -980,7 +981,9 @@ async fn user_turn_collaboration_mode_overrides_model_and_effort() -> anyhow::Re
|
||||
sandbox_policy: config.permissions.sandbox_policy.get().clone(),
|
||||
model: session_configured.model.clone(),
|
||||
effort: Some(ReasoningEffort::Low),
|
||||
summary: config.model_reasoning_summary,
|
||||
summary: config
|
||||
.model_reasoning_summary
|
||||
.unwrap_or(ReasoningSummary::Auto),
|
||||
collaboration_mode: Some(collaboration_mode),
|
||||
final_output_json_schema: None,
|
||||
personality: None,
|
||||
@@ -1014,7 +1017,7 @@ async fn configured_reasoning_summary_is_sent() -> anyhow::Result<()> {
|
||||
.await;
|
||||
let TestCodex { codex, .. } = test_codex()
|
||||
.with_config(|config| {
|
||||
config.model_reasoning_summary = ReasoningSummary::Concise;
|
||||
config.model_reasoning_summary = Some(ReasoningSummary::Concise);
|
||||
})
|
||||
.build(&server)
|
||||
.await?;
|
||||
@@ -1058,7 +1061,7 @@ async fn reasoning_summary_is_omitted_when_disabled() -> anyhow::Result<()> {
|
||||
.await;
|
||||
let TestCodex { codex, .. } = test_codex()
|
||||
.with_config(|config| {
|
||||
config.model_reasoning_summary = ReasoningSummary::None;
|
||||
config.model_reasoning_summary = Some(ReasoningSummary::None);
|
||||
})
|
||||
.build(&server)
|
||||
.await?;
|
||||
@@ -1089,6 +1092,60 @@ async fn reasoning_summary_is_omitted_when_disabled() -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn reasoning_summary_none_overrides_model_catalog_default() -> anyhow::Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
let server = MockServer::start().await;
|
||||
|
||||
let resp_mock = mount_sse_once(
|
||||
&server,
|
||||
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
|
||||
)
|
||||
.await;
|
||||
|
||||
let mut model_catalog: ModelsResponse =
|
||||
serde_json::from_str(include_str!("../../models.json")).expect("valid models.json");
|
||||
let model = model_catalog
|
||||
.models
|
||||
.iter_mut()
|
||||
.find(|model| model.slug == "gpt-5.1")
|
||||
.expect("gpt-5.1 exists in bundled models.json");
|
||||
model.supports_reasoning_summaries = true;
|
||||
model.default_reasoning_summary = ReasoningSummary::Detailed;
|
||||
|
||||
let TestCodex { codex, .. } = test_codex()
|
||||
.with_model("gpt-5.1")
|
||||
.with_config(move |config| {
|
||||
config.model_reasoning_summary = Some(ReasoningSummary::None);
|
||||
config.model_catalog = Some(model_catalog);
|
||||
})
|
||||
.build(&server)
|
||||
.await?;
|
||||
|
||||
codex
|
||||
.submit(Op::UserInput {
|
||||
items: vec![UserInput::Text {
|
||||
text: "hello".into(),
|
||||
text_elements: Vec::new(),
|
||||
}],
|
||||
final_output_json_schema: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
|
||||
|
||||
let request_body = resp_mock.single_request().body_json();
|
||||
pretty_assertions::assert_eq!(
|
||||
request_body
|
||||
.get("reasoning")
|
||||
.and_then(|reasoning| reasoning.get("summary")),
|
||||
None
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn includes_default_verbosity_in_request() -> anyhow::Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
@@ -1441,7 +1498,14 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
|
||||
});
|
||||
|
||||
let mut stream = client_session
|
||||
.stream(&prompt, &model_info, &otel_manager, effort, summary, None)
|
||||
.stream(
|
||||
&prompt,
|
||||
&model_info,
|
||||
&otel_manager,
|
||||
effort,
|
||||
summary.unwrap_or(ReasoningSummary::Auto),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.expect("responses stream to start");
|
||||
|
||||
|
||||
@@ -169,7 +169,10 @@ async fn collaboration_instructions_added_on_user_turn() -> Result<()> {
|
||||
sandbox_policy: test.config.permissions.sandbox_policy.get().clone(),
|
||||
model: test.session_configured.model.clone(),
|
||||
effort: None,
|
||||
summary: test.config.model_reasoning_summary,
|
||||
summary: test
|
||||
.config
|
||||
.model_reasoning_summary
|
||||
.unwrap_or(codex_protocol::config_types::ReasoningSummary::Auto),
|
||||
collaboration_mode: Some(collaboration_mode),
|
||||
final_output_json_schema: None,
|
||||
personality: None,
|
||||
@@ -275,7 +278,10 @@ async fn user_turn_overrides_collaboration_instructions_after_override() -> Resu
|
||||
sandbox_policy: test.config.permissions.sandbox_policy.get().clone(),
|
||||
model: test.session_configured.model.clone(),
|
||||
effort: None,
|
||||
summary: test.config.model_reasoning_summary,
|
||||
summary: test
|
||||
.config
|
||||
.model_reasoning_summary
|
||||
.unwrap_or(codex_protocol::config_types::ReasoningSummary::Auto),
|
||||
collaboration_mode: Some(turn_mode),
|
||||
final_output_json_schema: None,
|
||||
personality: None,
|
||||
|
||||
@@ -234,6 +234,7 @@ async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<
|
||||
base_instructions: "base instructions".to_string(),
|
||||
model_messages: None,
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
@@ -391,6 +392,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
|
||||
base_instructions: "base instructions".to_string(),
|
||||
model_messages: None,
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
|
||||
@@ -336,6 +336,7 @@ fn test_remote_model(slug: &str, priority: i32) -> ModelInfo {
|
||||
base_instructions: "base instructions".to_string(),
|
||||
model_messages: None,
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
|
||||
@@ -599,6 +599,7 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
|
||||
}),
|
||||
}),
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
@@ -706,6 +707,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
|
||||
}),
|
||||
}),
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
|
||||
@@ -761,7 +761,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
|
||||
sandbox_policy: default_sandbox_policy.clone(),
|
||||
model: default_model.clone(),
|
||||
effort: default_effort,
|
||||
summary: default_summary,
|
||||
summary: default_summary.unwrap_or(ReasoningSummary::Auto),
|
||||
collaboration_mode: None,
|
||||
final_output_json_schema: None,
|
||||
personality: None,
|
||||
@@ -780,7 +780,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
|
||||
sandbox_policy: default_sandbox_policy.clone(),
|
||||
model: default_model.clone(),
|
||||
effort: default_effort,
|
||||
summary: default_summary,
|
||||
summary: default_summary.unwrap_or(ReasoningSummary::Auto),
|
||||
collaboration_mode: None,
|
||||
final_output_json_schema: None,
|
||||
personality: None,
|
||||
@@ -875,7 +875,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
|
||||
sandbox_policy: default_sandbox_policy.clone(),
|
||||
model: default_model,
|
||||
effort: default_effort,
|
||||
summary: default_summary,
|
||||
summary: default_summary.unwrap_or(ReasoningSummary::Auto),
|
||||
collaboration_mode: None,
|
||||
final_output_json_schema: None,
|
||||
personality: None,
|
||||
|
||||
@@ -175,7 +175,9 @@ async fn remote_models_long_model_slug_is_sent_with_high_reasoning() -> Result<(
|
||||
sandbox_policy: config.permissions.sandbox_policy.get().clone(),
|
||||
model: requested_model.to_string(),
|
||||
effort: None,
|
||||
summary: config.model_reasoning_summary,
|
||||
summary: config
|
||||
.model_reasoning_summary
|
||||
.unwrap_or(ReasoningSummary::Auto),
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -227,7 +229,9 @@ async fn namespaced_model_slug_uses_catalog_metadata_without_fallback_warning()
|
||||
sandbox_policy: config.permissions.sandbox_policy.get().clone(),
|
||||
model: requested_model.to_string(),
|
||||
effort: None,
|
||||
summary: config.model_reasoning_summary,
|
||||
summary: config
|
||||
.model_reasoning_summary
|
||||
.unwrap_or(ReasoningSummary::Auto),
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -284,6 +288,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
|
||||
base_instructions: "base instructions".to_string(),
|
||||
model_messages: None,
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
@@ -520,6 +525,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
|
||||
base_instructions: remote_base.to_string(),
|
||||
model_messages: None,
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
@@ -980,6 +986,7 @@ fn test_remote_model_with_policy(
|
||||
base_instructions: "base instructions".to_string(),
|
||||
model_messages: None,
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
|
||||
@@ -4,6 +4,7 @@ use codex_core::CodexAuth;
|
||||
use codex_core::NewThread;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::config_types::ModeKind;
|
||||
use codex_protocol::config_types::ReasoningSummary;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use codex_protocol::protocol::InitialHistory;
|
||||
use codex_protocol::protocol::ResumedHistory;
|
||||
@@ -34,7 +35,9 @@ fn resume_history(
|
||||
personality: None,
|
||||
collaboration_mode: None,
|
||||
effort: config.model_reasoning_effort,
|
||||
summary: config.model_reasoning_summary,
|
||||
summary: config
|
||||
.model_reasoning_summary
|
||||
.unwrap_or(ReasoningSummary::Auto),
|
||||
user_instructions: None,
|
||||
developer_instructions: None,
|
||||
final_output_json_schema: None,
|
||||
|
||||
@@ -398,6 +398,7 @@ async fn stdio_image_responses_are_sanitized_for_text_only_model() -> anyhow::Re
|
||||
base_instructions: "base instructions".to_string(),
|
||||
model_messages: None,
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
|
||||
@@ -675,6 +675,7 @@ async fn view_image_tool_returns_unsupported_message_for_text_only_model() -> an
|
||||
base_instructions: "base instructions".to_string(),
|
||||
model_messages: None,
|
||||
supports_reasoning_summaries: false,
|
||||
default_reasoning_summary: ReasoningSummary::Auto,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
|
||||
Reference in New Issue
Block a user