Mirror of https://github.com/openai/codex.git, synced 2026-02-01 22:47:52 +00:00
Remove reasoning format (#8484)
This parameter isn't very useful. The old logic was:

```
if the model puts `**` in its reasoning, trim it and visualize the header.
if it couldn't be trimmed: don't render.
if the model doesn't support the format: don't render.
```

We can simplify this to:

```
if the header could be trimmed, visualize it.
if not, don't render it.
```
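For reference, a minimal self-contained sketch of the splitting rule that survives this change (the helper name `split_reasoning_header` is illustrative; the real `new_reasoning_summary_block` in `history_cell` wraps the two halves in a `ReasoningSummaryCell`):

```rust
/// Split a reasoning buffer of the form `**header**\n\nsummary` into
/// (header, summary). `None` means there is no complete `**…**` header,
/// or nothing follows it, so the caller renders the whole buffer as a
/// plain summary instead.
fn split_reasoning_header(buffer: &str) -> Option<(&str, &str)> {
    let buffer = buffer.trim();
    let open = buffer.find("**")?;
    let after_open = &buffer[open + 2..];
    let close = after_open.find("**")?;
    // Index just past the closing `**`.
    let after_close_idx = open + 2 + close + 2;
    if after_close_idx < buffer.len() {
        Some((&buffer[..after_close_idx], &buffer[after_close_idx..]))
    } else {
        // A header with nothing after it: no summary to show.
        None
    }
}

fn main() {
    // Header and summary present: split into the two halves.
    assert_eq!(
        split_reasoning_header("**High level plan**\n\nWe should fix the bug next."),
        Some(("**High level plan**", "\n\nWe should fix the bug next."))
    );
    // No closing `**`: fall back to rendering the buffer as-is.
    assert_eq!(split_reasoning_header("**High level reasoning without closing"), None);
    // Header but empty summary: also fall back.
    assert_eq!(split_reasoning_header("**High level reasoning without closing**"), None);
}
```

Models that never emit a `**…**` header simply hit the `None` paths, which is the same fallback the removed `ReasoningSummaryFormat` check used to gate.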
@@ -5,7 +5,6 @@ use codex_protocol::openai_models::ConfigShellToolType;
 use codex_protocol::openai_models::ModelInfo;
 use codex_protocol::openai_models::ModelPreset;
 use codex_protocol::openai_models::ModelVisibility;
-use codex_protocol::openai_models::ReasoningSummaryFormat;
 use codex_protocol::openai_models::TruncationPolicyConfig;
 use serde_json::json;
 use std::path::Path;

@@ -35,7 +34,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
         truncation_policy: TruncationPolicyConfig::bytes(10_000),
         supports_parallel_tool_calls: false,
         context_window: None,
-        reasoning_summary_format: ReasoningSummaryFormat::None,
         experimental_supported_tools: Vec::new(),
     }
 }

@@ -227,7 +227,6 @@ mod tests {
             "truncation_policy": {"mode": "bytes", "limit": 10_000},
             "supports_parallel_tool_calls": false,
             "context_window": null,
-            "reasoning_summary_format": "none",
             "experimental_supported_tools": [],
         }))
         .unwrap(),

@@ -10,7 +10,6 @@ use codex_protocol::openai_models::ModelVisibility;
 use codex_protocol::openai_models::ModelsResponse;
 use codex_protocol::openai_models::ReasoningEffort;
 use codex_protocol::openai_models::ReasoningEffortPreset;
-use codex_protocol::openai_models::ReasoningSummaryFormat;
 use codex_protocol::openai_models::TruncationPolicyConfig;
 use http::HeaderMap;
 use http::Method;

@@ -85,7 +84,6 @@ async fn models_client_hits_models_endpoint() {
             truncation_policy: TruncationPolicyConfig::bytes(10_000),
             supports_parallel_tool_calls: false,
             context_window: None,
-            reasoning_summary_format: ReasoningSummaryFormat::None,
             experimental_supported_tools: Vec::new(),
         }],
         etag: String::new(),

@@ -38,7 +38,6 @@ use codex_protocol::config_types::SandboxMode;
 use codex_protocol::config_types::TrustLevel;
 use codex_protocol::config_types::Verbosity;
 use codex_protocol::openai_models::ReasoningEffort;
-use codex_protocol::openai_models::ReasoningSummaryFormat;
 use codex_rmcp_client::OAuthCredentialsStoreMode;
 use codex_utils_absolute_path::AbsolutePathBuf;
 use codex_utils_absolute_path::AbsolutePathBufGuard;
@@ -303,9 +302,6 @@ pub struct Config {
     /// Optional override to force-enable reasoning summaries for the configured model.
     pub model_supports_reasoning_summaries: Option<bool>,
 
-    /// Optional override to force reasoning summary format for the configured model.
-    pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,
-
     /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
     pub model_verbosity: Option<Verbosity>,
 

@@ -786,9 +782,6 @@ pub struct ConfigToml {
     /// Override to force-enable reasoning summaries for the configured model.
     pub model_supports_reasoning_summaries: Option<bool>,
 
-    /// Override to force reasoning summary format for the configured model.
-    pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,
-
     /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
     pub chatgpt_base_url: Option<String>,
 

@@ -1379,7 +1372,6 @@ impl Config {
                 .or(cfg.model_reasoning_summary)
                 .unwrap_or_default(),
             model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
-            model_reasoning_summary_format: cfg.model_reasoning_summary_format.clone(),
             model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
             chatgpt_base_url: config_profile
                 .chatgpt_base_url
@@ -3189,7 +3181,6 @@ model_verbosity = "high"
             model_reasoning_effort: Some(ReasoningEffort::High),
             model_reasoning_summary: ReasoningSummary::Detailed,
             model_supports_reasoning_summaries: None,
-            model_reasoning_summary_format: None,
             model_verbosity: None,
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
             base_instructions: None,

@@ -3273,7 +3264,6 @@ model_verbosity = "high"
             model_reasoning_effort: None,
             model_reasoning_summary: ReasoningSummary::default(),
             model_supports_reasoning_summaries: None,
-            model_reasoning_summary_format: None,
             model_verbosity: None,
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
             base_instructions: None,

@@ -3372,7 +3362,6 @@ model_verbosity = "high"
             model_reasoning_effort: None,
             model_reasoning_summary: ReasoningSummary::default(),
             model_supports_reasoning_summaries: None,
-            model_reasoning_summary_format: None,
             model_verbosity: None,
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
             base_instructions: None,

@@ -3457,7 +3446,6 @@ model_verbosity = "high"
             model_reasoning_effort: Some(ReasoningEffort::High),
             model_reasoning_summary: ReasoningSummary::Detailed,
             model_supports_reasoning_summaries: None,
-            model_reasoning_summary_format: None,
             model_verbosity: Some(Verbosity::High),
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
             base_instructions: None,
@@ -354,7 +354,6 @@ mod tests {
             "truncation_policy": {"mode": "bytes", "limit": 10_000},
             "supports_parallel_tool_calls": false,
             "context_window": null,
-            "reasoning_summary_format": "none",
             "experimental_supported_tools": [],
         }))
         .expect("valid model")
@@ -3,7 +3,6 @@ use codex_protocol::openai_models::ApplyPatchToolType;
 use codex_protocol::openai_models::ConfigShellToolType;
 use codex_protocol::openai_models::ModelInfo;
 use codex_protocol::openai_models::ReasoningEffort;
-use codex_protocol::openai_models::ReasoningSummaryFormat;
 
 use crate::config::Config;
 use crate::truncate::TruncationPolicy;

@@ -48,9 +47,6 @@ pub struct ModelFamily {
     // The reasoning effort to use for this model family when none is explicitly chosen.
     pub default_reasoning_effort: Option<ReasoningEffort>,
 
-    // Define if we need a special handling of reasoning summary
-    pub reasoning_summary_format: ReasoningSummaryFormat,
-
     /// Whether this model supports parallel tool calls when using the
     /// Responses API.
     pub supports_parallel_tool_calls: bool,

@@ -88,9 +84,6 @@ impl ModelFamily {
         if let Some(supports_reasoning_summaries) = config.model_supports_reasoning_summaries {
             self.supports_reasoning_summaries = supports_reasoning_summaries;
         }
-        if let Some(reasoning_summary_format) = config.model_reasoning_summary_format.as_ref() {
-            self.reasoning_summary_format = reasoning_summary_format.clone();
-        }
         if let Some(context_window) = config.model_context_window {
             self.context_window = Some(context_window);
         }

@@ -128,7 +121,6 @@ impl ModelFamily {
             truncation_policy,
             supports_parallel_tool_calls,
             context_window,
-            reasoning_summary_format,
             experimental_supported_tools,
         } = model;
 

@@ -144,7 +136,6 @@ impl ModelFamily {
         self.truncation_policy = truncation_policy.into();
         self.supports_parallel_tool_calls = supports_parallel_tool_calls;
         self.context_window = context_window;
-        self.reasoning_summary_format = reasoning_summary_format;
         self.experimental_supported_tools = experimental_supported_tools;
     }
 

@@ -175,7 +166,6 @@ macro_rules! model_family {
             context_window: Some(CONTEXT_WINDOW_272K),
             auto_compact_token_limit: None,
             supports_reasoning_summaries: false,
-            reasoning_summary_format: ReasoningSummaryFormat::None,
             supports_parallel_tool_calls: false,
             apply_patch_tool_type: None,
             base_instructions: BASE_INSTRUCTIONS.to_string(),
@@ -250,7 +240,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
-            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
             base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
             experimental_supported_tools: vec![
                 "grep_files".to_string(),

@@ -270,7 +259,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
-            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
             base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
             apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
             shell_type: ConfigShellToolType::ShellCommand,

@@ -299,7 +287,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
-            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
             base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
             apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
             shell_type: ConfigShellToolType::ShellCommand,

@@ -312,7 +299,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
-            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
             base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
             apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
             shell_type: ConfigShellToolType::ShellCommand,

@@ -325,7 +311,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
-            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
             base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
             apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
             shell_type: ConfigShellToolType::ShellCommand,

@@ -341,7 +326,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
-            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
             base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
             apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
             shell_type: ConfigShellToolType::ShellCommand,
@@ -416,7 +400,6 @@ fn derive_default_model_family(model: &str) -> ModelFamily {
         context_window: None,
         auto_compact_token_limit: None,
         supports_reasoning_summaries: false,
-        reasoning_summary_format: ReasoningSummaryFormat::None,
         supports_parallel_tool_calls: false,
         apply_patch_tool_type: None,
         base_instructions: BASE_INSTRUCTIONS.to_string(),

@@ -460,7 +443,6 @@ mod tests {
             truncation_policy: TruncationPolicyConfig::bytes(10_000),
             supports_parallel_tool_calls: false,
             context_window: None,
-            reasoning_summary_format: ReasoningSummaryFormat::None,
             experimental_supported_tools: Vec::new(),
         }
     }

@@ -524,7 +506,6 @@ mod tests {
             experimental_supported_tools: vec!["local".to_string()],
             truncation_policy: TruncationPolicy::Bytes(10_000),
             context_window: Some(100),
-            reasoning_summary_format: ReasoningSummaryFormat::None,
         );
 
         let updated = family.with_remote_overrides(vec![ModelInfo {

@@ -549,7 +530,6 @@ mod tests {
             truncation_policy: TruncationPolicyConfig::tokens(2_000),
             supports_parallel_tool_calls: true,
             context_window: Some(400_000),
-            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
             experimental_supported_tools: vec!["alpha".to_string(), "beta".to_string()],
         }]);
 

@@ -568,10 +548,6 @@ mod tests {
         assert_eq!(updated.truncation_policy, TruncationPolicy::Tokens(2_000));
         assert!(updated.supports_parallel_tool_calls);
         assert_eq!(updated.context_window, Some(400_000));
-        assert_eq!(
-            updated.reasoning_summary_format,
-            ReasoningSummaryFormat::Experimental
-        );
         assert_eq!(
             updated.experimental_supported_tools,
             vec!["alpha".to_string(), "beta".to_string()]
@@ -14,7 +14,6 @@ use codex_core::models_manager::manager::ModelsManager;
 use codex_otel::otel_manager::OtelManager;
 use codex_protocol::ConversationId;
 use codex_protocol::config_types::ReasoningSummary;
-use codex_protocol::openai_models::ReasoningSummaryFormat;
 use codex_protocol::protocol::SessionSource;
 use codex_protocol::protocol::SubAgentSource;
 use core_test_support::load_default_config_for_test;

@@ -246,7 +245,6 @@ async fn responses_respects_model_family_overrides_from_config() {
     config.model_provider_id = provider.name.clone();
     config.model_provider = provider.clone();
     config.model_supports_reasoning_summaries = Some(true);
-    config.model_reasoning_summary_format = Some(ReasoningSummaryFormat::Experimental);
     config.model_reasoning_summary = ReasoningSummary::Detailed;
     let effort = config.model_reasoning_effort;
     let summary = config.model_reasoning_summary;
@@ -24,7 +24,6 @@ use codex_protocol::openai_models::ModelVisibility;
 use codex_protocol::openai_models::ModelsResponse;
 use codex_protocol::openai_models::ReasoningEffort;
 use codex_protocol::openai_models::ReasoningEffortPreset;
-use codex_protocol::openai_models::ReasoningSummaryFormat;
 use codex_protocol::openai_models::TruncationPolicyConfig;
 use codex_protocol::user_input::UserInput;
 use core_test_support::load_default_config_for_test;

@@ -83,7 +82,6 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
         truncation_policy: TruncationPolicyConfig::bytes(10_000),
         supports_parallel_tool_calls: false,
         context_window: None,
-        reasoning_summary_format: ReasoningSummaryFormat::None,
         experimental_supported_tools: Vec::new(),
     };
 

@@ -222,7 +220,6 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
         truncation_policy: TruncationPolicyConfig::bytes(10_000),
         supports_parallel_tool_calls: false,
         context_window: None,
-        reasoning_summary_format: ReasoningSummaryFormat::None,
         experimental_supported_tools: Vec::new(),
     };
     mount_models_once(

@@ -486,7 +483,6 @@ fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) ->
         truncation_policy: TruncationPolicyConfig::bytes(10_000),
         supports_parallel_tool_calls: false,
         context_window: None,
-        reasoning_summary_format: ReasoningSummaryFormat::None,
         experimental_supported_tools: Vec::new(),
     }
 }
@@ -125,14 +125,6 @@ pub enum ApplyPatchToolType {
     Function,
 }
 
-#[derive(Deserialize, Debug, Clone, PartialEq, Eq, Default, Hash, TS, JsonSchema, Serialize)]
-#[serde(rename_all = "snake_case")]
-pub enum ReasoningSummaryFormat {
-    #[default]
-    None,
-    Experimental,
-}
-
 /// Server-provided truncation policy metadata for a model.
 #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
 #[serde(rename_all = "snake_case")]

@@ -188,7 +180,6 @@ pub struct ModelInfo {
     pub truncation_policy: TruncationPolicyConfig,
     pub supports_parallel_tool_calls: bool,
     pub context_window: Option<i64>,
-    pub reasoning_summary_format: ReasoningSummaryFormat,
     pub experimental_supported_tools: Vec<String>,
 }
 
@@ -537,14 +537,11 @@ impl ChatWidget {
     }
 
     fn on_agent_reasoning_final(&mut self) {
-        let reasoning_summary_format = self.get_model_family().reasoning_summary_format;
         // At the end of a reasoning block, record transcript-only content.
         self.full_reasoning_buffer.push_str(&self.reasoning_buffer);
         if !self.full_reasoning_buffer.is_empty() {
-            let cell = history_cell::new_reasoning_summary_block(
-                self.full_reasoning_buffer.clone(),
-                reasoning_summary_format,
-            );
+            let cell =
+                history_cell::new_reasoning_summary_block(self.full_reasoning_buffer.clone());
             self.add_boxed_history(cell);
         }
         self.reasoning_buffer.clear();
@@ -33,7 +33,6 @@ use codex_core::protocol::McpAuthStatus;
 use codex_core::protocol::McpInvocation;
 use codex_core::protocol::SessionConfiguredEvent;
 use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
-use codex_protocol::openai_models::ReasoningSummaryFormat;
 use codex_protocol::plan_tool::PlanItemArg;
 use codex_protocol::plan_tool::StepStatus;
 use codex_protocol::plan_tool::UpdatePlanArgs;

@@ -1661,39 +1660,28 @@ pub(crate) fn new_view_image_tool_call(path: PathBuf, cwd: &Path) -> PlainHistor
     PlainHistoryCell { lines }
 }
 
-pub(crate) fn new_reasoning_summary_block(
-    full_reasoning_buffer: String,
-    reasoning_summary_format: ReasoningSummaryFormat,
-) -> Box<dyn HistoryCell> {
-    if reasoning_summary_format == ReasoningSummaryFormat::Experimental {
-        // Experimental format is following:
-        // ** header **
-        //
-        // reasoning summary
-        //
-        // So we need to strip header from reasoning summary
-        let full_reasoning_buffer = full_reasoning_buffer.trim();
-        if let Some(open) = full_reasoning_buffer.find("**") {
-            let after_open = &full_reasoning_buffer[(open + 2)..];
-            if let Some(close) = after_open.find("**") {
-                let after_close_idx = open + 2 + close + 2;
-                // if we don't have anything beyond `after_close_idx`
-                // then we don't have a summary to inject into history
-                if after_close_idx < full_reasoning_buffer.len() {
-                    let header_buffer = full_reasoning_buffer[..after_close_idx].to_string();
-                    let summary_buffer = full_reasoning_buffer[after_close_idx..].to_string();
-                    return Box::new(ReasoningSummaryCell::new(
-                        header_buffer,
-                        summary_buffer,
-                        false,
-                    ));
-                }
+pub(crate) fn new_reasoning_summary_block(full_reasoning_buffer: String) -> Box<dyn HistoryCell> {
+    let full_reasoning_buffer = full_reasoning_buffer.trim();
+    if let Some(open) = full_reasoning_buffer.find("**") {
+        let after_open = &full_reasoning_buffer[(open + 2)..];
+        if let Some(close) = after_open.find("**") {
+            let after_close_idx = open + 2 + close + 2;
+            // if we don't have anything beyond `after_close_idx`
+            // then we don't have a summary to inject into history
+            if after_close_idx < full_reasoning_buffer.len() {
+                let header_buffer = full_reasoning_buffer[..after_close_idx].to_string();
+                let summary_buffer = full_reasoning_buffer[after_close_idx..].to_string();
+                return Box::new(ReasoningSummaryCell::new(
+                    header_buffer,
+                    summary_buffer,
+                    false,
+                ));
+            }
         }
     }
     Box::new(ReasoningSummaryCell::new(
         "".to_string(),
-        full_reasoning_buffer,
+        full_reasoning_buffer.to_string(),
         true,
     ))
 }
@@ -1759,7 +1747,6 @@ mod tests {
     use codex_core::config::ConfigBuilder;
     use codex_core::config::types::McpServerConfig;
     use codex_core::config::types::McpServerTransportConfig;
-    use codex_core::models_manager::manager::ModelsManager;
     use codex_core::protocol::McpAuthStatus;
     use codex_protocol::parse_command::ParsedCommand;
     use dirs::home_dir;
@@ -2672,10 +2659,8 @@ mod tests {
     }
     #[test]
    fn reasoning_summary_block() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
         let cell = new_reasoning_summary_block(
             "**High level reasoning**\n\nDetailed reasoning goes here.".to_string(),
-            reasoning_format,
         );
 
         let rendered_display = render_lines(&cell.display_lines(80));

@@ -2687,11 +2672,7 @@ mod tests {
 
     #[test]
     fn reasoning_summary_block_returns_reasoning_cell_when_feature_disabled() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
-        let cell = new_reasoning_summary_block(
-            "Detailed reasoning goes here.".to_string(),
-            reasoning_format,
-        );
+        let cell = new_reasoning_summary_block("Detailed reasoning goes here.".to_string());
 
         let rendered = render_transcript(cell.as_ref());
         assert_eq!(rendered, vec!["• Detailed reasoning goes here."]);
@@ -2702,17 +2683,8 @@ mod tests {
         let mut config = test_config().await;
         config.model = Some("gpt-3.5-turbo".to_string());
-        config.model_supports_reasoning_summaries = Some(true);
-        config.model_reasoning_summary_format = Some(ReasoningSummaryFormat::Experimental);
-        let model_family =
-            ModelsManager::construct_model_family_offline(&config.model.clone().unwrap(), &config);
-        assert_eq!(
-            model_family.reasoning_summary_format,
-            ReasoningSummaryFormat::Experimental
-        );
 
         let cell = new_reasoning_summary_block(
             "**High level reasoning**\n\nDetailed reasoning goes here.".to_string(),
-            model_family.reasoning_summary_format,
         );
 
         let rendered_display = render_lines(&cell.display_lines(80));
@@ -2721,11 +2693,8 @@ mod tests {
 
     #[test]
     fn reasoning_summary_block_falls_back_when_header_is_missing() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
-        let cell = new_reasoning_summary_block(
-            "**High level reasoning without closing".to_string(),
-            reasoning_format,
-        );
+        let cell =
+            new_reasoning_summary_block("**High level reasoning without closing".to_string());
 
         let rendered = render_transcript(cell.as_ref());
         assert_eq!(rendered, vec!["• **High level reasoning without closing"]);

@@ -2733,18 +2702,14 @@ mod tests {
 
     #[test]
     fn reasoning_summary_block_falls_back_when_summary_is_missing() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
-        let cell = new_reasoning_summary_block(
-            "**High level reasoning without closing**".to_string(),
-            reasoning_format.clone(),
-        );
+        let cell =
+            new_reasoning_summary_block("**High level reasoning without closing**".to_string());
 
         let rendered = render_transcript(cell.as_ref());
         assert_eq!(rendered, vec!["• High level reasoning without closing"]);
 
         let cell = new_reasoning_summary_block(
             "**High level reasoning without closing**\n\n ".to_string(),
-            reasoning_format,
         );
 
         let rendered = render_transcript(cell.as_ref());

@@ -2753,10 +2718,8 @@ mod tests {
 
     #[test]
     fn reasoning_summary_block_splits_header_and_summary_when_present() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
         let cell = new_reasoning_summary_block(
             "**High level plan**\n\nWe should fix the bug next.".to_string(),
-            reasoning_format,
         );
 
         let rendered_display = render_lines(&cell.display_lines(80));
@@ -503,14 +503,11 @@ impl ChatWidget {
     }
 
     fn on_agent_reasoning_final(&mut self) {
-        let reasoning_summary_format = self.get_model_family().reasoning_summary_format;
         // At the end of a reasoning block, record transcript-only content.
         self.full_reasoning_buffer.push_str(&self.reasoning_buffer);
         if !self.full_reasoning_buffer.is_empty() {
-            let cell = history_cell::new_reasoning_summary_block(
-                self.full_reasoning_buffer.clone(),
-                reasoning_summary_format,
-            );
+            let cell =
+                history_cell::new_reasoning_summary_block(self.full_reasoning_buffer.clone());
             self.add_boxed_history(cell);
         }
         self.reasoning_buffer.clear();

@@ -29,7 +29,6 @@ use codex_core::protocol::McpAuthStatus;
 use codex_core::protocol::McpInvocation;
 use codex_core::protocol::SessionConfiguredEvent;
 use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
-use codex_protocol::openai_models::ReasoningSummaryFormat;
 use codex_protocol::plan_tool::PlanItemArg;
 use codex_protocol::plan_tool::StepStatus;
 use codex_protocol::plan_tool::UpdatePlanArgs;
@@ -1544,39 +1543,34 @@ pub(crate) fn new_view_image_tool_call(path: PathBuf, cwd: &Path) -> PlainHistor
     PlainHistoryCell { lines }
 }
 
-pub(crate) fn new_reasoning_summary_block(
-    full_reasoning_buffer: String,
-    reasoning_summary_format: ReasoningSummaryFormat,
-) -> Box<dyn HistoryCell> {
-    if reasoning_summary_format == ReasoningSummaryFormat::Experimental {
-        // Experimental format is following:
-        // ** header **
-        //
-        // reasoning summary
-        //
-        // So we need to strip header from reasoning summary
-        let full_reasoning_buffer = full_reasoning_buffer.trim();
-        if let Some(open) = full_reasoning_buffer.find("**") {
-            let after_open = &full_reasoning_buffer[(open + 2)..];
-            if let Some(close) = after_open.find("**") {
-                let after_close_idx = open + 2 + close + 2;
-                // if we don't have anything beyond `after_close_idx`
-                // then we don't have a summary to inject into history
-                if after_close_idx < full_reasoning_buffer.len() {
-                    let header_buffer = full_reasoning_buffer[..after_close_idx].to_string();
-                    let summary_buffer = full_reasoning_buffer[after_close_idx..].to_string();
-                    return Box::new(ReasoningSummaryCell::new(
-                        header_buffer,
-                        summary_buffer,
-                        false,
-                    ));
-                }
+pub(crate) fn new_reasoning_summary_block(full_reasoning_buffer: String) -> Box<dyn HistoryCell> {
+    // Experimental format is following:
+    // ** header **
+    //
+    // reasoning summary
+    //
+    // So we need to strip header from reasoning summary
+    let full_reasoning_buffer = full_reasoning_buffer.trim();
+    if let Some(open) = full_reasoning_buffer.find("**") {
+        let after_open = &full_reasoning_buffer[(open + 2)..];
+        if let Some(close) = after_open.find("**") {
+            let after_close_idx = open + 2 + close + 2;
+            // if we don't have anything beyond `after_close_idx`
+            // then we don't have a summary to inject into history
+            if after_close_idx < full_reasoning_buffer.len() {
+                let header_buffer = full_reasoning_buffer[..after_close_idx].to_string();
+                let summary_buffer = full_reasoning_buffer[after_close_idx..].to_string();
+                return Box::new(ReasoningSummaryCell::new(
+                    header_buffer,
+                    summary_buffer,
+                    false,
+                ));
+            }
         }
     }
     Box::new(ReasoningSummaryCell::new(
         "".to_string(),
-        full_reasoning_buffer,
+        full_reasoning_buffer.to_string(),
         true,
     ))
 }
@@ -1642,7 +1636,6 @@ mod tests {
     use codex_core::config::ConfigBuilder;
     use codex_core::config::types::McpServerConfig;
     use codex_core::config::types::McpServerTransportConfig;
-    use codex_core::models_manager::manager::ModelsManager;
     use codex_core::protocol::McpAuthStatus;
     use codex_protocol::parse_command::ParsedCommand;
     use dirs::home_dir;
@@ -2489,10 +2482,8 @@ mod tests {
     }
     #[test]
     fn reasoning_summary_block() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
         let cell = new_reasoning_summary_block(
             "**High level reasoning**\n\nDetailed reasoning goes here.".to_string(),
-            reasoning_format,
         );
 
         let rendered_display = render_lines(&cell.display_lines(80));

@@ -2504,11 +2495,7 @@ mod tests {
 
     #[test]
     fn reasoning_summary_block_returns_reasoning_cell_when_feature_disabled() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
-        let cell = new_reasoning_summary_block(
-            "Detailed reasoning goes here.".to_string(),
-            reasoning_format,
-        );
+        let cell = new_reasoning_summary_block("Detailed reasoning goes here.".to_string());
 
         let rendered = render_transcript(cell.as_ref());
         assert_eq!(rendered, vec!["• Detailed reasoning goes here."]);
@@ -2519,17 +2506,9 @@ mod tests {
         let mut config = test_config().await;
         config.model = Some("gpt-3.5-turbo".to_string());
-        config.model_supports_reasoning_summaries = Some(true);
-        config.model_reasoning_summary_format = Some(ReasoningSummaryFormat::Experimental);
-        let model_family =
-            ModelsManager::construct_model_family_offline(&config.model.clone().unwrap(), &config);
-        assert_eq!(
-            model_family.reasoning_summary_format,
-            ReasoningSummaryFormat::Experimental
-        );
 
         let cell = new_reasoning_summary_block(
             "**High level reasoning**\n\nDetailed reasoning goes here.".to_string(),
-            model_family.reasoning_summary_format,
         );
 
         let rendered_display = render_lines(&cell.display_lines(80));
@@ -2538,11 +2517,8 @@ mod tests {
 
     #[test]
     fn reasoning_summary_block_falls_back_when_header_is_missing() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
-        let cell = new_reasoning_summary_block(
-            "**High level reasoning without closing".to_string(),
-            reasoning_format,
-        );
+        let cell =
+            new_reasoning_summary_block("**High level reasoning without closing".to_string());
 
         let rendered = render_transcript(cell.as_ref());
         assert_eq!(rendered, vec!["• **High level reasoning without closing"]);

@@ -2550,18 +2526,14 @@ mod tests {
 
     #[test]
     fn reasoning_summary_block_falls_back_when_summary_is_missing() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
-        let cell = new_reasoning_summary_block(
-            "**High level reasoning without closing**".to_string(),
-            reasoning_format.clone(),
-        );
+        let cell =
+            new_reasoning_summary_block("**High level reasoning without closing**".to_string());
 
         let rendered = render_transcript(cell.as_ref());
         assert_eq!(rendered, vec!["• High level reasoning without closing"]);
 
         let cell = new_reasoning_summary_block(
             "**High level reasoning without closing**\n\n ".to_string(),
-            reasoning_format,
         );
 
         let rendered = render_transcript(cell.as_ref());

@@ -2570,10 +2542,8 @@ mod tests {
 
     #[test]
     fn reasoning_summary_block_splits_header_and_summary_when_present() {
-        let reasoning_format = ReasoningSummaryFormat::Experimental;
         let cell = new_reasoning_summary_block(
             "**High level plan**\n\nWe should fix the bug next.".to_string(),
-            reasoning_format,
         );
 
         let rendered_display = render_lines(&cell.display_lines(80));
@@ -1060,7 +1060,6 @@ Valid values:
 | `model_reasoning_summary` | `auto` \| `concise` \| `detailed` \| `none` | Reasoning summaries. |
 | `model_verbosity` | `low` \| `medium` \| `high` | GPT‑5 text verbosity (Responses API). |
 | `model_supports_reasoning_summaries` | boolean | Force‑enable reasoning summaries. |
-| `model_reasoning_summary_format` | `none` \| `experimental` | Force reasoning summary format. |
 | `chatgpt_base_url` | string | Base URL for ChatGPT auth flow. |
 | `experimental_instructions_file` | string (path) | Replace built‑in instructions (experimental). |
 | `experimental_use_exec_command_tool` | boolean | Use experimental exec command tool. |
@@ -49,9 +49,6 @@ model_verbosity = "medium"
 # Force-enable reasoning summaries for current model (default: false)
 model_supports_reasoning_summaries = false
 
-# Force reasoning summary format: none | experimental (default: none)
-model_reasoning_summary_format = "none"
-
 ################################################################################
 # Instruction Overrides
 ################################################################################