remove model_family from `config` (#7571)

- Remove `model_family` from `config`
- Make sure config elements related to `model_family`, such as reasoning
support, are still overridden
This commit is contained in:
Ahmed Ibrahim
2025-12-04 11:57:58 -08:00
committed by GitHub
parent ce0b38c056
commit 9b2055586d
18 changed files with 339 additions and 158 deletions

View File

@@ -27,7 +27,6 @@ use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::OLLAMA_OSS_PROVIDER_ID;
use crate::model_provider_info::built_in_model_providers;
use crate::openai_model_info::get_model_info;
use crate::openai_models::model_family::ModelFamily;
use crate::openai_models::model_family::find_family_for_model;
use crate::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
use crate::project_doc::LOCAL_PROJECT_DOC_FILENAME;
@@ -80,9 +79,6 @@ pub struct Config {
/// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max".
pub review_model: String,
// todo(aibrahim): remove this field
pub model_family: ModelFamily,
/// Size of the context window for the model, in tokens.
pub model_context_window: Option<i64>,
@@ -195,6 +191,7 @@ pub struct Config {
/// Additional filenames to try when looking for project-level docs.
pub project_doc_fallback_filenames: Vec<String>,
// todo(aibrahim): this should be used in the override model family
/// Token budget applied when storing tool/function outputs in the context manager.
pub tool_output_token_limit: Option<usize>,
@@ -225,6 +222,12 @@ pub struct Config {
/// request using the Responses API.
pub model_reasoning_summary: ReasoningSummary,
/// Optional override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,
/// Optional override to force reasoning summary format for the configured model.
pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,
@@ -1108,14 +1111,7 @@ impl Config {
.or(cfg.model)
.unwrap_or_else(default_model);
let mut model_family = find_family_for_model(&model);
if let Some(supports_reasoning_summaries) = cfg.model_supports_reasoning_summaries {
model_family.supports_reasoning_summaries = supports_reasoning_summaries;
}
if let Some(model_reasoning_summary_format) = cfg.model_reasoning_summary_format {
model_family.reasoning_summary_format = model_reasoning_summary_format;
}
let model_family = find_family_for_model(&model);
let openai_model_info = get_model_info(&model_family);
let model_context_window = cfg
@@ -1172,7 +1168,6 @@ impl Config {
let config = Self {
model,
review_model,
model_family,
model_context_window,
model_auto_compact_token_limit,
model_provider_id,
@@ -1228,6 +1223,8 @@ impl Config {
.model_reasoning_summary
.or(cfg.model_reasoning_summary)
.unwrap_or_default(),
model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
model_reasoning_summary_format: cfg.model_reasoning_summary_format.clone(),
model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
chatgpt_base_url: config_profile
.chatgpt_base_url
@@ -2953,7 +2950,6 @@ model_verbosity = "high"
Config {
model: "o3".to_string(),
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
model_family: find_family_for_model("o3"),
model_context_window: Some(200_000),
model_auto_compact_token_limit: Some(180_000),
model_provider_id: "openai".to_string(),
@@ -2981,6 +2977,8 @@ model_verbosity = "high"
show_raw_agent_reasoning: false,
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3027,7 +3025,6 @@ model_verbosity = "high"
let expected_gpt3_profile_config = Config {
model: "gpt-3.5-turbo".to_string(),
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
model_family: find_family_for_model("gpt-3.5-turbo"),
model_context_window: Some(16_385),
model_auto_compact_token_limit: Some(14_746),
model_provider_id: "openai-chat-completions".to_string(),
@@ -3055,6 +3052,8 @@ model_verbosity = "high"
show_raw_agent_reasoning: false,
model_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3116,7 +3115,6 @@ model_verbosity = "high"
let expected_zdr_profile_config = Config {
model: "o3".to_string(),
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
model_family: find_family_for_model("o3"),
model_context_window: Some(200_000),
model_auto_compact_token_limit: Some(180_000),
model_provider_id: "openai".to_string(),
@@ -3144,6 +3142,8 @@ model_verbosity = "high"
show_raw_agent_reasoning: false,
model_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3191,7 +3191,6 @@ model_verbosity = "high"
let expected_gpt5_profile_config = Config {
model: "gpt-5.1".to_string(),
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
model_family: find_family_for_model("gpt-5.1"),
model_context_window: Some(272_000),
model_auto_compact_token_limit: Some(244_800),
model_provider_id: "openai".to_string(),
@@ -3219,6 +3218,8 @@ model_verbosity = "high"
show_raw_agent_reasoning: false,
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: Some(Verbosity::High),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,