Use current model for review (#9179)

Instead of having a hard-coded default review model, use the current
model for running `/review` unless one is specified in the config.

Also inherit the current reasoning effort.
pakrym-oai committed 2026-01-14 08:59:41 -08:00 (committed by GitHub)
parent e1447c3009, commit 92472e7baa
5 changed files with 75 additions and 26 deletions
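
The net effect, as a minimal Rust sketch (the function and parameter names below are illustrative, not taken from this diff): a CLI override wins, then the `review_model` config value, and only when both are absent does `/review` fall back to the session's current model.

/// Illustrative sketch of the new precedence; none of these names
/// appear in the diff itself.
fn resolve_review_model(
    cli_override: Option<String>,
    configured: Option<String>,
    current_model: &str,
) -> String {
    cli_override
        .or(configured)
        .unwrap_or_else(|| current_model.to_string())
}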


@@ -76,8 +76,6 @@ pub use constraint::ConstraintResult;
 pub use service::ConfigService;
 pub use service::ConfigServiceError;
-const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex-max";
 pub use codex_git::GhostSnapshotConfig;
 /// Maximum number of bytes of the documentation that will be embedded. Larger
@@ -108,8 +106,8 @@ pub struct Config {
     /// Optional override of model selection.
     pub model: Option<String>,
-    /// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max".
-    pub review_model: String,
+    /// Model used specifically for review sessions.
+    pub review_model: Option<String>,
     /// Size of the context window for the model, in tokens.
     pub model_context_window: Option<i64>,
@@ -1413,10 +1411,7 @@ impl Config {
         )?;
         let compact_prompt = compact_prompt.or(file_compact_prompt);
-        // Default review model when not set in config; allow CLI override to take precedence.
-        let review_model = override_review_model
-            .or(cfg.review_model)
-            .unwrap_or_else(default_review_model);
+        let review_model = override_review_model.or(cfg.review_model);
         let check_for_update_on_startup = cfg.check_for_update_on_startup.unwrap_or(true);
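
Note that the fallback itself is not in this file: `Config::review_model` now simply stays `None` when unset, and the substitution of the session's current model (plus, per the commit message, its reasoning effort) happens at the `/review` call site in one of the other changed files. A hedged sketch of that consumption, with every name assumed:

/// Sketch only: `ReviewSettings` and all parameter names are assumed,
/// not taken from this diff.
struct ReviewSettings {
    model: String,
    reasoning_effort: Option<String>,
}

fn review_settings(
    configured_review_model: Option<String>,
    current_model: &str,
    current_reasoning_effort: Option<String>,
) -> ReviewSettings {
    ReviewSettings {
        // Use the configured review model when present, otherwise the
        // session's current model.
        model: configured_review_model.unwrap_or_else(|| current_model.to_string()),
        // Inherit the session's current reasoning effort.
        reasoning_effort: current_reasoning_effort,
    }
}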
@@ -1647,10 +1642,6 @@ impl Config {
     }
 }
-fn default_review_model() -> String {
-    OPENAI_DEFAULT_REVIEW_MODEL.to_string()
-}
 /// Returns the path to the Codex configuration directory, which can be
 /// specified by the `CODEX_HOME` environment variable. If not set, defaults to
 /// `~/.codex`.
@@ -3486,7 +3477,7 @@ model_verbosity = "high"
         assert_eq!(
             Config {
                 model: Some("o3".to_string()),
-                review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
+                review_model: None,
                 model_context_window: None,
                 model_auto_compact_token_limit: None,
                 model_provider_id: "openai".to_string(),
@@ -3573,7 +3564,7 @@ model_verbosity = "high"
         )?;
         let expected_gpt3_profile_config = Config {
             model: Some("gpt-3.5-turbo".to_string()),
-            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
+            review_model: None,
             model_context_window: None,
             model_auto_compact_token_limit: None,
             model_provider_id: "openai-chat-completions".to_string(),
@@ -3675,7 +3666,7 @@ model_verbosity = "high"
         )?;
         let expected_zdr_profile_config = Config {
             model: Some("o3".to_string()),
-            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
+            review_model: None,
             model_context_window: None,
             model_auto_compact_token_limit: None,
             model_provider_id: "openai".to_string(),
@@ -3763,7 +3754,7 @@ model_verbosity = "high"
         )?;
         let expected_gpt5_profile_config = Config {
             model: Some("gpt-5.1".to_string()),
-            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
+            review_model: None,
             model_context_window: None,
             model_auto_compact_token_limit: None,
             model_provider_id: "openai".to_string(),