Use current model for review (#9179)

Instead of having a hard-coded default review model, use the current
model for running `/review` unless one is specified in the config.

Also inherit the current reasoning effort instead of forcing it to Low.
This commit is contained in:
pakrym-oai
2026-01-14 08:59:41 -08:00
committed by GitHub
parent e1447c3009
commit 92472e7baa
5 changed files with 75 additions and 26 deletions

View File

@@ -3421,7 +3421,9 @@ impl CodexMessageProcessor {
})?;
let mut config = self.config.as_ref().clone();
config.model = Some(self.config.review_model.clone());
if let Some(review_model) = &config.review_model {
config.model = Some(review_model.clone());
}
let NewThread {
thread_id,

View File

@@ -2383,7 +2383,10 @@ async fn spawn_review_thread(
sub_id: String,
resolved: crate::review_prompts::ResolvedReviewRequest,
) {
let model = config.review_model.clone();
let model = config
.review_model
.clone()
.unwrap_or_else(|| parent_turn_context.client.get_model());
let review_model_info = sess
.services
.models_manager
@@ -2407,14 +2410,13 @@ async fn spawn_review_thread(
// Build per-turn client with the requested model/family.
let mut per_turn_config = (*config).clone();
per_turn_config.model_reasoning_effort = Some(ReasoningEffortConfig::Low);
per_turn_config.model_reasoning_summary = ReasoningSummaryConfig::Detailed;
per_turn_config.model = Some(model.clone());
per_turn_config.features = review_features.clone();
let otel_manager = parent_turn_context.client.get_otel_manager().with_model(
config.review_model.as_str(),
review_model_info.slug.as_str(),
);
let otel_manager = parent_turn_context
.client
.get_otel_manager()
.with_model(model.as_str(), review_model_info.slug.as_str());
let per_turn_config = Arc::new(per_turn_config);
let client = ModelClient::new(

View File

@@ -76,8 +76,6 @@ pub use constraint::ConstraintResult;
pub use service::ConfigService;
pub use service::ConfigServiceError;
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex-max";
pub use codex_git::GhostSnapshotConfig;
/// Maximum number of bytes of the documentation that will be embedded. Larger
@@ -108,8 +106,8 @@ pub struct Config {
/// Optional override of model selection.
pub model: Option<String>,
/// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max".
pub review_model: String,
/// Model used specifically for review sessions.
pub review_model: Option<String>,
/// Size of the context window for the model, in tokens.
pub model_context_window: Option<i64>,
@@ -1413,10 +1411,7 @@ impl Config {
)?;
let compact_prompt = compact_prompt.or(file_compact_prompt);
// Default review model when not set in config; allow CLI override to take precedence.
let review_model = override_review_model
.or(cfg.review_model)
.unwrap_or_else(default_review_model);
let review_model = override_review_model.or(cfg.review_model);
let check_for_update_on_startup = cfg.check_for_update_on_startup.unwrap_or(true);
@@ -1647,10 +1642,6 @@ impl Config {
}
}
/// Fallback review model applied when the configuration does not name one.
fn default_review_model() -> String {
    String::from(OPENAI_DEFAULT_REVIEW_MODEL)
}
/// Returns the path to the Codex configuration directory, which can be
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.codex`.
@@ -3486,7 +3477,7 @@ model_verbosity = "high"
assert_eq!(
Config {
model: Some("o3".to_string()),
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
review_model: None,
model_context_window: None,
model_auto_compact_token_limit: None,
model_provider_id: "openai".to_string(),
@@ -3573,7 +3564,7 @@ model_verbosity = "high"
)?;
let expected_gpt3_profile_config = Config {
model: Some("gpt-3.5-turbo".to_string()),
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
review_model: None,
model_context_window: None,
model_auto_compact_token_limit: None,
model_provider_id: "openai-chat-completions".to_string(),
@@ -3675,7 +3666,7 @@ model_verbosity = "high"
)?;
let expected_zdr_profile_config = Config {
model: Some("o3".to_string()),
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
review_model: None,
model_context_window: None,
model_auto_compact_token_limit: None,
model_provider_id: "openai".to_string(),
@@ -3763,7 +3754,7 @@ model_verbosity = "high"
)?;
let expected_gpt5_profile_config = Config {
model: Some("gpt-5.1".to_string()),
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
review_model: None,
model_context_window: None,
model_auto_compact_token_limit: None,
model_provider_id: "openai".to_string(),

View File

@@ -92,7 +92,11 @@ async fn start_review_conversation(
// Set explicit review rubric for the sub-agent
sub_agent_config.base_instructions = Some(crate::REVIEW_PROMPT.to_string());
sub_agent_config.model = Some(config.review_model.clone());
let model = config
.review_model
.clone()
.unwrap_or_else(|| ctx.client.get_model());
sub_agent_config.model = Some(model);
(run_codex_thread_one_shot(
sub_agent_config,
session.auth_manager(),

View File

@@ -393,7 +393,7 @@ async fn review_uses_custom_review_model_from_config() {
// Choose a review model different from the main model; ensure it is used.
let codex = new_conversation_for_server(&server, &codex_home, |cfg| {
cfg.model = Some("gpt-4.1".to_string());
cfg.review_model = "gpt-5.1".to_string();
cfg.review_model = Some("gpt-5.1".to_string());
})
.await;
@@ -431,6 +431,56 @@ async fn review_uses_custom_review_model_from_config() {
server.verify().await;
}
/// Ensure that when `review_model` is not set in the config, the review request
/// uses the session model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_uses_session_model_when_review_model_unset() {
skip_if_no_network!();
// Minimal stream: just a completed event
let sse_raw = r#"[
{"type":"response.completed", "response": {"id": "__ID__"}}
]"#;
// Mock Responses API server serving the SSE stream above; the `1` is
// presumably the number of requests expected — verify against the helper.
let (server, request_log) = start_responses_server_with_sse(sse_raw, 1).await;
let codex_home = TempDir::new().unwrap();
// Session model is "gpt-4.1"; review_model is deliberately left unset so
// the review path must fall back to the session model.
let codex = new_conversation_for_server(&server, &codex_home, |cfg| {
cfg.model = Some("gpt-4.1".to_string());
cfg.review_model = None;
})
.await;
// Kick off a review with a custom instruction target.
codex
.submit(Op::Review {
review_request: ReviewRequest {
target: ReviewTarget::Custom {
instructions: "use session model".to_string(),
},
user_facing_hint: None,
},
})
.await
.unwrap();
// Review lifecycle: entered -> exited (no review output, since the mock
// stream only emits a completed event) -> turn complete.
let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
let _closed = wait_for_event(&codex, |ev| {
matches!(
ev,
EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
review_output: None
})
)
})
.await;
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
// Exactly one request should have been made, and its model must be the
// session model rather than any review-specific default.
let request = request_log.single_request();
assert_eq!(request.path(), "/v1/responses");
let body = request.body_json();
assert_eq!(body["model"].as_str().unwrap(), "gpt-4.1");
server.verify().await;
}
/// When a review session begins, it must not prepend prior chat history from
/// the parent session. The request `input` should contain only the review
/// prompt from the user.