Update Model Info (#7853)

This commit is contained in:
Ahmed Ibrahim
2025-12-11 14:06:07 -08:00
committed by GitHub
parent 3e81ed4b91
commit b7fa7ca8e9
15 changed files with 259 additions and 40 deletions

View File

@@ -6,6 +6,8 @@ use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use serde_json::json;
use std::path::Path;
@@ -28,6 +30,15 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
priority,
upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()),
base_instructions: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
}
}

View File

@@ -219,6 +219,16 @@ mod tests {
"supported_in_api": true,
"priority": 1,
"upgrade": null,
"base_instructions": null,
"supports_reasoning_summaries": false,
"support_verbosity": false,
"default_verbosity": null,
"apply_patch_tool_type": null,
"truncation_policy": {"mode": "bytes", "limit": 10_000},
"supports_parallel_tool_calls": false,
"context_window": null,
"reasoning_summary_format": "none",
"experimental_supported_tools": [],
}))
.unwrap(),
],

View File

@@ -11,6 +11,8 @@ use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use http::HeaderMap;
use http::Method;
use wiremock::Mock;
@@ -78,6 +80,15 @@ async fn models_client_hits_models_endpoint() {
priority: 1,
upgrade: None,
base_instructions: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
}],
etag: String::new(),
};

View File

@@ -7,7 +7,6 @@ use crate::config::types::Notifications;
use crate::config::types::OtelConfig;
use crate::config::types::OtelConfigToml;
use crate::config::types::OtelExporterKind;
use crate::config::types::ReasoningSummaryFormat;
use crate::config::types::SandboxWorkspaceWrite;
use crate::config::types::ShellEnvironmentPolicy;
use crate::config::types::ShellEnvironmentPolicyToml;
@@ -39,6 +38,7 @@ use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBufGuard;
use dirs::home_dir;

View File

@@ -522,14 +522,6 @@ impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy {
}
}
/// Reasoning-summary format as declared in the local (TOML) configuration.
/// Note the `kebab-case` serde renaming used for config files.
#[derive(Deserialize, Debug, Clone, PartialEq, Eq, Default, Hash)]
#[serde(rename_all = "kebab-case")]
pub enum ReasoningSummaryFormat {
/// No special summary formatting (the default).
#[default]
None,
/// Opt-in experimental summary format.
Experimental,
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -1,12 +1,12 @@
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ApplyPatchToolType;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use crate::config::Config;
use crate::config::types::ReasoningSummaryFormat;
use crate::tools::handlers::apply_patch::ApplyPatchToolType;
use crate::truncate::TruncationPolicy;
use codex_protocol::openai_models::ConfigShellToolType;
/// The `instructions` field in the payload sent to a model should always start
/// with this content.
@@ -101,14 +101,53 @@ impl ModelFamily {
/// Merge server-provided model metadata into this family.
///
/// For every remote model whose `slug` matches this family, delegate the
/// field-by-field copy to `apply_remote_overrides`. The diff-rendered text
/// retained the superseded inline assignments alongside the delegation call,
/// which both duplicated the override logic and partially moved `model`
/// (via `base_instructions.unwrap_or`) before passing it by value; only the
/// delegation belongs here.
pub fn with_remote_overrides(mut self, remote_models: Vec<ModelInfo>) -> Self {
    for model in remote_models {
        if model.slug == self.slug {
            self.apply_remote_overrides(model);
        }
    }
    self
}
/// Copy every remotely-configurable field from `model` onto this family.
///
/// `ModelInfo` is destructured exhaustively on purpose: adding a field to
/// `ModelInfo` without deciding how (or whether) it overrides the family
/// becomes a compile error here instead of a silent omission.
fn apply_remote_overrides(&mut self, model: ModelInfo) {
let ModelInfo {
// Identity/presentation/rollout fields are intentionally not applied here.
slug: _,
display_name: _,
description: _,
default_reasoning_level,
supported_reasoning_levels: _,
shell_type,
visibility: _,
minimal_client_version: _,
supported_in_api: _,
priority: _,
upgrade: _,
base_instructions,
supports_reasoning_summaries,
support_verbosity,
default_verbosity,
apply_patch_tool_type,
truncation_policy,
supports_parallel_tool_calls,
context_window,
reasoning_summary_format,
experimental_supported_tools,
} = model;
self.default_reasoning_effort = Some(default_reasoning_level);
self.shell_type = shell_type;
// Remote base instructions only replace the local ones when provided.
if let Some(base) = base_instructions {
self.base_instructions = base;
}
self.supports_reasoning_summaries = supports_reasoning_summaries;
self.support_verbosity = support_verbosity;
self.default_verbosity = default_verbosity;
self.apply_patch_tool_type = apply_patch_tool_type;
// Wire-format truncation config is converted into the runtime policy type.
self.truncation_policy = truncation_policy.into();
self.supports_parallel_tool_calls = supports_parallel_tool_calls;
self.context_window = context_window;
self.reasoning_summary_format = reasoning_summary_format;
self.experimental_supported_tools = experimental_supported_tools;
}
pub fn auto_compact_token_limit(&self) -> Option<i64> {
self.auto_compact_token_limit
.or(self.context_window.map(Self::default_auto_compact_limit))
@@ -356,6 +395,7 @@ mod tests {
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::TruncationPolicyConfig;
fn remote(slug: &str, effort: ReasoningEffort, shell: ConfigShellToolType) -> ModelInfo {
ModelInfo {
@@ -374,6 +414,15 @@ mod tests {
priority: 1,
upgrade: None,
base_instructions: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
}
}
@@ -422,4 +471,73 @@ mod tests {
);
assert_eq!(updated.shell_type, family.shell_type);
}
// Verifies that `with_remote_overrides` applies every piece of extended
// remote metadata (reasoning summaries, verbosity, apply_patch tool type,
// truncation policy, parallel tool calls, context window, summary format,
// supported tools, and base instructions) onto a matching family.
#[test]
fn remote_overrides_apply_extended_metadata() {
// Local family starts from conservative defaults that every remote field
// should override.
let family = model_family!(
"gpt-5.1",
"gpt-5.1",
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,
apply_patch_tool_type: Some(ApplyPatchToolType::Function),
supports_parallel_tool_calls: false,
experimental_supported_tools: vec!["local".to_string()],
truncation_policy: TruncationPolicy::Bytes(10_000),
context_window: Some(100),
reasoning_summary_format: ReasoningSummaryFormat::None,
);
// Remote payload with a matching slug and every extended field populated.
let updated = family.with_remote_overrides(vec![ModelInfo {
slug: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: Some("desc".to_string()),
default_reasoning_level: ReasoningEffort::High,
supported_reasoning_levels: vec![ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "High".to_string(),
}],
shell_type: ConfigShellToolType::ShellCommand,
visibility: ModelVisibility::List,
minimal_client_version: ClientVersion(0, 1, 0),
supported_in_api: true,
priority: 10,
upgrade: None,
base_instructions: Some("Remote instructions".to_string()),
supports_reasoning_summaries: true,
support_verbosity: true,
default_verbosity: Some(Verbosity::High),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
truncation_policy: TruncationPolicyConfig::tokens(2_000),
supports_parallel_tool_calls: true,
context_window: Some(400_000),
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
experimental_supported_tools: vec!["alpha".to_string(), "beta".to_string()],
}]);
// Every extended field should now reflect the remote values.
assert_eq!(
updated.default_reasoning_effort,
Some(ReasoningEffort::High)
);
assert!(updated.supports_reasoning_summaries);
assert!(updated.support_verbosity);
assert_eq!(updated.default_verbosity, Some(Verbosity::High));
assert_eq!(updated.shell_type, ConfigShellToolType::ShellCommand);
assert_eq!(
updated.apply_patch_tool_type,
Some(ApplyPatchToolType::Freeform)
);
// Tokens(2_000) confirms the TruncationPolicyConfig -> TruncationPolicy conversion.
assert_eq!(updated.truncation_policy, TruncationPolicy::Tokens(2_000));
assert!(updated.supports_parallel_tool_calls);
assert_eq!(updated.context_window, Some(400_000));
assert_eq!(
updated.reasoning_summary_format,
ReasoningSummaryFormat::Experimental
);
assert_eq!(
updated.experimental_supported_tools,
vec!["alpha".to_string(), "beta".to_string()]
);
assert_eq!(updated.base_instructions, "Remote instructions");
}
}

View File

@@ -206,7 +206,7 @@ impl ModelsManager {
/// Convert remote model metadata into picker-ready presets, marking defaults.
async fn build_available_models(&self) {
let mut available_models = self.remote_models.read().await.clone();
available_models.sort_by(|a, b| b.priority.cmp(&a.priority));
available_models.sort_by(|a, b| a.priority.cmp(&b.priority));
let mut model_presets: Vec<ModelPreset> = available_models
.into_iter()
.map(Into::into)
@@ -279,6 +279,15 @@ mod tests {
"priority": priority,
"upgrade": null,
"base_instructions": null,
"supports_reasoning_summaries": false,
"support_verbosity": false,
"default_verbosity": null,
"apply_patch_tool_type": null,
"truncation_policy": {"mode": "bytes", "limit": 10_000},
"supports_parallel_tool_calls": false,
"context_window": null,
"reasoning_summary_format": "none",
"experimental_supported_tools": [],
}))
.expect("valid model")
}
@@ -306,7 +315,7 @@ mod tests {
let server = MockServer::start().await;
let remote_models = vec![
remote_model("priority-low", "Low", 1),
remote_model("priority-high", "High", 10),
remote_model("priority-high", "High", 0),
];
let models_mock = mount_models_once(
&server,

View File

@@ -26,8 +26,6 @@ use crate::tools::sandboxing::ToolCtx;
use crate::tools::spec::ApplyPatchToolArgs;
use crate::tools::spec::JsonSchema;
use async_trait::async_trait;
use serde::Deserialize;
use serde::Serialize;
pub struct ApplyPatchHandler;
@@ -161,13 +159,6 @@ impl ToolHandler for ApplyPatchHandler {
}
}
/// Selects which variant of the `apply_patch` tool is exposed to the model.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchToolType {
/// Free-form text variant of the tool.
Freeform,
/// Structured function-call variant of the tool.
Function,
}
#[allow(clippy::too_many_arguments)]
pub(crate) async fn intercept_apply_patch(
command: &[String],

View File

@@ -4,10 +4,10 @@ use crate::features::Feature;
use crate::features::Features;
use crate::openai_models::model_family::ModelFamily;
use crate::tools::handlers::PLAN_TOOL;
use crate::tools::handlers::apply_patch::ApplyPatchToolType;
use crate::tools::handlers::apply_patch::create_apply_patch_freeform_tool;
use crate::tools::handlers::apply_patch::create_apply_patch_json_tool;
use crate::tools::registry::ToolRegistryBuilder;
use codex_protocol::openai_models::ApplyPatchToolType;
use codex_protocol::openai_models::ConfigShellToolType;
use serde::Deserialize;
use serde::Serialize;

View File

@@ -4,6 +4,8 @@
use crate::config::Config;
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::openai_models::TruncationMode;
use codex_protocol::openai_models::TruncationPolicyConfig;
const APPROX_BYTES_PER_TOKEN: usize = 4;
@@ -13,6 +15,15 @@ pub enum TruncationPolicy {
Tokens(usize),
}
impl From<TruncationPolicyConfig> for TruncationPolicy {
fn from(config: TruncationPolicyConfig) -> Self {
match config.mode {
TruncationMode::Bytes => Self::Bytes(config.limit as usize),
TruncationMode::Tokens => Self::Tokens(config.limit as usize),
}
}
}
impl TruncationPolicy {
/// Scale the underlying budget by `multiplier`, rounding up to avoid under-budgeting.
pub fn mul(self, multiplier: f64) -> Self {

View File

@@ -10,11 +10,11 @@ use codex_core::Prompt;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::config::types::ReasoningSummaryFormat;
use codex_core::openai_models::models_manager::ModelsManager;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::protocol::SessionSource;
use core_test_support::load_default_config_for_test;
use core_test_support::responses;

View File

@@ -25,6 +25,8 @@ use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::responses::ev_assistant_message;
@@ -75,6 +77,15 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
priority: 1,
upgrade: None,
base_instructions: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
};
let models_mock = mount_models_once(
@@ -206,6 +217,15 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
priority: 1,
upgrade: None,
base_instructions: Some(remote_base.to_string()),
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
};
mount_models_once(
&server,

View File

@@ -8,6 +8,8 @@ use strum_macros::Display;
use strum_macros::EnumIter;
use ts_rs::TS;
use crate::config_types::Verbosity;
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(
Debug,
@@ -112,6 +114,51 @@ pub enum ConfigShellToolType {
ShellCommand,
}
/// Which variant of the `apply_patch` tool a model uses; serialized in
/// `snake_case` on the wire.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, TS, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchToolType {
/// Free-form text variant.
Freeform,
/// Structured function-call (JSON) variant.
Function,
}
/// Wire format of reasoning summaries for a model; serialized in
/// `snake_case` (e.g. `"none"`, `"experimental"`).
#[derive(Deserialize, Debug, Clone, PartialEq, Eq, Default, Hash, TS, JsonSchema, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningSummaryFormat {
/// No special summary formatting (the default).
#[default]
None,
/// Opt-in experimental summary format.
Experimental,
}
/// Server-provided truncation policy metadata for a model.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum TruncationMode {
/// The limit is measured in bytes.
Bytes,
/// The limit is measured in tokens.
Tokens,
}
/// Truncation limit together with the unit (`mode`) it is expressed in.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
pub struct TruncationPolicyConfig {
/// Unit the limit is measured in.
pub mode: TruncationMode,
/// Maximum size; interpretation depends on `mode`.
pub limit: i64,
}
impl TruncationPolicyConfig {
/// Byte-based truncation policy with the given `limit`.
pub const fn bytes(limit: i64) -> Self {
Self {
mode: TruncationMode::Bytes,
limit,
}
}
/// Token-based truncation policy with the given `limit`.
pub const fn tokens(limit: i64) -> Self {
Self {
mode: TruncationMode::Tokens,
limit,
}
}
}
/// Semantic version triple encoded as an array in JSON (e.g. [0, 62, 0]).
// Tuple fields are, in order, the major, minor, and patch components.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
pub struct ClientVersion(pub i32, pub i32, pub i32);
@@ -121,22 +168,25 @@ pub struct ClientVersion(pub i32, pub i32, pub i32);
pub struct ModelInfo {
/// Stable identifier, matched against local model-family slugs.
pub slug: String,
/// Human-readable name shown in pickers.
pub display_name: String,
#[serde(default)]
pub description: Option<String>,
/// Reasoning effort applied when the user has not chosen one.
pub default_reasoning_level: ReasoningEffort,
pub supported_reasoning_levels: Vec<ReasoningEffortPreset>,
/// Which shell tool flavor this model uses.
pub shell_type: ConfigShellToolType,
// Falls back to `default_visibility()` when absent from the payload.
#[serde(default = "default_visibility")]
pub visibility: ModelVisibility,
pub minimal_client_version: ClientVersion,
#[serde(default)]
pub supported_in_api: bool,
#[serde(default)]
pub priority: i32,
/// Slug of the model this one upgrades to, if any.
#[serde(default)]
pub upgrade: Option<String>,
/// Remote base instructions; `None` keeps the local defaults.
#[serde(default)]
pub base_instructions: Option<String>,
// NOTE(review): the fields below, added with the extended metadata, have no
// #[serde(default)], so payloads that omit them fail to deserialize —
// confirm the /models endpoint always sends them (and that cached older
// payloads are not replayed through this type).
pub supports_reasoning_summaries: bool,
pub support_verbosity: bool,
pub default_verbosity: Option<Verbosity>,
pub apply_patch_tool_type: Option<ApplyPatchToolType>,
pub truncation_policy: TruncationPolicyConfig,
pub supports_parallel_tool_calls: bool,
pub context_window: Option<i64>,
pub reasoning_summary_format: ReasoningSummaryFormat,
pub experimental_supported_tools: Vec<String>,
}
/// Response wrapper for `/models`.
@@ -147,10 +197,6 @@ pub struct ModelsResponse {
pub etag: String,
}
/// Serde default for `ModelInfo::visibility`: `ModelVisibility::None` when
/// the field is absent from the payload.
fn default_visibility() -> ModelVisibility {
ModelVisibility::None
}
// convert ModelInfo to ModelPreset
impl From<ModelInfo> for ModelPreset {
fn from(info: ModelInfo) -> Self {

View File

@@ -26,12 +26,12 @@ use base64::Engine;
use codex_common::format_env_display::format_env_display;
use codex_core::config::Config;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::config::types::ReasoningSummaryFormat;
use codex_core::protocol::FileChange;
use codex_core::protocol::McpAuthStatus;
use codex_core::protocol::McpInvocation;
use codex_core::protocol::SessionConfiguredEvent;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::plan_tool::PlanItemArg;
use codex_protocol::plan_tool::StepStatus;
use codex_protocol::plan_tool::UpdatePlanArgs;

View File

@@ -26,12 +26,12 @@ use base64::Engine;
use codex_common::format_env_display::format_env_display;
use codex_core::config::Config;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::config::types::ReasoningSummaryFormat;
use codex_core::protocol::FileChange;
use codex_core::protocol::McpAuthStatus;
use codex_core::protocol::McpInvocation;
use codex_core::protocol::SessionConfiguredEvent;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::plan_tool::PlanItemArg;
use codex_protocol::plan_tool::StepStatus;
use codex_protocol::plan_tool::UpdatePlanArgs;