fix(models): add "robin" model preset with reasoning-effort options (#7855)

Co-authored-by: Ahmed Ibrahim <aibrahim@openai.com>
This commit is contained in:
Dylan Hurd
2025-12-11 00:39:47 -08:00
committed by GitHub
parent 13c0919bff
commit dca7f4cb60
6 changed files with 134 additions and 4 deletions

View File

@@ -116,6 +116,37 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "robin".to_string(),
model: "robin".to_string(),
display_name: "robin".to_string(),
description: "Robin".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward \
queries and short explanations"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for \
general-purpose tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::XHigh,
description: "Extra high reasoning for complex problems".to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
@@ -243,8 +274,30 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(fourth_response)?;
assert_eq!(fourth_items.len(), 1);
assert_eq!(fourth_items[0].id, "gpt-5.1");
assert!(fourth_cursor.is_none());
assert_eq!(fourth_items[0].id, "robin");
let fifth_cursor = fourth_cursor.ok_or_else(|| anyhow!("cursor for fifth page"))?;
let fifth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(fifth_cursor.clone()),
})
.await?;
let fifth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(fifth_request)),
)
.await??;
let ModelListResponse {
data: fifth_items,
next_cursor: fifth_cursor,
} = to_response::<ModelListResponse>(fifth_response)?;
assert_eq!(fifth_items.len(), 1);
assert_eq!(fifth_items[0].id, "gpt-5.1");
assert!(fifth_cursor.is_none());
Ok(())
}

View File

@@ -284,6 +284,20 @@ pub fn find_family_for_model(slug: &str) -> ModelFamily {
truncation_policy: TruncationPolicy::Tokens(10_000),
context_window: Some(CONTEXT_WINDOW_272K),
)
} else if slug.starts_with("robin") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
support_verbosity: true,
default_verbosity: Some(Verbosity::Low),
base_instructions: GPT_5_1_INSTRUCTIONS.to_string(),
default_reasoning_effort: Some(ReasoningEffort::Medium),
truncation_policy: TruncationPolicy::Bytes(10_000),
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: true,
context_window: Some(CONTEXT_WINDOW_272K),
)
} else if slug.starts_with("gpt-5.1") {
model_family!(
slug, "gpt-5.1",

View File

@@ -93,6 +93,34 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
}),
show_in_picker: true,
},
ModelPreset {
id: "robin".to_string(),
model: "robin".to_string(),
display_name: "robin".to_string(),
description: "Robin".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning for complex problems".to_string(),
},
],
is_default: false,
upgrade: None,
show_in_picker: true,
},
ModelPreset {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),

View File

@@ -46,6 +46,7 @@ fn expected_models_for_api_key() -> Vec<ModelPreset> {
gpt_5_1_codex_max(),
gpt_5_1_codex(),
gpt_5_1_codex_mini(),
robin(),
gpt_5_1(),
]
}
@@ -55,6 +56,7 @@ fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
gpt_5_1_codex_max(),
gpt_5_1_codex(),
gpt_5_1_codex_mini(),
robin(),
gpt_5_1(),
]
}
@@ -140,6 +142,37 @@ fn gpt_5_1_codex_mini() -> ModelPreset {
}
}
/// Builds the expected `ModelPreset` for the "robin" model used by these
/// tests: medium default reasoning effort, all four effort levels supported,
/// not the default model, and visible in the picker.
fn robin() -> ModelPreset {
    // The id, model slug, and display name are all the same string for robin.
    let slug = "robin".to_string();

    // All four reasoning-effort tiers, lowest to highest.
    let efforts = vec![
        effort(
            ReasoningEffort::Low,
            "Balances speed with some reasoning; useful for straightforward queries and short explanations",
        ),
        effort(
            ReasoningEffort::Medium,
            "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
        effort(
            ReasoningEffort::XHigh,
            "Extra high reasoning for complex problems",
        ),
    ];

    ModelPreset {
        id: slug.clone(),
        model: slug.clone(),
        display_name: slug,
        description: "Robin".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: None,
        show_in_picker: true,
    }
}
fn gpt_5_1() -> ModelPreset {
ModelPreset {
id: "gpt-5.1".to_string(),

View File

@@ -10,7 +10,8 @@ expression: popup
2. gpt-5.1-codex Optimized for codex.
3. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but
less capable.
4. gpt-5.1 Broad world knowledge with strong general
4. robin Robin
5. gpt-5.1 Broad world knowledge with strong general
reasoning.
Press enter to select reasoning effort, or esc to dismiss.

View File

@@ -10,6 +10,7 @@ expression: popup
2. gpt-5.1-codex Optimized for codex.
3. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less
capable.
4. gpt-5.1 Broad world knowledge with strong general reasoning.
4. robin Robin
5. gpt-5.1 Broad world knowledge with strong general reasoning.
Press enter to select reasoning effort, or esc to dismiss.