chores: clean picker (#8232)

Removes the `gpt-5.1` and `gpt-5.1-codex` entries from the model picker and makes `gpt-5.2` the default model. Test expectations are updated to match: `list_models_returns_all_models_with_large_limit` drops the two removed entries and moves the `is_default` flag to `gpt-5.2`, and `list_models_pagination_works` now pages through four models instead of six.
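A minimal sketch of the invariant this change pins down, assuming only the `Model` struct fields used by the tests below (`id`, `is_default`); the helper function is hypothetical and not part of this PR:

```rust
/// Hypothetical helper (illustration only): the picker should expose
/// exactly one default entry. After this change that entry is "gpt-5.2"
/// rather than "gpt-5.1".
fn default_model_id(models: &[Model]) -> Option<&str> {
    models.iter().find(|m| m.is_default).map(|m| m.id.as_str())
}

// e.g. assert_eq!(default_model_id(&expected_models), Some("gpt-5.2"));
```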
Author: Ahmed Ibrahim
Date: 2025-12-18 08:41:34 -08:00
Committed by: GitHub
Parent: 9bf41e9262
Commit: 374d591311
7 changed files with 76 additions and 240 deletions


@@ -47,33 +47,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
     } = to_response::<ModelListResponse>(response)?;
     let expected_models = vec![
-        Model {
-            id: "gpt-5.1".to_string(),
-            model: "gpt-5.1".to_string(),
-            display_name: "gpt-5.1".to_string(),
-            description: "Broad world knowledge with strong general reasoning.".to_string(),
-            supported_reasoning_efforts: vec![
-                ReasoningEffortOption {
-                    reasoning_effort: ReasoningEffort::Low,
-                    description: "Balances speed with some reasoning; useful for straightforward \
-                                  queries and short explanations"
-                        .to_string(),
-                },
-                ReasoningEffortOption {
-                    reasoning_effort: ReasoningEffort::Medium,
-                    description: "Provides a solid balance of reasoning depth and latency for \
-                                  general-purpose tasks"
-                        .to_string(),
-                },
-                ReasoningEffortOption {
-                    reasoning_effort: ReasoningEffort::High,
-                    description: "Maximizes reasoning depth for complex or ambiguous problems"
-                        .to_string(),
-                },
-            ],
-            default_reasoning_effort: ReasoningEffort::Medium,
-            is_default: true,
-        },
         Model {
             id: "gpt-5.2".to_string(),
             model: "gpt-5.2".to_string(),
@@ -105,7 +78,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
                 },
             ],
             default_reasoning_effort: ReasoningEffort::Medium,
-            is_default: false,
+            is_default: true,
         },
         Model {
             id: "gpt-5.1-codex-mini".to_string(),
@@ -126,29 +99,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
             default_reasoning_effort: ReasoningEffort::Medium,
             is_default: false,
         },
-        Model {
-            id: "gpt-5.1-codex".to_string(),
-            model: "gpt-5.1-codex".to_string(),
-            display_name: "gpt-5.1-codex".to_string(),
-            description: "Optimized for codex.".to_string(),
-            supported_reasoning_efforts: vec![
-                ReasoningEffortOption {
-                    reasoning_effort: ReasoningEffort::Low,
-                    description: "Fastest responses with limited reasoning".to_string(),
-                },
-                ReasoningEffortOption {
-                    reasoning_effort: ReasoningEffort::Medium,
-                    description: "Dynamically adjusts reasoning based on the task".to_string(),
-                },
-                ReasoningEffortOption {
-                    reasoning_effort: ReasoningEffort::High,
-                    description: "Maximizes reasoning depth for complex or ambiguous problems"
-                        .to_string(),
-                },
-            ],
-            default_reasoning_effort: ReasoningEffort::Medium,
-            is_default: false,
-        },
         Model {
             id: "gpt-5.1-codex-max".to_string(),
             model: "gpt-5.1-codex-max".to_string(),
@@ -237,7 +187,7 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::<ModelListResponse>(first_response)?;
     assert_eq!(first_items.len(), 1);
-    assert_eq!(first_items[0].id, "gpt-5.1");
+    assert_eq!(first_items[0].id, "gpt-5.2");
     let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
     let second_request = mcp
@@ -259,7 +209,7 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::<ModelListResponse>(second_response)?;
     assert_eq!(second_items.len(), 1);
-    assert_eq!(second_items[0].id, "gpt-5.2");
+    assert_eq!(second_items[0].id, "gpt-5.1-codex-mini");
     let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;
     let third_request = mcp
@@ -281,7 +231,7 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::<ModelListResponse>(third_response)?;
     assert_eq!(third_items.len(), 1);
-    assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
+    assert_eq!(third_items[0].id, "gpt-5.1-codex-max");
     let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;
     let fourth_request = mcp
@@ -303,52 +253,8 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::<ModelListResponse>(fourth_response)?;
     assert_eq!(fourth_items.len(), 1);
-    assert_eq!(fourth_items[0].id, "gpt-5.1-codex");
-    let fifth_cursor = fourth_cursor.ok_or_else(|| anyhow!("cursor for fifth page"))?;
-    let fifth_request = mcp
-        .send_list_models_request(ModelListParams {
-            limit: Some(1),
-            cursor: Some(fifth_cursor.clone()),
-        })
-        .await?;
-    let fifth_response: JSONRPCResponse = timeout(
-        DEFAULT_TIMEOUT,
-        mcp.read_stream_until_response_message(RequestId::Integer(fifth_request)),
-    )
-    .await??;
-    let ModelListResponse {
-        data: fifth_items,
-        next_cursor: fifth_cursor,
-    } = to_response::<ModelListResponse>(fifth_response)?;
-    assert_eq!(fifth_items.len(), 1);
-    assert_eq!(fifth_items[0].id, "gpt-5.1-codex-max");
-    let sixth_cursor = fifth_cursor.ok_or_else(|| anyhow!("cursor for sixth page"))?;
-    let sixth_request = mcp
-        .send_list_models_request(ModelListParams {
-            limit: Some(1),
-            cursor: Some(sixth_cursor.clone()),
-        })
-        .await?;
-    let sixth_response: JSONRPCResponse = timeout(
-        DEFAULT_TIMEOUT,
-        mcp.read_stream_until_response_message(RequestId::Integer(sixth_request)),
-    )
-    .await??;
-    let ModelListResponse {
-        data: sixth_items,
-        next_cursor: sixth_cursor,
-    } = to_response::<ModelListResponse>(sixth_response)?;
-    assert_eq!(sixth_items.len(), 1);
-    assert_eq!(sixth_items[0].id, "caribou");
-    assert!(sixth_cursor.is_none());
+    assert_eq!(fourth_items[0].id, "caribou");
+    assert!(fourth_cursor.is_none());
     Ok(())
 }
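For reference, the hand-unrolled pagination above could equally be exercised with a cursor loop. This is an illustrative sketch, not code from this PR: it reuses the harness calls the test itself makes (`send_list_models_request`, `read_stream_until_response_message`, `to_response`), while the harness type name `McpHarness` is an assumed stand-in, since the real type is not shown in this diff.

```rust
/// Sketch only: drain every page of the model list, one item per page,
/// following `next_cursor` until the server stops returning one.
async fn collect_all_model_ids(mcp: &mut McpHarness) -> anyhow::Result<Vec<String>> {
    let mut ids = Vec::new();
    let mut cursor: Option<String> = None;
    loop {
        // Request a single-item page, resuming from the previous cursor.
        let request = mcp
            .send_list_models_request(ModelListParams {
                limit: Some(1),
                cursor: cursor.clone(),
            })
            .await?;
        let response: JSONRPCResponse = timeout(
            DEFAULT_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(request)),
        )
        .await??;
        let ModelListResponse { data, next_cursor } =
            to_response::<ModelListResponse>(response)?;
        ids.extend(data.into_iter().map(|m| m.id));
        match next_cursor {
            Some(next) => cursor = Some(next),
            None => break, // final page: no cursor means we are done
        }
    }
    Ok(ids)
}

// With the trimmed picker, the updated test implies this would yield:
// ["gpt-5.2", "gpt-5.1-codex-mini", "gpt-5.1-codex-max", "caribou"]
```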