// Mirror of https://github.com/openai/codex.git (synced 2026-04-24 22:54:54 +00:00).
// 192 lines, 8.0 KiB, Rust.
use std::time::Duration;
|
|
|
|
use anyhow::Result;
|
|
use app_test_support::McpProcess;
|
|
use app_test_support::to_response;
|
|
use app_test_support::write_models_cache;
|
|
use codex_app_server_protocol::JSONRPCNotification;
|
|
use codex_app_server_protocol::JSONRPCResponse;
|
|
use codex_app_server_protocol::Model;
|
|
use codex_app_server_protocol::ModelListParams;
|
|
use codex_app_server_protocol::ModelListResponse;
|
|
use codex_app_server_protocol::ReasoningEffortOption;
|
|
use codex_app_server_protocol::RequestId;
|
|
use codex_app_server_protocol::ServerNotification;
|
|
use codex_protocol::openai_models::ReasoningEffort;
|
|
use pretty_assertions::assert_eq;
|
|
use tempfile::TempDir;
|
|
use tokio::time::timeout;
|
|
|
|
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
|
|
|
|
#[tokio::test]
|
|
async fn list_models_returns_empty_response_and_notification() -> Result<()> {
|
|
let codex_home = TempDir::new()?;
|
|
write_models_cache(codex_home.path())?;
|
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
|
|
|
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
|
|
|
let request_id = mcp
|
|
.send_list_models_request(ModelListParams {
|
|
limit: Some(1),
|
|
cursor: Some("ignored".to_string()),
|
|
})
|
|
.await?;
|
|
|
|
let response: JSONRPCResponse = timeout(
|
|
DEFAULT_TIMEOUT,
|
|
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
|
|
)
|
|
.await??;
|
|
|
|
let ModelListResponse {} = to_response::<ModelListResponse>(response)?;
|
|
|
|
let notification: JSONRPCNotification = timeout(
|
|
DEFAULT_TIMEOUT,
|
|
mcp.read_stream_until_notification_message("model/presets/updated"),
|
|
)
|
|
.await??;
|
|
let server_notification: ServerNotification = notification.try_into()?;
|
|
let ServerNotification::ModelPresetsUpdated(payload) = server_notification else {
|
|
unreachable!("expected model/presets/updated notification");
|
|
};
|
|
|
|
assert_eq!(payload.models, expected_models());
|
|
Ok(())
|
|
}
|
|
|
|
fn expected_models() -> Vec<Model> {
|
|
vec![
|
|
Model {
|
|
id: "gpt-5.1-codex-max".to_string(),
|
|
model: "gpt-5.1-codex-max".to_string(),
|
|
display_name: "gpt-5.1-codex-max".to_string(),
|
|
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
|
|
supported_reasoning_efforts: vec![
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::Low,
|
|
description: "Fast responses with lighter reasoning".to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::Medium,
|
|
description: "Balances speed and reasoning depth for everyday tasks"
|
|
.to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::High,
|
|
description: "Greater reasoning depth for complex problems".to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::XHigh,
|
|
description: "Extra high reasoning depth for complex problems".to_string(),
|
|
},
|
|
],
|
|
default_reasoning_effort: ReasoningEffort::Medium,
|
|
is_default: true,
|
|
},
|
|
Model {
|
|
id: "gpt-5.1-codex".to_string(),
|
|
model: "gpt-5.1-codex".to_string(),
|
|
display_name: "gpt-5.1-codex".to_string(),
|
|
description: "Optimized for codex.".to_string(),
|
|
supported_reasoning_efforts: vec![
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::Low,
|
|
description: "Fastest responses with limited reasoning".to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::Medium,
|
|
description: "Dynamically adjusts reasoning based on the task".to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::High,
|
|
description: "Maximizes reasoning depth for complex or ambiguous problems"
|
|
.to_string(),
|
|
},
|
|
],
|
|
default_reasoning_effort: ReasoningEffort::Medium,
|
|
is_default: false,
|
|
},
|
|
Model {
|
|
id: "gpt-5.1-codex-mini".to_string(),
|
|
model: "gpt-5.1-codex-mini".to_string(),
|
|
display_name: "gpt-5.1-codex-mini".to_string(),
|
|
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
|
|
supported_reasoning_efforts: vec![
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::Medium,
|
|
description: "Dynamically adjusts reasoning based on the task".to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::High,
|
|
description: "Maximizes reasoning depth for complex or ambiguous problems"
|
|
.to_string(),
|
|
},
|
|
],
|
|
default_reasoning_effort: ReasoningEffort::Medium,
|
|
is_default: false,
|
|
},
|
|
Model {
|
|
id: "gpt-5.2".to_string(),
|
|
model: "gpt-5.2".to_string(),
|
|
display_name: "gpt-5.2".to_string(),
|
|
description:
|
|
"Latest frontier model with improvements across knowledge, reasoning and coding"
|
|
.to_string(),
|
|
supported_reasoning_efforts: vec![
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::Low,
|
|
description: "Balances speed with some reasoning; useful for straightforward \
|
|
queries and short explanations"
|
|
.to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::Medium,
|
|
description: "Provides a solid balance of reasoning depth and latency for \
|
|
general-purpose tasks"
|
|
.to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::High,
|
|
description: "Greater reasoning depth for complex or ambiguous problems"
|
|
.to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::XHigh,
|
|
description: "Extra high reasoning for complex problems".to_string(),
|
|
},
|
|
],
|
|
default_reasoning_effort: ReasoningEffort::Medium,
|
|
is_default: false,
|
|
},
|
|
Model {
|
|
id: "gpt-5.1".to_string(),
|
|
model: "gpt-5.1".to_string(),
|
|
display_name: "gpt-5.1".to_string(),
|
|
description: "Broad world knowledge with strong general reasoning.".to_string(),
|
|
supported_reasoning_efforts: vec![
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::Low,
|
|
description: "Balances speed with some reasoning; useful for straightforward \
|
|
queries and short explanations"
|
|
.to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::Medium,
|
|
description: "Provides a solid balance of reasoning depth and latency for \
|
|
general-purpose tasks"
|
|
.to_string(),
|
|
},
|
|
ReasoningEffortOption {
|
|
reasoning_effort: ReasoningEffort::High,
|
|
description: "Maximizes reasoning depth for complex or ambiguous problems"
|
|
.to_string(),
|
|
},
|
|
],
|
|
default_reasoning_effort: ReasoningEffort::Medium,
|
|
is_default: false,
|
|
},
|
|
]
|
|
}
|