mirror of
https://github.com/openai/codex.git
synced 2026-04-27 16:15:09 +00:00
- This PR wires up `with_remote_overrides` and makes `construct_model_families` an async function
- Moves model-family resolution one level up so the calling function stays `sync`
- Updates the tests to use the local, offline, `sync` helper for model families
313 lines
9.5 KiB
Rust
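The tests below exercise the `sync` path from the PR description: the caller resolves the model family first, via the offline helper, and hands the result to `ModelClient::new`, so only the remote-overrides path needs to be async. A minimal sketch of that call order, lifted from the tests below (the surrounding variables are the ones the tests construct; production call sites may differ):

    let model_family = ModelsManager::construct_model_family_offline(&config.model, &config);
    let client = ModelClient::new(
        Arc::clone(&config),
        None,
        model_family, // resolved one level above, so the constructor itself stays sync
        otel_event_manager,
        provider,
        effort,
        summary,
        conversation_id,
        SessionSource::SubAgent(codex_protocol::protocol::SubAgentSource::Review),
    );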
use std::sync::Arc;

use codex_app_server_protocol::AuthMode;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::ContentItem;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::Prompt;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::config::types::ReasoningSummaryFormat;
use codex_core::openai_models::models_manager::ModelsManager;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::protocol::SessionSource;
use core_test_support::load_default_config_for_test;
use core_test_support::responses;
use futures::StreamExt;
use tempfile::TempDir;
use wiremock::matchers::header;

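// Review sub-agent sessions should stamp the Responses request with
// `x-openai-subagent: review`.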
#[tokio::test]
async fn responses_stream_includes_subagent_header_on_review() {
    core_test_support::skip_if_no_network!();

    let server = responses::start_mock_server().await;
    let response_body = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_completed("resp-1"),
    ]);

    let request_recorder = responses::mount_sse_once_match(
        &server,
        header("x-openai-subagent", "review"),
        response_body,
    )
    .await;

    let provider = ModelProviderInfo {
        name: "mock".into(),
        base_url: Some(format!("{}/v1", server.uri())),
        env_key: None,
        env_key_instructions: None,
        experimental_bearer_token: None,
        wire_api: WireApi::Responses,
        query_params: None,
        http_headers: None,
        env_http_headers: None,
        request_max_retries: Some(0),
        stream_max_retries: Some(0),
        stream_idle_timeout_ms: Some(5_000),
        requires_openai_auth: false,
    };

    let codex_home = TempDir::new().expect("failed to create TempDir");
    let mut config = load_default_config_for_test(&codex_home);
    config.model_provider_id = provider.name.clone();
    config.model_provider = provider.clone();
    let effort = config.model_reasoning_effort;
    let summary = config.model_reasoning_summary;
    let config = Arc::new(config);

    let conversation_id = ConversationId::new();
    let auth_mode = AuthMode::ChatGPT;
    let model_family = ModelsManager::construct_model_family_offline(&config.model, &config);
    let otel_event_manager = OtelEventManager::new(
        conversation_id,
        config.model.as_str(),
        model_family.slug.as_str(),
        None,
        Some("test@test.com".to_string()),
        Some(auth_mode),
        false,
        "test".to_string(),
    );

    let client = ModelClient::new(
        Arc::clone(&config),
        None,
        model_family,
        otel_event_manager,
        provider,
        effort,
        summary,
        conversation_id,
        SessionSource::SubAgent(codex_protocol::protocol::SubAgentSource::Review),
    );

    let mut prompt = Prompt::default();
    prompt.input = vec![ResponseItem::Message {
        id: None,
        role: "user".into(),
        content: vec![ContentItem::InputText {
            text: "hello".into(),
        }],
    }];

    let mut stream = client.stream(&prompt).await.expect("stream failed");
    while let Some(event) = stream.next().await {
        if matches!(event, Ok(ResponseEvent::Completed { .. })) {
            break;
        }
    }

    let request = request_recorder.single_request();
    assert_eq!(
        request.header("x-openai-subagent").as_deref(),
        Some("review")
    );
}

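// `SubAgentSource::Other` should forward its label ("my-task") as the
// `x-openai-subagent` header value.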
#[tokio::test]
async fn responses_stream_includes_subagent_header_on_other() {
    core_test_support::skip_if_no_network!();

    let server = responses::start_mock_server().await;
    let response_body = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_completed("resp-1"),
    ]);

    let request_recorder = responses::mount_sse_once_match(
        &server,
        header("x-openai-subagent", "my-task"),
        response_body,
    )
    .await;

    let provider = ModelProviderInfo {
        name: "mock".into(),
        base_url: Some(format!("{}/v1", server.uri())),
        env_key: None,
        env_key_instructions: None,
        experimental_bearer_token: None,
        wire_api: WireApi::Responses,
        query_params: None,
        http_headers: None,
        env_http_headers: None,
        request_max_retries: Some(0),
        stream_max_retries: Some(0),
        stream_idle_timeout_ms: Some(5_000),
        requires_openai_auth: false,
    };

    let codex_home = TempDir::new().expect("failed to create TempDir");
    let mut config = load_default_config_for_test(&codex_home);
    config.model_provider_id = provider.name.clone();
    config.model_provider = provider.clone();
    let effort = config.model_reasoning_effort;
    let summary = config.model_reasoning_summary;
    let config = Arc::new(config);

    let conversation_id = ConversationId::new();
    let auth_mode = AuthMode::ChatGPT;
    let model_family = ModelsManager::construct_model_family_offline(&config.model, &config);

    let otel_event_manager = OtelEventManager::new(
        conversation_id,
        config.model.as_str(),
        model_family.slug.as_str(),
        None,
        Some("test@test.com".to_string()),
        Some(auth_mode),
        false,
        "test".to_string(),
    );

    let client = ModelClient::new(
        Arc::clone(&config),
        None,
        model_family,
        otel_event_manager,
        provider,
        effort,
        summary,
        conversation_id,
        SessionSource::SubAgent(codex_protocol::protocol::SubAgentSource::Other(
            "my-task".to_string(),
        )),
    );

    let mut prompt = Prompt::default();
    prompt.input = vec![ResponseItem::Message {
        id: None,
        role: "user".into(),
        content: vec![ContentItem::InputText {
            text: "hello".into(),
        }],
    }];

    let mut stream = client.stream(&prompt).await.expect("stream failed");
    while let Some(event) = stream.next().await {
        if matches!(event, Ok(ResponseEvent::Completed { .. })) {
            break;
        }
    }

    let request = request_recorder.single_request();
    assert_eq!(
        request.header("x-openai-subagent").as_deref(),
        Some("my-task")
    );
}

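// Config-level model-family overrides (reasoning summaries enabled, detailed
// summary) should reach the request body even for a model that does not
// advertise summary support, such as "gpt-3.5-turbo".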
#[tokio::test]
async fn responses_respects_model_family_overrides_from_config() {
    core_test_support::skip_if_no_network!();

    let server = responses::start_mock_server().await;
    let response_body = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_completed("resp-1"),
    ]);

    let request_recorder = responses::mount_sse_once(&server, response_body).await;

    let provider = ModelProviderInfo {
        name: "mock".into(),
        base_url: Some(format!("{}/v1", server.uri())),
        env_key: None,
        env_key_instructions: None,
        experimental_bearer_token: None,
        wire_api: WireApi::Responses,
        query_params: None,
        http_headers: None,
        env_http_headers: None,
        request_max_retries: Some(0),
        stream_max_retries: Some(0),
        stream_idle_timeout_ms: Some(5_000),
        requires_openai_auth: false,
    };

    let codex_home = TempDir::new().expect("failed to create TempDir");
    let mut config = load_default_config_for_test(&codex_home);
    config.model = "gpt-3.5-turbo".to_string();
    config.model_provider_id = provider.name.clone();
    config.model_provider = provider.clone();
    config.model_supports_reasoning_summaries = Some(true);
    config.model_reasoning_summary_format = Some(ReasoningSummaryFormat::Experimental);
    config.model_reasoning_summary = ReasoningSummary::Detailed;
    let effort = config.model_reasoning_effort;
    let summary = config.model_reasoning_summary;
    let config = Arc::new(config);

    let conversation_id = ConversationId::new();
    let auth_mode =
        AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")).get_auth_mode();
    let model_family = ModelsManager::construct_model_family_offline(&config.model, &config);
    let otel_event_manager = OtelEventManager::new(
        conversation_id,
        config.model.as_str(),
        model_family.slug.as_str(),
        None,
        Some("test@test.com".to_string()),
        auth_mode,
        false,
        "test".to_string(),
    );

    let client = ModelClient::new(
        Arc::clone(&config),
        None,
        model_family,
        otel_event_manager,
        provider,
        effort,
        summary,
        conversation_id,
        SessionSource::SubAgent(codex_protocol::protocol::SubAgentSource::Other(
            "override-check".to_string(),
        )),
    );

    let mut prompt = Prompt::default();
    prompt.input = vec![ResponseItem::Message {
        id: None,
        role: "user".into(),
        content: vec![ContentItem::InputText {
            text: "hello".into(),
        }],
    }];

    let mut stream = client.stream(&prompt).await.expect("stream failed");
    while let Some(event) = stream.next().await {
        if matches!(event, Ok(ResponseEvent::Completed { .. })) {
            break;
        }
    }

    let request = request_recorder.single_request();
    let body = request.body_json();
    let reasoning = body
        .get("reasoning")
        .and_then(|value| value.as_object())
        .cloned();

    assert!(
        reasoning.is_some(),
        "reasoning should be present when config enables summaries"
    );

    assert_eq!(
        reasoning
            .as_ref()
            .and_then(|value| value.get("summary"))
            .and_then(|value| value.as_str()),
        Some("detailed")
    );
}