Update tests to stop using sse_completed fixture (#10638)

Summary:
- replace the `sse_completed` fixture and related JSON template with
direct `responses::ev_completed` payload builders
- cascade the new SSE helpers through all affected core tests for
consistency and clarity
- remove legacy fixtures that were no longer needed once the helpers are
in place

Testing:
- Not run (not requested)
Author: pakrym-oai
Date: 2026-02-04 08:38:06 -08:00
Committed by: GitHub
Parent: 583e5d4f41
Commit: 0efd33f7f4

14 changed files with 428 additions and 194 deletions
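The change repeated in every hunk below is mechanical: the per-file `sse_completed` wrapper around a JSON template is replaced by composing the stream inline from the `core_test_support::responses` builders. A minimal sketch of the two styles, assuming `sse` is the stream-assembly helper from the same `responses` module and that the `ev_*` builders return the JSON event payloads it expects:

```rust
use core_test_support::load_sse_fixture_with_id;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::sse;
use wiremock::MockServer;

// Old style: each test file kept its own wrapper around the shared JSON fixture.
fn sse_completed(id: &str) -> String {
    load_sse_fixture_with_id("../fixtures/completed_template.json", id)
}

// New style: build the SSE body directly from event builders; no fixture file needed.
// (`mount_sse_once`, `sse`, and the event builders are used here exactly as in the
// diff below; the surrounding function is illustrative only.)
async fn mount_completed_response(server: &MockServer) {
    let body = sse(vec![ev_response_created("resp1"), ev_completed("resp1")]);
    let _resp_mock = mount_sse_once(server, body).await;
}
```

Note that the new streams also prepend a `response.created` event, which the old template-based body apparently did not carry.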


@@ -36,8 +36,9 @@ use codex_protocol::models::WebSearchAction;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
-use core_test_support::load_sse_fixture_with_id;
+use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_completed_with_tokens;
+use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::mount_sse_once_match;
use core_test_support::responses::mount_sse_sequence;
@@ -64,11 +65,6 @@ use wiremock::matchers::method;
use wiremock::matchers::path;
use wiremock::matchers::query_param;
-/// Build minimal SSE stream with completed marker using the JSON fixture.
-fn sse_completed(id: &str) -> String {
-    load_sse_fixture_with_id("../fixtures/completed_template.json", id)
-}
#[expect(clippy::unwrap_used)]
fn assert_message_role(request_body: &serde_json::Value, role: &str) {
assert_eq!(request_body["role"].as_str().unwrap(), role);
@@ -259,7 +255,11 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
// Mock server that will receive the resumed request
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
// Configure Codex to resume from our file
let codex_home = Arc::new(TempDir::new().unwrap());
@@ -377,7 +377,11 @@ async fn includes_conversation_id_and_model_headers_in_request() {
// Mock server
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let mut builder = test_codex().with_auth(CodexAuth::from_api_key("Test API Key"));
let test = builder
@@ -418,7 +422,11 @@ async fn includes_base_instructions_override_in_request() {
skip_if_no_network!();
// Mock server
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let mut builder = test_codex()
.with_auth(CodexAuth::from_api_key("Test API Key"))
@@ -462,7 +470,11 @@ async fn chatgpt_auth_sends_correct_request() {
// Mock server
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let mut model_provider = built_in_model_providers()["openai"].clone();
model_provider.base_url = Some(format!("{}/api/codex", server.uri()));
@@ -524,7 +536,10 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
-.set_body_raw(sse_completed("resp1"), "text/event-stream");
+.set_body_raw(
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+    "text/event-stream",
+);
// Expect API key header, no ChatGPT account header required.
Mock::given(method("POST"))
@@ -590,7 +605,11 @@ async fn includes_user_instructions_message_in_request() {
skip_if_no_network!();
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let mut builder = test_codex()
.with_auth(CodexAuth::from_api_key("Test API Key"))
@@ -652,7 +671,11 @@ async fn skills_append_to_instructions() {
skip_if_no_network!();
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let codex_home = Arc::new(TempDir::new().unwrap());
let skill_dir = codex_home.path().join("skills/demo");
@@ -720,7 +743,11 @@ async fn includes_configured_effort_in_request() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let TestCodex { codex, .. } = test_codex()
.with_model("gpt-5.1-codex")
.with_config(|config| {
@@ -761,7 +788,11 @@ async fn includes_no_effort_in_request() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let TestCodex { codex, .. } = test_codex()
.with_model("gpt-5.1-codex")
.build(&server)
@@ -800,7 +831,11 @@ async fn includes_default_reasoning_effort_in_request_when_defined_by_model_info
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let TestCodex { codex, .. } = test_codex().with_model("gpt-5.1").build(&server).await?;
codex
@@ -835,7 +870,11 @@ async fn user_turn_collaboration_mode_overrides_model_and_effort() -> anyhow::Re
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let TestCodex {
codex,
config,
@@ -893,7 +932,11 @@ async fn configured_reasoning_summary_is_sent() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let TestCodex { codex, .. } = test_codex()
.with_config(|config| {
config.model_reasoning_summary = ReasoningSummary::Concise;
@@ -933,7 +976,11 @@ async fn reasoning_summary_is_omitted_when_disabled() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let TestCodex { codex, .. } = test_codex()
.with_config(|config| {
config.model_reasoning_summary = ReasoningSummary::None;
@@ -972,7 +1019,11 @@ async fn includes_default_verbosity_in_request() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let TestCodex { codex, .. } = test_codex().with_model("gpt-5.1").build(&server).await?;
codex
@@ -1007,7 +1058,11 @@ async fn configured_verbosity_not_sent_for_models_without_support() -> anyhow::R
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let TestCodex { codex, .. } = test_codex()
.with_model("gpt-5.1-codex")
.with_config(|config| {
@@ -1047,7 +1102,11 @@ async fn configured_verbosity_is_sent() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let TestCodex { codex, .. } = test_codex()
.with_model("gpt-5.1")
.with_config(|config| {
@@ -1088,7 +1147,11 @@ async fn includes_developer_instructions_message_in_request() {
skip_if_no_network!();
let server = MockServer::start().await;
-let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
+let resp_mock = mount_sse_once(
+    &server,
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+)
+.await;
let mut builder = test_codex()
.with_auth(CodexAuth::from_api_key("Test API Key"))
.with_config(|config| {
@@ -1570,7 +1633,10 @@ async fn context_window_error_sets_total_tokens_to_model_window() -> anyhow::Res
mount_sse_once_match(
&server,
body_string_contains("seed turn"),
sse_completed("resp_seed"),
sse(vec![
ev_response_created("resp_seed"),
ev_completed("resp_seed"),
]),
)
.await;
@@ -1656,7 +1722,10 @@ async fn azure_overrides_assign_properties_used_for_responses_url() {
// First request must NOT include `previous_response_id`.
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
-.set_body_raw(sse_completed("resp1"), "text/event-stream");
+.set_body_raw(
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+    "text/event-stream",
+);
// Expect POST to /openai/responses with api-version query param
Mock::given(method("POST"))
@@ -1737,7 +1806,10 @@ async fn env_var_overrides_loaded_auth() {
// First request must NOT include `previous_response_id`.
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
-.set_body_raw(sse_completed("resp1"), "text/event-stream");
+.set_body_raw(
+    sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+    "text/event-stream",
+);
// Expect POST to /openai/responses with api-version query param
Mock::given(method("POST"))