Migrate prompt caching tests to test_codex (#6605)

Hopefully fixes the flakiness in these tests.
pakrym-oai
2025-11-13 09:19:38 -08:00
committed by GitHub
parent e6995174c1
commit 041d6ad902
2 changed files with 145 additions and 350 deletions
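
For context, the migrated tests all follow roughly the shape sketched below. This is a reconstruction assembled from the hunks in this commit, not code copied from it verbatim: sse_completed is the SSE fixture helper already defined in the test file, and the exact Op::UserInput payload shape plus the import paths for Op and EventMsg are assumptions.

use codex_core::protocol::EventMsg; // assumed path; not shown in the hunks
use codex_core::protocol::Op;       // assumed path; not shown in the hunks
use codex_protocol::user_input::UserInput;
use core_test_support::responses::mount_sse_once_match;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use wiremock::matchers::any;

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn example_migrated_test() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    // One mock server, and one mounted SSE response per expected request,
    // instead of a single wiremock Mock with .expect(2).
    let server = start_mock_server().await;
    let req1 = mount_sse_once_match(&server, any(), sse_completed("resp-1")).await;

    // The builder replaces the hand-rolled ModelProviderInfo / TempDir /
    // ConversationManager setup that each test used to repeat.
    let TestCodex { codex, .. } = test_codex()
        .with_config(|config| {
            config.user_instructions = Some("be consistent and helpful".to_string());
        })
        .build(&server)
        .await?;

    codex
        .submit(Op::UserInput {
            // payload shape approximated from the surrounding hunks
            items: vec![UserInput::Text { text: "hello 1".into() }],
        })
        .await?;
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    // Each mounted response records the single request that matched it, so
    // there is no server.received_requests() bookkeeping.
    let body = req1.single_request().body_json();
    assert!(body["instructions"].is_string());
    Ok(())
}

Each mounted response owns exactly one request, which is what lets the tests drop the shared Mock with .expect(2) and the received_requests() length assertions.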

View File

@@ -71,6 +71,7 @@ impl TestCodexBuilder {
resume_from: Option<PathBuf>,
) -> anyhow::Result<TestCodex> {
let (config, cwd) = self.prepare_config(server, &home).await?;
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let new_conversation = match resume_from {
@@ -79,15 +80,20 @@ impl TestCodexBuilder {
CodexAuth::from_api_key("dummy"),
);
conversation_manager
.resume_conversation_from_rollout(config, path, auth_manager)
.resume_conversation_from_rollout(config.clone(), path, auth_manager)
.await?
}
None => {
conversation_manager
.new_conversation(config.clone())
.await?
}
None => conversation_manager.new_conversation(config).await?,
};
Ok(TestCodex {
home,
cwd,
config,
codex: new_conversation.conversation,
session_configured: new_conversation.session_configured,
})
@@ -131,6 +137,7 @@ pub struct TestCodex {
pub cwd: Arc<TempDir>,
pub codex: Arc<CodexConversation>,
pub session_configured: SessionConfiguredEvent,
pub config: Config,
}
impl TestCodex {

View File

@@ -1,9 +1,5 @@
#![allow(clippy::unwrap_used)]
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::config::OPENAI_DEFAULT_MODEL;
use codex_core::features::Feature;
use codex_core::model_family::find_family_for_model;
@@ -16,17 +12,16 @@ use codex_core::protocol_config_types::ReasoningSummary;
use codex_core::shell::Shell;
use codex_core::shell::default_user_shell;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
use core_test_support::responses::mount_sse_once_match;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use std::collections::HashMap;
use tempfile::TempDir;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
use wiremock::matchers::any;
fn text_user_input(text: String) -> serde_json::Value {
serde_json::json!({
@@ -70,48 +65,24 @@ fn assert_tool_names(body: &serde_json::Value, expected_names: &[&str]) {
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn codex_mini_latest_tools() {
skip_if_no_network!();
async fn codex_mini_latest_tools() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let server = start_mock_server().await;
let req1 = mount_sse_once_match(&server, any(), sse_completed("resp-1")).await;
let req2 = mount_sse_once_match(&server, any(), sse_completed("resp-2")).await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Expect two POSTs to /v1/responses
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
config.features.disable(Feature::ApplyPatchFreeform);
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
config.model = "codex-mini-latest".to_string();
config.model_family = find_family_for_model("codex-mini-latest").unwrap();
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;
let TestCodex { codex, .. } = test_codex()
.with_config(|config| {
config.user_instructions = Some("be consistent and helpful".to_string());
config.features.disable(Feature::ApplyPatchFreeform);
config.model = "codex-mini-latest".to_string();
config.model_family = find_family_for_model("codex-mini-latest")
.expect("model family for codex-mini-latest");
})
.build(&server)
.await?;
codex
.submit(Op::UserInput {
@@ -119,8 +90,7 @@ async fn codex_mini_latest_tools() {
text: "hello 1".into(),
}],
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
@@ -129,72 +99,45 @@ async fn codex_mini_latest_tools() {
text: "hello 2".into(),
}],
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let expected_instructions = [
include_str!("../../prompt.md"),
include_str!("../../../apply-patch/apply_patch_tool_instructions.md"),
]
.join("\n");
let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
let body0 = req1.single_request().body_json();
assert_eq!(
body0["instructions"],
serde_json::json!(expected_instructions),
);
let body1 = requests[1].body_json::<serde_json::Value>().unwrap();
let body1 = req2.single_request().body_json();
assert_eq!(
body1["instructions"],
serde_json::json!(expected_instructions),
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn prompt_tools_are_consistent_across_requests() {
skip_if_no_network!();
async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let server = start_mock_server().await;
let req1 = mount_sse_once_match(&server, any(), sse_completed("resp-1")).await;
let req2 = mount_sse_once_match(&server, any(), sse_completed("resp-2")).await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Expect two POSTs to /v1/responses
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let TestCodex { codex, config, .. } = test_codex()
.with_config(|config| {
config.user_instructions = Some("be consistent and helpful".to_string());
})
.build(&server)
.await?;
let base_instructions = config.model_family.base_instructions.clone();
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;
codex
.submit(Op::UserInput {
@@ -202,8 +145,7 @@ async fn prompt_tools_are_consistent_across_requests() {
text: "hello 1".into(),
}],
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
@@ -212,13 +154,9 @@ async fn prompt_tools_are_consistent_across_requests() {
text: "hello 2".into(),
}],
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
// our internal implementation is responsible for keeping tools in sync
// with the OpenAI schema, so we just verify the tool presence here
let tools_by_model: HashMap<&'static str, Vec<&'static str>> = HashMap::from([
@@ -274,7 +212,7 @@ async fn prompt_tools_are_consistent_across_requests() {
.get(OPENAI_DEFAULT_MODEL)
.unwrap_or_else(|| panic!("expected tools to be defined for model {OPENAI_DEFAULT_MODEL}"))
.as_slice();
let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
let body0 = req1.single_request().body_json();
let expected_instructions = if expected_tools_names.contains(&"apply_patch") {
base_instructions
@@ -292,54 +230,32 @@ async fn prompt_tools_are_consistent_across_requests() {
);
assert_tool_names(&body0, expected_tools_names);
let body1 = requests[1].body_json::<serde_json::Value>().unwrap();
let body1 = req2.single_request().body_json();
assert_eq!(
body1["instructions"],
serde_json::json!(expected_instructions),
);
assert_tool_names(&body1, expected_tools_names);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[ignore = "flaky on ubuntu-24.04-arm - aarch64-unknown-linux-gnu"]
async fn prefixes_context_and_instructions_once_and_consistently_across_requests() {
skip_if_no_network!();
async fn prefixes_context_and_instructions_once_and_consistently_across_requests()
-> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let server = start_mock_server().await;
let req1 = mount_sse_once_match(&server, any(), sse_completed("resp-1")).await;
let req2 = mount_sse_once_match(&server, any(), sse_completed("resp-2")).await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Expect two POSTs to /v1/responses
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;
let TestCodex { codex, config, .. } = test_codex()
.with_config(|config| {
config.user_instructions = Some("be consistent and helpful".to_string());
})
.build(&server)
.await?;
codex
.submit(Op::UserInput {
@@ -347,8 +263,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
text: "hello 1".into(),
}],
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
@@ -357,31 +272,14 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
text: "hello 2".into(),
}],
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let shell = default_user_shell().await;
let expected_env_text = format!(
r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>on-request</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
{}</environment_context>"#,
cwd.path().to_string_lossy(),
match shell.name() {
Some(name) => format!(" <shell>{name}</shell>\n"),
None => String::new(),
}
);
let cwd_str = config.cwd.to_string_lossy();
let expected_env_text = default_env_context_str(&cwd_str, &shell);
let expected_ui_text = format!(
"# AGENTS.md instructions for {}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>",
cwd.path().to_string_lossy()
"# AGENTS.md instructions for {cwd_str}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>"
);
let expected_env_msg = serde_json::json!({
@@ -400,7 +298,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
"role": "user",
"content": [ { "type": "input_text", "text": "hello 1" } ]
});
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body1 = req1.single_request().body_json();
assert_eq!(
body1["input"],
serde_json::json!([expected_ui_msg, expected_env_msg, expected_user_message_1])
@@ -411,7 +309,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
"role": "user",
"content": [ { "type": "input_text", "text": "hello 2" } ]
});
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
let body2 = req2.single_request().body_json();
let expected_body2 = serde_json::json!(
[
body1["input"].as_array().unwrap().as_slice(),
@@ -420,47 +318,25 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
.concat()
);
assert_eq!(body2["input"], expected_body2);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
skip_if_no_network!();
async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let server = start_mock_server().await;
let req1 = mount_sse_once_match(&server, any(), sse_completed("resp-1")).await;
let req2 = mount_sse_once_match(&server, any(), sse_completed("resp-2")).await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Expect two POSTs to /v1/responses
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;
let TestCodex { codex, .. } = test_codex()
.with_config(|config| {
config.user_instructions = Some("be consistent and helpful".to_string());
})
.build(&server)
.await?;
// First turn
codex
@@ -469,8 +345,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
text: "hello 1".into(),
}],
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let writable = TempDir::new().unwrap();
@@ -488,8 +363,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
effort: Some(Some(ReasoningEffort::High)),
summary: Some(ReasoningSummary::Detailed),
})
.await
.unwrap();
.await?;
// Second turn after overrides
codex
@@ -498,16 +372,11 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
text: "hello 2".into(),
}],
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Verify we issued exactly two requests, and the cached prefix stayed identical.
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
let body1 = req1.single_request().body_json();
let body2 = req2.single_request().body_json();
// prompt_cache_key should remain constant across overrides
assert_eq!(
body1["prompt_cache_key"], body2["prompt_cache_key"],
@@ -548,47 +417,25 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
.concat()
);
assert_eq!(body2["input"], expected_body2);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
skip_if_no_network!();
async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let server = start_mock_server().await;
let req1 = mount_sse_once_match(&server, any(), sse_completed("resp-1")).await;
let req2 = mount_sse_once_match(&server, any(), sse_completed("resp-2")).await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Expect two POSTs to /v1/responses
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;
let TestCodex { codex, .. } = test_codex()
.with_config(|config| {
config.user_instructions = Some("be consistent and helpful".to_string());
})
.build(&server)
.await?;
// First turn
codex
@@ -597,8 +444,7 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
text: "hello 1".into(),
}],
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Second turn using per-turn overrides via UserTurn
@@ -622,16 +468,11 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
summary: ReasoningSummary::Detailed,
final_output_json_schema: None,
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Verify we issued exactly two requests, and the cached prefix stayed identical.
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
let body1 = req1.single_request().body_json();
let body2 = req2.single_request().body_json();
// prompt_cache_key should remain constant across per-turn overrides
assert_eq!(
@@ -672,38 +513,25 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
.concat()
);
assert_eq!(body2["input"], expected_body2);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
skip_if_no_network!();
async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let server = start_mock_server().await;
let req1 = mount_sse_once_match(&server, any(), sse_completed("resp-1")).await;
let req2 = mount_sse_once_match(&server, any(), sse_completed("resp-2")).await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let TestCodex { codex, config, .. } = test_codex()
.with_config(|config| {
config.user_instructions = Some("be consistent and helpful".to_string());
})
.build(&server)
.await?;
let default_cwd = config.cwd.clone();
let default_approval_policy = config.approval_policy;
@@ -712,14 +540,6 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
let default_effort = config.model_reasoning_effort;
let default_summary = config.model_reasoning_summary;
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;
codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
@@ -733,8 +553,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
summary: default_summary,
final_output_json_schema: None,
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
@@ -750,27 +569,20 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
summary: default_summary,
final_output_json_schema: None,
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
let body1 = req1.single_request().body_json();
let body2 = req2.single_request().body_json();
let shell = default_user_shell().await;
let default_cwd_lossy = default_cwd.to_string_lossy();
let expected_ui_text = format!(
"# AGENTS.md instructions for {}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>",
default_cwd.to_string_lossy()
"# AGENTS.md instructions for {default_cwd_lossy}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>"
);
let expected_ui_msg = text_user_input(expected_ui_text);
let expected_env_msg_1 = text_user_input(default_env_context_str(
&cwd.path().to_string_lossy(),
&shell,
));
let expected_env_msg_1 = text_user_input(default_env_context_str(&default_cwd_lossy, &shell));
let expected_user_message_1 = text_user_input("hello 1".to_string());
let expected_input_1 = serde_json::Value::Array(vec![
@@ -788,38 +600,25 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
expected_user_message_2,
]);
assert_eq!(body2["input"], expected_input_2);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn send_user_turn_with_changes_sends_environment_context() {
skip_if_no_network!();
async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let server = start_mock_server().await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let req1 = mount_sse_once_match(&server, any(), sse_completed("resp-1")).await;
let req2 = mount_sse_once_match(&server, any(), sse_completed("resp-2")).await;
let TestCodex { codex, config, .. } = test_codex()
.with_config(|config| {
config.user_instructions = Some("be consistent and helpful".to_string());
})
.build(&server)
.await?;
let default_cwd = config.cwd.clone();
let default_approval_policy = config.approval_policy;
@@ -828,14 +627,6 @@ async fn send_user_turn_with_changes_sends_environment_context() {
let default_effort = config.model_reasoning_effort;
let default_summary = config.model_reasoning_summary;
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config.clone())
.await
.expect("create new conversation")
.conversation;
codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
@@ -849,8 +640,7 @@ async fn send_user_turn_with_changes_sends_environment_context() {
summary: default_summary,
final_output_json_schema: None,
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
codex
@@ -866,15 +656,11 @@ async fn send_user_turn_with_changes_sends_environment_context() {
summary: ReasoningSummary::Detailed,
final_output_json_schema: None,
})
.await
.unwrap();
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
let body1 = req1.single_request().body_json();
let body2 = req2.single_request().body_json();
let shell = default_user_shell().await;
let expected_ui_text = format!(
@@ -913,4 +699,6 @@ async fn send_user_turn_with_changes_sends_environment_context() {
expected_user_message_2,
]);
assert_eq!(body2["input"], expected_input_2);
Ok(())
}