Merge commit '4fd857d8567b3aa55940c022abe611ce1edec332' into dev/friel/collab-stack

This commit is contained in:
Friel
2026-04-01 00:03:27 +00:00
151 changed files with 7739 additions and 2841 deletions

View File

@@ -71,9 +71,11 @@ use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
#[cfg(windows)]
// Longer read timeout on Windows — presumably because Windows CI runners
// are slower; TODO confirm the motivating flake before tuning this.
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(25);
#[cfg(not(windows))]
use super::analytics::assert_basic_thread_initialized_event;
use super::analytics::enable_analytics_capture;
use super::analytics::thread_initialized_event;
use super::analytics::wait_for_analytics_payload;
// Default wait for reads from the MCP process on non-Windows hosts.
// NOTE(review): the `#[cfg(not(windows))]` attribute above appears separated
// from this const by `use` lines — looks like a merge/diff artifact; confirm
// the attribute still guards this item in the merged file.
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
// Default instructions string for the Codex (GPT-5-based) agent; presumably
// asserted against elsewhere in this test file — usage not visible here, confirm.
const CODEX_5_2_INSTRUCTIONS_TEMPLATE_DEFAULT: &str = "You are Codex, a coding agent based on GPT-5. You and the user share the same workspace and collaborate to achieve the user's goals.";
@@ -124,6 +126,51 @@ async fn thread_resume_rejects_unmaterialized_thread() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_resume_tracks_thread_initialized_analytics() -> Result<()> {
    // Stand up a mock responses server and a CODEX_HOME configured with
    // general analytics enabled, capturing analytics payloads on the server.
    let mock_server = create_mock_responses_server_repeating_assistant("Done").await;
    let home = TempDir::new()?;
    create_config_toml_with_chatgpt_base_url(
        home.path(),
        &mock_server.uri(),
        &mock_server.uri(),
        /*general_analytics_enabled*/ true,
    )?;
    enable_analytics_capture(&mock_server, home.path()).await?;

    // Materialize an on-disk rollout so there is an existing thread to resume.
    let rollout_thread_id = create_fake_rollout_with_text_elements(
        home.path(),
        "2025-01-05T12-00-00",
        "2025-01-05T12:00:00Z",
        "Saved user message",
        Vec::new(),
        Some("mock_provider"),
        /*git_info*/ None,
    )?;

    // Boot the MCP process and resume the saved thread.
    let mut client = McpProcess::new(home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, client.initialize()).await??;
    let params = ThreadResumeParams {
        thread_id: rollout_thread_id,
        ..Default::default()
    };
    let request_id = client.send_thread_resume_request(params).await?;
    let response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        client.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(response)?;

    // Resuming should emit a `thread_initialized` analytics event tagged "resumed".
    let payload = wait_for_analytics_payload(&mock_server, DEFAULT_READ_TIMEOUT).await?;
    let event = thread_initialized_event(&payload)?;
    assert_basic_thread_initialized_event(event, &thread.id, "gpt-5.2-codex", "resumed");
    Ok(())
}
#[tokio::test]
async fn thread_resume_returns_rollout_history() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
@@ -1514,6 +1561,7 @@ async fn thread_resume_surfaces_cloud_requirements_load_errors() -> Result<()> {
codex_home.path(),
&model_server.uri(),
&chatgpt_base_url,
/*general_analytics_enabled*/ false,
)?;
write_chatgpt_auth(
codex_home.path(),
@@ -1892,6 +1940,7 @@ model_provider = "mock_provider"
[features]
personality = true
general_analytics = true
[model_providers.mock_provider]
name = "Mock provider for test"
@@ -1908,7 +1957,13 @@ fn create_config_toml_with_chatgpt_base_url(
codex_home: &std::path::Path,
server_uri: &str,
chatgpt_base_url: &str,
general_analytics_enabled: bool,
) -> std::io::Result<()> {
let general_analytics_toml = if general_analytics_enabled {
"\ngeneral_analytics = true".to_string()
} else {
String::new()
};
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
@@ -1923,6 +1978,7 @@ model_provider = "mock_provider"
[features]
personality = true
{general_analytics_toml}
[model_providers.mock_provider]
name = "Mock provider for test"