Merge upstream/main into subagent inbox injection

This commit is contained in:
Friel
2026-04-02 07:49:01 +00:00
487 changed files with 35270 additions and 25966 deletions

View File

@@ -16,9 +16,9 @@ use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use codex_app_server_protocol::ThreadStatus;
use codex_app_server_protocol::ThreadStatusChangedNotification;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR;
use codex_core::config::set_project_trust_level;
use codex_login::AuthCredentialsStoreMode;
use codex_login::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
@@ -26,6 +26,7 @@ use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::Mock;
@@ -34,6 +35,11 @@ use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
use super::analytics::assert_basic_thread_initialized_event;
use super::analytics::enable_analytics_capture;
use super::analytics::thread_initialized_event;
use super::analytics::wait_for_analytics_payload;
/// Upper bound for each await on the MCP process stream in these tests.
/// Uses the `Duration` already imported at the top of the file, matching
/// the short form used elsewhere (e.g. `Duration::from_millis(250)`).
const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10);
#[tokio::test]
@@ -153,6 +159,73 @@ async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_start_tracks_thread_initialized_analytics() -> Result<()> {
    // Stand up a mock Responses server and a codex home whose config has
    // general_analytics enabled, then capture analytics traffic on the server.
    let server = create_mock_responses_server_repeating_assistant("Done").await;
    let codex_home = TempDir::new()?;
    create_config_toml_with_chatgpt_base_url(
        codex_home.path(),
        &server.uri(),
        &server.uri(),
        /*general_analytics_enabled*/ true,
    )?;
    enable_analytics_capture(&server, codex_home.path()).await?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Start a thread and wait for the matching JSON-RPC response.
    let request_id = mcp
        .send_thread_start_request(ThreadStartParams::default())
        .await?;
    let response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let started = to_response::<ThreadStartResponse>(response)?;

    // Exactly one analytics event should be emitted for the new thread,
    // and it should carry the thread id, model, and "new" source.
    let payload = wait_for_analytics_payload(&server, DEFAULT_READ_TIMEOUT).await?;
    let events = payload["events"].as_array().expect("events array");
    assert_eq!(events.len(), 1);
    let event = thread_initialized_event(&payload)?;
    assert_basic_thread_initialized_event(event, &started.thread.id, "mock-model", "new");
    Ok(())
}
#[tokio::test]
async fn thread_start_does_not_track_thread_initialized_analytics_without_feature() -> Result<()> {
    // Same setup as the positive analytics test, except general_analytics
    // is disabled in the generated config.toml.
    let server = create_mock_responses_server_repeating_assistant("Done").await;
    let codex_home = TempDir::new()?;
    create_config_toml_with_chatgpt_base_url(
        codex_home.path(),
        &server.uri(),
        &server.uri(),
        /*general_analytics_enabled*/ false,
    )?;
    enable_analytics_capture(&server, codex_home.path()).await?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Start a thread; the response body itself is irrelevant here, we only
    // need the request to complete successfully.
    let request_id = mcp
        .send_thread_start_request(ThreadStartParams::default())
        .await?;
    let response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let _ = to_response::<ThreadStartResponse>(response)?;

    // With the feature off, no analytics payload should ever arrive; a short
    // timeout keeps this negative check fast while still proving absence.
    let outcome = wait_for_analytics_payload(&server, Duration::from_millis(250)).await;
    assert!(
        outcome.is_err(),
        "thread analytics should be gated off when general_analytics is disabled"
    );
    Ok(())
}
#[tokio::test]
async fn thread_start_respects_project_config_from_cwd() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
@@ -460,6 +533,7 @@ async fn thread_start_surfaces_cloud_requirements_load_errors() -> Result<()> {
codex_home.path(),
&model_server.uri(),
&chatgpt_base_url,
/*general_analytics_enabled*/ false,
)?;
write_chatgpt_auth(
codex_home.path(),
@@ -543,7 +617,13 @@ fn create_config_toml_with_chatgpt_base_url(
codex_home: &Path,
server_uri: &str,
chatgpt_base_url: &str,
general_analytics_enabled: bool,
) -> std::io::Result<()> {
let general_analytics_toml = if general_analytics_enabled {
"\n[features]\ngeneral_analytics = true\n".to_string()
} else {
String::new()
};
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
@@ -555,6 +635,7 @@ sandbox_mode = "read-only"
chatgpt_base_url = "{chatgpt_base_url}"
model_provider = "mock_provider"
{general_analytics_toml}
[model_providers.mock_provider]
name = "Mock provider for test"