[codex-analytics] feature plumbing and emittance (#16640)
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed with [ReviewStack](https://reviewstack.dev/openai/codex/pull/16640).

* #16870
* #16706
* #16641
* __->__ #16640
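The first new test below pins down the shape of the emitted `codex_turn_event`. Reconstructed from its assertions, the captured body looks roughly like the following sketch; the envelope around `event_params`, the field order, and the placeholder values are assumptions, not the exact wire format:

```rust
// Shape-only sketch of the captured analytics event, reconstructed from the
// assertions in turn_start_tracks_turn_event_analytics below. Placeholder
// strings and the surrounding envelope are assumptions.
let expected = serde_json::json!({
    "event_params": {
        "thread_id": "<thread id>",
        "turn_id": "<turn id>",
        "app_server_client": { "product_client_id": "<client name>" },
        "model": "mock-model",
        "model_provider": "mock_provider",
        "sandbox_policy": "read_only",
        "ephemeral": false,
        "thread_source": "user",
        "initialization_mode": "new",
        "subagent_source": null,
        "parent_thread_id": null,
        "num_input_images": 1,
        "status": "completed",
        "started_at": 0,   // u64 timestamps; the test asserts only presence
        "completed_at": 0,
        "duration_ms": 0,
        "input_tokens": 0, // the mock stream reports no usage
        "cached_input_tokens": 0,
        "output_tokens": 0,
        "reasoning_output_tokens": 0,
        "total_tokens": 0
    }
});
```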
@@ -1,4 +1,5 @@
use anyhow::Result;
use app_test_support::DEFAULT_CLIENT_NAME;
use app_test_support::McpProcess;
use app_test_support::create_apply_patch_sse_response;
use app_test_support::create_exec_command_sse_response;
@@ -9,6 +10,7 @@ use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::create_shell_command_sse_response;
use app_test_support::format_with_current_shell_display;
use app_test_support::to_response;
use app_test_support::write_mock_responses_config_toml_with_chatgpt_base_url;
use codex_app_server::INPUT_TOO_LARGE_ERROR_CODE;
use codex_app_server::INVALID_PARAMS_ERROR_CODE;
use codex_app_server_protocol::ByteRange;
@@ -64,6 +66,10 @@ use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

use super::analytics::enable_analytics_capture;
use super::analytics::mount_analytics_capture;
use super::analytics::wait_for_analytics_event;

#[cfg(windows)]
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(25);
#[cfg(not(windows))]
@@ -328,6 +334,163 @@ async fn thread_start_omits_empty_instruction_overrides_from_model_request() ->
    Ok(())
}

#[tokio::test]
async fn turn_start_tracks_turn_event_analytics() -> Result<()> {
    let responses = vec![create_final_assistant_message_sse_response("Done")?];
    let server = create_mock_responses_server_sequence_unchecked(responses).await;

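    // Point the config (model and ChatGPT base URLs) at the mock server and
    // opt this CODEX_HOME into analytics capture.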
    let codex_home = TempDir::new()?;
    write_mock_responses_config_toml_with_chatgpt_base_url(
        codex_home.path(),
        &server.uri(),
        &server.uri(),
    )?;
    enable_analytics_capture(&server, codex_home.path()).await?;

    let mut mcp = McpProcess::new_without_managed_config(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

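    // Start a thread on the mock model.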
    let thread_req = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let thread_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

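    // Start a turn whose only input is an image, so num_input_images is 1.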
    let turn_req = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Image {
                url: "https://example.com/a.png".to_string(),
            }],
            ..Default::default()
        })
        .await?;
    let turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
    )
    .await??;
    let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;

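    // Let the turn finish before polling for the analytics event.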
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

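    // The captured codex_turn_event should carry the full turn metadata.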
    let event = wait_for_analytics_event(&server, DEFAULT_READ_TIMEOUT, "codex_turn_event").await?;
    assert_eq!(event["event_params"]["thread_id"], thread.id);
    assert_eq!(event["event_params"]["turn_id"], turn.id);
    assert_eq!(
        event["event_params"]["app_server_client"]["product_client_id"],
        DEFAULT_CLIENT_NAME
    );
    assert_eq!(event["event_params"]["model"], "mock-model");
    assert_eq!(event["event_params"]["model_provider"], "mock_provider");
    assert_eq!(event["event_params"]["sandbox_policy"], "read_only");
    assert_eq!(event["event_params"]["ephemeral"], false);
    assert_eq!(event["event_params"]["thread_source"], "user");
    assert_eq!(event["event_params"]["initialization_mode"], "new");
    assert_eq!(
        event["event_params"]["subagent_source"],
        serde_json::Value::Null
    );
    assert_eq!(
        event["event_params"]["parent_thread_id"],
        serde_json::Value::Null
    );
    assert_eq!(event["event_params"]["num_input_images"], 1);
    assert_eq!(event["event_params"]["status"], "completed");
    assert!(event["event_params"]["started_at"].as_u64().is_some());
    assert!(event["event_params"]["completed_at"].as_u64().is_some());
    assert!(event["event_params"]["duration_ms"].as_u64().is_some());
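    // The mock stream reports no token usage, so every counter is zero.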
    assert_eq!(event["event_params"]["input_tokens"], 0);
    assert_eq!(event["event_params"]["cached_input_tokens"], 0);
    assert_eq!(event["event_params"]["output_tokens"], 0);
    assert_eq!(event["event_params"]["reasoning_output_tokens"], 0);
    assert_eq!(event["event_params"]["total_tokens"], 0);

    Ok(())
}

#[tokio::test]
async fn turn_start_does_not_track_turn_event_analytics_without_feature() -> Result<()> {
    let responses = vec![create_final_assistant_message_sse_response("Done")?];
    let server = create_mock_responses_server_sequence_unchecked(responses).await;

    let codex_home = TempDir::new()?;
    write_mock_responses_config_toml_with_chatgpt_base_url(
        codex_home.path(),
        &server.uri(),
        &server.uri(),
    )?;
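    // Append a feature flag that turns general analytics off.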
    let config_path = codex_home.path().join("config.toml");
    let config_toml = std::fs::read_to_string(&config_path)?;
    std::fs::write(
        &config_path,
        format!("{config_toml}\n[features]\ngeneral_analytics = false\n"),
    )?;
    mount_analytics_capture(&server, codex_home.path()).await?;

    let mut mcp = McpProcess::new_without_managed_config(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let thread_req = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let thread_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

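    // Run an ordinary text turn; it should complete without emitting analytics.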
    let turn_req = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id,
            input: vec![V2UserInput::Text {
                text: "hello".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    let turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
    )
    .await??;
    let _ = to_response::<TurnStartResponse>(turn_resp)?;

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

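    // No analytics request should ever arrive, so poll with a short timeout
    // and expect it to elapse.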
    let turn_event = wait_for_analytics_event(
        &server,
        std::time::Duration::from_millis(250),
        "codex_turn_event",
    )
    .await;
    assert!(
        turn_event.is_err(),
        "turn analytics should be gated off when general_analytics is disabled"
    );
    Ok(())
}

#[tokio::test]
async fn turn_start_accepts_text_at_limit_with_mention_item() -> Result<()> {
    let responses = vec![create_final_assistant_message_sse_response("Done")?];
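The tests import `enable_analytics_capture`, `mount_analytics_capture`, and `wait_for_analytics_event` from a sibling `super::analytics` module that this hunk does not show. As a hypothetical sketch (not the actual helpers), they could be built roughly like this, assuming the mock server is wiremock-based and that analytics POSTs land on a made-up `/analytics` path with an `event_name` field:

```rust
use std::path::Path;
use std::time::Duration;

use anyhow::{Result, anyhow};
use wiremock::matchers::{method, path};
use wiremock::{Mock, MockServer, ResponseTemplate};

/// Mount a catch-all endpoint that accepts analytics POSTs.
/// The "/analytics" path is an assumption for illustration.
pub async fn mount_analytics_capture(server: &MockServer, _codex_home: &Path) -> Result<()> {
    Mock::given(method("POST"))
        .and(path("/analytics"))
        .respond_with(ResponseTemplate::new(200))
        .mount(server)
        .await;
    Ok(())
}

/// Poll the requests the mock server has recorded until one carries the named
/// event, or error out when the timeout elapses (the negative test relies on
/// that error). The "event_name" field is an assumption.
pub async fn wait_for_analytics_event(
    server: &MockServer,
    timeout: Duration,
    event_name: &str,
) -> Result<serde_json::Value> {
    let deadline = tokio::time::Instant::now() + timeout;
    loop {
        for request in server.received_requests().await.unwrap_or_default() {
            if let Ok(body) = serde_json::from_slice::<serde_json::Value>(&request.body) {
                if body["event_name"] == event_name {
                    return Ok(body);
                }
            }
        }
        if tokio::time::Instant::now() >= deadline {
            return Err(anyhow!("timed out waiting for {event_name}"));
        }
        tokio::time::sleep(Duration::from_millis(25)).await;
    }
}
```

Presumably `enable_analytics_capture` does the same mounting and additionally opts the config into analytics, which would explain why the negative test calls `mount_analytics_capture` and appends `general_analytics = false` to `config.toml` by hand.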