mirror of
https://github.com/openai/codex.git
synced 2026-05-15 16:53:05 +00:00
## Summary

This adds a stable Codex installation ID and includes it on Responses API requests via `x-codex-installation-id`, passed in via the `client_metadata` field for analytics/debugging. The main pieces are:

- persist a UUID in `$CODEX_HOME/installation_id`
- thread the installation ID into `ModelClient`
- send it in `client_metadata` on Responses requests so it works consistently across HTTP and WebSocket transports
172 lines · 5.7 KiB · Rust
use super::AuthRequestTelemetryContext;
|
|
use super::ModelClient;
|
|
use super::PendingUnauthorizedRetry;
|
|
use super::UnauthorizedRecoveryExecution;
|
|
use super::X_CODEX_INSTALLATION_ID_HEADER;
|
|
use super::X_CODEX_PARENT_THREAD_ID_HEADER;
|
|
use super::X_CODEX_TURN_METADATA_HEADER;
|
|
use super::X_CODEX_WINDOW_ID_HEADER;
|
|
use super::X_OPENAI_SUBAGENT_HEADER;
|
|
use codex_api::CoreAuthProvider;
|
|
use codex_app_server_protocol::AuthMode;
|
|
use codex_model_provider_info::WireApi;
|
|
use codex_model_provider_info::create_oss_provider_with_base_url;
|
|
use codex_otel::SessionTelemetry;
|
|
use codex_protocol::ThreadId;
|
|
use codex_protocol::openai_models::ModelInfo;
|
|
use codex_protocol::protocol::SessionSource;
|
|
use codex_protocol::protocol::SubAgentSource;
|
|
use pretty_assertions::assert_eq;
|
|
use serde_json::json;
|
|
|
|
fn test_model_client(session_source: SessionSource) -> ModelClient {
|
|
let provider = create_oss_provider_with_base_url("https://example.com/v1", WireApi::Responses);
|
|
ModelClient::new(
|
|
/*auth_manager*/ None,
|
|
ThreadId::new(),
|
|
/*installation_id*/ "11111111-1111-4111-8111-111111111111".to_string(),
|
|
provider,
|
|
session_source,
|
|
/*model_verbosity*/ None,
|
|
/*enable_request_compression*/ false,
|
|
/*include_timing_metrics*/ false,
|
|
/*beta_features_header*/ None,
|
|
)
|
|
}
|
|
|
|
fn test_model_info() -> ModelInfo {
|
|
serde_json::from_value(json!({
|
|
"slug": "gpt-test",
|
|
"display_name": "gpt-test",
|
|
"description": "desc",
|
|
"default_reasoning_level": "medium",
|
|
"supported_reasoning_levels": [
|
|
{"effort": "medium", "description": "medium"}
|
|
],
|
|
"shell_type": "shell_command",
|
|
"visibility": "list",
|
|
"supported_in_api": true,
|
|
"priority": 1,
|
|
"upgrade": null,
|
|
"base_instructions": "base instructions",
|
|
"model_messages": null,
|
|
"supports_reasoning_summaries": false,
|
|
"support_verbosity": false,
|
|
"default_verbosity": null,
|
|
"apply_patch_tool_type": null,
|
|
"truncation_policy": {"mode": "bytes", "limit": 10000},
|
|
"supports_parallel_tool_calls": false,
|
|
"supports_image_detail_original": false,
|
|
"context_window": 272000,
|
|
"auto_compact_token_limit": null,
|
|
"experimental_supported_tools": []
|
|
}))
|
|
.expect("deserialize test model info")
|
|
}
|
|
|
|
fn test_session_telemetry() -> SessionTelemetry {
|
|
SessionTelemetry::new(
|
|
ThreadId::new(),
|
|
"gpt-test",
|
|
"gpt-test",
|
|
/*account_id*/ None,
|
|
/*account_email*/ None,
|
|
/*auth_mode*/ None,
|
|
"test-originator".to_string(),
|
|
/*log_user_prompts*/ false,
|
|
"test-terminal".to_string(),
|
|
SessionSource::Cli,
|
|
)
|
|
}
|
|
|
|
/// A subagent of kind `Other` should surface its label verbatim in the
/// `x-openai-subagent` header.
#[test]
fn build_subagent_headers_sets_other_subagent_label() {
    let source =
        SessionSource::SubAgent(SubAgentSource::Other("memory_consolidation".to_string()));
    let headers = test_model_client(source).build_subagent_headers();

    let label = headers
        .get(X_OPENAI_SUBAGENT_HEADER)
        .and_then(|v| v.to_str().ok());
    assert_eq!(label, Some("memory_consolidation"));
}
|
|
|
|
/// WebSocket client metadata must carry the installation ID, the window ID
/// (conversation id plus generation counter), the subagent label, the parent
/// thread ID, and any caller-supplied turn metadata.
#[test]
fn build_ws_client_metadata_includes_window_lineage_and_turn_metadata() {
    let parent_thread_id = ThreadId::new();
    let client = test_model_client(SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
        parent_thread_id,
        depth: 2,
        agent_path: None,
        agent_nickname: None,
        agent_role: None,
    }));

    // Bump the window generation so the expected window ID carries a ":1" suffix.
    client.advance_window_generation();

    let client_metadata = client.build_ws_client_metadata(Some(r#"{"turn_id":"turn-123"}"#));
    let conversation_id = client.state.conversation_id;

    let mut expected = std::collections::HashMap::new();
    expected.insert(
        X_CODEX_INSTALLATION_ID_HEADER.to_string(),
        "11111111-1111-4111-8111-111111111111".to_string(),
    );
    expected.insert(
        X_CODEX_WINDOW_ID_HEADER.to_string(),
        format!("{conversation_id}:1"),
    );
    expected.insert(
        X_OPENAI_SUBAGENT_HEADER.to_string(),
        "collab_spawn".to_string(),
    );
    expected.insert(
        X_CODEX_PARENT_THREAD_ID_HEADER.to_string(),
        parent_thread_id.to_string(),
    );
    expected.insert(
        X_CODEX_TURN_METADATA_HEADER.to_string(),
        r#"{"turn_id":"turn-123"}"#.to_string(),
    );
    assert_eq!(client_metadata, expected);
}
|
|
|
|
#[tokio::test]
|
|
async fn summarize_memories_returns_empty_for_empty_input() {
|
|
let client = test_model_client(SessionSource::Cli);
|
|
let model_info = test_model_info();
|
|
let session_telemetry = test_session_telemetry();
|
|
|
|
let output = client
|
|
.summarize_memories(
|
|
Vec::new(),
|
|
&model_info,
|
|
/*effort*/ None,
|
|
&session_telemetry,
|
|
)
|
|
.await
|
|
.expect("empty summarize request should succeed");
|
|
assert_eq!(output.len(), 0);
|
|
}
|
|
|
|
/// The auth telemetry context should record the auth mode, whether an auth
/// header was attached (and under which name), and the recovery mode/phase of
/// a pending unauthorized retry.
#[test]
fn auth_request_telemetry_context_tracks_attached_auth_and_retry_phase() {
    let auth_provider = CoreAuthProvider::for_test(Some("access-token"), Some("workspace-123"));
    let pending_retry = PendingUnauthorizedRetry::from_recovery(UnauthorizedRecoveryExecution {
        mode: "managed",
        phase: "refresh_token",
    });
    let auth_context =
        AuthRequestTelemetryContext::new(Some(AuthMode::Chatgpt), &auth_provider, pending_retry);

    assert_eq!(auth_context.auth_mode, Some("Chatgpt"));
    assert!(auth_context.auth_header_attached);
    assert_eq!(auth_context.auth_header_name, Some("authorization"));
    assert!(auth_context.retry_after_unauthorized);
    assert_eq!(auth_context.recovery_mode, Some("managed"));
    assert_eq!(auth_context.recovery_phase, Some("refresh_token"));
}
|