mirror of
https://github.com/openai/codex.git
synced 2026-04-30 01:16:54 +00:00
feat(app-server): propagate traces across tasks and core ops (#14387)
## Summary This PR keeps app-server RPC request trace context alive for the full lifetime of the work that request kicks off (e.g. for `thread/start`, this is `app-server rpc handler -> tokio background task -> core op submissions`). Previously we lost trace lineage once the request handler returned or handed work off to background tasks. This approach is especially relevant for `thread/start` and other RPC handlers that run in a non-blocking way. In the near future we'll most likely want to make all app-server handlers run in a non-blocking way by default, and only queue operations that must execute in order (e.g. thread RPCs per thread?), so we want to make sure tracing in app-server works reliably in general. Depends on https://github.com/openai/codex/pull/14300 **Before** <img width="155" height="207" alt="image" src="https://github.com/user-attachments/assets/c9487459-36f1-436c-beb7-fafeb40737af" /> **After** <img width="299" height="337" alt="image" src="https://github.com/user-attachments/assets/727392b2-d072-4427-9dc4-0502d8652dea" /> ## What changed - Keep request-scoped trace context around until we send the final response or error, or the connection closes. - Thread that trace context through detached `thread/start` work so background startup stays attached to the originating request. - Pass request trace context through to downstream core operations, including: - thread creation - resume/fork flows - turn submission - review - interrupt - realtime conversation operations - Add tracing tests that verify: - remote W3C trace context is preserved for `thread/start` - remote W3C trace context is preserved for `turn/start` - downstream core spans stay under the originating request span - request-scoped tracing state is cleaned up correctly - Clean up shutdown behavior so detached background tasks and spawned threads are drained before process exit.
This commit is contained in:
@@ -687,7 +687,7 @@ async fn resume_conversation(
|
||||
let auth_manager = codex_core::test_support::auth_manager_from_auth(
|
||||
codex_core::CodexAuth::from_api_key("dummy"),
|
||||
);
|
||||
Box::pin(manager.resume_thread_from_rollout(config.clone(), path, auth_manager))
|
||||
Box::pin(manager.resume_thread_from_rollout(config.clone(), path, auth_manager, None))
|
||||
.await
|
||||
.expect("resume conversation")
|
||||
.thread
|
||||
@@ -700,7 +700,7 @@ async fn fork_thread(
|
||||
path: std::path::PathBuf,
|
||||
nth_user_message: usize,
|
||||
) -> Arc<CodexThread> {
|
||||
Box::pin(manager.fork_thread(nth_user_message, config.clone(), path, false))
|
||||
Box::pin(manager.fork_thread(nth_user_message, config.clone(), path, false, None))
|
||||
.await
|
||||
.expect("fork conversation")
|
||||
.thread
|
||||
|
||||
@@ -110,7 +110,7 @@ async fn fork_thread_twice_drops_to_first_message() {
|
||||
thread: codex_fork1,
|
||||
..
|
||||
} = thread_manager
|
||||
.fork_thread(1, config_for_fork.clone(), base_path.clone(), false)
|
||||
.fork_thread(1, config_for_fork.clone(), base_path.clone(), false, None)
|
||||
.await
|
||||
.expect("fork 1");
|
||||
|
||||
@@ -129,7 +129,7 @@ async fn fork_thread_twice_drops_to_first_message() {
|
||||
thread: codex_fork2,
|
||||
..
|
||||
} = thread_manager
|
||||
.fork_thread(0, config_for_fork.clone(), fork1_path.clone(), false)
|
||||
.fork_thread(0, config_for_fork.clone(), fork1_path.clone(), false, None)
|
||||
.await
|
||||
.expect("fork 2");
|
||||
|
||||
|
||||
@@ -416,7 +416,7 @@ async fn resume_and_fork_append_permissions_messages() -> Result<()> {
|
||||
fork_config.permissions.approval_policy = Constrained::allow_any(AskForApproval::UnlessTrusted);
|
||||
let forked = initial
|
||||
.thread_manager
|
||||
.fork_thread(usize::MAX, fork_config, rollout_path, false)
|
||||
.fork_thread(usize::MAX, fork_config, rollout_path, false, None)
|
||||
.await?;
|
||||
forked
|
||||
.thread
|
||||
|
||||
@@ -98,7 +98,7 @@ async fn emits_warning_when_resumed_model_differs() {
|
||||
thread: conversation,
|
||||
..
|
||||
} = thread_manager
|
||||
.resume_thread_with_history(config, initial_history, auth_manager, false)
|
||||
.resume_thread_with_history(config, initial_history, auth_manager, false, None)
|
||||
.await
|
||||
.expect("resume conversation");
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ async fn emits_warning_when_unstable_features_enabled_via_config() {
|
||||
thread: conversation,
|
||||
..
|
||||
} = thread_manager
|
||||
.resume_thread_with_history(config, InitialHistory::New, auth_manager, false)
|
||||
.resume_thread_with_history(config, InitialHistory::New, auth_manager, false, None)
|
||||
.await
|
||||
.expect("spawn conversation");
|
||||
|
||||
@@ -83,7 +83,7 @@ async fn suppresses_warning_when_configured() {
|
||||
thread: conversation,
|
||||
..
|
||||
} = thread_manager
|
||||
.resume_thread_with_history(config, InitialHistory::New, auth_manager, false)
|
||||
.resume_thread_with_history(config, InitialHistory::New, auth_manager, false, None)
|
||||
.await
|
||||
.expect("spawn conversation");
|
||||
|
||||
|
||||
Reference in New Issue
Block a user