Files
codex/codex-rs/core/tests/suite/window_headers.rs
neil-oai a92a5085bd Forward app-server turn clientMetadata to Responses (#16009)
## Summary
App-server v2 already receives turn-scoped `clientMetadata`, but the
Rust app-server was dropping it before the outbound Responses request.
This change keeps the fix lightweight by threading that metadata through
the existing turn-metadata path rather than inventing a new transport.

## What we're trying to do and why
We want turn-scoped metadata from the app-server protocol layer,
especially fields like Hermes/GAAS run IDs, to survive all the way to
the actual Responses API request so it is visible in downstream
websocket request logging and analytics.

The specific bug was:
- app-server protocol uses camelCase `clientMetadata`
- Responses transport already has an existing turn metadata carrier:
`x-codex-turn-metadata`
- websocket transport already rewrites that header into
`request.request_body.client_metadata["x-codex-turn-metadata"]`
- but the Rust app-server never parsed or stored `clientMetadata`, so
nothing from the app-server request was making it into that existing
path

This PR fixes that without adding a new header or a second metadata
channel.

## How we did it
### Protocol surface
- Add optional `clientMetadata` to v2 `TurnStartParams` and
`TurnSteerParams`
- Regenerate the JSON schema / TypeScript fixtures
- Update app-server docs to describe the field and its behavior

### Runtime plumbing
- Add a dedicated core op for app-server user input carrying turn-scoped
metadata: `Op::UserInputWithClientMetadata`
- Wire `turn/start` and `turn/steer` through that op / signature path
instead of dropping the metadata at the message-processor boundary
- Store the metadata in `TurnMetadataState`

### Transport behavior
- Reuse the existing serialized `x-codex-turn-metadata` payload
- Merge the new app-server `clientMetadata` into that JSON additively
- Do **not** replace built-in reserved fields already present in the
turn metadata payload
- Keep websocket behavior unchanged at the outer shape level: it still
sends only `client_metadata["x-codex-turn-metadata"]`, but that JSON
string now contains the merged fields
- Keep HTTP fallback behavior unchanged except that the existing
`x-codex-turn-metadata` header now includes the merged fields too

### Request shape before / after
Before, a websocket `response.create` looked like:
```json
{
  "type": "response.create",
  "client_metadata": {
    "x-codex-turn-metadata": "{\"session_id\":\"...\",\"turn_id\":\"...\"}"
  }
}
```
Even if the app-server caller supplied `clientMetadata`, it was not
represented there.

After, the same request shape is preserved, but the serialized payload
now includes the new turn-scoped fields:
```json
{
  "type": "response.create",
  "client_metadata": {
    "x-codex-turn-metadata": "{\"session_id\":\"...\",\"turn_id\":\"...\",\"fiber_run_id\":\"fiber-start-123\",\"origin\":\"gaas\"}"
  }
}
```

## Validation
### Targeted tests added / updated
- protocol round-trip coverage for `clientMetadata` on `turn/start` and
`turn/steer`
- protocol round-trip coverage for `Op::UserInputWithClientMetadata`
- `TurnMetadataState` merge test proving client metadata is added
without overwriting reserved built-in fields
- websocket request-shape test proving outbound `response.create`
contains merged metadata inside
`client_metadata["x-codex-turn-metadata"]`
- app-server integration tests proving:
  - `turn/start` forwards `clientMetadata` into the outbound Responses
    request path
  - websocket warmup + real turn request both behave correctly
  - `turn/steer` updates the follow-up request metadata

### Commands run
- `just write-app-server-schema`
- `cargo test -p codex-app-server-protocol`
- `cargo test -p codex-protocol`
- `cargo test -p codex-core
turn_metadata_state_merges_client_metadata_without_replacing_reserved_fields
--lib`
- `cargo test -p codex-core --test all
responses_websocket_preserves_custom_turn_metadata_fields`
- `cargo test -p codex-app-server --test all client_metadata`
- `cargo test -p codex-app-server --test all
turn_start_forwards_client_metadata_to_responses_websocket_request_body_v2
-- --nocapture`
- `just fmt`
- `just fix -p codex-core -p codex-protocol -p codex-app-server-protocol
-p codex-app-server`
- `just fix -p codex-exec -p codex-tui-app-server`
- `just argument-comment-lint`

### Full suite note
`cargo test` in `codex-rs` still fails in:
- `suite::v2::turn_interrupt::turn_interrupt_resolves_pending_command_approval_request`

I verified that same failure on a clean detached `HEAD` worktree with an
isolated `CARGO_TARGET_DIR`, so it is not caused by this patch.
2026-04-09 11:52:37 -07:00

148 lines
5.4 KiB
Rust

#![allow(clippy::expect_used)]
use super::compact::COMPACT_WARNING_MESSAGE;
use anyhow::Result;
use codex_core::CodexThread;
use codex_core::compact::SUMMARIZATION_PROMPT;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::WarningEvent;
use codex_protocol::user_input::UserInput;
use core_test_support::responses::ResponsesRequest;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::mount_sse_sequence;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use std::sync::Arc;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
/// Verifies the lifecycle of the `x-codex-window-id` header across a full
/// session: the generation component stays at 0 through the first turn and the
/// compact turn, advances to 1 for the turn after compaction, survives a
/// resume from the rollout file, and resets to 0 (with a fresh thread id) on a
/// fork.
async fn window_id_advances_after_compact_persists_on_resume_and_resets_on_fork() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    // Five canned SSE responses, one per model request made below:
    // 1) first user turn, 2) compact/summarize turn, 3) post-compact turn,
    // 4) post-resume turn, 5) post-fork turn.
    let request_log = mount_sse_sequence(
        &server,
        vec![
            sse(vec![
                ev_assistant_message("msg-1", "first reply"),
                ev_completed("resp-1"),
            ]),
            sse(vec![
                ev_assistant_message("msg-2", "summary"),
                ev_completed("resp-2"),
            ]),
            sse(vec![ev_completed("resp-3")]),
            sse(vec![ev_completed("resp-4")]),
            sse(vec![ev_completed("resp-5")]),
        ],
    )
    .await;
    // NOTE(review): the non-OpenAI provider name and explicit compact prompt
    // appear required for the compact path exercised here — confirm against
    // the compact suite's conventions.
    let mut builder = test_codex().with_config(|config| {
        config.model_provider.name = "Non-OpenAI Model provider".to_string();
        config.compact_prompt = Some(SUMMARIZATION_PROMPT.to_string());
    });
    let initial = builder.build(&server).await?;
    let initial_thread = Arc::clone(&initial.codex);
    // Rollout path is needed later for both resume and fork.
    let rollout_path = initial
        .session_configured
        .rollout_path
        .clone()
        .expect("rollout path");
    // Turn before compaction (request 0), the compaction itself (request 1),
    // and a turn after compaction (request 2), then a clean shutdown so the
    // rollout file is finalized before resuming.
    submit_user_turn(&initial_thread, "before compact").await?;
    submit_compact_turn(&initial_thread).await?;
    submit_user_turn(&initial_thread, "after compact").await?;
    shutdown_thread(&initial_thread).await?;
    // Resume the same session from its rollout file (request 3).
    let resumed = builder
        .resume(&server, initial.home.clone(), rollout_path.clone())
        .await?;
    submit_user_turn(&resumed.codex, "after resume").await?;
    shutdown_thread(&resumed.codex).await?;
    // Fork from the rollout file (request 4); forking should mint a new
    // thread id and reset the window generation.
    let forked = resumed
        .thread_manager
        .fork_thread(
            /*snapshot*/ 0usize,
            resumed.config.clone(),
            rollout_path,
            /*persist_extended_history*/ false,
            /*parent_trace*/ None,
        )
        .await?;
    submit_user_turn(&forked.thread, "after fork").await?;
    shutdown_thread(&forked.thread).await?;
    let requests = request_log.requests();
    assert_eq!(requests.len(), 5, "expected five model requests");
    // Decompose each request's window id header into (thread_id, generation).
    let (initial_thread_id, first_generation) = window_id_parts(&requests[0]);
    let (compact_thread_id, compact_generation) = window_id_parts(&requests[1]);
    let (after_compact_thread_id, after_compact_generation) = window_id_parts(&requests[2]);
    let (after_resume_thread_id, after_resume_generation) = window_id_parts(&requests[3]);
    let (after_fork_thread_id, after_fork_generation) = window_id_parts(&requests[4]);
    // Generation starts at 0 and is unchanged during the compact turn itself.
    assert_eq!(first_generation, 0);
    assert_eq!(compact_thread_id, initial_thread_id);
    assert_eq!(compact_generation, 0);
    // The first turn after compaction bumps the generation.
    assert_eq!(after_compact_thread_id, initial_thread_id);
    assert_eq!(after_compact_generation, 1);
    // Resume preserves both thread id and generation.
    assert_eq!(after_resume_thread_id, initial_thread_id);
    assert_eq!(after_resume_generation, 1);
    // Fork produces a new thread id and resets the generation to 0.
    assert_ne!(after_fork_thread_id, initial_thread_id);
    assert_eq!(after_fork_generation, 0);
    Ok(())
}
/// Submits `text` as a single-item user turn and blocks until the thread
/// reports `TurnComplete`.
async fn submit_user_turn(codex: &Arc<CodexThread>, text: &str) -> Result<()> {
    let items = vec![UserInput::Text {
        text: text.to_string(),
        text_elements: Vec::new(),
    }];
    let op = Op::UserInput {
        items,
        final_output_json_schema: None,
        responsesapi_client_metadata: None,
    };
    codex.submit(op).await?;
    wait_for_event(codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
    Ok(())
}
/// Submits a `Compact` op, asserts the expected compact warning is emitted,
/// then waits for the turn to complete.
async fn submit_compact_turn(codex: &Arc<CodexThread>) -> Result<()> {
    codex.submit(Op::Compact).await?;
    let event = wait_for_event(codex, |event| matches!(event, EventMsg::Warning(_))).await;
    match event {
        EventMsg::Warning(WarningEvent { message }) => {
            assert_eq!(message, COMPACT_WARNING_MESSAGE);
        }
        _ => panic!("expected warning event after compact"),
    }
    wait_for_event(codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
    Ok(())
}
/// Requests a shutdown of the thread and waits for its acknowledgement.
async fn shutdown_thread(thread: &Arc<CodexThread>) -> Result<()> {
    thread.submit(Op::Shutdown).await?;
    let done = wait_for_event(thread, |ev| matches!(ev, EventMsg::ShutdownComplete));
    done.await;
    Ok(())
}
/// Extracts the `x-codex-window-id` header from `request` and splits it on
/// the last `:` into the `(thread_id, generation)` pair, panicking with a
/// descriptive message if the header is absent or malformed.
fn window_id_parts(request: &ResponsesRequest) -> (String, u64) {
    let window_id = request
        .header("x-codex-window-id")
        .expect("missing x-codex-window-id header");
    match window_id.rsplit_once(':') {
        Some((thread_id, raw_generation)) => {
            let generation: u64 = raw_generation
                .parse()
                .unwrap_or_else(|err| panic!("invalid window generation in {window_id}: {err}"));
            (thread_id.to_string(), generation)
        }
        None => panic!("invalid window id header: {window_id}"),
    }
}