mirror of https://github.com/openai/codex.git (synced 2026-04-24 22:54:54 +00:00)
Support response.done and add integration tests (#9129)
The agent loop uses a persistent, incremental WebSocket connection.
@@ -88,6 +88,14 @@ struct ResponseCompleted {
     usage: Option<ResponseCompletedUsage>,
 }
 
+#[derive(Debug, Deserialize)]
+struct ResponseDone {
+    #[serde(default)]
+    id: Option<String>,
+    #[serde(default)]
+    usage: Option<ResponseCompletedUsage>,
+}
+
 #[derive(Debug, Deserialize)]
 struct ResponseCompletedUsage {
     input_tokens: i64,
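The new `ResponseDone` struct tolerates payloads that omit `id`, `usage`, or both, since both fields are `Option` with `#[serde(default)]`. A minimal, self-contained sketch of that behaviour (the structs are re-declared here, and `ResponseCompletedUsage` is trimmed to a few fields, purely for illustration; this snippet is not part of the diff):

```rust
// Standalone sketch: shows why missing `id`/`usage` fields still deserialize.
// Assumes the serde and serde_json crates; struct definitions are trimmed copies.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ResponseDone {
    #[serde(default)]
    id: Option<String>,
    #[serde(default)]
    usage: Option<ResponseCompletedUsage>,
}

#[derive(Debug, Deserialize)]
struct ResponseCompletedUsage {
    input_tokens: i64,
    output_tokens: i64,
    total_tokens: i64,
}

fn main() {
    // Payload carrying only usage, mirroring the first new test below.
    let with_usage: ResponseDone = serde_json::from_str(
        r#"{"usage":{"input_tokens":1,"output_tokens":2,"total_tokens":3}}"#,
    )
    .unwrap();
    assert!(with_usage.id.is_none());
    assert!(with_usage.usage.is_some());

    // An entirely empty payload still deserializes thanks to the defaults.
    let empty: ResponseDone = serde_json::from_str("{}").unwrap();
    assert!(empty.id.is_none() && empty.usage.is_none());
}
```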
@@ -229,6 +237,29 @@ pub fn process_responses_event(
                 }
             }
         }
+        "response.done" => {
+            if let Some(resp_val) = event.response {
+                match serde_json::from_value::<ResponseDone>(resp_val) {
+                    Ok(resp) => {
+                        return Ok(Some(ResponseEvent::Completed {
+                            response_id: resp.id.unwrap_or_default(),
+                            token_usage: resp.usage.map(Into::into),
+                        }));
+                    }
+                    Err(err) => {
+                        let error = format!("failed to parse ResponseCompleted: {err}");
+                        debug!("{error}");
+                        return Err(ResponsesEventError::Api(ApiError::Stream(error)));
+                    }
+                }
+            }
+
+            debug!("response.done missing response payload");
+            return Ok(Some(ResponseEvent::Completed {
+                response_id: String::new(),
+                token_usage: None,
+            }));
+        }
         "response.output_item.added" => {
             if let Some(item_val) = event.item {
                 if let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) {
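On the happy path the handler forwards usage via `resp.usage.map(Into::into)`, which relies on an existing `From<ResponseCompletedUsage>` conversion into whatever token-usage type `ResponseEvent::Completed` carries; that target type is outside this diff. A rough sketch of the shape of that conversion, with a hypothetical `TokenUsage` standing in for the real type:

```rust
// Illustrative only: `TokenUsage` and its fields are placeholders, not the
// repository's actual type. The point is the Option-to-Option mapping that
// `resp.usage.map(Into::into)` performs in the new handler arm.
#[derive(Debug)]
struct TokenUsage {
    input_tokens: i64,
    output_tokens: i64,
    total_tokens: i64,
}

struct ResponseCompletedUsage {
    input_tokens: i64,
    output_tokens: i64,
    total_tokens: i64,
}

impl From<ResponseCompletedUsage> for TokenUsage {
    fn from(u: ResponseCompletedUsage) -> Self {
        TokenUsage {
            input_tokens: u.input_tokens,
            output_tokens: u.output_tokens,
            total_tokens: u.total_tokens,
        }
    }
}

fn main() {
    let usage = Some(ResponseCompletedUsage {
        input_tokens: 1,
        output_tokens: 2,
        total_tokens: 3,
    });
    // Same call shape as the handler: Option<ResponseCompletedUsage> -> Option<TokenUsage>.
    let token_usage: Option<TokenUsage> = usage.map(Into::into);
    println!("{token_usage:?}");
}
```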
@@ -517,6 +548,65 @@ mod tests {
         }
     }
 
+    #[tokio::test]
+    async fn response_done_emits_completed() {
+        let done = json!({
+            "type": "response.done",
+            "response": {
+                "usage": {
+                    "input_tokens": 1,
+                    "input_tokens_details": null,
+                    "output_tokens": 2,
+                    "output_tokens_details": null,
+                    "total_tokens": 3
+                }
+            }
+        })
+        .to_string();
+
+        let sse1 = format!("event: response.done\ndata: {done}\n\n");
+
+        let events = collect_events(&[sse1.as_bytes()]).await;
+
+        assert_eq!(events.len(), 1);
+
+        match &events[0] {
+            Ok(ResponseEvent::Completed {
+                response_id,
+                token_usage,
+            }) => {
+                assert_eq!(response_id, "");
+                assert!(token_usage.is_some());
+            }
+            other => panic!("unexpected event: {other:?}"),
+        }
+    }
+
+    #[tokio::test]
+    async fn response_done_without_payload_emits_completed() {
+        let done = json!({
+            "type": "response.done"
+        })
+        .to_string();
+
+        let sse1 = format!("event: response.done\ndata: {done}\n\n");
+
+        let events = collect_events(&[sse1.as_bytes()]).await;
+
+        assert_eq!(events.len(), 1);
+
+        match &events[0] {
+            Ok(ResponseEvent::Completed {
+                response_id,
+                token_usage,
+            }) => {
+                assert_eq!(response_id, "");
+                assert!(token_usage.is_none());
+            }
+            other => panic!("unexpected event: {other:?}"),
+        }
+    }
+
     #[tokio::test]
     async fn error_when_error_event() {
         let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_689bcf18d7f08194bf3440ba62fe05d803fee0cdac429894","object":"response","created_at":1755041560,"status":"failed","background":false,"error":{"code":"rate_limit_exceeded","message":"Rate limit reached for gpt-5.1 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."}, "usage":null,"user":null,"metadata":{}}}"#;
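Both new tests reuse the `collect_events` helper already present in the test module (not shown in this hunk) to push raw SSE bytes through the event stream and collect the parsed events. Once the branch is checked out, they can be run individually with cargo's test-name filter, e.g. `cargo test response_done` from the crate that owns this module.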