mirror of
https://github.com/openai/codex.git
synced 2026-04-30 17:36:40 +00:00
app-server: Add back pressure and batching to command/exec (#15547)
* Add `OutgoingMessageSender::send_server_notification_to_connection_and_wait`, which returns only once the message is written to the websocket (or has failed to be written)
* Use this mechanism to apply back pressure to the stdout/stderr streams of processes spawned by `command/exec`, limiting them to at most one in-memory message at a time
* Use the back-pressure signal to also batch smaller chunks into ≈64 KiB ones

This should make command execution more robust over high-latency/low-throughput networks.
This commit is contained in:
committed by
GitHub
parent
daf5e584c2
commit
d61c03ca08
@@ -390,6 +390,7 @@ async fn read_response<T: serde::de::DeserializeOwned>(
|
||||
let crate::outgoing_message::OutgoingEnvelope::ToConnection {
|
||||
connection_id,
|
||||
message,
|
||||
..
|
||||
} = envelope
|
||||
else {
|
||||
continue;
|
||||
@@ -420,6 +421,7 @@ async fn read_thread_started_notification(
|
||||
crate::outgoing_message::OutgoingEnvelope::ToConnection {
|
||||
connection_id,
|
||||
message,
|
||||
..
|
||||
} => {
|
||||
if connection_id != TEST_CONNECTION_ID {
|
||||
continue;
|
||||
|
||||
Reference in New Issue
Block a user