Request compression.

Add a new model_provider flag to enable request compression.
We support zstd and gzip; the server also supports brotli.

You can test this against the Sign in with ChatGPT flow by adding the
following profile:

```
[profiles.compressed]
name = "compressed"
model_provider = "openai-zstd"

[model_providers.openai-zstd]
name = "OpenAI (ChatGPT, zstd)"
wire_api = "responses"
request_compression = "zstd"
requires_openai_auth = true
```

This will zstd-compress your request before sending it to the server.
This commit is contained in:
Channing Conger
2025-11-25 11:53:38 -08:00
parent 07f077dfb3
commit 55acaaffac
37 changed files with 491 additions and 79 deletions

View File

@@ -5,6 +5,7 @@ use crate::error::ApiError;
use crate::provider::Provider;
use crate::provider::WireApi;
use crate::telemetry::run_with_request_telemetry;
use codex_client::Body;
use codex_client::HttpTransport;
use codex_client::RequestTelemetry;
use codex_protocol::models::ResponseItem;
@@ -54,7 +55,7 @@ impl<T: HttpTransport, A: AuthProvider> CompactClient<T, A> {
let builder = || {
let mut req = self.provider.build_request(Method::POST, path);
req.headers.extend(extra_headers.clone());
req.body = Some(body.clone());
req.body = Some(Body::Json(body.clone()));
add_auth_headers(&self.auth, req)
};
@@ -89,6 +90,7 @@ struct CompactHistoryResponse {
#[cfg(test)]
mod tests {
use super::*;
use crate::provider::RequestCompression;
use crate::provider::RetryConfig;
use async_trait::async_trait;
use codex_client::Request;
@@ -135,6 +137,7 @@ mod tests {
retry_5xx: true,
retry_transport: true,
},
request_compression: RequestCompression::None,
stream_idle_timeout: Duration::from_secs(1),
}
}