mirror of
https://github.com/openai/codex.git
synced 2026-05-01 09:56:37 +00:00
Request compression.
Add a new model_provider flag for compression to enable request compression. We support zstd and gzip; the server also supports brotli. You can test this against the sign-in-with-ChatGPT flow by adding the following profile: ``` [profiles.compressed] name = "compressed" model_provider = "openai-zstd" [model_providers.openai-zstd] name = "OpenAI (ChatGPT, zstd)" wire_api = "responses" request_compression = "zstd" requires_openai_auth = true ``` This will zstd-compress your request before sending it to the server.
This commit is contained in:
@@ -5,6 +5,7 @@ use crate::error::ApiError;
|
||||
use crate::provider::Provider;
|
||||
use crate::provider::WireApi;
|
||||
use crate::telemetry::run_with_request_telemetry;
|
||||
use codex_client::Body;
|
||||
use codex_client::HttpTransport;
|
||||
use codex_client::RequestTelemetry;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
@@ -54,7 +55,7 @@ impl<T: HttpTransport, A: AuthProvider> CompactClient<T, A> {
|
||||
let builder = || {
|
||||
let mut req = self.provider.build_request(Method::POST, path);
|
||||
req.headers.extend(extra_headers.clone());
|
||||
req.body = Some(body.clone());
|
||||
req.body = Some(Body::Json(body.clone()));
|
||||
add_auth_headers(&self.auth, req)
|
||||
};
|
||||
|
||||
@@ -89,6 +90,7 @@ struct CompactHistoryResponse {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::provider::RequestCompression;
|
||||
use crate::provider::RetryConfig;
|
||||
use async_trait::async_trait;
|
||||
use codex_client::Request;
|
||||
@@ -135,6 +137,7 @@ mod tests {
|
||||
retry_5xx: true,
|
||||
retry_transport: true,
|
||||
},
|
||||
request_compression: RequestCompression::None,
|
||||
stream_idle_timeout: Duration::from_secs(1),
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user