mirror of
https://github.com/openai/codex.git
synced 2026-05-01 01:47:18 +00:00
Request compression.
Add a new model_provider flag for compression to enable request compression. We support zstd and gzip; the server also supports brotli. You can test this against the sign-in-with-ChatGPT flow by adding the following profile: ``` [profiles.compressed] name = "compressed" model_provider = "openai-zstd" [model_providers.openai-zstd] name = "OpenAI (ChatGPT, zstd)" wire_api = "responses" request_compression = "zstd" requires_openai_auth = true ``` This will zstd-compress your request before sending it to the server.
This commit is contained in:
@@ -11,11 +11,15 @@ use regex_lite::Regex;
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub mod process;
|
||||
pub mod request;
|
||||
pub mod responses;
|
||||
pub mod streaming_sse;
|
||||
pub mod test_codex;
|
||||
pub mod test_codex_exec;
|
||||
|
||||
pub use request::RequestBodyExt;
|
||||
pub use request::body_contains;
|
||||
|
||||
#[track_caller]
|
||||
pub fn assert_regex_match<'s>(pattern: &str, actual: &'s str) -> regex_lite::Captures<'s> {
|
||||
let regex = Regex::new(pattern).unwrap_or_else(|err| {
|
||||
|
||||
Reference in New Issue
Block a user