fix: read max_output_tokens param from config (#4139)

The request param `max_output_tokens` is documented in
`https://github.com/openai/codex/blob/main/docs/config.md`,
but nothing actually reads the item from config. This commit reads it
from config for the GPT Responses API.

See https://github.com/openai/codex/issues/4138 for the issue report.

Signed-off-by: Yorling <shallowcloud@yeah.net>
This commit is contained in:
Yorling
2025-11-21 14:46:34 +08:00
committed by GitHub
parent bacdc004be
commit c9e149fd5c
3 changed files with 16 additions and 5 deletions

View File

@@ -273,6 +273,7 @@ impl ModelClient {
include,
prompt_cache_key: Some(self.conversation_id.to_string()),
text,
max_output_tokens: self.config.model_max_output_tokens,
};
let mut payload_json = serde_json::to_value(&payload)?;

View File

@@ -280,6 +280,8 @@ pub(crate) struct ResponsesApiRequest<'a> {
pub(crate) prompt_cache_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) text: Option<TextControls>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) max_output_tokens: Option<i64>,
}
pub(crate) mod tools {
@@ -463,6 +465,7 @@ mod tests {
verbosity: Some(OpenAiVerbosity::Low),
format: None,
}),
max_output_tokens: Some(10_000),
};
let v = serde_json::to_value(&req).expect("json");
@@ -501,6 +504,7 @@ mod tests {
include: vec![],
prompt_cache_key: None,
text: Some(text_controls),
max_output_tokens: Some(10_000),
};
let v = serde_json::to_value(&req).expect("json");
@@ -537,6 +541,7 @@ mod tests {
include: vec![],
prompt_cache_key: None,
text: None,
max_output_tokens: Some(10_000),
};
let v = serde_json::to_value(&req).expect("json");

View File

@@ -282,7 +282,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
"prompt_cache_key": prompt_cache_key,
"max_output_tokens": 128000,
});
let compact_1 = json!(
{
@@ -351,7 +352,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
"prompt_cache_key": prompt_cache_key,
"max_output_tokens": 128000,
});
let user_turn_2_after_compact = json!(
{
@@ -411,7 +413,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
"prompt_cache_key": prompt_cache_key,
"max_output_tokens": 128000,
});
let usert_turn_3_after_resume = json!(
{
@@ -491,7 +494,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
"prompt_cache_key": prompt_cache_key,
"max_output_tokens": 128000,
});
let user_turn_3_after_fork = json!(
{
@@ -571,7 +575,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": fork_prompt_cache_key
"prompt_cache_key": fork_prompt_cache_key,
"max_output_tokens": 128000,
});
let mut expected = json!([
user_turn_1,