mirror of
https://github.com/openai/codex.git
synced 2026-04-24 14:45:27 +00:00
fix: read max_output_tokens param from config (#4139)
Request param `max_output_tokens` is documented in `https://github.com/openai/codex/blob/main/docs/config.md`, but nothing in the codebase actually reads this item from config; this commit reads it from config for the GPT Responses API. See https://github.com/openai/codex/issues/4138 for the issue report. Signed-off-by: Yorling <shallowcloud@yeah.net>
This commit is contained in:
@@ -273,6 +273,7 @@ impl ModelClient {
|
||||
include,
|
||||
prompt_cache_key: Some(self.conversation_id.to_string()),
|
||||
text,
|
||||
max_output_tokens: self.config.model_max_output_tokens,
|
||||
};
|
||||
|
||||
let mut payload_json = serde_json::to_value(&payload)?;
|
||||
|
||||
@@ -280,6 +280,8 @@ pub(crate) struct ResponsesApiRequest<'a> {
|
||||
pub(crate) prompt_cache_key: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(crate) text: Option<TextControls>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(crate) max_output_tokens: Option<i64>,
|
||||
}
|
||||
|
||||
pub(crate) mod tools {
|
||||
@@ -463,6 +465,7 @@ mod tests {
|
||||
verbosity: Some(OpenAiVerbosity::Low),
|
||||
format: None,
|
||||
}),
|
||||
max_output_tokens: Some(10_000),
|
||||
};
|
||||
|
||||
let v = serde_json::to_value(&req).expect("json");
|
||||
@@ -501,6 +504,7 @@ mod tests {
|
||||
include: vec![],
|
||||
prompt_cache_key: None,
|
||||
text: Some(text_controls),
|
||||
max_output_tokens: Some(10_000),
|
||||
};
|
||||
|
||||
let v = serde_json::to_value(&req).expect("json");
|
||||
@@ -537,6 +541,7 @@ mod tests {
|
||||
include: vec![],
|
||||
prompt_cache_key: None,
|
||||
text: None,
|
||||
max_output_tokens: Some(10_000),
|
||||
};
|
||||
|
||||
let v = serde_json::to_value(&req).expect("json");
|
||||
|
||||
@@ -282,7 +282,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
|
||||
"include": [
|
||||
"reasoning.encrypted_content"
|
||||
],
|
||||
"prompt_cache_key": prompt_cache_key
|
||||
"prompt_cache_key": prompt_cache_key,
|
||||
"max_output_tokens": 128000,
|
||||
});
|
||||
let compact_1 = json!(
|
||||
{
|
||||
@@ -351,7 +352,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
|
||||
"include": [
|
||||
"reasoning.encrypted_content"
|
||||
],
|
||||
"prompt_cache_key": prompt_cache_key
|
||||
"prompt_cache_key": prompt_cache_key,
|
||||
"max_output_tokens": 128000,
|
||||
});
|
||||
let user_turn_2_after_compact = json!(
|
||||
{
|
||||
@@ -411,7 +413,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
|
||||
"include": [
|
||||
"reasoning.encrypted_content"
|
||||
],
|
||||
"prompt_cache_key": prompt_cache_key
|
||||
"prompt_cache_key": prompt_cache_key,
|
||||
"max_output_tokens": 128000,
|
||||
});
|
||||
let usert_turn_3_after_resume = json!(
|
||||
{
|
||||
@@ -491,7 +494,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
|
||||
"include": [
|
||||
"reasoning.encrypted_content"
|
||||
],
|
||||
"prompt_cache_key": prompt_cache_key
|
||||
"prompt_cache_key": prompt_cache_key,
|
||||
"max_output_tokens": 128000,
|
||||
});
|
||||
let user_turn_3_after_fork = json!(
|
||||
{
|
||||
@@ -571,7 +575,8 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
|
||||
"include": [
|
||||
"reasoning.encrypted_content"
|
||||
],
|
||||
"prompt_cache_key": fork_prompt_cache_key
|
||||
"prompt_cache_key": fork_prompt_cache_key,
|
||||
"max_output_tokens": 128000,
|
||||
});
|
||||
let mut expected = json!([
|
||||
user_turn_1,
|
||||
|
||||
Reference in New Issue
Block a user