mirror of
https://github.com/openai/codex.git
synced 2026-04-24 22:54:54 +00:00
chore: drop model_max_output_tokens (#7100)
This commit is contained in:
@@ -247,12 +247,6 @@ The size of the context window for the model, in tokens.
In general, Codex knows the context window for the most common OpenAI models, but if you are using a new model with an old version of the Codex CLI, then you can use `model_context_window` to tell Codex what value to use to determine how much context is left during a conversation.
### model_max_output_tokens

This is analogous to `model_context_window`, but for the maximum number of output tokens for the model.

> See also [`codex exec`](./exec.md) to see how these model settings influence non-interactive runs.

### oss_provider

Specifies the default OSS provider to use when running Codex. This is used when the `--oss` flag is provided without a specific provider.

@@ -945,7 +939,6 @@ Valid values:
| `model` | string | Model to use (e.g., `gpt-5.1-codex-max`). |
| `model_provider` | string | Provider id from `model_providers` (default: `openai`). |
| `model_context_window` | number | Context window tokens. |
| `model_max_output_tokens` | number | Max output tokens. |
| `tool_output_token_limit` | number | Token budget for stored function/tool outputs in history (default: 2,560 tokens). |
| `approval_policy` | `untrusted` \| `on-failure` \| `on-request` \| `never` | When to prompt for approval. |
| `sandbox_mode` | `read-only` \| `workspace-write` \| `danger-full-access` | OS sandbox policy. |

@@ -30,7 +30,6 @@ model_provider = "openai"
# Optional manual model metadata. When unset, Codex auto-detects from model.
# Uncomment to force values.
# model_context_window = 128000 # tokens; default: auto for model
# model_max_output_tokens = 8192 # tokens; default: auto for model
# model_auto_compact_token_limit = 0 # disable/override auto; default: model family specific
# tool_output_token_limit = 10000 # tokens stored per tool output; default: 10000 for gpt-5.1-codex-max

Reference in New Issue
Block a user