fix: ensure variants for copilot models work w/ maxTokens being set

Aiden Cline
2026-01-28 21:55:50 -06:00
parent 870c38a6aa
commit 29ea9fcf25
2 changed files with 2 additions and 15 deletions


@@ -428,13 +428,13 @@ export namespace ProviderTransform {
     high: {
       thinking: {
         type: "enabled",
-        budgetTokens: 16000,
+        budgetTokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)),
       },
     },
     max: {
       thinking: {
         type: "enabled",
-        budgetTokens: 31999,
+        budgetTokens: Math.min(31_999, model.limit.output - 1),
       },
     },
   }
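
In effect, the change caps each variant's thinking budget by the model's output limit instead of using fixed constants. A minimal standalone sketch of that clamp in TypeScript follows; the ModelLimit interface and thinkingBudgets name are illustrative, and only model.limit.output and the Math.min expressions come from the diff:

// Sketch of the clamp introduced above; names outside the diff are illustrative.
interface ModelLimit {
  output: number // maximum output tokens the model supports
}

function thinkingBudgets(model: { limit: ModelLimit }) {
  return {
    high: {
      thinking: {
        type: "enabled" as const,
        // at most 16k, and never more than roughly half the output limit
        budgetTokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)),
      },
    },
    max: {
      thinking: {
        type: "enabled" as const,
        // at most 31 999, always leaving at least one token for output
        budgetTokens: Math.min(31_999, model.limit.output - 1),
      },
    },
  }
}

// For a Copilot model with a 64 000-token output limit:
//   high -> min(16000, 31999) = 16000, max -> min(31999, 63999) = 31999
// For a 32 000-token output limit: high -> 15999, max -> 31999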


@@ -158,19 +158,6 @@ export namespace LLM {
       input.model.limit.output,
       OUTPUT_TOKEN_MAX,
     )
-    log.info("max_output_tokens", {
-      tokens: ProviderTransform.maxOutputTokens(
-        input.model.api.npm,
-        params.options,
-        input.model.limit.output,
-        OUTPUT_TOKEN_MAX,
-      ),
-      modelOptions: params.options,
-      outputLimit: input.model.limit.output,
-    })
-    // tokens = 32000
-    // outputLimit = 64000
-    // modelOptions={"reasoningEffort":"minimal"}
     const tools = await resolveTools(input)
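
The block removed here was temporary debug logging used to verify the output-token clamp; its trailing comments record one observed case (tokens = 32000 for a 64 000-token output limit with reasoningEffort set to "minimal"). A hedged reconstruction of what that trace was checking, assuming OUTPUT_TOKEN_MAX is a roughly 32k ceiling; the real ProviderTransform.maxOutputTokens may also weigh the npm package and model options:

// Sketch only: inferred from the removed debug comments, not from the
// actual ProviderTransform.maxOutputTokens implementation.
const OUTPUT_TOKEN_MAX = 32_000 // assumed global ceiling

function maxOutputTokensSketch(outputLimit: number): number {
  // the removed trace showed tokens = 32000 when outputLimit = 64000
  return Math.min(outputLimit, OUTPUT_TOKEN_MAX)
}

// maxOutputTokensSketch(64_000) === 32_000, matching the removed comment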