Mirror of https://github.com/anomalyco/opencode.git
Synced 2026-02-10 19:04:17 +00:00

Compare commits: beta...fix-compac (5 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 18e5da652a |  |
|  | 3cf3325240 |  |
|  | a794489b10 |  |
|  | bdd108be2e |  |
|  | 3e0b40039c |  |
@@ -5,6 +5,7 @@ import type { JSONSchema } from "zod/v4/core"
 import type { Provider } from "./provider"
 import type { ModelsDev } from "./models"
 import { iife } from "@/util/iife"
+import { Flag } from "@/flag/flag"

 type Modality = NonNullable<ModelsDev.Model["modalities"]>["input"][number]

@@ -17,6 +18,8 @@ function mimeToModality(mime: string): Modality | undefined {
 }

 export namespace ProviderTransform {
+  export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
+
   // Maps npm package to the key the AI SDK expects for providerOptions
   function sdkKey(npm: string): string | undefined {
     switch (npm) {
@@ -723,29 +726,8 @@ export namespace ProviderTransform {
     return { [key]: options }
   }

-  export function maxOutputTokens(
-    npm: string,
-    options: Record<string, any>,
-    modelLimit: number,
-    globalLimit: number,
-  ): number {
-    const modelCap = modelLimit || globalLimit
-    const standardLimit = Math.min(modelCap, globalLimit)
-
-    if (npm === "@ai-sdk/anthropic" || npm === "@ai-sdk/google-vertex/anthropic") {
-      const thinking = options?.["thinking"]
-      const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
-      const enabled = thinking?.["type"] === "enabled"
-      if (enabled && budgetTokens > 0) {
-        // Return text tokens so that text + thinking <= model cap, preferring 32k text when possible.
-        if (budgetTokens + standardLimit <= modelCap) {
-          return standardLimit
-        }
-        return modelCap - budgetTokens
-      }
-    }
-
-    return standardLimit
+  export function maxOutputTokens(model: Provider.Model): number {
+    return Math.min(model.limit.output, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
   }

   export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema | JSONSchema7): JSONSchema7 {

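Editor's sketch (not part of the diff): the reworked helper drops the per-provider thinking-budget math and simply clamps the model's advertised output limit to the global OUTPUT_TOKEN_MAX cap. A minimal TypeScript sketch of the new behaviour, using hypothetical limit values:

```ts
// Sketch only: assumes OUTPUT_TOKEN_MAX = 32_000 and a model object that
// carries limit.output, mirroring the simplified maxOutputTokens above.
const OUTPUT_TOKEN_MAX = 32_000

function maxOutputTokens(model: { limit: { output: number } }): number {
  // Prefer the smaller of the model's own output limit and the global cap;
  // Math.min(...) is 0 when the model reports no limit, so fall back to the cap.
  return Math.min(model.limit.output, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
}

maxOutputTokens({ limit: { output: 100_000 } }) // 32_000 - global cap wins
maxOutputTokens({ limit: { output: 16_000 } })  // 16_000 - model limit wins
maxOutputTokens({ limit: { output: 0 } })       // 32_000 - fallback when the limit is unknown
```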
@@ -6,7 +6,6 @@ import { Instance } from "../project/instance"
 import { Provider } from "../provider/provider"
 import { MessageV2 } from "./message-v2"
 import z from "zod"
-import { SessionPrompt } from "./prompt"
 import { Token } from "../util/token"
 import { Log } from "../util/log"
 import { SessionProcessor } from "./processor"
@@ -14,6 +13,7 @@ import { fn } from "@/util/fn"
 import { Agent } from "@/agent/agent"
 import { Plugin } from "@/plugin"
 import { Config } from "@/config/config"
+import { ProviderTransform } from "@/provider/transform"

 export namespace SessionCompaction {
   const log = Log.create({ service: "session.compaction" })
@@ -32,10 +32,14 @@ export namespace SessionCompaction {
     if (config.compaction?.auto === false) return false
     const context = input.model.limit.context
     if (context === 0) return false
-    const count = input.tokens.input + input.tokens.cache.read + input.tokens.output
-    const output = Math.min(input.model.limit.output, SessionPrompt.OUTPUT_TOKEN_MAX) || SessionPrompt.OUTPUT_TOKEN_MAX
+
+    const count =
+      input.tokens.total ||
+      input.tokens.input + input.tokens.output + input.tokens.cache.read + input.tokens.cache.write
+
+    const output = ProviderTransform.maxOutputTokens(input.model)
     const usable = input.model.limit.input || context - output
-    return count > usable
+    return count >= usable
   }

   export const PRUNE_MINIMUM = 20_000

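Editor's sketch (not part of the diff): the overflow check now prefers the provider-reported total, counts cache writes, and reserves the output budget returned by the new ProviderTransform.maxOutputTokens. A worked example with hypothetical numbers:

```ts
// Hypothetical model and token counts, illustrating when isOverflow would now fire.
const model = { limit: { context: 200_000, input: 0, output: 64_000 } }
const tokens = { total: 0, input: 120_000, output: 8_000, cache: { read: 50_000, write: 4_000 } }

const output = Math.min(model.limit.output, 32_000) || 32_000 // 32_000 reserved for the reply

// Provider total when available, otherwise sum every component (cache writes now included).
const count = tokens.total || tokens.input + tokens.output + tokens.cache.read + tokens.cache.write // 182_000

// Usable window: the model's explicit input limit if declared, else context minus reserved output.
const usable = model.limit.input || model.limit.context - output // 168_000

const overflow = count >= usable // true, so auto-compaction would trigger
```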
@@ -4,7 +4,7 @@ import { BusEvent } from "@/bus/bus-event"
 import { Bus } from "@/bus"
 import { Decimal } from "decimal.js"
 import z from "zod"
-import { type LanguageModelUsage, type ProviderMetadata } from "ai"
+import { type ProviderMetadata } from "ai"
 import { Config } from "../config/config"
 import { Flag } from "../flag/flag"
 import { Identifier } from "../id/id"
@@ -22,6 +22,8 @@ import { Snapshot } from "@/snapshot"
 import type { Provider } from "@/provider/provider"
 import { PermissionNext } from "@/permission/next"
 import { Global } from "@/global"
+import type { LanguageModelV2Usage } from "@ai-sdk/provider"
+import { iife } from "@/util/iife"

 export namespace Session {
   const log = Log.create({ service: "session" })
@@ -436,37 +438,58 @@ export namespace Session {
       return part
     })

+  const safe = (value: number) => {
+    if (!Number.isFinite(value)) return 0
+    return value
+  }
+
   export const getUsage = fn(
     z.object({
       model: z.custom<Provider.Model>(),
-      usage: z.custom<LanguageModelUsage>(),
+      usage: z.custom<LanguageModelV2Usage>(),
       metadata: z.custom<ProviderMetadata>().optional(),
     }),
     (input) => {
-      const cacheReadInputTokens = input.usage.cachedInputTokens ?? 0
-      const cacheWriteInputTokens = (input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
-        // @ts-expect-error
-        input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
-        // @ts-expect-error
-        input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
-        0) as number
+      const inputTokens = safe(input.usage.inputTokens ?? 0)
+      const outputTokens = safe(input.usage.outputTokens ?? 0)
+      const reasoningTokens = safe(input.usage.reasoningTokens ?? 0)
+
+      const cacheReadInputTokens = safe(input.usage.cachedInputTokens ?? 0)
+      const cacheWriteInputTokens = safe(
+        (input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
+          // @ts-expect-error
+          input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
+          // @ts-expect-error
+          input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
+          0) as number,
+      )
+
       // OpenRouter provides inputTokens as the total count of input tokens (including cached).
       // AFAIK other providers (OpenRouter/OpenAI/Gemini etc.) do it the same way e.g. vercel/ai#8794 (comment)
       // Anthropic does it differently though - inputTokens doesn't include cached tokens.
       // It looks like OpenCode's cost calculation assumes all providers return inputTokens the same way Anthropic does (I'm guessing getUsage logic was originally implemented with anthropic), so it's causing incorrect cost calculation for OpenRouter and others.
       const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
-      const adjustedInputTokens = excludesCachedTokens
-        ? (input.usage.inputTokens ?? 0)
-        : (input.usage.inputTokens ?? 0) - cacheReadInputTokens - cacheWriteInputTokens
-      const safe = (value: number) => {
-        if (!Number.isFinite(value)) return 0
-        return value
-      }
+      const adjustedInputTokens = safe(
+        excludesCachedTokens ? inputTokens : inputTokens - cacheReadInputTokens - cacheWriteInputTokens,
+      )
+
+      const total = iife(() => {
+        // Anthropic doesn't provide total_tokens, also ai sdk will vastly undercount if we
+        // don't compute from components
+        if (input.model.api.npm === "@ai-sdk/anthropic" || input.model.api.npm === "@ai-sdk/bedrock") {
+          return adjustedInputTokens + outputTokens + cacheReadInputTokens + cacheWriteInputTokens
+        }
+        return input.usage.totalTokens
+      })
+
       const tokens = {
-        input: safe(adjustedInputTokens),
-        output: safe(input.usage.outputTokens ?? 0),
-        reasoning: safe(input.usage?.reasoningTokens ?? 0),
+        total,
+        input: adjustedInputTokens,
+        output: outputTokens,
+        reasoning: reasoningTokens,
         cache: {
-          write: safe(cacheWriteInputTokens),
-          read: safe(cacheReadInputTokens),
+          write: cacheWriteInputTokens,
+          read: cacheReadInputTokens,
        },
      }

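Editor's note (sketch, not part of the diff): the key fix is that providers such as OpenAI and OpenRouter report inputTokens inclusive of cached tokens, while Anthropic and Bedrock report it exclusive of them. Using the OpenAI sample from tokens.txt further down, the adjustment works out as follows:

```ts
// Values taken from the OpenAI sample in packages/opencode/src/session/tokens.txt below.
const usage = { inputTokens: 14195, outputTokens: 377, reasoningTokens: 41, cachedInputTokens: 12032, totalTokens: 14572 }

// No anthropic/bedrock metadata on this response, so cached tokens are included in inputTokens.
const excludesCachedTokens = false
const cacheRead = usage.cachedInputTokens // 12032
const cacheWrite = 0                      // nothing reported by this provider

const input = excludesCachedTokens
  ? usage.inputTokens
  : usage.inputTokens - cacheRead - cacheWrite // 14195 - 12032 = 2163

// Result matches the "Calculated" block for OpenAI:
// { total: 14572, input: 2163, output: 377, reasoning: 41, cache: { read: 12032, write: 0 } }
```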
@@ -25,8 +25,7 @@ import { Auth } from "@/auth"

 export namespace LLM {
   const log = Log.create({ service: "llm" })

-  export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
+  export const OUTPUT_TOKEN_MAX = ProviderTransform.OUTPUT_TOKEN_MAX

   export type StreamInput = {
     user: MessageV2.User
@@ -149,14 +148,7 @@
     )

     const maxOutputTokens =
-      isCodex || provider.id.includes("github-copilot")
-        ? undefined
-        : ProviderTransform.maxOutputTokens(
-            input.model.api.npm,
-            params.options,
-            input.model.limit.output,
-            OUTPUT_TOKEN_MAX,
-          )
+      isCodex || provider.id.includes("github-copilot") ? undefined : ProviderTransform.maxOutputTokens(input.model)

     const tools = await resolveTools(input)

@@ -210,6 +210,7 @@ export namespace MessageV2 {
       snapshot: z.string().optional(),
       cost: z.number(),
       tokens: z.object({
+        total: z.number().optional(),
         input: z.number(),
         output: z.number(),
         reasoning: z.number(),
@@ -383,6 +384,7 @@
       summary: z.boolean().optional(),
       cost: z.number(),
       tokens: z.object({
+        total: z.number().optional(),
         input: z.number(),
         output: z.number(),
         reasoning: z.number(),

@@ -342,6 +342,9 @@ export namespace SessionProcessor {
           stack: JSON.stringify(e.stack),
         })
         const error = MessageV2.fromError(e, { providerID: input.model.providerID })
+        // DO NOT retry context overflow errors
+        if (MessageV2.ContextOverflowError.isInstance(error)) {
+        }
         const retry = SessionRetry.retryable(error)
         if (retry !== undefined) {
           attempt++

@@ -52,7 +52,6 @@ globalThis.AI_SDK_LOG_WARNINGS = false

 export namespace SessionPrompt {
   const log = Log.create({ service: "session.prompt" })
-  export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000

   const state = Instance.state(
     () => {

@@ -59,9 +59,6 @@ export namespace SessionRetry {
   }

   export function retryable(error: ReturnType<NamedError["toObject"]>) {
-    // DO NOT retry context overflow errors
-    if (MessageV2.ContextOverflowError.isInstance(error)) return undefined
-
     if (MessageV2.APIError.isInstance(error)) {
       if (!error.data.isRetryable) return undefined
       return error.data.message.includes("Overloaded") ? "Provider is overloaded" : error.data.message

packages/opencode/src/session/tokens.txt (new file, 199 lines)
@@ -0,0 +1,199 @@
## Openai

--- Real ---
{
  "usage": {
    "input_tokens": 14195,
    "input_tokens_details": {
      "cached_tokens": 12032
    },
    "output_tokens": 377,
    "output_tokens_details": {
      "reasoning_tokens": 41
    },
    "total_tokens": 14572
  }
}

--- Calculated ---
{
  "tokens": {
    "total": 14572,
    "input": 2163,
    "output": 377,
    "reasoning": 41,
    "cache": {
      "read": 12032,
      "write": 0
    }
  }
}

## Anthropic

--- Real ---
{
  "usage": {
    "input_tokens": 4,
    "cache_creation_input_tokens": 2466,
    "cache_read_input_tokens": 18873,
    "output_tokens": 346
  }
}

--- Calculated ---
{
  "tokens": {
    "total": 350,
    "input": 4,
    "output": 346,
    "reasoning": 0,
    "cache": {
      "read": 18873,
      "write": 2466
    }
  }
}

## Bedrock

--- Real ---
{
  "usage": {
    "cacheReadInputTokenCount": 16138,
    "cacheReadInputTokens": 16138,
    "cacheWriteInputTokenCount": 2571,
    "cacheWriteInputTokens": 2571,
    "inputTokens": 4,
    "outputTokens": 358,
    "serverToolUsage": {},
    "totalTokens": 19071
  }
}

--- Calculated ---
{
  "tokens": {
    "total": 362,
    "input": 4,
    "output": 358,
    "reasoning": 0,
    "cache": {
      "read": 16138,
      "write": 2571
    }
  }
}

## Google

--- Real ---
{
  "usageMetadata": {
    "promptTokenCount": 19435,
    "candidatesTokenCount": 291,
    "totalTokenCount": 19726,
    "cachedContentTokenCount": 11447,
    "trafficType": "ON_DEMAND",
    "promptTokensDetails": [
      {
        "modality": "TEXT",
        "tokenCount": 19435
      }
    ],
    "cacheTokensDetails": [
      {
        "modality": "TEXT",
        "tokenCount": 11447
      }
    ],
    "candidatesTokensDetails": [
      {
        "modality": "TEXT",
        "tokenCount": 291
      }
    ]
  }
}

--- Calculated ---
{
  "tokens": {
    "total": 19726,
    "input": 7988,
    "output": 291,
    "reasoning": 0,
    "cache": {
      "read": 11447,
      "write": 0
    }
  }
}


## Github Copilot

--- Real ---
{
  "usage": {
    "completion_tokens": 448,
    "prompt_tokens": 21172,
    "prompt_tokens_details": {
      "cached_tokens": 18702
    },
    "total_tokens": 21620
  }
}

--- Calculated ---
{
  "tokens": {
    "total": 21620,
    "input": 2470,
    "output": 448,
    "reasoning": 0,
    "cache": {
      "read": 18702,
      "write": 0
    }
  }
}

## OpenRouter

--- Real ---
{
  "usage": {
    "prompt_tokens": 14145,
    "completion_tokens": 447,
    "total_tokens": 14592,
    "cost": 0.02215125,
    "is_byok": false,
    "prompt_tokens_details": {
      "cached_tokens": 0
    },
    "cost_details": {
      "upstream_inference_cost": 0.02215125,
      "upstream_inference_prompt_cost": 0.01768125,
      "upstream_inference_completions_cost": 0.00447
    },
    "completion_tokens_details": {
      "reasoning_tokens": 64,
      "image_tokens": 0
    }
  }
}

--- Calculated ---
{
  "tokens": {
    "total": 14592,
    "input": 14145,
    "output": 447,
    "reasoning": 64,
    "cache": {
      "read": 0,
      "write": 0
    }
  }
}
@@ -175,100 +175,6 @@ describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
   })
 })

-describe("ProviderTransform.maxOutputTokens", () => {
-  test("returns 32k when modelLimit > 32k", () => {
-    const modelLimit = 100000
-    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
-    expect(result).toBe(OUTPUT_TOKEN_MAX)
-  })
-
-  test("returns modelLimit when modelLimit < 32k", () => {
-    const modelLimit = 16000
-    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
-    expect(result).toBe(16000)
-  })
-
-  describe("azure", () => {
-    test("returns 32k when modelLimit > 32k", () => {
-      const modelLimit = 100000
-      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
-      expect(result).toBe(OUTPUT_TOKEN_MAX)
-    })
-
-    test("returns modelLimit when modelLimit < 32k", () => {
-      const modelLimit = 16000
-      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
-      expect(result).toBe(16000)
-    })
-  })
-
-  describe("bedrock", () => {
-    test("returns 32k when modelLimit > 32k", () => {
-      const modelLimit = 100000
-      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
-      expect(result).toBe(OUTPUT_TOKEN_MAX)
-    })
-
-    test("returns modelLimit when modelLimit < 32k", () => {
-      const modelLimit = 16000
-      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
-      expect(result).toBe(16000)
-    })
-  })
-
-  describe("anthropic without thinking options", () => {
-    test("returns 32k when modelLimit > 32k", () => {
-      const modelLimit = 100000
-      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
-      expect(result).toBe(OUTPUT_TOKEN_MAX)
-    })
-
-    test("returns modelLimit when modelLimit < 32k", () => {
-      const modelLimit = 16000
-      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
-      expect(result).toBe(16000)
-    })
-  })
-
-  describe("anthropic with thinking options", () => {
-    test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
-      const modelLimit = 100000
-      const options = {
-        thinking: {
-          type: "enabled",
-          budgetTokens: 10000,
-        },
-      }
-      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
-      expect(result).toBe(OUTPUT_TOKEN_MAX)
-    })
-
-    test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
-      const modelLimit = 50000
-      const options = {
-        thinking: {
-          type: "enabled",
-          budgetTokens: 30000,
-        },
-      }
-      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
-      expect(result).toBe(20000)
-    })
-
-    test("returns 32k when thinking type is not enabled", () => {
-      const modelLimit = 100000
-      const options = {
-        thinking: {
-          type: "disabled",
-          budgetTokens: 10000,
-        },
-      }
-      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
-      expect(result).toBe(OUTPUT_TOKEN_MAX)
-    })
-  })
-})

 describe("ProviderTransform.schema - gemini array items", () => {
   test("adds missing items for array properties", () => {
     const geminiModel = {

@@ -314,12 +314,7 @@ describe("session.llm.stream", () => {
     expect(body.stream).toBe(true)

     const maxTokens = (body.max_tokens as number | undefined) ?? (body.max_output_tokens as number | undefined)
-    const expectedMaxTokens = ProviderTransform.maxOutputTokens(
-      resolved.api.npm,
-      ProviderTransform.options({ model: resolved, sessionID }),
-      resolved.limit.output,
-      LLM.OUTPUT_TOKEN_MAX,
-    )
+    const expectedMaxTokens = ProviderTransform.maxOutputTokens(resolved)
     expect(maxTokens).toBe(expectedMaxTokens)

     const reasoning = (body.reasoningEffort as string | undefined) ?? (body.reasoning_effort as string | undefined)
@@ -442,12 +437,7 @@ describe("session.llm.stream", () => {
     expect((body.reasoning as { effort?: string } | undefined)?.effort).toBe("high")

     const maxTokens = body.max_output_tokens as number | undefined
-    const expectedMaxTokens = ProviderTransform.maxOutputTokens(
-      resolved.api.npm,
-      ProviderTransform.options({ model: resolved, sessionID }),
-      resolved.limit.output,
-      LLM.OUTPUT_TOKEN_MAX,
-    )
+    const expectedMaxTokens = ProviderTransform.maxOutputTokens(resolved)
     expect(maxTokens).toBe(expectedMaxTokens)
   },
 })
@@ -565,14 +555,7 @@ describe("session.llm.stream", () => {

     expect(capture.url.pathname.endsWith("/messages")).toBe(true)
     expect(body.model).toBe(resolved.api.id)
-    expect(body.max_tokens).toBe(
-      ProviderTransform.maxOutputTokens(
-        resolved.api.npm,
-        ProviderTransform.options({ model: resolved, sessionID }),
-        resolved.limit.output,
-        LLM.OUTPUT_TOKEN_MAX,
-      ),
-    )
+    expect(body.max_tokens).toBe(ProviderTransform.maxOutputTokens(resolved))
     expect(body.temperature).toBe(0.4)
     expect(body.top_p).toBe(0.9)
   },
@@ -677,14 +660,7 @@ describe("session.llm.stream", () => {
     expect(capture.url.pathname).toBe(pathSuffix)
     expect(config?.temperature).toBe(0.3)
     expect(config?.topP).toBe(0.8)
-    expect(config?.maxOutputTokens).toBe(
-      ProviderTransform.maxOutputTokens(
-        resolved.api.npm,
-        ProviderTransform.options({ model: resolved, sessionID }),
-        resolved.limit.output,
-        LLM.OUTPUT_TOKEN_MAX,
-      ),
-    )
+    expect(config?.maxOutputTokens).toBe(ProviderTransform.maxOutputTokens(resolved))
   },
 })
})

@@ -203,6 +203,7 @@ export type AssistantMessage = {
   summary?: boolean
   cost: number
   tokens: {
+    total?: number
     input: number
     output: number
     reasoning: number