This commit is contained in:
Aiden Cline
2026-02-09 23:09:08 -06:00
parent 3e0b40039c
commit bdd108be2e
3 changed files with 207 additions and 0 deletions

View File

@@ -457,6 +457,10 @@ export namespace Session {
input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
0) as number
// OpenRouter provides inputTokens as the total count of input tokens (including cached).
// AFAIK other providers (OpenRouter/OpenAI/Gemini etc.) do it the same way e.g. vercel/ai#8794 (comment)
// Anthropic does it differently though - inputTokens doesn't include cached tokens.
// It looks like OpenCode's cost calculation assumes every provider reports inputTokens the way Anthropic does (the getUsage logic was presumably written against Anthropic first). Because of that assumption, cached tokens are double-counted for OpenRouter and other providers, causing incorrect cost calculation.
const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
const adjustedInputTokens = excludesCachedTokens
? inputTokens
@@ -465,6 +469,9 @@ export namespace Session {
if (!Number.isFinite(value)) return 0
return value
}
// Anthropic doesn't provide total_tokens, compute from components
// output.usage.totalTokens =
// output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite;
const tokens = {
total: input.usage.totalTokens,

View File

@@ -210,6 +210,7 @@ export namespace MessageV2 {
snapshot: z.string().optional(),
cost: z.number(),
tokens: z.object({
total: z.number().optional(),
input: z.number(),
output: z.number(),
reasoning: z.number(),

View File

@@ -0,0 +1,199 @@
## OpenAI
--- Real ---
{
"usage": {
"input_tokens": 14195,
"input_tokens_details": {
"cached_tokens": 12032
},
"output_tokens": 377,
"output_tokens_details": {
"reasoning_tokens": 41
},
"total_tokens": 14572
}
}
--- Calculated ---
{
"tokens": {
"total": 14572,
"input": 2163,
"output": 377,
"reasoning": 41,
"cache": {
"read": 12032,
"write": 0
}
}
}
## Anthropic
--- Real ---
{
"usage": {
"input_tokens": 4,
"cache_creation_input_tokens": 2466,
"cache_read_input_tokens": 18873,
"output_tokens": 346
}
}
--- Calculated ---
{
"tokens": {
"total": 350,
"input": 4,
"output": 346,
"reasoning": 0,
"cache": {
"read": 18873,
"write": 2466
}
}
}
## Bedrock
--- Real ---
{
"usage": {
"cacheReadInputTokenCount": 16138,
"cacheReadInputTokens": 16138,
"cacheWriteInputTokenCount": 2571,
"cacheWriteInputTokens": 2571,
"inputTokens": 4,
"outputTokens": 358,
"serverToolUsage": {},
"totalTokens": 19071
}
}
--- Calculated ---
{
"tokens": {
"total": 362,
"input": 4,
"output": 358,
"reasoning": 0,
"cache": {
"read": 16138,
"write": 2571
}
}
}
## Google
--- Real ---
{
"usageMetadata": {
"promptTokenCount": 19435,
"candidatesTokenCount": 291,
"totalTokenCount": 19726,
"cachedContentTokenCount": 11447,
"trafficType": "ON_DEMAND",
"promptTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 19435
}
],
"cacheTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 11447
}
],
"candidatesTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 291
}
]
}
}
--- Calculated ---
{
"tokens": {
"total": 19726,
"input": 7988,
"output": 291,
"reasoning": 0,
"cache": {
"read": 11447,
"write": 0
}
}
}
## GitHub Copilot
--- Real ---
{
"usage": {
"completion_tokens": 448,
"prompt_tokens": 21172,
"prompt_tokens_details": {
"cached_tokens": 18702
},
"total_tokens": 21620
}
}
--- Calculated ---
{
"tokens": {
"total": 21620,
"input": 2470,
"output": 448,
"reasoning": 0,
"cache": {
"read": 18702,
"write": 0
}
}
}
## OpenRouter
--- Real ---
{
"usage": {
"prompt_tokens": 14145,
"completion_tokens": 447,
"total_tokens": 14592,
"cost": 0.02215125,
"is_byok": false,
"prompt_tokens_details": {
"cached_tokens": 0
},
"cost_details": {
"upstream_inference_cost": 0.02215125,
"upstream_inference_prompt_cost": 0.01768125,
"upstream_inference_completions_cost": 0.00447
},
"completion_tokens_details": {
"reasoning_tokens": 64,
"image_tokens": 0
}
}
}
--- Calculated ---
{
"tokens": {
"total": 14592,
"input": 14145,
"output": 447,
"reasoning": 64,
"cache": {
"read": 0,
"write": 0
}
}
}