feat: enable prompt caching and cache token tracking for google-vertex-anthropic (#20266)

Signed-off-by: Major Hayden <major@mhtx.net>
This commit is contained in:
Major Hayden
2026-03-31 15:16:14 -05:00
committed by GitHub
parent 4dd866d5c4
commit 26cc924ea2
4 changed files with 78 additions and 0 deletions

View File

@@ -1199,4 +1199,26 @@ describe("session.getUsage", () => {
expect(result.tokens.total).toBe(1500)
},
)
// Verifies Session.getUsage reads Anthropic-on-Vertex cache-write token counts
// from the provider metadata under the `vertex` key.
test("extracts cache write tokens from vertex metadata key", () => {
  const vertexModel = createModel({ context: 100_000, output: 32_000, npm: "@ai-sdk/google-vertex/anthropic" })
  const usage = {
    inputTokens: 1000,
    outputTokens: 500,
    totalTokens: 1500,
    cachedInputTokens: 200,
  }
  const metadata = { vertex: { cacheCreationInputTokens: 300 } }
  const result = Session.getUsage({ model: vertexModel, usage, metadata })
  // NOTE(review): input appears to be inputTokens net of cache reads and cache
  // writes (1000 - 200 - 300 = 500) — confirm against Session.getUsage.
  expect(result.tokens.input).toBe(500)
  expect(result.tokens.cache.read).toBe(200)
  expect(result.tokens.cache.write).toBe(300)
})
})