Mirror of https://github.com/anomalyco/opencode.git, synced 2026-02-09 10:24:11 +00:00
Compare commits: sqlite2...fix-vercel
1 commit: 11c782d598

@@ -1,3 +1,5 @@
// TODO: this file is a fuckfest, will cleanup better soon
import type { APICallError, ModelMessage } from "ai"
import { mergeDeep, unique } from "remeda"
import type { JSONSchema7 } from "@ai-sdk/provider"

@@ -360,8 +362,51 @@ export namespace ProviderTransform {
        if (!model.id.includes("gpt") && !model.id.includes("gemini-3")) return {}
        return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoning: { effort } }]))

      // TODO: YOU CANNOT SET max_tokens if this is set!!!
      case "@ai-sdk/gateway":
        if (model.id.includes("anthropic")) {
          return {
            high: {
              reasoningConfig: {
                type: "enabled",
                budgetTokens: 16000,
              },
            },
            max: {
              reasoningConfig: {
                type: "enabled",
                budgetTokens: 31999,
              },
            },
          }
        }
        if (model.id.includes("google")) {
          if (model.id.includes("2.5")) {
            return {
              high: {
                thinkingConfig: {
                  includeThoughts: true,
                  thinkingBudget: 16000,
                },
              },
              max: {
                thinkingConfig: {
                  includeThoughts: true,
                  thinkingBudget: 24576,
                },
              },
            }
          }
          return Object.fromEntries(
            ["low", "high"].map((effort) => [
              effort,
              {
                includeThoughts: true,
                thinkingLevel: effort,
              },
            ]),
          )
        }

        return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))

      case "@ai-sdk/github-copilot":
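
For readers skimming the hunk, a minimal sketch of what the Object.fromEntries pattern above expands to; the OPENAI_EFFORTS values used here are an assumption (the constant is defined elsewhere in this file):

// Sketch only: assumes OPENAI_EFFORTS = ["low", "medium", "high"].
const OPENAI_EFFORTS = ["low", "medium", "high"] as const

// Each effort level becomes a key mapping to its provider options,
// e.g. for the gateway fallback branch above:
const table = Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
// → { low: { reasoningEffort: "low" }, medium: { reasoningEffort: "medium" }, high: { reasoningEffort: "high" } }
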
@@ -704,8 +749,24 @@ export namespace ProviderTransform {
   }
 
   export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
-    const key = sdkKey(model.api.npm) ?? model.providerID
-    return { [key]: options }
+    if (model.api.npm !== "@ai-sdk/gateway") {
+      const key = sdkKey(model.api.npm) ?? model.providerID
+      return { [key]: options }
+    }
+
+    const key = model.api.id.includes("/") ? model.api.id.split("/")[0] : (sdkKey(model.api.npm) ?? model.providerID)
+
+    const rest = { ...options }
+    const gate = rest.gateway
+    delete rest.gateway
+    if (Object.keys(rest).length === 0) {
+      return { gateway: gate }
+    }
+
+    return {
+      gateway: gate,
+      [key]: rest,
+    }
   }
 
   export function maxOutputTokens(
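
The new gateway branch splits the `gateway` routing options from provider-specific options. As a standalone sketch of the same logic (the Options alias and the splitGatewayOptions name are illustrative, not from the repo):

// Standalone sketch of the gateway split above; names are illustrative.
type Options = { [x: string]: any }

function splitGatewayOptions(apiID: string, fallbackKey: string, options: Options): Options {
  // Gateway model IDs look like "anthropic/claude-sonnet-4"; the slug before
  // the "/" names the upstream provider that should receive the options.
  const key = apiID.includes("/") ? apiID.split("/")[0] : fallbackKey
  const { gateway, ...rest } = options
  // Routing options stay under "gateway"; anything provider-specific is
  // forwarded under the upstream provider's key.
  if (Object.keys(rest).length === 0) return { gateway }
  return { gateway, [key]: rest }
}

// splitGatewayOptions("anthropic/claude-sonnet-4", "gateway", {
//   gateway: { order: ["vertex", "anthropic"] },
//   thinking: { type: "enabled", budgetTokens: 12_000 },
// })
// → { gateway: { order: ["vertex", "anthropic"] },
//     anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } } }

Keying the forwarded options by the slug in the gateway model ID is what the tests below exercise: options reach the upstream provider under its own name, while routing preferences stay under `gateway`.
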
@@ -175,6 +175,109 @@ describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
  })
})

describe("ProviderTransform.providerOptions", () => {
  const createModel = (overrides: Partial<any> = {}) =>
    ({
      id: "test/test-model",
      providerID: "test",
      api: {
        id: "test-model",
        url: "https://api.test.com",
        npm: "@ai-sdk/openai",
      },
      name: "Test Model",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 200_000,
        output: 64_000,
      },
      status: "active",
      options: {},
      headers: {},
      release_date: "2024-01-01",
      ...overrides,
    }) as any

  test("uses sdk key for non-gateway models", () => {
    const model = createModel({
      providerID: "my-bedrock",
      api: {
        id: "anthropic.claude-sonnet-4",
        url: "https://bedrock.aws",
        npm: "@ai-sdk/amazon-bedrock",
      },
    })

    expect(ProviderTransform.providerOptions(model, { cachePoint: { type: "default" } })).toEqual({
      bedrock: { cachePoint: { type: "default" } },
    })
  })

  test("uses gateway model provider slug for gateway models", () => {
    const model = createModel({
      providerID: "vercel",
      api: {
        id: "anthropic/claude-sonnet-4",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
    })

    expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
      anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
    })
  })

  test("splits gateway routing options from provider-specific options", () => {
    const model = createModel({
      providerID: "vercel",
      api: {
        id: "anthropic/claude-sonnet-4",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
    })

    expect(
      ProviderTransform.providerOptions(model, {
        gateway: { order: ["vertex", "anthropic"] },
        thinking: { type: "enabled", budgetTokens: 12_000 },
      }),
    ).toEqual({
      gateway: { order: ["vertex", "anthropic"] },
      anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
    } as any)
  })

  test("falls back to gateway key when model id has no provider slug", () => {
    const model = createModel({
      providerID: "vercel",
      api: {
        id: "claude-sonnet-4",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
    })

    expect(ProviderTransform.providerOptions(model, { reasoningEffort: "high" })).toEqual({
      gateway: { reasoningEffort: "high" },
    })
  })
})

describe("ProviderTransform.maxOutputTokens", () => {
  test("returns 32k when modelLimit > 32k", () => {
    const modelLimit = 100000