Compare commits

...

7 Commits

Author  SHA1  Message  Date
Aiden Cline  2afb05fd15  cleanup  2025-12-30 16:32:22 -06:00
Aiden Cline  c460a7d957  fix: test  2025-12-30 16:29:59 -06:00
Aiden Cline  8a2e0a0471  rm dead code  2025-12-30 16:26:29 -06:00
Aiden Cline  5cdd764123  cleanup + test  2025-12-30 16:23:16 -06:00
Aiden Cline  938a53b5a2  fix: ensure variants can be disabled  2025-12-30 16:12:01 -06:00
Aiden Cline  5e1986c0bc  test: add transform tests  2025-12-30 15:38:17 -06:00
Daniel Smolsky  fc2a5b851b  fix: auto-generate variants for user-configured custom provider models  2025-12-30 15:28:06 -05:00
10 changed files with 955 additions and 27 deletions

View File

@@ -2,6 +2,10 @@
- To test opencode in the `packages/opencode` directory you can run `bun dev`
## SDK
To regenerate the javascript SDK, run ./packages/sdk/js/script/build.ts
## Tool Calling
- ALWAYS USE PARALLEL TOOLS WHEN APPLICABLE.

View File

@@ -319,9 +319,7 @@ export const { use: useLocal, provider: LocalProvider } = createSimpleContext({
const provider = sync.data.provider.find((x) => x.id === m.providerID)
const info = provider?.models[m.modelID]
if (!info?.variants) return []
- return Object.entries(info.variants)
- .filter(([_, v]) => !v.disabled)
- .map(([name]) => name)
+ return Object.keys(info.variants)
},
set(value: string | undefined) {
const m = currentModel()
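
Because disabled variants are now stripped inside Provider.list() before they reach the client, the picker only has to enumerate the remaining keys. A minimal sketch of that assumption (the `info` shape here is illustrative, not the real sync payload):

// Variants arrive already filtered; listing them is just Object.keys.
type VariantOptions = Record<string, unknown>

function variantNames(info: { variants?: Record<string, VariantOptions> }): string[] {
  if (!info.variants) return []
  return Object.keys(info.variants)
}

// e.g. { high: {...}, max: {...} } -> ["high", "max"]
console.log(variantNames({ variants: { high: {}, max: {} } }))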

View File

@@ -620,7 +620,24 @@ export namespace Config {
.extend({
whitelist: z.array(z.string()).optional(),
blacklist: z.array(z.string()).optional(),
- models: z.record(z.string(), ModelsDev.Model.partial()).optional(),
+ models: z
+ .record(
+ z.string(),
+ ModelsDev.Model.partial().extend({
+ variants: z
+ .record(
+ z.string(),
+ z
+ .object({
+ disabled: z.boolean().optional().describe("Disable this variant for the model"),
+ })
+ .catchall(z.any()),
+ )
+ .optional()
+ .describe("Variant-specific configuration"),
+ }),
+ )
+ .optional(),
options: z
.object({
apiKey: z.string().optional(),
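
With the extended schema, a user's opencode.json can both override a generated variant's options and disable one outright. A hypothetical config, written here as the object the tests below pass to JSON.stringify (model ID and values are taken from those tests):

// Contents of opencode.json, expressed as a TypeScript object for illustration.
const exampleConfig = {
  $schema: "https://opencode.ai/config.json",
  provider: {
    anthropic: {
      models: {
        "claude-sonnet-4-20250514": {
          variants: {
            // override the generated "high" settings (mirrors the customization test below)
            high: { thinking: { type: "enabled", budgetTokens: 20000 } },
            // drop the generated "max" variant entirely
            max: { disabled: true },
          },
        },
      },
    },
  },
}

console.log(JSON.stringify(exampleConfig, null, 2))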

View File

@@ -60,6 +60,7 @@ export namespace ModelsDev {
options: z.record(z.string(), z.any()),
headers: z.record(z.string(), z.string()).optional(),
provider: z.object({ npm: z.string() }).optional(),
variants: z.record(z.string(), z.record(z.string(), z.any())).optional(),
})
export type Model = z.infer<typeof Model>
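
The catalog-side change is minimal: variants is just an optional record of records, so an entry can carry arbitrary per-variant provider options. A small sketch of that field in isolation (the option names are examples only, not a models.dev contract):

import z from "zod"

// Mirrors the loosened field: variant name -> arbitrary provider options.
const Variants = z.record(z.string(), z.record(z.string(), z.any())).optional()

console.log(
  Variants.parse({
    low: { reasoningEffort: "low" },
    high: { reasoningEffort: "high" },
  }),
)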

View File

@@ -1,7 +1,7 @@
import z from "zod"
import fuzzysort from "fuzzysort"
import { Config } from "../config/config"
import { mapValues, mergeDeep, sortBy } from "remeda"
import { mapValues, mergeDeep, omit, pickBy, sortBy } from "remeda"
import { NoSuchModelError, type Provider as SDK } from "ai"
import { Log } from "../util/log"
import { BunProc } from "../bun"
@@ -405,16 +405,6 @@ export namespace Provider {
},
}
- export const Variant = z
- .object({
- disabled: z.boolean(),
- })
- .catchall(z.any())
- .meta({
- ref: "Variant",
- })
- export type Variant = z.infer<typeof Variant>
export const Model = z
.object({
id: z.string(),
@@ -478,7 +468,7 @@ export namespace Provider {
options: z.record(z.string(), z.any()),
headers: z.record(z.string(), z.string()),
release_date: z.string(),
- variants: z.record(z.string(), Variant).optional(),
+ variants: z.record(z.string(), z.record(z.string(), z.any())).optional(),
})
.meta({
ref: "Model",
@@ -561,7 +551,7 @@ export namespace Provider {
variants: {},
}
- m.variants = mapValues(ProviderTransform.variants(m), (v) => ({ disabled: false, ...v }))
+ m.variants = mapValues(ProviderTransform.variants(m), (v) => v)
return m
}
@@ -697,7 +687,13 @@ export namespace Provider {
headers: mergeDeep(existingModel?.headers ?? {}, model.headers ?? {}),
family: model.family ?? existingModel?.family ?? "",
release_date: model.release_date ?? existingModel?.release_date ?? "",
+ variants: {},
}
+ const merged = mergeDeep(ProviderTransform.variants(parsedModel), model.variants ?? {})
+ parsedModel.variants = mapValues(
+ pickBy(merged, (v) => !v.disabled),
+ (v) => omit(v, ["disabled"]),
+ )
parsed.models[modelID] = parsedModel
}
database[providerID] = parsed
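
This is the path commit fc2a5b851b targets: a reasoning-capable model that exists only in the user's config now gets variants generated by ProviderTransform.variants() and merged with whatever the user declared. A hedged sketch of the expected outcome for an @ai-sdk/openai-compatible custom provider, abbreviated from the test near the end of this diff (other required config fields omitted):

// Custom provider declared only in opencode.json (names come from the test below).
const customProvider = {
  "custom-reasoning": {
    npm: "@ai-sdk/openai-compatible",
    models: {
      "reasoning-model": {
        reasoning: true,
        variants: {
          high: { reasoningEffort: "high", disabled: true },
          custom: { reasoningEffort: "custom", budgetTokens: 5000 },
        },
      },
    },
  },
}

// After Provider.list(), the model is expected to expose roughly:
// {
//   low:    { reasoningEffort: "low" },                         // auto-generated
//   medium: { reasoningEffort: "medium" },                      // auto-generated
//   custom: { reasoningEffort: "custom", budgetTokens: 5000 },  // user-defined
// }
// "high" is gone (disabled: true), and no surviving variant carries a "disabled" key.
console.log(customProvider)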
@@ -822,6 +818,16 @@ export namespace Provider {
(configProvider?.whitelist && !configProvider.whitelist.includes(modelID))
)
delete provider.models[modelID]
// Filter out disabled variants from config
const configVariants = configProvider?.models?.[modelID]?.variants
if (configVariants && model.variants) {
const merged = mergeDeep(model.variants, configVariants)
model.variants = mapValues(
pickBy(merged, (v) => !v.disabled),
(v) => omit(v, ["disabled"]),
)
}
}
if (Object.keys(provider.models).length === 0) {
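
Both filtering sites follow the same pattern: deep-merge config variants over generated ones, drop entries whose merged value has disabled: true, then strip the disabled key from the survivors. A standalone sketch of that pattern with remeda (the variant data is made up):

import { mapValues, mergeDeep, omit, pickBy } from "remeda"

type Variant = Record<string, any>

// Drop disabled variants and strip the "disabled" flag from the rest.
function resolveVariants(generated: Record<string, Variant>, fromConfig: Record<string, Variant>) {
  const merged = mergeDeep(generated, fromConfig) as Record<string, Variant>
  return mapValues(
    pickBy(merged, (v) => !v.disabled),
    (v) => omit(v, ["disabled"]),
  )
}

// Logs { medium: { reasoningEffort: "medium", note: "tuned" } }
console.log(
  resolveVariants(
    { high: { reasoningEffort: "high" }, medium: { reasoningEffort: "medium" } },
    { high: { disabled: true }, medium: { note: "tuned" } },
  ),
)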

View File

@@ -246,7 +246,7 @@ export namespace ProviderTransform {
const WIDELY_SUPPORTED_EFFORTS = ["low", "medium", "high"]
const OPENAI_EFFORTS = ["none", "minimal", ...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
- export function variants(model: Provider.Model) {
+ export function variants(model: Provider.Model): Record<string, Record<string, any>> {
if (!model.capabilities.reasoning) return {}
const id = model.id.toLowerCase()
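
With the Variant schema removed, the generator now returns a plain nested record of provider options keyed by effort name. For an @ai-sdk/anthropic reasoning model, for example, the transform tests later in this diff expect roughly this shape:

// Expected return value of ProviderTransform.variants() for an Anthropic
// reasoning model (values taken from the transform tests below).
const anthropicVariants: Record<string, Record<string, any>> = {
  high: { thinking: { type: "enabled", budgetTokens: 16000 } },
  max: { thinking: { type: "enabled", budgetTokens: 31999 } },
}

console.log(anthropicVariants)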

View File

@@ -82,13 +82,14 @@ export namespace LLM {
}
const provider = await Provider.getProvider(input.model.providerID)
- const variant = input.model.variants && input.user.variant ? input.model.variants[input.user.variant] : undefined
+ const small = input.small ? ProviderTransform.smallOptions(input.model) : {}
+ const variant = input.model.variants && input.user.variant ? input.model.variants[input.user.variant] : {}
const options = pipe(
ProviderTransform.options(input.model, input.sessionID, provider.options),
- mergeDeep(input.small ? ProviderTransform.smallOptions(input.model) : {}),
+ mergeDeep(small),
mergeDeep(input.model.options),
mergeDeep(input.agent.options),
- mergeDeep(variant && !variant.disabled ? variant : {}),
+ mergeDeep(variant),
)
const params = await Plugin.trigger(
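
Because the disabled flag is stripped before a variant reaches the LLM layer, the selected variant can be merged unconditionally as the last, highest-precedence layer. A simplified sketch of that precedence with placeholder option objects (the values are invented for illustration):

import { mergeDeep, pipe } from "remeda"

// Later mergeDeep calls win on conflicts, so the selected variant overrides everything else.
const resolved = pipe(
  { temperature: 1, reasoningEffort: "medium" } as Record<string, any>, // base/provider options
  mergeDeep({ reasoningEffort: "low" }), // small-model options, if requested
  mergeDeep({ maxOutputTokens: 8192 }), // model options
  mergeDeep({}), // agent options
  mergeDeep({ thinking: { type: "enabled", budgetTokens: 16000 } }), // selected variant
)

// { temperature: 1, reasoningEffort: "low", maxOutputTokens: 8192, thinking: { ... } }
console.log(resolved)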

View File

@@ -1807,3 +1807,321 @@ test("custom model inherits api.url from models.dev provider", async () => {
},
})
})
test("model variants are generated for reasoning models", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("ANTHROPIC_API_KEY", "test-api-key")
},
fn: async () => {
const providers = await Provider.list()
// Claude sonnet 4 has reasoning capability
const model = providers["anthropic"].models["claude-sonnet-4-20250514"]
expect(model.capabilities.reasoning).toBe(true)
expect(model.variants).toBeDefined()
expect(Object.keys(model.variants!).length).toBeGreaterThan(0)
},
})
})
test("model variants can be disabled via config", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
anthropic: {
models: {
"claude-sonnet-4-20250514": {
variants: {
high: { disabled: true },
},
},
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("ANTHROPIC_API_KEY", "test-api-key")
},
fn: async () => {
const providers = await Provider.list()
const model = providers["anthropic"].models["claude-sonnet-4-20250514"]
expect(model.variants).toBeDefined()
expect(model.variants!["high"]).toBeUndefined()
// max variant should still exist
expect(model.variants!["max"]).toBeDefined()
},
})
})
test("model variants can be customized via config", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
anthropic: {
models: {
"claude-sonnet-4-20250514": {
variants: {
high: {
thinking: {
type: "enabled",
budgetTokens: 20000,
},
},
},
},
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("ANTHROPIC_API_KEY", "test-api-key")
},
fn: async () => {
const providers = await Provider.list()
const model = providers["anthropic"].models["claude-sonnet-4-20250514"]
expect(model.variants!["high"]).toBeDefined()
expect(model.variants!["high"].thinking.budgetTokens).toBe(20000)
},
})
})
test("disabled key is stripped from variant config", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
anthropic: {
models: {
"claude-sonnet-4-20250514": {
variants: {
max: {
disabled: false,
customField: "test",
},
},
},
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("ANTHROPIC_API_KEY", "test-api-key")
},
fn: async () => {
const providers = await Provider.list()
const model = providers["anthropic"].models["claude-sonnet-4-20250514"]
expect(model.variants!["max"]).toBeDefined()
expect(model.variants!["max"].disabled).toBeUndefined()
expect(model.variants!["max"].customField).toBe("test")
},
})
})
test("all variants can be disabled via config", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
anthropic: {
models: {
"claude-sonnet-4-20250514": {
variants: {
high: { disabled: true },
max: { disabled: true },
},
},
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("ANTHROPIC_API_KEY", "test-api-key")
},
fn: async () => {
const providers = await Provider.list()
const model = providers["anthropic"].models["claude-sonnet-4-20250514"]
expect(model.variants).toBeDefined()
expect(Object.keys(model.variants!).length).toBe(0)
},
})
})
test("variant config merges with generated variants", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
anthropic: {
models: {
"claude-sonnet-4-20250514": {
variants: {
high: {
extraOption: "custom-value",
},
},
},
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("ANTHROPIC_API_KEY", "test-api-key")
},
fn: async () => {
const providers = await Provider.list()
const model = providers["anthropic"].models["claude-sonnet-4-20250514"]
expect(model.variants!["high"]).toBeDefined()
// Should have both the generated thinking config and the custom option
expect(model.variants!["high"].thinking).toBeDefined()
expect(model.variants!["high"].extraOption).toBe("custom-value")
},
})
})
test("variants filtered in second pass for database models", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
openai: {
models: {
"gpt-5": {
variants: {
high: { disabled: true },
},
},
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("OPENAI_API_KEY", "test-api-key")
},
fn: async () => {
const providers = await Provider.list()
const model = providers["openai"].models["gpt-5"]
expect(model.variants).toBeDefined()
expect(model.variants!["high"]).toBeUndefined()
// Other variants should still exist
expect(model.variants!["medium"]).toBeDefined()
},
})
})
test("custom model with variants enabled and disabled", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
"custom-reasoning": {
name: "Custom Reasoning Provider",
npm: "@ai-sdk/openai-compatible",
env: [],
models: {
"reasoning-model": {
name: "Reasoning Model",
tool_call: true,
reasoning: true,
limit: { context: 128000, output: 16000 },
variants: {
low: { reasoningEffort: "low" },
medium: { reasoningEffort: "medium" },
high: { reasoningEffort: "high", disabled: true },
custom: { reasoningEffort: "custom", budgetTokens: 5000 },
},
},
},
options: { apiKey: "test-key" },
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const providers = await Provider.list()
const model = providers["custom-reasoning"].models["reasoning-model"]
expect(model.variants).toBeDefined()
// Enabled variants should exist
expect(model.variants!["low"]).toBeDefined()
expect(model.variants!["low"].reasoningEffort).toBe("low")
expect(model.variants!["medium"]).toBeDefined()
expect(model.variants!["medium"].reasoningEffort).toBe("medium")
expect(model.variants!["custom"]).toBeDefined()
expect(model.variants!["custom"].reasoningEffort).toBe("custom")
expect(model.variants!["custom"].budgetTokens).toBe(5000)
// Disabled variant should not exist
expect(model.variants!["high"]).toBeUndefined()
// disabled key should be stripped from all variants
expect(model.variants!["low"].disabled).toBeUndefined()
expect(model.variants!["medium"].disabled).toBeUndefined()
expect(model.variants!["custom"].disabled).toBeUndefined()
},
})
})

View File

@@ -409,3 +409,572 @@ describe("ProviderTransform.message - empty image handling", () => {
})
})
})
describe("ProviderTransform.variants", () => {
const createMockModel = (overrides: Partial<any> = {}): any => ({
id: "test/test-model",
providerID: "test",
api: {
id: "test-model",
url: "https://api.test.com",
npm: "@ai-sdk/openai",
},
name: "Test Model",
capabilities: {
temperature: true,
reasoning: true,
attachment: true,
toolcall: true,
input: { text: true, audio: false, image: true, video: false, pdf: false },
output: { text: true, audio: false, image: false, video: false, pdf: false },
interleaved: false,
},
cost: {
input: 0.001,
output: 0.002,
cache: { read: 0.0001, write: 0.0002 },
},
limit: {
context: 128000,
output: 8192,
},
status: "active",
options: {},
headers: {},
release_date: "2024-01-01",
...overrides,
})
test("returns empty object when model has no reasoning capabilities", () => {
const model = createMockModel({
capabilities: { reasoning: false },
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
test("deepseek returns empty object", () => {
const model = createMockModel({
id: "deepseek/deepseek-chat",
providerID: "deepseek",
api: {
id: "deepseek-chat",
url: "https://api.deepseek.com",
npm: "@ai-sdk/openai-compatible",
},
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
test("minimax returns empty object", () => {
const model = createMockModel({
id: "minimax/minimax-model",
providerID: "minimax",
api: {
id: "minimax-model",
url: "https://api.minimax.com",
npm: "@ai-sdk/openai-compatible",
},
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
test("glm returns empty object", () => {
const model = createMockModel({
id: "glm/glm-4",
providerID: "glm",
api: {
id: "glm-4",
url: "https://api.glm.com",
npm: "@ai-sdk/openai-compatible",
},
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
test("mistral returns empty object", () => {
const model = createMockModel({
id: "mistral/mistral-large",
providerID: "mistral",
api: {
id: "mistral-large-latest",
url: "https://api.mistral.com",
npm: "@ai-sdk/mistral",
},
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
describe("@openrouter/ai-sdk-provider", () => {
test("returns empty object for non-qualifying models", () => {
const model = createMockModel({
id: "openrouter/test-model",
providerID: "openrouter",
api: {
id: "test-model",
url: "https://openrouter.ai",
npm: "@openrouter/ai-sdk-provider",
},
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
test("gpt models return OPENAI_EFFORTS with reasoning", () => {
const model = createMockModel({
id: "openrouter/gpt-4",
providerID: "openrouter",
api: {
id: "gpt-4",
url: "https://openrouter.ai",
npm: "@openrouter/ai-sdk-provider",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
expect(result.low).toEqual({ reasoning: { effort: "low" } })
expect(result.high).toEqual({ reasoning: { effort: "high" } })
})
test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
const model = createMockModel({
id: "openrouter/gemini-3-5-pro",
providerID: "openrouter",
api: {
id: "gemini-3-5-pro",
url: "https://openrouter.ai",
npm: "@openrouter/ai-sdk-provider",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
})
test("grok-4 returns OPENAI_EFFORTS with reasoning", () => {
const model = createMockModel({
id: "openrouter/grok-4",
providerID: "openrouter",
api: {
id: "grok-4",
url: "https://openrouter.ai",
npm: "@openrouter/ai-sdk-provider",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
})
})
describe("@ai-sdk/gateway", () => {
test("returns OPENAI_EFFORTS with reasoningEffort", () => {
const model = createMockModel({
id: "gateway/gateway-model",
providerID: "gateway",
api: {
id: "gateway-model",
url: "https://gateway.ai",
npm: "@ai-sdk/gateway",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
expect(result.low).toEqual({ reasoningEffort: "low" })
expect(result.high).toEqual({ reasoningEffort: "high" })
})
})
describe("@ai-sdk/cerebras", () => {
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
const model = createMockModel({
id: "cerebras/llama-4",
providerID: "cerebras",
api: {
id: "llama-4-sc",
url: "https://api.cerebras.ai",
npm: "@ai-sdk/cerebras",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
expect(result.low).toEqual({ reasoningEffort: "low" })
expect(result.high).toEqual({ reasoningEffort: "high" })
})
})
describe("@ai-sdk/togetherai", () => {
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
const model = createMockModel({
id: "togetherai/llama-4",
providerID: "togetherai",
api: {
id: "llama-4-sc",
url: "https://api.togetherai.com",
npm: "@ai-sdk/togetherai",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
expect(result.low).toEqual({ reasoningEffort: "low" })
expect(result.high).toEqual({ reasoningEffort: "high" })
})
})
describe("@ai-sdk/xai", () => {
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
const model = createMockModel({
id: "xai/grok-3",
providerID: "xai",
api: {
id: "grok-3",
url: "https://api.x.ai",
npm: "@ai-sdk/xai",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
expect(result.low).toEqual({ reasoningEffort: "low" })
expect(result.high).toEqual({ reasoningEffort: "high" })
})
})
describe("@ai-sdk/deepinfra", () => {
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
const model = createMockModel({
id: "deepinfra/llama-4",
providerID: "deepinfra",
api: {
id: "llama-4-sc",
url: "https://api.deepinfra.com",
npm: "@ai-sdk/deepinfra",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
expect(result.low).toEqual({ reasoningEffort: "low" })
expect(result.high).toEqual({ reasoningEffort: "high" })
})
})
describe("@ai-sdk/openai-compatible", () => {
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
const model = createMockModel({
id: "custom-provider/custom-model",
providerID: "custom-provider",
api: {
id: "custom-model",
url: "https://api.custom.com",
npm: "@ai-sdk/openai-compatible",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
expect(result.low).toEqual({ reasoningEffort: "low" })
expect(result.high).toEqual({ reasoningEffort: "high" })
})
})
describe("@ai-sdk/azure", () => {
test("o1-mini returns empty object", () => {
const model = createMockModel({
id: "o1-mini",
providerID: "azure",
api: {
id: "o1-mini",
url: "https://azure.com",
npm: "@ai-sdk/azure",
},
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
test("standard azure models return custom efforts with reasoningSummary", () => {
const model = createMockModel({
id: "azure/gpt-4o",
providerID: "azure",
api: {
id: "gpt-4o",
url: "https://azure.com",
npm: "@ai-sdk/azure",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
expect(result.low).toEqual({
reasoningEffort: "low",
reasoningSummary: "auto",
include: ["reasoning.encrypted_content"],
})
})
test("gpt-5 adds minimal effort", () => {
const model = createMockModel({
id: "azure/gpt-5",
providerID: "azure",
api: {
id: "gpt-5",
url: "https://azure.com",
npm: "@ai-sdk/azure",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
})
})
describe("@ai-sdk/openai", () => {
test("gpt-5-pro returns empty object", () => {
const model = createMockModel({
id: "gpt-5-pro",
providerID: "openai",
api: {
id: "gpt-5-pro",
url: "https://api.openai.com",
npm: "@ai-sdk/openai",
},
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
test("standard openai models return custom efforts with reasoningSummary", () => {
const model = createMockModel({
id: "openai/gpt-4o",
providerID: "openai",
api: {
id: "gpt-4o",
url: "https://api.openai.com",
npm: "@ai-sdk/openai",
},
release_date: "2024-06-01",
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
expect(result.low).toEqual({
reasoningEffort: "low",
reasoningSummary: "auto",
include: ["reasoning.encrypted_content"],
})
})
test("models after 2025-11-13 include 'none' effort", () => {
const model = createMockModel({
id: "openai/gpt-4.5",
providerID: "openai",
api: {
id: "gpt-4.5",
url: "https://api.openai.com",
npm: "@ai-sdk/openai",
},
release_date: "2025-11-14",
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
})
test("models after 2025-12-04 include 'xhigh' effort", () => {
const model = createMockModel({
id: "openai/gpt-5-chat",
providerID: "openai",
api: {
id: "gpt-5-chat",
url: "https://api.openai.com",
npm: "@ai-sdk/openai",
},
release_date: "2025-12-05",
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
})
})
describe("@ai-sdk/anthropic", () => {
test("returns high and max with thinking config", () => {
const model = createMockModel({
id: "anthropic/claude-4",
providerID: "anthropic",
api: {
id: "claude-4",
url: "https://api.anthropic.com",
npm: "@ai-sdk/anthropic",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["high", "max"])
expect(result.high).toEqual({
thinking: {
type: "enabled",
budgetTokens: 16000,
},
})
expect(result.max).toEqual({
thinking: {
type: "enabled",
budgetTokens: 31999,
},
})
})
})
describe("@ai-sdk/amazon-bedrock", () => {
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
const model = createMockModel({
id: "bedrock/llama-4",
providerID: "bedrock",
api: {
id: "llama-4-sc",
url: "https://bedrock.amazonaws.com",
npm: "@ai-sdk/amazon-bedrock",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
expect(result.low).toEqual({
reasoningConfig: {
type: "enabled",
maxReasoningEffort: "low",
},
})
})
})
describe("@ai-sdk/google", () => {
test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
const model = createMockModel({
id: "google/gemini-2.5-pro",
providerID: "google",
api: {
id: "gemini-2.5-pro",
url: "https://generativelanguage.googleapis.com",
npm: "@ai-sdk/google",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["high", "max"])
expect(result.high).toEqual({
thinkingConfig: {
includeThoughts: true,
thinkingBudget: 16000,
},
})
expect(result.max).toEqual({
thinkingConfig: {
includeThoughts: true,
thinkingBudget: 24576,
},
})
})
test("other gemini models return low and high with thinkingLevel", () => {
const model = createMockModel({
id: "google/gemini-2.0-pro",
providerID: "google",
api: {
id: "gemini-2.0-pro",
url: "https://generativelanguage.googleapis.com",
npm: "@ai-sdk/google",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["low", "high"])
expect(result.low).toEqual({
includeThoughts: true,
thinkingLevel: "low",
})
expect(result.high).toEqual({
includeThoughts: true,
thinkingLevel: "high",
})
})
})
describe("@ai-sdk/google-vertex", () => {
test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
const model = createMockModel({
id: "google-vertex/gemini-2.5-pro",
providerID: "google-vertex",
api: {
id: "gemini-2.5-pro",
url: "https://vertexai.googleapis.com",
npm: "@ai-sdk/google-vertex",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["high", "max"])
})
test("other vertex models return low and high with thinkingLevel", () => {
const model = createMockModel({
id: "google-vertex/gemini-2.0-pro",
providerID: "google-vertex",
api: {
id: "gemini-2.0-pro",
url: "https://vertexai.googleapis.com",
npm: "@ai-sdk/google-vertex",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["low", "high"])
})
})
describe("@ai-sdk/cohere", () => {
test("returns empty object", () => {
const model = createMockModel({
id: "cohere/command-r",
providerID: "cohere",
api: {
id: "command-r",
url: "https://api.cohere.com",
npm: "@ai-sdk/cohere",
},
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
})
describe("@ai-sdk/groq", () => {
test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
const model = createMockModel({
id: "groq/llama-4",
providerID: "groq",
api: {
id: "llama-4-sc",
url: "https://api.groq.com",
npm: "@ai-sdk/groq",
},
})
const result = ProviderTransform.variants(model)
expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
expect(result.none).toEqual({
includeThoughts: true,
thinkingLevel: "none",
})
expect(result.low).toEqual({
includeThoughts: true,
thinkingLevel: "low",
})
})
})
describe("@ai-sdk/perplexity", () => {
test("returns empty object", () => {
const model = createMockModel({
id: "perplexity/sonar-plus",
providerID: "perplexity",
api: {
id: "sonar-plus",
url: "https://api.perplexity.ai",
npm: "@ai-sdk/perplexity",
},
})
const result = ProviderTransform.variants(model)
expect(result).toEqual({})
})
})
})

View File

@@ -1309,6 +1309,18 @@ export type ProviderConfig = {
provider?: {
npm: string
}
/**
* Variant-specific configuration
*/
variants?: {
[key: string]: {
/**
* Disable this variant for the model
*/
disabled?: boolean
[key: string]: unknown | boolean | undefined
}
}
}
}
whitelist?: Array<string>
@@ -1717,11 +1729,6 @@ export type Command = {
subtask?: boolean
}
- export type Variant = {
- disabled: boolean
- [key: string]: unknown | boolean
- }
export type Model = {
id: string
providerID: string
@@ -1786,7 +1793,9 @@ export type Model = {
}
release_date: string
variants?: {
- [key: string]: Variant
+ [key: string]: {
+ [key: string]: unknown
+ }
}
}
@@ -3497,6 +3506,11 @@ export type ProviderListResponses = {
provider?: {
npm: string
}
variants?: {
[key: string]: {
[key: string]: unknown
}
}
}
}
}>
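
On the SDK side, per-variant options are now an open record, so clients have to narrow individual fields before using them. A hedged usage sketch (the `model` parameter stands for one model entry from a provider listing; the "thinking" field is only one possible provider-specific option):

// A variant's options are an open bag of provider-specific settings.
type VariantOptions = { [key: string]: unknown }

function listVariantNames(model: { variants?: { [key: string]: VariantOptions } }): string[] {
  return Object.keys(model.variants ?? {})
}

// Narrow fields explicitly, since nothing is typed beyond unknown.
function thinkingBudget(variant: VariantOptions): number | undefined {
  const thinking = variant["thinking"]
  if (thinking && typeof thinking === "object" && "budgetTokens" in thinking) {
    const budget = (thinking as { budgetTokens?: unknown }).budgetTokens
    return typeof budget === "number" ? budget : undefined
  }
  return undefined
}

console.log(listVariantNames({ variants: { high: { thinking: { budgetTokens: 16000 } } } }))
console.log(thinkingBudget({ thinking: { budgetTokens: 16000 } }))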