Revert "fix(app): support anthropic models on azure cognitive services (#8335)"

This reverts commit b8e2895dfc.
This commit is contained in:
Aiden Cline
2026-01-16 15:25:14 -06:00
committed by GitHub
parent b8e2895dfc
commit 0eb7c2bd61
3 changed files with 13 additions and 67 deletions

View File

@@ -586,13 +586,6 @@ export namespace Provider {
})
export type Info = z.infer<typeof Info>
/**
 * True when the model is an Anthropic (Claude) deployment served through
 * the Azure Cognitive Services provider, detected by provider ID plus an
 * "claude"/"anthropic" marker in the API model id.
 */
export function isAzureAnthropic(model: Model): boolean {
  if (model.providerID !== "azure-cognitive-services") return false
  const apiID = model.api.id
  return apiID.includes("claude") || apiID.includes("anthropic")
}
function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model): Model {
const m: Model = {
id: model.id,
@@ -1013,16 +1006,9 @@ export namespace Provider {
})
}
// Special cases for providers that use different npm packages
if (isAzureAnthropic(model)) {
const resourceName = Env.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME")
if (resourceName) options["baseURL"] = `https://${resourceName}.services.ai.azure.com/anthropic/v1/`
}
const bundledKey = iife(() => {
if (model.providerID === "google-vertex-anthropic") return "@ai-sdk/google-vertex/anthropic"
if (isAzureAnthropic(model)) return "@ai-sdk/anthropic"
return model.api.npm
})
// Special case: google-vertex-anthropic uses a subpath import
const bundledKey =
model.providerID === "google-vertex-anthropic" ? "@ai-sdk/google-vertex/anthropic" : model.api.npm
const bundledFn = BUNDLED_PROVIDERS[bundledKey]
if (bundledFn) {
log.info("using bundled provider", { providerID: model.providerID, pkg: bundledKey })
@@ -1088,11 +1074,8 @@ export namespace Provider {
const provider = s.providers[model.providerID]
const sdk = await getSDK(model)
// Skip custom model loader for Azure Anthropic models since they use @ai-sdk/anthropic
const useCustomLoader = s.modelLoaders[model.providerID] && !isAzureAnthropic(model)
try {
const language = useCustomLoader
const language = s.modelLoaders[model.providerID]
? await s.modelLoaders[model.providerID](sdk, model.api.id, provider.options)
: sdk.languageModel(model.api.id)
s.models.set(key, language)

View File

@@ -16,17 +16,6 @@ function mimeToModality(mime: string): Modality | undefined {
}
export namespace ProviderTransform {
// Detects Anthropic (Claude) models hosted on Azure Cognitive Services:
// the provider must be azure-cognitive-services and the API model id must
// mention "claude" or "anthropic".
function isAzureAnthropic(model: Provider.Model): boolean {
  const onAzure = model.providerID === "azure-cognitive-services"
  const looksAnthropic =
    model.api.id.includes("claude") || model.api.id.includes("anthropic")
  return onAzure && looksAnthropic
}
// Whether requests for this model are routed through the Anthropic SDK —
// either because its npm package is @ai-sdk/anthropic directly, or because
// it is an Anthropic model hosted on Azure Cognitive Services.
function usesAnthropicSDK(model: Provider.Model): boolean {
  if (model.api.npm === "@ai-sdk/anthropic") return true
  return isAzureAnthropic(model)
}
function normalizeMessages(
msgs: ModelMessage[],
model: Provider.Model,
@@ -61,7 +50,7 @@ export namespace ProviderTransform {
// Anthropic rejects messages with empty content - filter out empty string messages
// and remove empty text/reasoning parts from array content
if (usesAnthropicSDK(model)) {
if (model.api.npm === "@ai-sdk/anthropic") {
msgs = msgs
.map((msg) => {
if (typeof msg.content === "string") {
@@ -267,7 +256,7 @@ export namespace ProviderTransform {
model.providerID === "anthropic" ||
model.api.id.includes("anthropic") ||
model.api.id.includes("claude") ||
usesAnthropicSDK(model)
model.api.npm === "@ai-sdk/anthropic"
) {
msgs = applyCaching(msgs, model.providerID)
}
@@ -319,23 +308,6 @@ export namespace ProviderTransform {
const id = model.id.toLowerCase()
if (id.includes("deepseek") || id.includes("minimax") || id.includes("glm") || id.includes("mistral")) return {}
if (isAzureAnthropic(model)) {
return {
high: {
thinking: {
type: "enabled",
budgetTokens: 16000,
},
},
max: {
thinking: {
type: "enabled",
budgetTokens: 31999,
},
},
}
}
switch (model.api.npm) {
case "@openrouter/ai-sdk-provider":
if (!model.id.includes("gpt") && !model.id.includes("gemini-3") && !model.id.includes("grok-4")) return {}
@@ -606,9 +578,6 @@ export namespace ProviderTransform {
}
export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
if (isAzureAnthropic(model)) {
return { ["anthropic" as string]: options }
}
switch (model.api.npm) {
case "@ai-sdk/github-copilot":
case "@ai-sdk/openai":
@@ -644,27 +613,16 @@ export namespace ProviderTransform {
}
}
export function maxOutputTokens(model: Provider.Model, options: Record<string, any>, globalLimit: number): number
export function maxOutputTokens(
npm: string,
options: Record<string, any>,
modelLimit: number,
globalLimit: number,
): number
export function maxOutputTokens(
arg1: Provider.Model | string,
options: Record<string, any>,
arg3: number,
arg4?: number,
): number {
const model = typeof arg1 === "object" ? arg1 : null
const npm = model ? model.api.npm : (arg1 as string)
const modelLimit = model ? model.limit.output : arg3
const globalLimit = model ? arg3 : arg4!
const modelCap = modelLimit || globalLimit
const standardLimit = Math.min(modelCap, globalLimit)
if (model ? usesAnthropicSDK(model) : npm === "@ai-sdk/anthropic") {
if (npm === "@ai-sdk/anthropic") {
const thinking = options?.["thinking"]
const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
const enabled = thinking?.["type"] === "enabled"

View File

@@ -133,7 +133,12 @@ export namespace LLM {
const maxOutputTokens = isCodex
? undefined
: ProviderTransform.maxOutputTokens(input.model, params.options, OUTPUT_TOKEN_MAX)
: ProviderTransform.maxOutputTokens(
input.model.api.npm,
params.options,
input.model.limit.output,
OUTPUT_TOKEN_MAX,
)
const tools = await resolveTools(input)