chore: generate

opencode-agent[bot]
2026-01-31 02:35:22 +00:00
parent d9f18e4006
commit 644f0d4e92
12 changed files with 407 additions and 506 deletions

View File

@@ -2,64 +2,57 @@ import {
  type LanguageModelV2Prompt,
  type SharedV2ProviderMetadata,
  UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import type { OpenAICompatibleChatPrompt } from "./openai-compatible-api-types"
import { convertToBase64 } from "@ai-sdk/provider-utils"

function getOpenAIMetadata(message: { providerOptions?: SharedV2ProviderMetadata }) {
  return message?.providerOptions?.copilot ?? {}
}

export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Prompt): OpenAICompatibleChatPrompt {
  const messages: OpenAICompatibleChatPrompt = []
  for (const { role, content, ...message } of prompt) {
    const metadata = getOpenAIMetadata({ ...message })
    switch (role) {
      case "system": {
        messages.push({
          role: "system",
          content: [
            {
              type: "text",
              text: content,
            },
          ],
          ...metadata,
        })
        break
      }
      case "user": {
        if (content.length === 1 && content[0].type === "text") {
          messages.push({
            role: "user",
            content: content[0].text,
            ...getOpenAIMetadata(content[0]),
          })
          break
        }
        messages.push({
          role: "user",
          content: content.map((part) => {
            const partMetadata = getOpenAIMetadata(part)
            switch (part.type) {
              case "text": {
                return { type: "text", text: part.text, ...partMetadata }
              }
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType
                  return {
                    type: "image_url",
                    image_url: {
                      url:
                        part.data instanceof URL
@@ -67,111 +60,110 @@ export function convertToOpenAICompatibleChatMessages(
                          : `data:${mediaType};base64,${convertToBase64(part.data)}`,
                    },
                    ...partMetadata,
                  }
                } else {
                  throw new UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`,
                  })
                }
              }
            }
          }),
          ...metadata,
        })
        break
      }
      case "assistant": {
        let text = ""
        let reasoningText: string | undefined
        let reasoningOpaque: string | undefined
        const toolCalls: Array<{
          id: string
          type: "function"
          function: { name: string; arguments: string }
        }> = []
        for (const part of content) {
          const partMetadata = getOpenAIMetadata(part)
          // Check for reasoningOpaque on any part (may be attached to text/tool-call)
          const partOpaque = (part.providerOptions as { copilot?: { reasoningOpaque?: string } })?.copilot
            ?.reasoningOpaque
          if (partOpaque && !reasoningOpaque) {
            reasoningOpaque = partOpaque
          }
          switch (part.type) {
            case "text": {
              text += part.text
              break
            }
            case "reasoning": {
              reasoningText = part.text
              break
            }
            case "tool-call": {
              toolCalls.push({
                id: part.toolCallId,
                type: "function",
                function: {
                  name: part.toolName,
                  arguments: JSON.stringify(part.input),
                },
                ...partMetadata,
              })
              break
            }
          }
        }
        messages.push({
          role: "assistant",
          content: text || null,
          tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
          reasoning_text: reasoningText,
          reasoning_opaque: reasoningOpaque,
          ...metadata,
        })
        break
      }
      case "tool": {
        for (const toolResponse of content) {
          const output = toolResponse.output
          let contentValue: string
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value
              break
            case "content":
            case "json":
            case "error-json":
              contentValue = JSON.stringify(output.value)
              break
          }
          const toolResponseMetadata = getOpenAIMetadata(toolResponse)
          messages.push({
            role: "tool",
            tool_call_id: toolResponse.toolCallId,
            content: contentValue,
            ...toolResponseMetadata,
          })
        }
        break
      }
      default: {
        const _exhaustiveCheck: never = role
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`)
      }
    }
  }
  return messages
}
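A minimal usage sketch of the converter above; the prompt literal is illustrative (the cast sidesteps the exact LanguageModelV2Prompt typing), but the output shape follows the switch cases shown:

import { convertToOpenAICompatibleChatMessages } from "./convert-to-openai-compatible-chat-messages"

// A single-text user message collapses to a plain string; the system message
// is wrapped in a content-part array, per the "system" case above.
const messages = convertToOpenAICompatibleChatMessages([
  { role: "system", content: "You are concise." },
  { role: "user", content: [{ type: "text", text: "List the repo files." }] },
] as any)
// => [
//   { role: "system", content: [{ type: "text", text: "You are concise." }] },
//   { role: "user", content: "List the repo files." },
// ]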

View File

@@ -3,13 +3,13 @@ export function getResponseMetadata({
  model,
  created,
}: {
  id?: string | undefined | null
  created?: number | undefined | null
  model?: string | undefined | null
}) {
  return {
    id: id ?? undefined,
    modelId: model ?? undefined,
    timestamp: created != null ? new Date(created * 1000) : undefined,
  }
}
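Worked example with hypothetical response values; note that `created` is a Unix timestamp in seconds, hence the `* 1000` before constructing a millisecond-based Date:

import { getResponseMetadata } from "./get-response-metadata"

const meta = getResponseMetadata({ id: "chatcmpl-123", model: "gpt-4o", created: 1_700_000_000 })
// => { id: "chatcmpl-123", modelId: "gpt-4o", timestamp: new Date(1_700_000_000_000) }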

View File

@@ -1,19 +1,17 @@
import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"

export function mapOpenAICompatibleFinishReason(finishReason: string | null | undefined): LanguageModelV2FinishReason {
  switch (finishReason) {
    case "stop":
      return "stop"
    case "length":
      return "length"
    case "content_filter":
      return "content-filter"
    case "function_call":
    case "tool_calls":
      return "tool-calls"
    default:
      return "unknown"
  }
}
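Quick illustration of the mapping, including the legacy `function_call` alias and the fallthrough for missing or unrecognized reasons:

import { mapOpenAICompatibleFinishReason } from "./map-openai-compatible-finish-reason"

mapOpenAICompatibleFinishReason("tool_calls") // "tool-calls"
mapOpenAICompatibleFinishReason("function_call") // "tool-calls"
mapOpenAICompatibleFinishReason(undefined) // "unknown"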

View File

@@ -1,74 +1,64 @@
import type { JSONValue } from "@ai-sdk/provider"

export type OpenAICompatibleChatPrompt = Array<OpenAICompatibleMessage>

export type OpenAICompatibleMessage =
  | OpenAICompatibleSystemMessage
  | OpenAICompatibleUserMessage
  | OpenAICompatibleAssistantMessage
  | OpenAICompatibleToolMessage

// Allow for arbitrary additional properties for general purpose
// provider-metadata-specific extensibility.
type JsonRecord<T = never> = Record<string, JSONValue | JSONValue[] | T | T[] | undefined>

export interface OpenAICompatibleSystemMessage extends JsonRecord<OpenAICompatibleSystemContentPart> {
  role: "system"
  content: string | Array<OpenAICompatibleSystemContentPart>
}

export interface OpenAICompatibleSystemContentPart extends JsonRecord {
  type: "text"
  text: string
}

export interface OpenAICompatibleUserMessage extends JsonRecord<OpenAICompatibleContentPart> {
  role: "user"
  content: string | Array<OpenAICompatibleContentPart>
}

export type OpenAICompatibleContentPart = OpenAICompatibleContentPartText | OpenAICompatibleContentPartImage

export interface OpenAICompatibleContentPartImage extends JsonRecord {
  type: "image_url"
  image_url: { url: string }
}

export interface OpenAICompatibleContentPartText extends JsonRecord {
  type: "text"
  text: string
}

export interface OpenAICompatibleAssistantMessage extends JsonRecord<OpenAICompatibleMessageToolCall> {
  role: "assistant"
  content?: string | null
  tool_calls?: Array<OpenAICompatibleMessageToolCall>
  // Copilot-specific reasoning fields
  reasoning_text?: string
  reasoning_opaque?: string
}

export interface OpenAICompatibleMessageToolCall extends JsonRecord {
  type: "function"
  id: string
  function: {
    arguments: string
    name: string
  }
}

export interface OpenAICompatibleToolMessage extends JsonRecord {
  role: "tool"
  content: string
  tool_call_id: string
}
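For illustration, an assistant turn carrying the Copilot-specific reasoning fields; all values here are hypothetical (in practice `reasoning_opaque` is an opaque token issued by the API, not something callers mint):

import type { OpenAICompatibleAssistantMessage } from "./openai-compatible-api-types"

const assistantTurn: OpenAICompatibleAssistantMessage = {
  role: "assistant",
  content: null,
  tool_calls: [
    {
      type: "function",
      id: "call_1", // hypothetical id
      function: { name: "list_files", arguments: '{"path":"."}' },
    },
  ],
  reasoning_text: "I should inspect the file tree first.",
  reasoning_opaque: "opaque-token-from-previous-turn", // hypothetical
}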

View File

@@ -7,7 +7,7 @@ import {
  type LanguageModelV2FinishReason,
  type LanguageModelV2StreamPart,
  type SharedV2ProviderMetadata,
} from "@ai-sdk/provider"
import {
  combineHeaders,
  createEventSourceResponseHandler,
@@ -20,80 +20,68 @@ import {
  type ParseResult,
  postJsonToApi,
  type ResponseHandler,
} from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
import { convertToOpenAICompatibleChatMessages } from "./convert-to-openai-compatible-chat-messages"
import { getResponseMetadata } from "./get-response-metadata"
import { mapOpenAICompatibleFinishReason } from "./map-openai-compatible-finish-reason"
import { type OpenAICompatibleChatModelId, openaiCompatibleProviderOptions } from "./openai-compatible-chat-options"
import { defaultOpenAICompatibleErrorStructure, type ProviderErrorStructure } from "../openai-compatible-error"
import type { MetadataExtractor } from "./openai-compatible-metadata-extractor"
import { prepareTools } from "./openai-compatible-prepare-tools"

export type OpenAICompatibleChatConfig = {
  provider: string
  headers: () => Record<string, string | undefined>
  url: (options: { modelId: string; path: string }) => string
  fetch?: FetchFunction
  includeUsage?: boolean
  errorStructure?: ProviderErrorStructure<any>
  metadataExtractor?: MetadataExtractor
  /**
   * Whether the model supports structured outputs.
   */
  supportsStructuredOutputs?: boolean
  /**
   * The supported URLs for the model.
   */
  supportedUrls?: () => LanguageModelV2["supportedUrls"]
}

export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2"
  readonly supportsStructuredOutputs: boolean
  readonly modelId: OpenAICompatibleChatModelId
  private readonly config: OpenAICompatibleChatConfig
  private readonly failedResponseHandler: ResponseHandler<APICallError>
  private readonly chunkSchema // type inferred via constructor

  constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig) {
    this.modelId = modelId
    this.config = config
    // initialize error handling:
    const errorStructure = config.errorStructure ?? defaultOpenAICompatibleErrorStructure
    this.chunkSchema = createOpenAICompatibleChatChunkSchema(errorStructure.errorSchema)
    this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure)
    this.supportsStructuredOutputs = config.supportsStructuredOutputs ?? false
  }

  get provider(): string {
    return this.config.provider
  }

  private get providerOptionsName(): string {
    return this.config.provider.split(".")[0].trim()
  }

  get supportedUrls() {
    return this.config.supportedUrls?.() ?? {}
  }

  private async getArgs({
@@ -110,13 +98,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
    seed,
    toolChoice,
    tools,
  }: Parameters<LanguageModelV2["doGenerate"]>[0]) {
    const warnings: LanguageModelV2CallWarning[] = []
    // Parse provider options
    const compatibleOptions = Object.assign(
      (await parseProviderOptions({
        provider: "copilot",
        providerOptions,
        schema: openaiCompatibleProviderOptions,
      })) ?? {},
@@ -125,23 +113,18 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
        providerOptions,
        schema: openaiCompatibleProviderOptions,
      })) ?? {},
    )
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" })
    }
    if (responseFormat?.type === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format schema is only supported with structuredOutputs",
      })
    }
    const {
@@ -151,7 +134,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
    } = prepareTools({
      tools,
      toolChoice,
    })
    return {
      args: {
@@ -168,28 +151,24 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
        frequency_penalty: frequencyPenalty,
        presence_penalty: presencePenalty,
        response_format:
          responseFormat?.type === "json"
            ? this.supportsStructuredOutputs === true && responseFormat.schema != null
              ? {
                  type: "json_schema",
                  json_schema: {
                    schema: responseFormat.schema,
                    name: responseFormat.name ?? "response",
                    description: responseFormat.description,
                  },
                }
              : { type: "json_object" }
            : undefined,
        stop: stopSequences,
        seed,
        ...Object.fromEntries(
          Object.entries(providerOptions?.[this.providerOptionsName] ?? {}).filter(
            ([key]) => !Object.keys(openaiCompatibleProviderOptions.shape).includes(key),
          ),
        ),
@@ -207,15 +186,15 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
        thinking_budget: compatibleOptions.thinking_budget,
      },
      warnings: [...warnings, ...toolWarnings],
    }
  }

  async doGenerate(
    options: Parameters<LanguageModelV2["doGenerate"]>[0],
  ): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
    const { args, warnings } = await this.getArgs({ ...options })
    const body = JSON.stringify(args)
    const {
      responseHeaders,
@@ -223,50 +202,48 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
      rawValue: rawResponse,
    } = await postJsonToApi({
      url: this.config.url({
        path: "/chat/completions",
        modelId: this.modelId,
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: this.failedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(OpenAICompatibleChatResponseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    })
    const choice = responseBody.choices[0]
    const content: Array<LanguageModelV2Content> = []
    // text content:
    const text = choice.message.content
    if (text != null && text.length > 0) {
      content.push({ type: "text", text })
    }
    // reasoning content (Copilot uses reasoning_text):
    const reasoning = choice.message.reasoning_text
    if (reasoning != null && reasoning.length > 0) {
      content.push({
        type: "reasoning",
        text: reasoning,
        // Include reasoning_opaque for Copilot multi-turn reasoning
        providerMetadata: choice.message.reasoning_opaque
          ? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
          : undefined,
      })
    }
    // tool calls:
    if (choice.message.tool_calls != null) {
      for (const toolCall of choice.message.tool_calls) {
        content.push({
          type: "tool-call",
          toolCallId: toolCall.id ?? generateId(),
          toolName: toolCall.function.name,
          input: toolCall.function.arguments!,
        })
      }
    }
@@ -276,16 +253,15 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
      ...(await this.config.metadataExtractor?.extractMetadata?.({
        parsedBody: rawResponse,
      })),
    }
    const completionTokenDetails = responseBody.usage?.completion_tokens_details
    if (completionTokenDetails?.accepted_prediction_tokens != null) {
      providerMetadata[this.providerOptionsName].acceptedPredictionTokens =
        completionTokenDetails?.accepted_prediction_tokens
    }
    if (completionTokenDetails?.rejected_prediction_tokens != null) {
      providerMetadata[this.providerOptionsName].rejectedPredictionTokens =
        completionTokenDetails?.rejected_prediction_tokens
    }
    return {
@@ -295,11 +271,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
        inputTokens: responseBody.usage?.prompt_tokens ?? undefined,
        outputTokens: responseBody.usage?.completion_tokens ?? undefined,
        totalTokens: responseBody.usage?.total_tokens ?? undefined,
        reasoningTokens: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
        cachedInputTokens: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
      },
      providerMetadata,
      request: { body },
@@ -309,65 +282,60 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
        body: rawResponse,
      },
      warnings,
    }
  }

  async doStream(
    options: Parameters<LanguageModelV2["doStream"]>[0],
  ): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
    const { args, warnings } = await this.getArgs({ ...options })
    const body = {
      ...args,
      stream: true,
      // only include stream_options when in strict compatibility mode:
      stream_options: this.config.includeUsage ? { include_usage: true } : undefined,
    }
    const metadataExtractor = this.config.metadataExtractor?.createStreamExtractor()
    const { responseHeaders, value: response } = await postJsonToApi({
      url: this.config.url({
        path: "/chat/completions",
        modelId: this.modelId,
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: this.failedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(this.chunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    })
    const toolCalls: Array<{
      id: string
      type: "function"
      function: {
        name: string
        arguments: string
      }
      hasFinished: boolean
    }> = []
    let finishReason: LanguageModelV2FinishReason = "unknown"
    const usage: {
      completionTokens: number | undefined
      completionTokensDetails: {
        reasoningTokens: number | undefined
        acceptedPredictionTokens: number | undefined
        rejectedPredictionTokens: number | undefined
      }
      promptTokens: number | undefined
      promptTokensDetails: {
        cachedTokens: number | undefined
      }
      totalTokens: number | undefined
    } = {
      completionTokens: undefined,
      completionTokensDetails: {
@@ -380,54 +348,51 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
        cachedTokens: undefined,
      },
      totalTokens: undefined,
    }
    let isFirstChunk = true
    const providerOptionsName = this.providerOptionsName
    let isActiveReasoning = false
    let isActiveText = false
    let reasoningOpaque: string | undefined
    return {
      stream: response.pipeThrough(
        new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV2StreamPart>({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings })
          },
          // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
          transform(chunk, controller) {
            // Emit raw chunk if requested (before anything else)
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue })
            }
            // handle failed chunk parsing / validation:
            if (!chunk.success) {
              finishReason = "error"
              controller.enqueue({ type: "error", error: chunk.error })
              return
            }
            const value = chunk.value
            metadataExtractor?.processChunk(chunk.rawValue)
            // handle error chunks:
            if ("error" in value) {
              finishReason = "error"
              controller.enqueue({ type: "error", error: value.error.message })
              return
            }
            if (isFirstChunk) {
              isFirstChunk = false
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata(value),
              })
            }
            if (value.usage != null) {
@@ -437,46 +402,38 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
                total_tokens,
                prompt_tokens_details,
                completion_tokens_details,
              } = value.usage
              usage.promptTokens = prompt_tokens ?? undefined
              usage.completionTokens = completion_tokens ?? undefined
              usage.totalTokens = total_tokens ?? undefined
              if (completion_tokens_details?.reasoning_tokens != null) {
                usage.completionTokensDetails.reasoningTokens = completion_tokens_details?.reasoning_tokens
              }
              if (completion_tokens_details?.accepted_prediction_tokens != null) {
                usage.completionTokensDetails.acceptedPredictionTokens =
                  completion_tokens_details?.accepted_prediction_tokens
              }
              if (completion_tokens_details?.rejected_prediction_tokens != null) {
                usage.completionTokensDetails.rejectedPredictionTokens =
                  completion_tokens_details?.rejected_prediction_tokens
              }
              if (prompt_tokens_details?.cached_tokens != null) {
                usage.promptTokensDetails.cachedTokens = prompt_tokens_details?.cached_tokens
              }
            }
            const choice = value.choices[0]
            if (choice?.finish_reason != null) {
              finishReason = mapOpenAICompatibleFinishReason(choice.finish_reason)
            }
            if (choice?.delta == null) {
              return
            }
            const delta = choice.delta
            // Capture reasoning_opaque for Copilot multi-turn reasoning
            if (delta.reasoning_opaque) {
@@ -484,28 +441,28 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
                throw new InvalidResponseDataError({
                  data: delta,
                  message:
                    "Multiple reasoning_opaque values received in a single response. Only one thinking part per response is supported.",
                })
              }
              reasoningOpaque = delta.reasoning_opaque
            }
            // enqueue reasoning before text deltas (Copilot uses reasoning_text):
            const reasoningContent = delta.reasoning_text
            if (reasoningContent) {
              if (!isActiveReasoning) {
                controller.enqueue({
                  type: "reasoning-start",
                  id: "reasoning-0",
                })
                isActiveReasoning = true
              }
              controller.enqueue({
                type: "reasoning-delta",
                id: "reasoning-0",
                delta: reasoningContent,
              })
            }
            if (delta.content) {
@@ -513,25 +470,23 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
              // This handles the case where reasoning_opaque and content come in the same chunk
              if (isActiveReasoning && !isActiveText) {
                controller.enqueue({
                  type: "reasoning-end",
                  id: "reasoning-0",
                  providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
                })
                isActiveReasoning = false
              }
              if (!isActiveText) {
                controller.enqueue({ type: "text-start", id: "txt-0" })
                isActiveText = true
              }
              controller.enqueue({
                type: "text-delta",
                id: "txt-0",
                delta: delta.content,
              })
            }
            if (delta.tool_calls != null) {
@@ -539,102 +494,96 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
              // This handles the case where reasoning goes directly to tool calls with no content
              if (isActiveReasoning) {
                controller.enqueue({
                  type: "reasoning-end",
                  id: "reasoning-0",
                  providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
                })
                isActiveReasoning = false
              }
              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index
                if (toolCalls[index] == null) {
                  if (toolCallDelta.id == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'id' to be a string.`,
                    })
                  }
                  if (toolCallDelta.function?.name == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function.name' to be a string.`,
                    })
                  }
                  controller.enqueue({
                    type: "tool-input-start",
                    id: toolCallDelta.id,
                    toolName: toolCallDelta.function.name,
                  })
                  toolCalls[index] = {
                    id: toolCallDelta.id,
                    type: "function",
                    function: {
                      name: toolCallDelta.function.name,
                      arguments: toolCallDelta.function.arguments ?? "",
                    },
                    hasFinished: false,
                  }
                  const toolCall = toolCalls[index]
                  if (toolCall.function?.name != null && toolCall.function?.arguments != null) {
                    // send delta if the argument text has already started:
                    if (toolCall.function.arguments.length > 0) {
                      controller.enqueue({
                        type: "tool-input-delta",
                        id: toolCall.id,
                        delta: toolCall.function.arguments,
                      })
                    }
                    // check if tool call is complete
                    // (some providers send the full tool call in one chunk):
                    if (isParsableJson(toolCall.function.arguments)) {
                      controller.enqueue({
                        type: "tool-input-end",
                        id: toolCall.id,
                      })
                      controller.enqueue({
                        type: "tool-call",
                        toolCallId: toolCall.id ?? generateId(),
                        toolName: toolCall.function.name,
                        input: toolCall.function.arguments,
                      })
                      toolCall.hasFinished = true
                    }
                  }
                  continue
                }
                // existing tool call, merge if not finished
                const toolCall = toolCalls[index]
                if (toolCall.hasFinished) {
                  continue
                }
                if (toolCallDelta.function?.arguments != null) {
                  toolCall.function!.arguments += toolCallDelta.function?.arguments ?? ""
                }
                // send delta
                controller.enqueue({
                  type: "tool-input-delta",
                  id: toolCall.id,
                  delta: toolCallDelta.function.arguments ?? "",
                })
                // check if tool call is complete
                if (
@@ -643,17 +592,17 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
                  isParsableJson(toolCall.function.arguments)
                ) {
                  controller.enqueue({
                    type: "tool-input-end",
                    id: toolCall.id,
                  })
                  controller.enqueue({
                    type: "tool-call",
                    toolCallId: toolCall.id ?? generateId(),
                    toolName: toolCall.function.name,
                    input: toolCall.function.arguments,
                  })
                  toolCall.hasFinished = true
                }
              }
            }
@@ -662,77 +611,65 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
          flush(controller) {
            if (isActiveReasoning) {
              controller.enqueue({
                type: "reasoning-end",
                id: "reasoning-0",
                // Include reasoning_opaque for Copilot multi-turn reasoning
                providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
              })
            }
            if (isActiveText) {
              controller.enqueue({ type: "text-end", id: "txt-0" })
            }
            // go through all tool calls and send the ones that are not finished
            for (const toolCall of toolCalls.filter((toolCall) => !toolCall.hasFinished)) {
              controller.enqueue({
                type: "tool-input-end",
                id: toolCall.id,
              })
              controller.enqueue({
                type: "tool-call",
                toolCallId: toolCall.id ?? generateId(),
                toolName: toolCall.function.name,
                input: toolCall.function.arguments,
              })
            }
            const providerMetadata: SharedV2ProviderMetadata = {
              [providerOptionsName]: {},
              // Include reasoning_opaque for Copilot multi-turn reasoning
              ...(reasoningOpaque ? { copilot: { reasoningOpaque } } : {}),
              ...metadataExtractor?.buildMetadata(),
            }
            if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
              providerMetadata[providerOptionsName].acceptedPredictionTokens =
                usage.completionTokensDetails.acceptedPredictionTokens
            }
            if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
              providerMetadata[providerOptionsName].rejectedPredictionTokens =
                usage.completionTokensDetails.rejectedPredictionTokens
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              usage: {
                inputTokens: usage.promptTokens ?? undefined,
                outputTokens: usage.completionTokens ?? undefined,
                totalTokens: usage.totalTokens ?? undefined,
                reasoningTokens: usage.completionTokensDetails.reasoningTokens ?? undefined,
                cachedInputTokens: usage.promptTokensDetails.cachedTokens ?? undefined,
              },
              providerMetadata,
            })
          },
        }),
      ),
      request: { body },
      response: { headers: responseHeaders },
    }
  }
}
@@ -754,7 +691,7 @@ const openaiCompatibleTokenUsageSchema = z
      })
      .nullish(),
  })
  .nullish()

// limited version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
@@ -765,7 +702,7 @@ const OpenAICompatibleChatResponseSchema = z.object({
  choices: z.array(
    z.object({
      message: z.object({
        role: z.literal("assistant").nullish(),
        content: z.string().nullish(),
        // Copilot-specific reasoning fields
        reasoning_text: z.string().nullish(),
@@ -786,15 +723,11 @@ const OpenAICompatibleChatResponseSchema = z.object({
    }),
  ),
  usage: openaiCompatibleTokenUsageSchema,
})

// limited version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const createOpenAICompatibleChatChunkSchema = <ERROR_SCHEMA extends z.core.$ZodType>(errorSchema: ERROR_SCHEMA) =>
  z.union([
    z.object({
      id: z.string().nullish(),
@@ -804,7 +737,7 @@ const createOpenAICompatibleChatChunkSchema = <
      z.object({
        delta: z
          .object({
            role: z.enum(["assistant"]).nullish(),
            content: z.string().nullish(),
            // Copilot-specific reasoning fields
            reasoning_text: z.string().nullish(),
@@ -829,4 +762,4 @@ const createOpenAICompatibleChatChunkSchema = <
      usage: openaiCompatibleTokenUsageSchema,
    }),
    errorSchema,
  ])
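A construction sketch, not a definitive setup: the module path, endpoint URL, and header value are assumptions, but the config fields match OpenAICompatibleChatConfig above:

import { OpenAICompatibleChatLanguageModel } from "./openai-compatible-chat-language-model" // path assumed

const model = new OpenAICompatibleChatLanguageModel("gpt-4o", {
  provider: "copilot.chat",
  headers: () => ({ Authorization: `Bearer ${process.env.COPILOT_TOKEN}` }), // hypothetical auth
  url: ({ path }) => `https://copilot-proxy.example.com${path}`, // assumed endpoint
  includeUsage: true,
})
// providerOptionsName strips everything after the first "." in `provider`,
// so this instance reports provider metadata under the "copilot" key.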

View File

@@ -1,6 +1,6 @@
import { z } from "zod/v4"

export type OpenAICompatibleChatModelId = string

export const openaiCompatibleProviderOptions = z.object({
  /**
@@ -23,8 +23,6 @@ export const openaiCompatibleProviderOptions = z.object({
   * Copilot thinking_budget used for Anthropic models.
   */
  thinking_budget: z.number().optional(),
})

export type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>

View File

@@ -1,4 +1,4 @@
import type { SharedV2ProviderMetadata } from "@ai-sdk/provider"

/**
Extracts provider-specific metadata from API responses.
@@ -14,11 +14,7 @@ export type MetadataExtractor = {
   * @returns Provider-specific metadata or undefined if no metadata is available.
   * The metadata should be under a key indicating the provider id.
   */
  extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV2ProviderMetadata | undefined>
  /**
   * Creates an extractor for handling streaming responses. The returned object provides
@@ -34,7 +30,7 @@ export type MetadataExtractor = {
   *
   * @param parsedChunk - The parsed JSON response chunk from the provider's API
   */
    processChunk(parsedChunk: unknown): void
    /**
     * Builds the metadata object after all chunks have been processed.
@@ -43,6 +39,6 @@ export type MetadataExtractor = {
     * @returns Provider-specific metadata or undefined if no metadata is available.
     * The metadata should be under a key indicating the provider id.
     */
    buildMetadata(): SharedV2ProviderMetadata | undefined
  }
}
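A minimal conforming extractor, assuming only the MetadataExtractor shape above (createStreamExtractor is the member referenced by doStream); the chunkCount metric is invented for illustration:

import type { MetadataExtractor } from "./openai-compatible-metadata-extractor"

const countingExtractor: MetadataExtractor = {
  // report whether a body was seen at all (illustrative only):
  extractMetadata: async ({ parsedBody }) => (parsedBody == null ? undefined : { copilot: { sawBody: true } }),
  createStreamExtractor: () => {
    let chunkCount = 0
    return {
      processChunk(parsedChunk: unknown) {
        if (parsedChunk != null) chunkCount += 1
      },
      buildMetadata: () => ({ copilot: { chunkCount } }),
    }
  },
}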

View File

@@ -2,91 +2,86 @@ import {
type LanguageModelV2CallOptions, type LanguageModelV2CallOptions,
type LanguageModelV2CallWarning, type LanguageModelV2CallWarning,
UnsupportedFunctionalityError, UnsupportedFunctionalityError,
} from '@ai-sdk/provider'; } from "@ai-sdk/provider"
export function prepareTools({ export function prepareTools({
tools, tools,
toolChoice, toolChoice,
}: { }: {
tools: LanguageModelV2CallOptions['tools']; tools: LanguageModelV2CallOptions["tools"]
toolChoice?: LanguageModelV2CallOptions['toolChoice']; toolChoice?: LanguageModelV2CallOptions["toolChoice"]
}): { }): {
tools: tools:
| undefined | undefined
| Array<{ | Array<{
type: 'function'; type: "function"
function: { function: {
name: string; name: string
description: string | undefined; description: string | undefined
parameters: unknown; parameters: unknown
}; }
}>; }>
toolChoice: toolChoice: { type: "function"; function: { name: string } } | "auto" | "none" | "required" | undefined
| { type: 'function'; function: { name: string } } toolWarnings: LanguageModelV2CallWarning[]
| 'auto'
| 'none'
| 'required'
| undefined;
toolWarnings: LanguageModelV2CallWarning[];
} { } {
// when the tools array is empty, change it to undefined to prevent errors: // when the tools array is empty, change it to undefined to prevent errors:
tools = tools?.length ? tools : undefined; tools = tools?.length ? tools : undefined
const toolWarnings: LanguageModelV2CallWarning[] = []; const toolWarnings: LanguageModelV2CallWarning[] = []
if (tools == null) { if (tools == null) {
return { tools: undefined, toolChoice: undefined, toolWarnings }; return { tools: undefined, toolChoice: undefined, toolWarnings }
} }
const openaiCompatTools: Array<{ const openaiCompatTools: Array<{
type: 'function'; type: "function"
function: { function: {
name: string; name: string
description: string | undefined; description: string | undefined
parameters: unknown; parameters: unknown
}; }
}> = []; }> = []
for (const tool of tools) { for (const tool of tools) {
if (tool.type === 'provider-defined') { if (tool.type === "provider-defined") {
toolWarnings.push({ type: 'unsupported-tool', tool }); toolWarnings.push({ type: "unsupported-tool", tool })
} else { } else {
openaiCompatTools.push({ openaiCompatTools.push({
type: 'function', type: "function",
function: { function: {
name: tool.name, name: tool.name,
description: tool.description, description: tool.description,
parameters: tool.inputSchema, parameters: tool.inputSchema,
}, },
}); })
} }
} }
if (toolChoice == null) { if (toolChoice == null) {
return { tools: openaiCompatTools, toolChoice: undefined, toolWarnings }; return { tools: openaiCompatTools, toolChoice: undefined, toolWarnings }
} }
const type = toolChoice.type; const type = toolChoice.type
switch (type) { switch (type) {
case 'auto': case "auto":
case 'none': case "none":
case 'required': case "required":
return { tools: openaiCompatTools, toolChoice: type, toolWarnings }; return { tools: openaiCompatTools, toolChoice: type, toolWarnings }
case 'tool': case "tool":
return { return {
tools: openaiCompatTools, tools: openaiCompatTools,
toolChoice: { toolChoice: {
type: 'function', type: "function",
function: { name: toolChoice.toolName }, function: { name: toolChoice.toolName },
}, },
toolWarnings, toolWarnings,
}; }
default: { default: {
const _exhaustiveCheck: never = type; const _exhaustiveCheck: never = type
throw new UnsupportedFunctionalityError({ throw new UnsupportedFunctionalityError({
functionality: `tool choice type: ${_exhaustiveCheck}`, functionality: `tool choice type: ${_exhaustiveCheck}`,
}); })
} }
} }
} }
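Usage sketch for prepareTools with a hypothetical read_file tool (the cast sidesteps the exact LanguageModelV2 function-tool typing for brevity):

import { prepareTools } from "./openai-compatible-prepare-tools"

const { tools, toolChoice, toolWarnings } = prepareTools({
  tools: [
    {
      type: "function",
      name: "read_file",
      description: "Read a file from the workspace",
      inputSchema: { type: "object", properties: { path: { type: "string" } } },
    },
  ] as any,
  toolChoice: { type: "tool", toolName: "read_file" },
})
// tools        => [{ type: "function", function: { name: "read_file", ... } }]
// toolChoice   => { type: "function", function: { name: "read_file" } }
// toolWarnings => []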

View File

@@ -1,4 +1,4 @@
import { z, type ZodType } from "zod/v4"

export const openaiCompatibleErrorDataSchema = z.object({
  error: z.object({
@@ -11,20 +11,17 @@ export const openaiCompatibleErrorDataSchema = z.object({
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
})

export type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>

export type ProviderErrorStructure<T> = {
  errorSchema: ZodType<T>
  errorToMessage: (error: T) => string
  isRetryable?: (response: Response, error?: T) => boolean
}

export const defaultOpenAICompatibleErrorStructure: ProviderErrorStructure<OpenAICompatibleErrorData> = {
  errorSchema: openaiCompatibleErrorDataSchema,
  errorToMessage: (data) => data.error.message,
}
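A custom ProviderErrorStructure for a hypothetical provider whose errors arrive as flat { message, code } objects rather than the nested default shape:

import { z } from "zod/v4"
import type { ProviderErrorStructure } from "./openai-compatible-error" // path assumed

const flatErrorSchema = z.object({ message: z.string(), code: z.number().nullish() })

const flatErrorStructure: ProviderErrorStructure<z.infer<typeof flatErrorSchema>> = {
  errorSchema: flatErrorSchema,
  errorToMessage: (error) => error.message,
  // retry only on rate limiting:
  isRetryable: (response) => response.status === 429,
}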

View File

@@ -464,7 +464,7 @@ describe("full conversation", () => {
expect(result).toHaveLength(4) expect(result).toHaveLength(4)
const systemMsg = result[0]; const systemMsg = result[0]
expect(systemMsg.role).toBe("system") expect(systemMsg.role).toBe("system")
// Assistant message should have reasoning fields // Assistant message should have reasoning fields

View File

@@ -355,7 +355,9 @@ describe("doStream", () => {
// Check text content // Check text content
const textDeltas = parts.filter((p) => p.type === "text-delta") const textDeltas = parts.filter((p) => p.type === "text-delta")
expect(textDeltas).toHaveLength(1) expect(textDeltas).toHaveLength(1)
expect((textDeltas[0] as { delta: string }).delta).toContain("Okay, I need to check out the project's file structure.") expect((textDeltas[0] as { delta: string }).delta).toContain(
"Okay, I need to check out the project's file structure.",
)
// Check tool call // Check tool call
const toolParts = parts.filter( const toolParts = parts.filter(

View File

@@ -1108,7 +1108,7 @@ describe("ProviderTransform.message - providerOptions key remapping", () => {
role: "user", role: "user",
content: "Hello", content: "Hello",
providerOptions: { providerOptions: {
"copilot": { someOption: "value" }, copilot: { someOption: "value" },
}, },
}, },
] as any[] ] as any[]