chore: generate

opencode-agent[bot]
2026-01-31 02:35:22 +00:00
parent d9f18e4006
commit 644f0d4e92
12 changed files with 407 additions and 506 deletions

View File

@@ -2,64 +2,57 @@ import {
   type LanguageModelV2Prompt,
   type SharedV2ProviderMetadata,
   UnsupportedFunctionalityError,
-} from '@ai-sdk/provider';
-import type { OpenAICompatibleChatPrompt } from './openai-compatible-api-types';
-import { convertToBase64 } from '@ai-sdk/provider-utils';
+} from "@ai-sdk/provider"
+import type { OpenAICompatibleChatPrompt } from "./openai-compatible-api-types"
+import { convertToBase64 } from "@ai-sdk/provider-utils"
-function getOpenAIMetadata(message: {
-  providerOptions?: SharedV2ProviderMetadata;
-}) {
-  return message?.providerOptions?.copilot ?? {};
+function getOpenAIMetadata(message: { providerOptions?: SharedV2ProviderMetadata }) {
+  return message?.providerOptions?.copilot ?? {}
 }
-export function convertToOpenAICompatibleChatMessages(
-  prompt: LanguageModelV2Prompt,
-): OpenAICompatibleChatPrompt {
-  const messages: OpenAICompatibleChatPrompt = [];
+export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Prompt): OpenAICompatibleChatPrompt {
+  const messages: OpenAICompatibleChatPrompt = []
   for (const { role, content, ...message } of prompt) {
-    const metadata = getOpenAIMetadata({ ...message });
+    const metadata = getOpenAIMetadata({ ...message })
     switch (role) {
-      case 'system': {
+      case "system": {
         messages.push({
-          role: 'system',
+          role: "system",
           content: [
             {
-              type: 'text',
+              type: "text",
               text: content,
             },
           ],
           ...metadata,
-        });
-        break;
+        })
+        break
       }
-      case 'user': {
-        if (content.length === 1 && content[0].type === 'text') {
+      case "user": {
+        if (content.length === 1 && content[0].type === "text") {
           messages.push({
-            role: 'user',
+            role: "user",
             content: content[0].text,
             ...getOpenAIMetadata(content[0]),
-          });
-          break;
+          })
+          break
         }
         messages.push({
-          role: 'user',
-          content: content.map(part => {
-            const partMetadata = getOpenAIMetadata(part);
+          role: "user",
+          content: content.map((part) => {
+            const partMetadata = getOpenAIMetadata(part)
             switch (part.type) {
-              case 'text': {
-                return { type: 'text', text: part.text, ...partMetadata };
+              case "text": {
+                return { type: "text", text: part.text, ...partMetadata }
               }
-              case 'file': {
-                if (part.mediaType.startsWith('image/')) {
-                  const mediaType =
-                    part.mediaType === 'image/*'
-                      ? 'image/jpeg'
-                      : part.mediaType;
+              case "file": {
+                if (part.mediaType.startsWith("image/")) {
+                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType
                   return {
-                    type: 'image_url',
+                    type: "image_url",
                     image_url: {
                       url:
                         part.data instanceof URL
@@ -67,111 +60,110 @@ export function convertToOpenAICompatibleChatMessages(
                           : `data:${mediaType};base64,${convertToBase64(part.data)}`,
                     },
                     ...partMetadata,
-                  };
+                  }
                 } else {
                   throw new UnsupportedFunctionalityError({
                     functionality: `file part media type ${part.mediaType}`,
-                  });
+                  })
                 }
               }
             }
           }),
           ...metadata,
-        });
+        })
-        break;
+        break
       }
-      case 'assistant': {
-        let text = '';
-        let reasoningText: string | undefined;
-        let reasoningOpaque: string | undefined;
+      case "assistant": {
+        let text = ""
+        let reasoningText: string | undefined
+        let reasoningOpaque: string | undefined
         const toolCalls: Array<{
-          id: string;
-          type: 'function';
-          function: { name: string; arguments: string };
-        }> = [];
+          id: string
+          type: "function"
+          function: { name: string; arguments: string }
+        }> = []
         for (const part of content) {
-          const partMetadata = getOpenAIMetadata(part);
+          const partMetadata = getOpenAIMetadata(part)
           // Check for reasoningOpaque on any part (may be attached to text/tool-call)
-          const partOpaque = (
-            part.providerOptions as { copilot?: { reasoningOpaque?: string } }
-          )?.copilot?.reasoningOpaque;
+          const partOpaque = (part.providerOptions as { copilot?: { reasoningOpaque?: string } })?.copilot
+            ?.reasoningOpaque
          if (partOpaque && !reasoningOpaque) {
-            reasoningOpaque = partOpaque;
+            reasoningOpaque = partOpaque
          }
          switch (part.type) {
-            case 'text': {
-              text += part.text;
-              break;
+            case "text": {
+              text += part.text
+              break
            }
-            case 'reasoning': {
-              reasoningText = part.text;
-              break;
+            case "reasoning": {
+              reasoningText = part.text
+              break
            }
-            case 'tool-call': {
+            case "tool-call": {
              toolCalls.push({
                id: part.toolCallId,
-                type: 'function',
+                type: "function",
                function: {
                  name: part.toolName,
                  arguments: JSON.stringify(part.input),
                },
                ...partMetadata,
-              });
-              break;
+              })
+              break
            }
          }
        }
        messages.push({
-          role: 'assistant',
+          role: "assistant",
          content: text || null,
          tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
          reasoning_text: reasoningText,
          reasoning_opaque: reasoningOpaque,
          ...metadata,
-        });
+        })
-        break;
+        break
      }
-      case 'tool': {
+      case "tool": {
        for (const toolResponse of content) {
-          const output = toolResponse.output;
+          const output = toolResponse.output
-          let contentValue: string;
+          let contentValue: string
          switch (output.type) {
-            case 'text':
-            case 'error-text':
-              contentValue = output.value;
-              break;
-            case 'content':
-            case 'json':
-            case 'error-json':
-              contentValue = JSON.stringify(output.value);
-              break;
+            case "text":
+            case "error-text":
+              contentValue = output.value
+              break
+            case "content":
+            case "json":
+            case "error-json":
+              contentValue = JSON.stringify(output.value)
+              break
          }
-          const toolResponseMetadata = getOpenAIMetadata(toolResponse);
+          const toolResponseMetadata = getOpenAIMetadata(toolResponse)
          messages.push({
-            role: 'tool',
+            role: "tool",
            tool_call_id: toolResponse.toolCallId,
            content: contentValue,
            ...toolResponseMetadata,
-          });
+          })
        }
-        break;
+        break
      }
      default: {
-        const _exhaustiveCheck: never = role;
-        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+        const _exhaustiveCheck: never = role
+        throw new Error(`Unsupported role: ${_exhaustiveCheck}`)
      }
    }
  }
-  return messages;
+  return messages
 }
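
Note: the converter flattens a LanguageModelV2Prompt into the OpenAI-compatible wire shape shown above. A minimal usage sketch, assuming the file layout in this diff (the prompt literal is hypothetical):

import { convertToOpenAICompatibleChatMessages } from "./convert-to-openai-compatible-chat-messages"

const messages = convertToOpenAICompatibleChatMessages([
  { role: "system", content: "Be brief." },
  { role: "user", content: [{ type: "text", text: "Hello" }] },
])
// => [
//   { role: "system", content: [{ type: "text", text: "Be brief." }] },
//   { role: "user", content: "Hello" }, // a single text part collapses to a plain string
// ]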

View File

@@ -3,13 +3,13 @@ export function getResponseMetadata({
   model,
   created,
 }: {
-  id?: string | undefined | null;
-  created?: number | undefined | null;
-  model?: string | undefined | null;
+  id?: string | undefined | null
+  created?: number | undefined | null
+  model?: string | undefined | null
 }) {
   return {
     id: id ?? undefined,
     modelId: model ?? undefined,
     timestamp: created != null ? new Date(created * 1000) : undefined,
-  };
+  }
 }
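
Note: created is the OpenAI-style Unix timestamp in seconds, hence the * 1000 before constructing the Date. A quick sketch (values hypothetical):

getResponseMetadata({ id: "chatcmpl-1", model: "gpt-4o", created: 1706659200 })
// => { id: "chatcmpl-1", modelId: "gpt-4o", timestamp: new Date("2024-01-31T00:00:00.000Z") }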

View File

@@ -1,19 +1,17 @@
-import type { LanguageModelV2FinishReason } from '@ai-sdk/provider';
+import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
-export function mapOpenAICompatibleFinishReason(
-  finishReason: string | null | undefined,
-): LanguageModelV2FinishReason {
+export function mapOpenAICompatibleFinishReason(finishReason: string | null | undefined): LanguageModelV2FinishReason {
   switch (finishReason) {
-    case 'stop':
-      return 'stop';
-    case 'length':
-      return 'length';
-    case 'content_filter':
-      return 'content-filter';
-    case 'function_call':
-    case 'tool_calls':
-      return 'tool-calls';
+    case "stop":
+      return "stop"
+    case "length":
+      return "length"
+    case "content_filter":
+      return "content-filter"
+    case "function_call":
+    case "tool_calls":
+      return "tool-calls"
     default:
-      return 'unknown';
+      return "unknown"
   }
 }
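
Note: the mapper normalizes the wire protocol's snake_case finish reasons to the AI SDK's kebab-case union, folding the legacy function_call together with tool_calls and treating anything unrecognized (including null/undefined) as "unknown":

mapOpenAICompatibleFinishReason("tool_calls") // => "tool-calls"
mapOpenAICompatibleFinishReason(undefined) // => "unknown"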

View File

@@ -1,74 +1,64 @@
-import type { JSONValue } from '@ai-sdk/provider';
+import type { JSONValue } from "@ai-sdk/provider"
-export type OpenAICompatibleChatPrompt = Array<OpenAICompatibleMessage>;
+export type OpenAICompatibleChatPrompt = Array<OpenAICompatibleMessage>
 export type OpenAICompatibleMessage =
   | OpenAICompatibleSystemMessage
   | OpenAICompatibleUserMessage
   | OpenAICompatibleAssistantMessage
-  | OpenAICompatibleToolMessage;
+  | OpenAICompatibleToolMessage
 // Allow for arbitrary additional properties for general purpose
 // provider-metadata-specific extensibility.
-type JsonRecord<T = never> = Record<
-  string,
-  JSONValue | JSONValue[] | T | T[] | undefined
->;
+type JsonRecord<T = never> = Record<string, JSONValue | JSONValue[] | T | T[] | undefined>
-export interface OpenAICompatibleSystemMessage
-  extends JsonRecord<OpenAICompatibleSystemContentPart> {
-  role: 'system';
-  content: string | Array<OpenAICompatibleSystemContentPart>;
+export interface OpenAICompatibleSystemMessage extends JsonRecord<OpenAICompatibleSystemContentPart> {
+  role: "system"
+  content: string | Array<OpenAICompatibleSystemContentPart>
 }
-export interface OpenAICompatibleSystemContentPart
-  extends JsonRecord {
-  type: 'text';
-  text: string;
+export interface OpenAICompatibleSystemContentPart extends JsonRecord {
+  type: "text"
+  text: string
 }
-export interface OpenAICompatibleUserMessage
-  extends JsonRecord<OpenAICompatibleContentPart> {
-  role: 'user';
-  content: string | Array<OpenAICompatibleContentPart>;
+export interface OpenAICompatibleUserMessage extends JsonRecord<OpenAICompatibleContentPart> {
+  role: "user"
+  content: string | Array<OpenAICompatibleContentPart>
 }
-export type OpenAICompatibleContentPart =
-  | OpenAICompatibleContentPartText
-  | OpenAICompatibleContentPartImage;
+export type OpenAICompatibleContentPart = OpenAICompatibleContentPartText | OpenAICompatibleContentPartImage
 export interface OpenAICompatibleContentPartImage extends JsonRecord {
-  type: 'image_url';
-  image_url: { url: string };
+  type: "image_url"
+  image_url: { url: string }
 }
 export interface OpenAICompatibleContentPartText extends JsonRecord {
-  type: 'text';
-  text: string;
+  type: "text"
+  text: string
 }
-export interface OpenAICompatibleAssistantMessage
-  extends JsonRecord<OpenAICompatibleMessageToolCall> {
-  role: 'assistant';
-  content?: string | null;
-  tool_calls?: Array<OpenAICompatibleMessageToolCall>;
+export interface OpenAICompatibleAssistantMessage extends JsonRecord<OpenAICompatibleMessageToolCall> {
+  role: "assistant"
+  content?: string | null
+  tool_calls?: Array<OpenAICompatibleMessageToolCall>
   // Copilot-specific reasoning fields
-  reasoning_text?: string;
-  reasoning_opaque?: string;
+  reasoning_text?: string
+  reasoning_opaque?: string
 }
 export interface OpenAICompatibleMessageToolCall extends JsonRecord {
-  type: 'function';
-  id: string;
+  type: "function"
+  id: string
   function: {
-    arguments: string;
-    name: string;
-  };
+    arguments: string
+    name: string
+  }
 }
-export interface OpenAICompatibleToolMessage
-  extends JsonRecord {
-  role: 'tool';
-  content: string;
-  tool_call_id: string;
+export interface OpenAICompatibleToolMessage extends JsonRecord {
+  role: "tool"
+  content: string
+  tool_call_id: string
 }
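
Note: these wire types mirror the OpenAI chat payload, with reasoning_text and reasoning_opaque as the Copilot-specific extension and JsonRecord leaving room for arbitrary provider metadata. A hypothetical assistant turn that satisfies OpenAICompatibleAssistantMessage:

const turn: OpenAICompatibleAssistantMessage = {
  role: "assistant",
  content: null,
  tool_calls: [{ type: "function", id: "call_1", function: { name: "read_file", arguments: "{}" } }],
  reasoning_text: "The user wants the file contents…",
  reasoning_opaque: "opaque-token-from-the-provider",
}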

View File

@@ -7,7 +7,7 @@ import {
   type LanguageModelV2FinishReason,
   type LanguageModelV2StreamPart,
   type SharedV2ProviderMetadata,
-} from '@ai-sdk/provider';
+} from "@ai-sdk/provider"
 import {
   combineHeaders,
   createEventSourceResponseHandler,
@@ -20,80 +20,68 @@ import {
   type ParseResult,
   postJsonToApi,
   type ResponseHandler,
-} from '@ai-sdk/provider-utils';
-import { z } from 'zod/v4';
-import { convertToOpenAICompatibleChatMessages } from './convert-to-openai-compatible-chat-messages';
-import { getResponseMetadata } from './get-response-metadata';
-import { mapOpenAICompatibleFinishReason } from './map-openai-compatible-finish-reason';
-import {
-  type OpenAICompatibleChatModelId,
-  openaiCompatibleProviderOptions,
-} from './openai-compatible-chat-options';
-import {
-  defaultOpenAICompatibleErrorStructure,
-  type ProviderErrorStructure,
-} from '../openai-compatible-error';
-import type { MetadataExtractor } from './openai-compatible-metadata-extractor';
-import { prepareTools } from './openai-compatible-prepare-tools';
+} from "@ai-sdk/provider-utils"
+import { z } from "zod/v4"
+import { convertToOpenAICompatibleChatMessages } from "./convert-to-openai-compatible-chat-messages"
+import { getResponseMetadata } from "./get-response-metadata"
+import { mapOpenAICompatibleFinishReason } from "./map-openai-compatible-finish-reason"
+import { type OpenAICompatibleChatModelId, openaiCompatibleProviderOptions } from "./openai-compatible-chat-options"
+import { defaultOpenAICompatibleErrorStructure, type ProviderErrorStructure } from "../openai-compatible-error"
+import type { MetadataExtractor } from "./openai-compatible-metadata-extractor"
+import { prepareTools } from "./openai-compatible-prepare-tools"
 export type OpenAICompatibleChatConfig = {
-  provider: string;
-  headers: () => Record<string, string | undefined>;
-  url: (options: { modelId: string; path: string }) => string;
-  fetch?: FetchFunction;
-  includeUsage?: boolean;
-  errorStructure?: ProviderErrorStructure<any>;
-  metadataExtractor?: MetadataExtractor;
+  provider: string
+  headers: () => Record<string, string | undefined>
+  url: (options: { modelId: string; path: string }) => string
+  fetch?: FetchFunction
+  includeUsage?: boolean
+  errorStructure?: ProviderErrorStructure<any>
+  metadataExtractor?: MetadataExtractor
   /**
    * Whether the model supports structured outputs.
    */
-  supportsStructuredOutputs?: boolean;
+  supportsStructuredOutputs?: boolean
   /**
    * The supported URLs for the model.
    */
-  supportedUrls?: () => LanguageModelV2['supportedUrls'];
-};
+  supportedUrls?: () => LanguageModelV2["supportedUrls"]
+}
 export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
-  readonly specificationVersion = 'v2';
+  readonly specificationVersion = "v2"
-  readonly supportsStructuredOutputs: boolean;
+  readonly supportsStructuredOutputs: boolean
-  readonly modelId: OpenAICompatibleChatModelId;
-  private readonly config: OpenAICompatibleChatConfig;
-  private readonly failedResponseHandler: ResponseHandler<APICallError>;
-  private readonly chunkSchema; // type inferred via constructor
+  readonly modelId: OpenAICompatibleChatModelId
+  private readonly config: OpenAICompatibleChatConfig
+  private readonly failedResponseHandler: ResponseHandler<APICallError>
+  private readonly chunkSchema // type inferred via constructor
-  constructor(
-    modelId: OpenAICompatibleChatModelId,
-    config: OpenAICompatibleChatConfig,
-  ) {
-    this.modelId = modelId;
-    this.config = config;
+  constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig) {
+    this.modelId = modelId
+    this.config = config
     // initialize error handling:
-    const errorStructure =
-      config.errorStructure ?? defaultOpenAICompatibleErrorStructure;
-    this.chunkSchema = createOpenAICompatibleChatChunkSchema(
-      errorStructure.errorSchema,
-    );
-    this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure);
+    const errorStructure = config.errorStructure ?? defaultOpenAICompatibleErrorStructure
+    this.chunkSchema = createOpenAICompatibleChatChunkSchema(errorStructure.errorSchema)
+    this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure)
-    this.supportsStructuredOutputs = config.supportsStructuredOutputs ?? false;
+    this.supportsStructuredOutputs = config.supportsStructuredOutputs ?? false
   }
   get provider(): string {
-    return this.config.provider;
+    return this.config.provider
   }
   private get providerOptionsName(): string {
-    return this.config.provider.split('.')[0].trim();
+    return this.config.provider.split(".")[0].trim()
   }
   get supportedUrls() {
-    return this.config.supportedUrls?.() ?? {};
+    return this.config.supportedUrls?.() ?? {}
   }
   private async getArgs({
@@ -110,13 +98,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
     seed,
     toolChoice,
     tools,
-  }: Parameters<LanguageModelV2['doGenerate']>[0]) {
-    const warnings: LanguageModelV2CallWarning[] = [];
+  }: Parameters<LanguageModelV2["doGenerate"]>[0]) {
+    const warnings: LanguageModelV2CallWarning[] = []
     // Parse provider options
     const compatibleOptions = Object.assign(
       (await parseProviderOptions({
-        provider: 'copilot',
+        provider: "copilot",
         providerOptions,
         schema: openaiCompatibleProviderOptions,
       })) ?? {},
@@ -125,23 +113,18 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
         providerOptions,
         schema: openaiCompatibleProviderOptions,
       })) ?? {},
-    );
+    )
     if (topK != null) {
-      warnings.push({ type: 'unsupported-setting', setting: 'topK' });
+      warnings.push({ type: "unsupported-setting", setting: "topK" })
     }
-    if (
-      responseFormat?.type === 'json' &&
-      responseFormat.schema != null &&
-      !this.supportsStructuredOutputs
-    ) {
+    if (responseFormat?.type === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
       warnings.push({
-        type: 'unsupported-setting',
-        setting: 'responseFormat',
-        details:
-          'JSON response format schema is only supported with structuredOutputs',
-      });
+        type: "unsupported-setting",
+        setting: "responseFormat",
+        details: "JSON response format schema is only supported with structuredOutputs",
+      })
     }
     const {
@@ -151,7 +134,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
     } = prepareTools({
       tools,
       toolChoice,
-    });
+    })
     return {
       args: {
@@ -168,28 +151,24 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
         frequency_penalty: frequencyPenalty,
         presence_penalty: presencePenalty,
         response_format:
-          responseFormat?.type === 'json'
-            ? this.supportsStructuredOutputs === true &&
-              responseFormat.schema != null
+          responseFormat?.type === "json"
+            ? this.supportsStructuredOutputs === true && responseFormat.schema != null
               ? {
-                  type: 'json_schema',
+                  type: "json_schema",
                   json_schema: {
                     schema: responseFormat.schema,
-                    name: responseFormat.name ?? 'response',
+                    name: responseFormat.name ?? "response",
                     description: responseFormat.description,
                   },
                 }
-              : { type: 'json_object' }
+              : { type: "json_object" }
             : undefined,
         stop: stopSequences,
         seed,
         ...Object.fromEntries(
-          Object.entries(
-            providerOptions?.[this.providerOptionsName] ?? {},
-          ).filter(
-            ([key]) =>
-              !Object.keys(openaiCompatibleProviderOptions.shape).includes(key),
+          Object.entries(providerOptions?.[this.providerOptionsName] ?? {}).filter(
+            ([key]) => !Object.keys(openaiCompatibleProviderOptions.shape).includes(key),
           ),
         ),
@@ -207,15 +186,15 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
         thinking_budget: compatibleOptions.thinking_budget,
       },
       warnings: [...warnings, ...toolWarnings],
-    };
+    }
   }
   async doGenerate(
-    options: Parameters<LanguageModelV2['doGenerate']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
-    const { args, warnings } = await this.getArgs({ ...options });
+    options: Parameters<LanguageModelV2["doGenerate"]>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
+    const { args, warnings } = await this.getArgs({ ...options })
-    const body = JSON.stringify(args);
+    const body = JSON.stringify(args)
     const {
       responseHeaders,
@@ -223,50 +202,48 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
       rawValue: rawResponse,
     } = await postJsonToApi({
       url: this.config.url({
-        path: '/chat/completions',
+        path: "/chat/completions",
         modelId: this.modelId,
       }),
       headers: combineHeaders(this.config.headers(), options.headers),
       body: args,
       failedResponseHandler: this.failedResponseHandler,
-      successfulResponseHandler: createJsonResponseHandler(
-        OpenAICompatibleChatResponseSchema,
-      ),
+      successfulResponseHandler: createJsonResponseHandler(OpenAICompatibleChatResponseSchema),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch,
-    });
+    })
-    const choice = responseBody.choices[0];
-    const content: Array<LanguageModelV2Content> = [];
+    const choice = responseBody.choices[0]
+    const content: Array<LanguageModelV2Content> = []
     // text content:
-    const text = choice.message.content;
+    const text = choice.message.content
     if (text != null && text.length > 0) {
-      content.push({ type: 'text', text });
+      content.push({ type: "text", text })
     }
     // reasoning content (Copilot uses reasoning_text):
-    const reasoning = choice.message.reasoning_text;
+    const reasoning = choice.message.reasoning_text
     if (reasoning != null && reasoning.length > 0) {
       content.push({
-        type: 'reasoning',
+        type: "reasoning",
         text: reasoning,
         // Include reasoning_opaque for Copilot multi-turn reasoning
         providerMetadata: choice.message.reasoning_opaque
           ? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
           : undefined,
-      });
+      })
     }
     // tool calls:
     if (choice.message.tool_calls != null) {
       for (const toolCall of choice.message.tool_calls) {
         content.push({
-          type: 'tool-call',
+          type: "tool-call",
           toolCallId: toolCall.id ?? generateId(),
           toolName: toolCall.function.name,
           input: toolCall.function.arguments!,
-        });
+        })
       }
     }
@@ -276,16 +253,15 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
       ...(await this.config.metadataExtractor?.extractMetadata?.({
         parsedBody: rawResponse,
       })),
-    };
-    const completionTokenDetails =
-      responseBody.usage?.completion_tokens_details;
+    }
+    const completionTokenDetails = responseBody.usage?.completion_tokens_details
     if (completionTokenDetails?.accepted_prediction_tokens != null) {
       providerMetadata[this.providerOptionsName].acceptedPredictionTokens =
-        completionTokenDetails?.accepted_prediction_tokens;
+        completionTokenDetails?.accepted_prediction_tokens
     }
     if (completionTokenDetails?.rejected_prediction_tokens != null) {
       providerMetadata[this.providerOptionsName].rejectedPredictionTokens =
-        completionTokenDetails?.rejected_prediction_tokens;
+        completionTokenDetails?.rejected_prediction_tokens
     }
     return {
@@ -295,11 +271,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
         inputTokens: responseBody.usage?.prompt_tokens ?? undefined,
         outputTokens: responseBody.usage?.completion_tokens ?? undefined,
         totalTokens: responseBody.usage?.total_tokens ?? undefined,
-        reasoningTokens:
-          responseBody.usage?.completion_tokens_details?.reasoning_tokens ??
-          undefined,
-        cachedInputTokens:
-          responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
+        reasoningTokens: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
+        cachedInputTokens: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
       },
       providerMetadata,
       request: { body },
@@ -309,65 +282,60 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
         body: rawResponse,
       },
       warnings,
-    };
+    }
   }
   async doStream(
-    options: Parameters<LanguageModelV2['doStream']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
-    const { args, warnings } = await this.getArgs({ ...options });
+    options: Parameters<LanguageModelV2["doStream"]>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
+    const { args, warnings } = await this.getArgs({ ...options })
     const body = {
       ...args,
       stream: true,
       // only include stream_options when in strict compatibility mode:
-      stream_options: this.config.includeUsage
-        ? { include_usage: true }
-        : undefined,
-    };
+      stream_options: this.config.includeUsage ? { include_usage: true } : undefined,
+    }
-    const metadataExtractor =
-      this.config.metadataExtractor?.createStreamExtractor();
+    const metadataExtractor = this.config.metadataExtractor?.createStreamExtractor()
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
-        path: '/chat/completions',
+        path: "/chat/completions",
         modelId: this.modelId,
       }),
       headers: combineHeaders(this.config.headers(), options.headers),
       body,
       failedResponseHandler: this.failedResponseHandler,
-      successfulResponseHandler: createEventSourceResponseHandler(
-        this.chunkSchema,
-      ),
+      successfulResponseHandler: createEventSourceResponseHandler(this.chunkSchema),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch,
-    });
+    })
     const toolCalls: Array<{
-      id: string;
-      type: 'function';
+      id: string
+      type: "function"
       function: {
-        name: string;
-        arguments: string;
-      };
-      hasFinished: boolean;
-    }> = [];
+        name: string
+        arguments: string
+      }
+      hasFinished: boolean
+    }> = []
-    let finishReason: LanguageModelV2FinishReason = 'unknown';
+    let finishReason: LanguageModelV2FinishReason = "unknown"
     const usage: {
-      completionTokens: number | undefined;
+      completionTokens: number | undefined
       completionTokensDetails: {
-        reasoningTokens: number | undefined;
-        acceptedPredictionTokens: number | undefined;
-        rejectedPredictionTokens: number | undefined;
-      };
-      promptTokens: number | undefined;
+        reasoningTokens: number | undefined
+        acceptedPredictionTokens: number | undefined
+        rejectedPredictionTokens: number | undefined
+      }
+      promptTokens: number | undefined
       promptTokensDetails: {
-        cachedTokens: number | undefined;
-      };
-      totalTokens: number | undefined;
+        cachedTokens: number | undefined
+      }
+      totalTokens: number | undefined
     } = {
       completionTokens: undefined,
       completionTokensDetails: {
@@ -380,54 +348,51 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
         cachedTokens: undefined,
       },
       totalTokens: undefined,
-    };
-    let isFirstChunk = true;
-    const providerOptionsName = this.providerOptionsName;
-    let isActiveReasoning = false;
-    let isActiveText = false;
-    let reasoningOpaque: string | undefined;
+    }
+    let isFirstChunk = true
+    const providerOptionsName = this.providerOptionsName
+    let isActiveReasoning = false
+    let isActiveText = false
+    let reasoningOpaque: string | undefined
     return {
       stream: response.pipeThrough(
-        new TransformStream<
-          ParseResult<z.infer<typeof this.chunkSchema>>,
-          LanguageModelV2StreamPart
-        >({
+        new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV2StreamPart>({
          start(controller) {
-            controller.enqueue({ type: 'stream-start', warnings });
+            controller.enqueue({ type: "stream-start", warnings })
          },
          // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
          transform(chunk, controller) {
            // Emit raw chunk if requested (before anything else)
            if (options.includeRawChunks) {
-              controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue })
            }
            // handle failed chunk parsing / validation:
            if (!chunk.success) {
-              finishReason = 'error';
-              controller.enqueue({ type: 'error', error: chunk.error });
-              return;
+              finishReason = "error"
+              controller.enqueue({ type: "error", error: chunk.error })
+              return
            }
-            const value = chunk.value;
+            const value = chunk.value
-            metadataExtractor?.processChunk(chunk.rawValue);
+            metadataExtractor?.processChunk(chunk.rawValue)
            // handle error chunks:
-            if ('error' in value) {
-              finishReason = 'error';
-              controller.enqueue({ type: 'error', error: value.error.message });
-              return;
+            if ("error" in value) {
+              finishReason = "error"
+              controller.enqueue({ type: "error", error: value.error.message })
+              return
            }
            if (isFirstChunk) {
-              isFirstChunk = false;
+              isFirstChunk = false
              controller.enqueue({
-                type: 'response-metadata',
+                type: "response-metadata",
                ...getResponseMetadata(value),
-              });
+              })
            }
            if (value.usage != null) {
@@ -437,46 +402,38 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
                total_tokens,
                prompt_tokens_details,
                completion_tokens_details,
-              } = value.usage;
+              } = value.usage
-              usage.promptTokens = prompt_tokens ?? undefined;
-              usage.completionTokens = completion_tokens ?? undefined;
-              usage.totalTokens = total_tokens ?? undefined;
+              usage.promptTokens = prompt_tokens ?? undefined
+              usage.completionTokens = completion_tokens ?? undefined
+              usage.totalTokens = total_tokens ?? undefined
              if (completion_tokens_details?.reasoning_tokens != null) {
-                usage.completionTokensDetails.reasoningTokens =
-                  completion_tokens_details?.reasoning_tokens;
+                usage.completionTokensDetails.reasoningTokens = completion_tokens_details?.reasoning_tokens
              }
-              if (
-                completion_tokens_details?.accepted_prediction_tokens != null
-              ) {
+              if (completion_tokens_details?.accepted_prediction_tokens != null) {
                usage.completionTokensDetails.acceptedPredictionTokens =
-                  completion_tokens_details?.accepted_prediction_tokens;
+                  completion_tokens_details?.accepted_prediction_tokens
              }
-              if (
-                completion_tokens_details?.rejected_prediction_tokens != null
-              ) {
+              if (completion_tokens_details?.rejected_prediction_tokens != null) {
                usage.completionTokensDetails.rejectedPredictionTokens =
-                  completion_tokens_details?.rejected_prediction_tokens;
+                  completion_tokens_details?.rejected_prediction_tokens
              }
              if (prompt_tokens_details?.cached_tokens != null) {
-                usage.promptTokensDetails.cachedTokens =
-                  prompt_tokens_details?.cached_tokens;
+                usage.promptTokensDetails.cachedTokens = prompt_tokens_details?.cached_tokens
              }
            }
-            const choice = value.choices[0];
+            const choice = value.choices[0]
            if (choice?.finish_reason != null) {
-              finishReason = mapOpenAICompatibleFinishReason(
-                choice.finish_reason,
-              );
+              finishReason = mapOpenAICompatibleFinishReason(choice.finish_reason)
            }
            if (choice?.delta == null) {
-              return;
+              return
            }
-            const delta = choice.delta;
+            const delta = choice.delta
            // Capture reasoning_opaque for Copilot multi-turn reasoning
            if (delta.reasoning_opaque) {
@@ -484,28 +441,28 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
                throw new InvalidResponseDataError({
                  data: delta,
                  message:
-                    'Multiple reasoning_opaque values received in a single response. Only one thinking part per response is supported.',
-                });
+                    "Multiple reasoning_opaque values received in a single response. Only one thinking part per response is supported.",
+                })
              }
-              reasoningOpaque = delta.reasoning_opaque;
+              reasoningOpaque = delta.reasoning_opaque
            }
            // enqueue reasoning before text deltas (Copilot uses reasoning_text):
-            const reasoningContent = delta.reasoning_text;
+            const reasoningContent = delta.reasoning_text
            if (reasoningContent) {
              if (!isActiveReasoning) {
                controller.enqueue({
-                  type: 'reasoning-start',
-                  id: 'reasoning-0',
-                });
-                isActiveReasoning = true;
+                  type: "reasoning-start",
+                  id: "reasoning-0",
+                })
+                isActiveReasoning = true
              }
              controller.enqueue({
-                type: 'reasoning-delta',
-                id: 'reasoning-0',
+                type: "reasoning-delta",
+                id: "reasoning-0",
                delta: reasoningContent,
-              });
+              })
            }
            if (delta.content) {
@@ -513,25 +470,23 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
// This handles the case where reasoning_opaque and content come in the same chunk
if (isActiveReasoning && !isActiveText) {
controller.enqueue({
type: 'reasoning-end',
id: 'reasoning-0',
providerMetadata: reasoningOpaque
? { copilot: { reasoningOpaque } }
: undefined,
});
isActiveReasoning = false;
type: "reasoning-end",
id: "reasoning-0",
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
})
isActiveReasoning = false
}
if (!isActiveText) {
controller.enqueue({ type: 'text-start', id: 'txt-0' });
isActiveText = true;
controller.enqueue({ type: "text-start", id: "txt-0" })
isActiveText = true
}
controller.enqueue({
type: 'text-delta',
id: 'txt-0',
type: "text-delta",
id: "txt-0",
delta: delta.content,
});
})
}
if (delta.tool_calls != null) {
@@ -539,102 +494,96 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
              // This handles the case where reasoning goes directly to tool calls with no content
              if (isActiveReasoning) {
                controller.enqueue({
-                  type: 'reasoning-end',
-                  id: 'reasoning-0',
-                  providerMetadata: reasoningOpaque
-                    ? { copilot: { reasoningOpaque } }
-                    : undefined,
-                });
-                isActiveReasoning = false;
+                  type: "reasoning-end",
+                  id: "reasoning-0",
+                  providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
+                })
+                isActiveReasoning = false
              }
              for (const toolCallDelta of delta.tool_calls) {
-                const index = toolCallDelta.index;
+                const index = toolCallDelta.index
                if (toolCalls[index] == null) {
                  if (toolCallDelta.id == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'id' to be a string.`,
-                    });
+                    })
                  }
                  if (toolCallDelta.function?.name == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function.name' to be a string.`,
-                    });
+                    })
                  }
                  controller.enqueue({
-                    type: 'tool-input-start',
+                    type: "tool-input-start",
                    id: toolCallDelta.id,
                    toolName: toolCallDelta.function.name,
-                  });
+                  })
                  toolCalls[index] = {
                    id: toolCallDelta.id,
-                    type: 'function',
+                    type: "function",
                    function: {
                      name: toolCallDelta.function.name,
-                      arguments: toolCallDelta.function.arguments ?? '',
+                      arguments: toolCallDelta.function.arguments ?? "",
                    },
                    hasFinished: false,
-                  };
+                  }
-                  const toolCall = toolCalls[index];
+                  const toolCall = toolCalls[index]
-                  if (
-                    toolCall.function?.name != null &&
-                    toolCall.function?.arguments != null
-                  ) {
+                  if (toolCall.function?.name != null && toolCall.function?.arguments != null) {
                    // send delta if the argument text has already started:
                    if (toolCall.function.arguments.length > 0) {
                      controller.enqueue({
-                        type: 'tool-input-delta',
+                        type: "tool-input-delta",
                        id: toolCall.id,
                        delta: toolCall.function.arguments,
-                      });
+                      })
                    }
                    // check if tool call is complete
                    // (some providers send the full tool call in one chunk):
                    if (isParsableJson(toolCall.function.arguments)) {
                      controller.enqueue({
-                        type: 'tool-input-end',
+                        type: "tool-input-end",
                        id: toolCall.id,
-                      });
+                      })
                      controller.enqueue({
-                        type: 'tool-call',
+                        type: "tool-call",
                        toolCallId: toolCall.id ?? generateId(),
                        toolName: toolCall.function.name,
                        input: toolCall.function.arguments,
-                      });
-                      toolCall.hasFinished = true;
+                      })
+                      toolCall.hasFinished = true
                    }
                  }
-                  continue;
+                  continue
                }
                // existing tool call, merge if not finished
-                const toolCall = toolCalls[index];
+                const toolCall = toolCalls[index]
                if (toolCall.hasFinished) {
-                  continue;
+                  continue
                }
                if (toolCallDelta.function?.arguments != null) {
-                  toolCall.function!.arguments +=
-                    toolCallDelta.function?.arguments ?? '';
+                  toolCall.function!.arguments += toolCallDelta.function?.arguments ?? ""
                }
                // send delta
                controller.enqueue({
-                  type: 'tool-input-delta',
+                  type: "tool-input-delta",
                  id: toolCall.id,
-                  delta: toolCallDelta.function.arguments ?? '',
-                });
+                  delta: toolCallDelta.function.arguments ?? "",
+                })
                // check if tool call is complete
                if (
@@ -643,17 +592,17 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
                  isParsableJson(toolCall.function.arguments)
                ) {
                  controller.enqueue({
-                    type: 'tool-input-end',
+                    type: "tool-input-end",
                    id: toolCall.id,
-                  });
+                  })
                  controller.enqueue({
-                    type: 'tool-call',
+                    type: "tool-call",
                    toolCallId: toolCall.id ?? generateId(),
                    toolName: toolCall.function.name,
                    input: toolCall.function.arguments,
-                  });
-                  toolCall.hasFinished = true;
+                  })
+                  toolCall.hasFinished = true
                }
              }
            }
@@ -662,77 +611,65 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
          flush(controller) {
            if (isActiveReasoning) {
              controller.enqueue({
-                type: 'reasoning-end',
-                id: 'reasoning-0',
+                type: "reasoning-end",
+                id: "reasoning-0",
                // Include reasoning_opaque for Copilot multi-turn reasoning
-                providerMetadata: reasoningOpaque
-                  ? { copilot: { reasoningOpaque } }
-                  : undefined,
-              });
+                providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
+              })
            }
            if (isActiveText) {
-              controller.enqueue({ type: 'text-end', id: 'txt-0' });
+              controller.enqueue({ type: "text-end", id: "txt-0" })
            }
            // go through all tool calls and send the ones that are not finished
-            for (const toolCall of toolCalls.filter(
-              toolCall => !toolCall.hasFinished,
-            )) {
+            for (const toolCall of toolCalls.filter((toolCall) => !toolCall.hasFinished)) {
              controller.enqueue({
-                type: 'tool-input-end',
+                type: "tool-input-end",
                id: toolCall.id,
-              });
+              })
              controller.enqueue({
-                type: 'tool-call',
+                type: "tool-call",
                toolCallId: toolCall.id ?? generateId(),
                toolName: toolCall.function.name,
                input: toolCall.function.arguments,
-              });
+              })
            }
            const providerMetadata: SharedV2ProviderMetadata = {
              [providerOptionsName]: {},
              // Include reasoning_opaque for Copilot multi-turn reasoning
-              ...(reasoningOpaque
-                ? { copilot: { reasoningOpaque } }
-                : {}),
+              ...(reasoningOpaque ? { copilot: { reasoningOpaque } } : {}),
              ...metadataExtractor?.buildMetadata(),
-            };
-            if (
-              usage.completionTokensDetails.acceptedPredictionTokens != null
-            ) {
-              providerMetadata[providerOptionsName].acceptedPredictionTokens =
-                usage.completionTokensDetails.acceptedPredictionTokens;
            }
-            if (
-              usage.completionTokensDetails.rejectedPredictionTokens != null
-            ) {
+            if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
+              providerMetadata[providerOptionsName].acceptedPredictionTokens =
+                usage.completionTokensDetails.acceptedPredictionTokens
+            }
+            if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
              providerMetadata[providerOptionsName].rejectedPredictionTokens =
-                usage.completionTokensDetails.rejectedPredictionTokens;
+                usage.completionTokensDetails.rejectedPredictionTokens
            }
            controller.enqueue({
-              type: 'finish',
+              type: "finish",
              finishReason,
              usage: {
                inputTokens: usage.promptTokens ?? undefined,
                outputTokens: usage.completionTokens ?? undefined,
                totalTokens: usage.totalTokens ?? undefined,
-                reasoningTokens:
-                  usage.completionTokensDetails.reasoningTokens ?? undefined,
-                cachedInputTokens:
-                  usage.promptTokensDetails.cachedTokens ?? undefined,
+                reasoningTokens: usage.completionTokensDetails.reasoningTokens ?? undefined,
+                cachedInputTokens: usage.promptTokensDetails.cachedTokens ?? undefined,
              },
              providerMetadata,
-            });
+            })
          },
        }),
      ),
      request: { body },
      response: { headers: responseHeaders },
-    };
+    }
  }
 }
@@ -754,7 +691,7 @@ const openaiCompatibleTokenUsageSchema = z
       })
       .nullish(),
   })
-  .nullish();
+  .nullish()
 // limited version of the schema, focussed on what is needed for the implementation
 // this approach limits breakages when the API changes and increases efficiency
@@ -765,7 +702,7 @@ const OpenAICompatibleChatResponseSchema = z.object({
   choices: z.array(
     z.object({
       message: z.object({
-        role: z.literal('assistant').nullish(),
+        role: z.literal("assistant").nullish(),
         content: z.string().nullish(),
         // Copilot-specific reasoning fields
         reasoning_text: z.string().nullish(),
@@ -786,15 +723,11 @@ const OpenAICompatibleChatResponseSchema = z.object({
     }),
   ),
   usage: openaiCompatibleTokenUsageSchema,
-});
+})
 // limited version of the schema, focussed on what is needed for the implementation
 // this approach limits breakages when the API changes and increases efficiency
-const createOpenAICompatibleChatChunkSchema = <
-  ERROR_SCHEMA extends z.core.$ZodType,
->(
-  errorSchema: ERROR_SCHEMA,
-) =>
+const createOpenAICompatibleChatChunkSchema = <ERROR_SCHEMA extends z.core.$ZodType>(errorSchema: ERROR_SCHEMA) =>
   z.union([
     z.object({
       id: z.string().nullish(),
@@ -804,7 +737,7 @@ const createOpenAICompatibleChatChunkSchema = <
       z.object({
         delta: z
           .object({
-            role: z.enum(['assistant']).nullish(),
+            role: z.enum(["assistant"]).nullish(),
             content: z.string().nullish(),
             // Copilot-specific reasoning fields
             reasoning_text: z.string().nullish(),
@@ -829,4 +762,4 @@ const createOpenAICompatibleChatChunkSchema = <
       usage: openaiCompatibleTokenUsageSchema,
     }),
     errorSchema,
-  ]);
+  ])
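
Note: wiring the model up only takes the config object defined at the top of this file; the provider string also becomes the provider-metadata key via providerOptionsName. A sketch with placeholder endpoint and auth values (both hypothetical, not taken from this repo):

const model = new OpenAICompatibleChatLanguageModel("gpt-4o", {
  provider: "copilot",
  headers: () => ({ Authorization: `Bearer ${process.env.COPILOT_TOKEN}` }), // hypothetical env var
  url: ({ path }) => `https://copilot.example.invalid/v1${path}`, // placeholder base URL
  includeUsage: true,
})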

View File

@@ -1,6 +1,6 @@
-import { z } from 'zod/v4';
+import { z } from "zod/v4"
-export type OpenAICompatibleChatModelId = string;
+export type OpenAICompatibleChatModelId = string
 export const openaiCompatibleProviderOptions = z.object({
   /**
@@ -23,8 +23,6 @@ export const openaiCompatibleProviderOptions = z.object({
    * Copilot thinking_budget used for Anthropic models.
    */
   thinking_budget: z.number().optional(),
-});
+})
-export type OpenAICompatibleProviderOptions = z.infer<
-  typeof openaiCompatibleProviderOptions
->;
+export type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>
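
Note: options under the provider key that match this schema (such as thinking_budget) are validated in getArgs; any other keys under the same name are forwarded verbatim into the request body by the Object.fromEntries filter there. A hypothetical call-site shape:

const providerOptions = {
  copilot: {
    thinking_budget: 4096, // validated by openaiCompatibleProviderOptions
    custom_flag: true, // hypothetical: not in the schema, passed through to the request body as-is
  },
}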

View File

@@ -1,4 +1,4 @@
-import type { SharedV2ProviderMetadata } from '@ai-sdk/provider';
+import type { SharedV2ProviderMetadata } from "@ai-sdk/provider"
 /**
 Extracts provider-specific metadata from API responses.
@@ -14,11 +14,7 @@ export type MetadataExtractor = {
   * @returns Provider-specific metadata or undefined if no metadata is available.
   * The metadata should be under a key indicating the provider id.
   */
-  extractMetadata: ({
-    parsedBody,
-  }: {
-    parsedBody: unknown;
-  }) => Promise<SharedV2ProviderMetadata | undefined>;
+  extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV2ProviderMetadata | undefined>
  /**
   * Creates an extractor for handling streaming responses. The returned object provides
@@ -34,7 +30,7 @@ export type MetadataExtractor = {
     *
     * @param parsedChunk - The parsed JSON response chunk from the provider's API
     */
-    processChunk(parsedChunk: unknown): void;
+    processChunk(parsedChunk: unknown): void
    /**
     * Builds the metadata object after all chunks have been processed.
@@ -43,6 +39,6 @@ export type MetadataExtractor = {
     * @returns Provider-specific metadata or undefined if no metadata is available.
     * The metadata should be under a key indicating the provider id.
     */
-    buildMetadata(): SharedV2ProviderMetadata | undefined;
-  };
-};
+    buildMetadata(): SharedV2ProviderMetadata | undefined
+  }
+}

View File

@@ -2,91 +2,86 @@ import {
   type LanguageModelV2CallOptions,
   type LanguageModelV2CallWarning,
   UnsupportedFunctionalityError,
-} from '@ai-sdk/provider';
+} from "@ai-sdk/provider"
 export function prepareTools({
   tools,
   toolChoice,
 }: {
-  tools: LanguageModelV2CallOptions['tools'];
-  toolChoice?: LanguageModelV2CallOptions['toolChoice'];
+  tools: LanguageModelV2CallOptions["tools"]
+  toolChoice?: LanguageModelV2CallOptions["toolChoice"]
 }): {
   tools:
     | undefined
     | Array<{
-        type: 'function';
+        type: "function"
         function: {
-          name: string;
-          description: string | undefined;
-          parameters: unknown;
-        };
-      }>;
-  toolChoice:
-    | { type: 'function'; function: { name: string } }
-    | 'auto'
-    | 'none'
-    | 'required'
-    | undefined;
-  toolWarnings: LanguageModelV2CallWarning[];
+          name: string
+          description: string | undefined
+          parameters: unknown
+        }
+      }>
+  toolChoice: { type: "function"; function: { name: string } } | "auto" | "none" | "required" | undefined
+  toolWarnings: LanguageModelV2CallWarning[]
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
-  tools = tools?.length ? tools : undefined;
+  tools = tools?.length ? tools : undefined
-  const toolWarnings: LanguageModelV2CallWarning[] = [];
+  const toolWarnings: LanguageModelV2CallWarning[] = []
   if (tools == null) {
-    return { tools: undefined, toolChoice: undefined, toolWarnings };
+    return { tools: undefined, toolChoice: undefined, toolWarnings }
   }
   const openaiCompatTools: Array<{
-    type: 'function';
+    type: "function"
     function: {
-      name: string;
-      description: string | undefined;
-      parameters: unknown;
-    };
-  }> = [];
+      name: string
+      description: string | undefined
+      parameters: unknown
+    }
+  }> = []
   for (const tool of tools) {
-    if (tool.type === 'provider-defined') {
-      toolWarnings.push({ type: 'unsupported-tool', tool });
+    if (tool.type === "provider-defined") {
+      toolWarnings.push({ type: "unsupported-tool", tool })
     } else {
       openaiCompatTools.push({
-        type: 'function',
+        type: "function",
         function: {
           name: tool.name,
           description: tool.description,
           parameters: tool.inputSchema,
         },
-      });
+      })
     }
   }
   if (toolChoice == null) {
-    return { tools: openaiCompatTools, toolChoice: undefined, toolWarnings };
+    return { tools: openaiCompatTools, toolChoice: undefined, toolWarnings }
   }
-  const type = toolChoice.type;
+  const type = toolChoice.type
   switch (type) {
-    case 'auto':
-    case 'none':
-    case 'required':
-      return { tools: openaiCompatTools, toolChoice: type, toolWarnings };
-    case 'tool':
+    case "auto":
+    case "none":
+    case "required":
+      return { tools: openaiCompatTools, toolChoice: type, toolWarnings }
+    case "tool":
       return {
         tools: openaiCompatTools,
         toolChoice: {
-          type: 'function',
+          type: "function",
           function: { name: toolChoice.toolName },
         },
         toolWarnings,
-      };
+      }
     default: {
-      const _exhaustiveCheck: never = type;
+      const _exhaustiveCheck: never = type
       throw new UnsupportedFunctionalityError({
         functionality: `tool choice type: ${_exhaustiveCheck}`,
-      });
+      })
     }
   }
 }
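
Note: prepareTools converts AI SDK function tools to the OpenAI wire shape and maps toolChoice, downgrading provider-defined tools to a warning instead of sending them. A usage sketch (the tool definition is hypothetical):

const { tools, toolChoice, toolWarnings } = prepareTools({
  tools: [
    {
      type: "function",
      name: "read_file",
      description: "Read a file from disk",
      inputSchema: { type: "object", properties: { path: { type: "string" } } },
    },
  ],
  toolChoice: { type: "tool", toolName: "read_file" },
})
// toolChoice => { type: "function", function: { name: "read_file" } }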

View File

@@ -1,4 +1,4 @@
-import { z, type ZodType } from 'zod/v4';
+import { z, type ZodType } from "zod/v4"
 export const openaiCompatibleErrorDataSchema = z.object({
   error: z.object({
@@ -11,20 +11,17 @@ export const openaiCompatibleErrorDataSchema = z.object({
     param: z.any().nullish(),
     code: z.union([z.string(), z.number()]).nullish(),
   }),
-});
+})
-export type OpenAICompatibleErrorData = z.infer<
-  typeof openaiCompatibleErrorDataSchema
->;
+export type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>
 export type ProviderErrorStructure<T> = {
-  errorSchema: ZodType<T>;
-  errorToMessage: (error: T) => string;
-  isRetryable?: (response: Response, error?: T) => boolean;
-};
+  errorSchema: ZodType<T>
+  errorToMessage: (error: T) => string
+  isRetryable?: (response: Response, error?: T) => boolean
+}
-export const defaultOpenAICompatibleErrorStructure: ProviderErrorStructure<OpenAICompatibleErrorData> =
-  {
-    errorSchema: openaiCompatibleErrorDataSchema,
-    errorToMessage: data => data.error.message,
-  };
+export const defaultOpenAICompatibleErrorStructure: ProviderErrorStructure<OpenAICompatibleErrorData> = {
+  errorSchema: openaiCompatibleErrorDataSchema,
+  errorToMessage: (data) => data.error.message,
+}
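
Note: providers with non-standard error payloads can swap in their own ProviderErrorStructure; isRetryable is optional. A hypothetical variant that reuses the default schema but adds retry logic:

const retryingErrorStructure: ProviderErrorStructure<OpenAICompatibleErrorData> = {
  errorSchema: openaiCompatibleErrorDataSchema,
  errorToMessage: (data) => `upstream error: ${data.error.message}`,
  isRetryable: (response) => response.status === 429 || response.status >= 500,
}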

View File

@@ -464,7 +464,7 @@ describe("full conversation", () => {
     expect(result).toHaveLength(4)
-    const systemMsg = result[0];
+    const systemMsg = result[0]
     expect(systemMsg.role).toBe("system")
     // Assistant message should have reasoning fields

View File

@@ -355,7 +355,9 @@ describe("doStream", () => {
     // Check text content
     const textDeltas = parts.filter((p) => p.type === "text-delta")
     expect(textDeltas).toHaveLength(1)
-    expect((textDeltas[0] as { delta: string }).delta).toContain("Okay, I need to check out the project's file structure.")
+    expect((textDeltas[0] as { delta: string }).delta).toContain(
+      "Okay, I need to check out the project's file structure.",
+    )
     // Check tool call
     const toolParts = parts.filter(

View File

@@ -1108,7 +1108,7 @@ describe("ProviderTransform.message - providerOptions key remapping", () => {
role: "user",
content: "Hello",
providerOptions: {
"copilot": { someOption: "value" },
copilot: { someOption: "value" },
},
},
] as any[]