Mirror of https://github.com/anomalyco/opencode.git (synced 2026-02-01 22:48:16 +00:00)
feat(opencode): add copilot specific provider to properly handle copilot reasoning tokens (#8900)
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com>
Co-authored-by: Aiden Cline <aidenpcline@gmail.com>
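Context for the change: Copilot's chat completions API returns reasoning as a `reasoning_text` field plus an opaque signature (`reasoning_opaque`) that must be echoed back on the next turn for multi-turn reasoning to work, and the vendored provider below adds fields for both that the stock @ai-sdk/openai-compatible schema does not model. A minimal sketch of the round trip, illustrative only: the factory settings shape is assumed to mirror the openai-compatible factory, and the endpoint and token below are placeholders, not values from this commit.

    import { createOpenaiCompatible as createCopilot } from "./sdk/copilot"

    // Hypothetical wiring; settings shape assumed from the openai-compatible factory.
    const copilot = createCopilot({
      name: "copilot",
      baseURL: "https://api.githubcopilot.com", // placeholder endpoint
      headers: { Authorization: `Bearer ${process.env.COPILOT_TOKEN ?? ""}` },
    })

    // Turn 1: the stream's reasoning-end part carries
    //   providerMetadata: { copilot: { reasoningOpaque: "<signature>" } }.
    // Turn 2: replaying that part re-attaches the signature, which
    // convertToOpenAICompatibleChatMessages serializes back onto the wire:
    const history = [
      {
        role: "assistant" as const,
        content: [
          {
            type: "reasoning" as const,
            text: "Let me think...",
            providerOptions: { copilot: { reasoningOpaque: "<signature>" } },
          },
          { type: "text" as const, text: "The answer is 42." },
        ],
      },
    ]
    // serialized as: { role: "assistant", content: "The answer is 42.",
    //                  reasoning_text: "Let me think...", reasoning_opaque: "<signature>" }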
@@ -24,7 +24,7 @@ import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
 import { createOpenAI } from "@ai-sdk/openai"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
 import { createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider"
-import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/openai-compatible/src"
+import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/copilot"
 import { createXai } from "@ai-sdk/xai"
 import { createMistral } from "@ai-sdk/mistral"
 import { createGroq } from "@ai-sdk/groq"
@@ -0,0 +1,177 @@
import {
  type LanguageModelV2Prompt,
  type SharedV2ProviderMetadata,
  UnsupportedFunctionalityError,
} from '@ai-sdk/provider';
import type { OpenAICompatibleChatPrompt } from './openai-compatible-api-types';
import { convertToBase64 } from '@ai-sdk/provider-utils';

function getOpenAIMetadata(message: {
  providerOptions?: SharedV2ProviderMetadata;
}) {
  return message?.providerOptions?.copilot ?? {};
}

export function convertToOpenAICompatibleChatMessages(
  prompt: LanguageModelV2Prompt,
): OpenAICompatibleChatPrompt {
  const messages: OpenAICompatibleChatPrompt = [];
  for (const { role, content, ...message } of prompt) {
    const metadata = getOpenAIMetadata({ ...message });
    switch (role) {
      case 'system': {
        messages.push({
          role: 'system',
          content: [
            {
              type: 'text',
              text: content,
            },
          ],
          ...metadata,
        });
        break;
      }

      case 'user': {
        if (content.length === 1 && content[0].type === 'text') {
          messages.push({
            role: 'user',
            content: content[0].text,
            ...getOpenAIMetadata(content[0]),
          });
          break;
        }

        messages.push({
          role: 'user',
          content: content.map(part => {
            const partMetadata = getOpenAIMetadata(part);
            switch (part.type) {
              case 'text': {
                return { type: 'text', text: part.text, ...partMetadata };
              }
              case 'file': {
                if (part.mediaType.startsWith('image/')) {
                  const mediaType =
                    part.mediaType === 'image/*'
                      ? 'image/jpeg'
                      : part.mediaType;

                  return {
                    type: 'image_url',
                    image_url: {
                      url:
                        part.data instanceof URL
                          ? part.data.toString()
                          : `data:${mediaType};base64,${convertToBase64(part.data)}`,
                    },
                    ...partMetadata,
                  };
                } else {
                  throw new UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`,
                  });
                }
              }
            }
          }),
          ...metadata,
        });

        break;
      }

      case 'assistant': {
        let text = '';
        let reasoningText: string | undefined;
        let reasoningOpaque: string | undefined;
        const toolCalls: Array<{
          id: string;
          type: 'function';
          function: { name: string; arguments: string };
        }> = [];

        for (const part of content) {
          const partMetadata = getOpenAIMetadata(part);
          // Check for reasoningOpaque on any part (may be attached to text/tool-call)
          const partOpaque = (
            part.providerOptions as { copilot?: { reasoningOpaque?: string } }
          )?.copilot?.reasoningOpaque;
          if (partOpaque && !reasoningOpaque) {
            reasoningOpaque = partOpaque;
          }

          switch (part.type) {
            case 'text': {
              text += part.text;
              break;
            }
            case 'reasoning': {
              reasoningText = part.text;
              break;
            }
            case 'tool-call': {
              toolCalls.push({
                id: part.toolCallId,
                type: 'function',
                function: {
                  name: part.toolName,
                  arguments: JSON.stringify(part.input),
                },
                ...partMetadata,
              });
              break;
            }
          }
        }

        messages.push({
          role: 'assistant',
          content: text || null,
          tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
          reasoning_text: reasoningText,
          reasoning_opaque: reasoningOpaque,
          ...metadata,
        });

        break;
      }

      case 'tool': {
        for (const toolResponse of content) {
          const output = toolResponse.output;

          let contentValue: string;
          switch (output.type) {
            case 'text':
            case 'error-text':
              contentValue = output.value;
              break;
            case 'content':
            case 'json':
            case 'error-json':
              contentValue = JSON.stringify(output.value);
              break;
          }

          const toolResponseMetadata = getOpenAIMetadata(toolResponse);
          messages.push({
            role: 'tool',
            tool_call_id: toolResponse.toolCallId,
            content: contentValue,
            ...toolResponseMetadata,
          });
        }
        break;
      }

      default: {
        const _exhaustiveCheck: never = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }

  return messages;
}
@@ -0,0 +1,15 @@
export function getResponseMetadata({
  id,
  model,
  created,
}: {
  id?: string | undefined | null;
  created?: number | undefined | null;
  model?: string | undefined | null;
}) {
  return {
    id: id ?? undefined,
    modelId: model ?? undefined,
    timestamp: created != null ? new Date(created * 1000) : undefined,
  };
}
@@ -0,0 +1,19 @@
import type { LanguageModelV2FinishReason } from '@ai-sdk/provider';

export function mapOpenAICompatibleFinishReason(
  finishReason: string | null | undefined,
): LanguageModelV2FinishReason {
  switch (finishReason) {
    case 'stop':
      return 'stop';
    case 'length':
      return 'length';
    case 'content_filter':
      return 'content-filter';
    case 'function_call':
    case 'tool_calls':
      return 'tool-calls';
    default:
      return 'unknown';
  }
}
@@ -0,0 +1,74 @@
import type { JSONValue } from '@ai-sdk/provider';

export type OpenAICompatibleChatPrompt = Array<OpenAICompatibleMessage>;

export type OpenAICompatibleMessage =
  | OpenAICompatibleSystemMessage
  | OpenAICompatibleUserMessage
  | OpenAICompatibleAssistantMessage
  | OpenAICompatibleToolMessage;

// Allow for arbitrary additional properties for general purpose
// provider-metadata-specific extensibility.
type JsonRecord<T = never> = Record<
  string,
  JSONValue | JSONValue[] | T | T[] | undefined
>;

export interface OpenAICompatibleSystemMessage
  extends JsonRecord<OpenAICompatibleSystemContentPart> {
  role: 'system';
  content: string | Array<OpenAICompatibleSystemContentPart>;
}

export interface OpenAICompatibleSystemContentPart extends JsonRecord {
  type: 'text';
  text: string;
}

export interface OpenAICompatibleUserMessage
  extends JsonRecord<OpenAICompatibleContentPart> {
  role: 'user';
  content: string | Array<OpenAICompatibleContentPart>;
}

export type OpenAICompatibleContentPart =
  | OpenAICompatibleContentPartText
  | OpenAICompatibleContentPartImage;

export interface OpenAICompatibleContentPartImage extends JsonRecord {
  type: 'image_url';
  image_url: { url: string };
}

export interface OpenAICompatibleContentPartText extends JsonRecord {
  type: 'text';
  text: string;
}

export interface OpenAICompatibleAssistantMessage
  extends JsonRecord<OpenAICompatibleMessageToolCall> {
  role: 'assistant';
  content?: string | null;
  tool_calls?: Array<OpenAICompatibleMessageToolCall>;
  // Copilot-specific reasoning fields
  reasoning_text?: string;
  reasoning_opaque?: string;
}

export interface OpenAICompatibleMessageToolCall extends JsonRecord {
  type: 'function';
  id: string;
  function: {
    arguments: string;
    name: string;
  };
}

export interface OpenAICompatibleToolMessage extends JsonRecord {
  role: 'tool';
  content: string;
  tool_call_id: string;
}
@@ -0,0 +1,832 @@
import {
  APICallError,
  InvalidResponseDataError,
  type LanguageModelV2,
  type LanguageModelV2CallWarning,
  type LanguageModelV2Content,
  type LanguageModelV2FinishReason,
  type LanguageModelV2StreamPart,
  type SharedV2ProviderMetadata,
} from '@ai-sdk/provider';
import {
  combineHeaders,
  createEventSourceResponseHandler,
  createJsonErrorResponseHandler,
  createJsonResponseHandler,
  type FetchFunction,
  generateId,
  isParsableJson,
  parseProviderOptions,
  type ParseResult,
  postJsonToApi,
  type ResponseHandler,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';
import { convertToOpenAICompatibleChatMessages } from './convert-to-openai-compatible-chat-messages';
import { getResponseMetadata } from './get-response-metadata';
import { mapOpenAICompatibleFinishReason } from './map-openai-compatible-finish-reason';
import {
  type OpenAICompatibleChatModelId,
  openaiCompatibleProviderOptions,
} from './openai-compatible-chat-options';
import {
  defaultOpenAICompatibleErrorStructure,
  type ProviderErrorStructure,
} from '../openai-compatible-error';
import type { MetadataExtractor } from './openai-compatible-metadata-extractor';
import { prepareTools } from './openai-compatible-prepare-tools';

export type OpenAICompatibleChatConfig = {
  provider: string;
  headers: () => Record<string, string | undefined>;
  url: (options: { modelId: string; path: string }) => string;
  fetch?: FetchFunction;
  includeUsage?: boolean;
  errorStructure?: ProviderErrorStructure<any>;
  metadataExtractor?: MetadataExtractor;

  /**
   * Whether the model supports structured outputs.
   */
  supportsStructuredOutputs?: boolean;

  /**
   * The supported URLs for the model.
   */
  supportedUrls?: () => LanguageModelV2['supportedUrls'];
};

export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = 'v2';

  readonly supportsStructuredOutputs: boolean;

  readonly modelId: OpenAICompatibleChatModelId;
  private readonly config: OpenAICompatibleChatConfig;
  private readonly failedResponseHandler: ResponseHandler<APICallError>;
  private readonly chunkSchema; // type inferred via constructor

  constructor(
    modelId: OpenAICompatibleChatModelId,
    config: OpenAICompatibleChatConfig,
  ) {
    this.modelId = modelId;
    this.config = config;

    // initialize error handling:
    const errorStructure =
      config.errorStructure ?? defaultOpenAICompatibleErrorStructure;
    this.chunkSchema = createOpenAICompatibleChatChunkSchema(
      errorStructure.errorSchema,
    );
    this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure);

    this.supportsStructuredOutputs = config.supportsStructuredOutputs ?? false;
  }

  get provider(): string {
    return this.config.provider;
  }

  private get providerOptionsName(): string {
    return this.config.provider.split('.')[0].trim();
  }

  get supportedUrls() {
    return this.config.supportedUrls?.() ?? {};
  }

  private async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    providerOptions,
    stopSequences,
    responseFormat,
    seed,
    toolChoice,
    tools,
  }: Parameters<LanguageModelV2['doGenerate']>[0]) {
    const warnings: LanguageModelV2CallWarning[] = [];

    // Parse provider options
    const compatibleOptions = Object.assign(
      (await parseProviderOptions({
        provider: 'copilot',
        providerOptions,
        schema: openaiCompatibleProviderOptions,
      })) ?? {},
      (await parseProviderOptions({
        provider: this.providerOptionsName,
        providerOptions,
        schema: openaiCompatibleProviderOptions,
      })) ?? {},
    );

    if (topK != null) {
      warnings.push({ type: 'unsupported-setting', setting: 'topK' });
    }

    if (
      responseFormat?.type === 'json' &&
      responseFormat.schema != null &&
      !this.supportsStructuredOutputs
    ) {
      warnings.push({
        type: 'unsupported-setting',
        setting: 'responseFormat',
        details:
          'JSON response format schema is only supported with structuredOutputs',
      });
    }

    const {
      tools: openaiTools,
      toolChoice: openaiToolChoice,
      toolWarnings,
    } = prepareTools({
      tools,
      toolChoice,
    });

    return {
      args: {
        // model id:
        model: this.modelId,

        // model specific settings:
        user: compatibleOptions.user,

        // standardized settings:
        max_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        frequency_penalty: frequencyPenalty,
        presence_penalty: presencePenalty,
        response_format:
          responseFormat?.type === 'json'
            ? this.supportsStructuredOutputs === true &&
              responseFormat.schema != null
              ? {
                  type: 'json_schema',
                  json_schema: {
                    schema: responseFormat.schema,
                    name: responseFormat.name ?? 'response',
                    description: responseFormat.description,
                  },
                }
              : { type: 'json_object' }
            : undefined,

        stop: stopSequences,
        seed,
        ...Object.fromEntries(
          Object.entries(
            providerOptions?.[this.providerOptionsName] ?? {},
          ).filter(
            ([key]) =>
              !Object.keys(openaiCompatibleProviderOptions.shape).includes(key),
          ),
        ),

        reasoning_effort: compatibleOptions.reasoningEffort,
        verbosity: compatibleOptions.textVerbosity,

        // messages:
        messages: convertToOpenAICompatibleChatMessages(prompt),

        // tools:
        tools: openaiTools,
        tool_choice: openaiToolChoice,

        // thinking_budget
        thinking_budget: compatibleOptions.thinking_budget,
      },
      warnings: [...warnings, ...toolWarnings],
    };
  }

  async doGenerate(
    options: Parameters<LanguageModelV2['doGenerate']>[0],
  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
    const { args, warnings } = await this.getArgs({ ...options });

    const body = JSON.stringify(args);

    const {
      responseHeaders,
      value: responseBody,
      rawValue: rawResponse,
    } = await postJsonToApi({
      url: this.config.url({
        path: '/chat/completions',
        modelId: this.modelId,
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: this.failedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        OpenAICompatibleChatResponseSchema,
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    const choice = responseBody.choices[0];
    const content: Array<LanguageModelV2Content> = [];

    // text content:
    const text = choice.message.content;
    if (text != null && text.length > 0) {
      content.push({ type: 'text', text });
    }

    // reasoning content (Copilot uses reasoning_text):
    const reasoning = choice.message.reasoning_text;
    if (reasoning != null && reasoning.length > 0) {
      content.push({
        type: 'reasoning',
        text: reasoning,
        // Include reasoning_opaque for Copilot multi-turn reasoning
        providerMetadata: choice.message.reasoning_opaque
          ? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
          : undefined,
      });
    }

    // tool calls:
    if (choice.message.tool_calls != null) {
      for (const toolCall of choice.message.tool_calls) {
        content.push({
          type: 'tool-call',
          toolCallId: toolCall.id ?? generateId(),
          toolName: toolCall.function.name,
          input: toolCall.function.arguments!,
        });
      }
    }

    // provider metadata:
    const providerMetadata: SharedV2ProviderMetadata = {
      [this.providerOptionsName]: {},
      ...(await this.config.metadataExtractor?.extractMetadata?.({
        parsedBody: rawResponse,
      })),
    };
    const completionTokenDetails =
      responseBody.usage?.completion_tokens_details;
    if (completionTokenDetails?.accepted_prediction_tokens != null) {
      providerMetadata[this.providerOptionsName].acceptedPredictionTokens =
        completionTokenDetails?.accepted_prediction_tokens;
    }
    if (completionTokenDetails?.rejected_prediction_tokens != null) {
      providerMetadata[this.providerOptionsName].rejectedPredictionTokens =
        completionTokenDetails?.rejected_prediction_tokens;
    }

    return {
      content,
      finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
      usage: {
        inputTokens: responseBody.usage?.prompt_tokens ?? undefined,
        outputTokens: responseBody.usage?.completion_tokens ?? undefined,
        totalTokens: responseBody.usage?.total_tokens ?? undefined,
        reasoningTokens:
          responseBody.usage?.completion_tokens_details?.reasoning_tokens ??
          undefined,
        cachedInputTokens:
          responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
      },
      providerMetadata,
      request: { body },
      response: {
        ...getResponseMetadata(responseBody),
        headers: responseHeaders,
        body: rawResponse,
      },
      warnings,
    };
  }

  async doStream(
    options: Parameters<LanguageModelV2['doStream']>[0],
  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
    const { args, warnings } = await this.getArgs({ ...options });

    const body = {
      ...args,
      stream: true,

      // only include stream_options when in strict compatibility mode:
      stream_options: this.config.includeUsage
        ? { include_usage: true }
        : undefined,
    };

    const metadataExtractor =
      this.config.metadataExtractor?.createStreamExtractor();

    const { responseHeaders, value: response } = await postJsonToApi({
      url: this.config.url({
        path: '/chat/completions',
        modelId: this.modelId,
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: this.failedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(
        this.chunkSchema,
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    const toolCalls: Array<{
      id: string;
      type: 'function';
      function: {
        name: string;
        arguments: string;
      };
      hasFinished: boolean;
    }> = [];

    let finishReason: LanguageModelV2FinishReason = 'unknown';
    const usage: {
      completionTokens: number | undefined;
      completionTokensDetails: {
        reasoningTokens: number | undefined;
        acceptedPredictionTokens: number | undefined;
        rejectedPredictionTokens: number | undefined;
      };
      promptTokens: number | undefined;
      promptTokensDetails: {
        cachedTokens: number | undefined;
      };
      totalTokens: number | undefined;
    } = {
      completionTokens: undefined,
      completionTokensDetails: {
        reasoningTokens: undefined,
        acceptedPredictionTokens: undefined,
        rejectedPredictionTokens: undefined,
      },
      promptTokens: undefined,
      promptTokensDetails: {
        cachedTokens: undefined,
      },
      totalTokens: undefined,
    };
    let isFirstChunk = true;
    const providerOptionsName = this.providerOptionsName;
    let isActiveReasoning = false;
    let isActiveText = false;
    let reasoningOpaque: string | undefined;

    return {
      stream: response.pipeThrough(
        new TransformStream<
          ParseResult<z.infer<typeof this.chunkSchema>>,
          LanguageModelV2StreamPart
        >({
          start(controller) {
            controller.enqueue({ type: 'stream-start', warnings });
          },

          // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
          transform(chunk, controller) {
            // Emit raw chunk if requested (before anything else)
            if (options.includeRawChunks) {
              controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
            }

            // handle failed chunk parsing / validation:
            if (!chunk.success) {
              finishReason = 'error';
              controller.enqueue({ type: 'error', error: chunk.error });
              return;
            }
            const value = chunk.value;

            metadataExtractor?.processChunk(chunk.rawValue);

            // handle error chunks:
            if ('error' in value) {
              finishReason = 'error';
              controller.enqueue({ type: 'error', error: value.error.message });
              return;
            }

            if (isFirstChunk) {
              isFirstChunk = false;

              controller.enqueue({
                type: 'response-metadata',
                ...getResponseMetadata(value),
              });
            }

            if (value.usage != null) {
              const {
                prompt_tokens,
                completion_tokens,
                total_tokens,
                prompt_tokens_details,
                completion_tokens_details,
              } = value.usage;

              usage.promptTokens = prompt_tokens ?? undefined;
              usage.completionTokens = completion_tokens ?? undefined;
              usage.totalTokens = total_tokens ?? undefined;
              if (completion_tokens_details?.reasoning_tokens != null) {
                usage.completionTokensDetails.reasoningTokens =
                  completion_tokens_details?.reasoning_tokens;
              }
              if (
                completion_tokens_details?.accepted_prediction_tokens != null
              ) {
                usage.completionTokensDetails.acceptedPredictionTokens =
                  completion_tokens_details?.accepted_prediction_tokens;
              }
              if (
                completion_tokens_details?.rejected_prediction_tokens != null
              ) {
                usage.completionTokensDetails.rejectedPredictionTokens =
                  completion_tokens_details?.rejected_prediction_tokens;
              }
              if (prompt_tokens_details?.cached_tokens != null) {
                usage.promptTokensDetails.cachedTokens =
                  prompt_tokens_details?.cached_tokens;
              }
            }

            const choice = value.choices[0];

            if (choice?.finish_reason != null) {
              finishReason = mapOpenAICompatibleFinishReason(
                choice.finish_reason,
              );
            }

            if (choice?.delta == null) {
              return;
            }

            const delta = choice.delta;

            // Capture reasoning_opaque for Copilot multi-turn reasoning
            if (delta.reasoning_opaque) {
              if (reasoningOpaque != null) {
                throw new InvalidResponseDataError({
                  data: delta,
                  message:
                    'Multiple reasoning_opaque values received in a single response. Only one thinking part per response is supported.',
                });
              }
              reasoningOpaque = delta.reasoning_opaque;
            }

            // enqueue reasoning before text deltas (Copilot uses reasoning_text):
            const reasoningContent = delta.reasoning_text;
            if (reasoningContent) {
              if (!isActiveReasoning) {
                controller.enqueue({
                  type: 'reasoning-start',
                  id: 'reasoning-0',
                });
                isActiveReasoning = true;
              }

              controller.enqueue({
                type: 'reasoning-delta',
                id: 'reasoning-0',
                delta: reasoningContent,
              });
            }

            if (delta.content) {
              // If reasoning was active and we're starting text, end reasoning first
              // This handles the case where reasoning_opaque and content come in the same chunk
              if (isActiveReasoning && !isActiveText) {
                controller.enqueue({
                  type: 'reasoning-end',
                  id: 'reasoning-0',
                  providerMetadata: reasoningOpaque
                    ? { copilot: { reasoningOpaque } }
                    : undefined,
                });
                isActiveReasoning = false;
              }

              if (!isActiveText) {
                controller.enqueue({ type: 'text-start', id: 'txt-0' });
                isActiveText = true;
              }

              controller.enqueue({
                type: 'text-delta',
                id: 'txt-0',
                delta: delta.content,
              });
            }

            if (delta.tool_calls != null) {
              // If reasoning was active and we're starting tool calls, end reasoning first
              // This handles the case where reasoning goes directly to tool calls with no content
              if (isActiveReasoning) {
                controller.enqueue({
                  type: 'reasoning-end',
                  id: 'reasoning-0',
                  providerMetadata: reasoningOpaque
                    ? { copilot: { reasoningOpaque } }
                    : undefined,
                });
                isActiveReasoning = false;
              }
              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index;

                if (toolCalls[index] == null) {
                  if (toolCallDelta.id == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'id' to be a string.`,
                    });
                  }

                  if (toolCallDelta.function?.name == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function.name' to be a string.`,
                    });
                  }

                  controller.enqueue({
                    type: 'tool-input-start',
                    id: toolCallDelta.id,
                    toolName: toolCallDelta.function.name,
                  });

                  toolCalls[index] = {
                    id: toolCallDelta.id,
                    type: 'function',
                    function: {
                      name: toolCallDelta.function.name,
                      arguments: toolCallDelta.function.arguments ?? '',
                    },
                    hasFinished: false,
                  };

                  const toolCall = toolCalls[index];

                  if (
                    toolCall.function?.name != null &&
                    toolCall.function?.arguments != null
                  ) {
                    // send delta if the argument text has already started:
                    if (toolCall.function.arguments.length > 0) {
                      controller.enqueue({
                        type: 'tool-input-delta',
                        id: toolCall.id,
                        delta: toolCall.function.arguments,
                      });
                    }

                    // check if tool call is complete
                    // (some providers send the full tool call in one chunk):
                    if (isParsableJson(toolCall.function.arguments)) {
                      controller.enqueue({
                        type: 'tool-input-end',
                        id: toolCall.id,
                      });

                      controller.enqueue({
                        type: 'tool-call',
                        toolCallId: toolCall.id ?? generateId(),
                        toolName: toolCall.function.name,
                        input: toolCall.function.arguments,
                      });
                      toolCall.hasFinished = true;
                    }
                  }

                  continue;
                }

                // existing tool call, merge if not finished
                const toolCall = toolCalls[index];

                if (toolCall.hasFinished) {
                  continue;
                }

                if (toolCallDelta.function?.arguments != null) {
                  toolCall.function!.arguments +=
                    toolCallDelta.function?.arguments ?? '';
                }

                // send delta
                controller.enqueue({
                  type: 'tool-input-delta',
                  id: toolCall.id,
                  delta: toolCallDelta.function.arguments ?? '',
                });

                // check if tool call is complete
                if (
                  toolCall.function?.name != null &&
                  toolCall.function?.arguments != null &&
                  isParsableJson(toolCall.function.arguments)
                ) {
                  controller.enqueue({
                    type: 'tool-input-end',
                    id: toolCall.id,
                  });

                  controller.enqueue({
                    type: 'tool-call',
                    toolCallId: toolCall.id ?? generateId(),
                    toolName: toolCall.function.name,
                    input: toolCall.function.arguments,
                  });
                  toolCall.hasFinished = true;
                }
              }
            }
          },

          flush(controller) {
            if (isActiveReasoning) {
              controller.enqueue({
                type: 'reasoning-end',
                id: 'reasoning-0',
                // Include reasoning_opaque for Copilot multi-turn reasoning
                providerMetadata: reasoningOpaque
                  ? { copilot: { reasoningOpaque } }
                  : undefined,
              });
            }

            if (isActiveText) {
              controller.enqueue({ type: 'text-end', id: 'txt-0' });
            }

            // go through all tool calls and send the ones that are not finished
            for (const toolCall of toolCalls.filter(
              toolCall => !toolCall.hasFinished,
            )) {
              controller.enqueue({
                type: 'tool-input-end',
                id: toolCall.id,
              });

              controller.enqueue({
                type: 'tool-call',
                toolCallId: toolCall.id ?? generateId(),
                toolName: toolCall.function.name,
                input: toolCall.function.arguments,
              });
            }

            const providerMetadata: SharedV2ProviderMetadata = {
              [providerOptionsName]: {},
              // Include reasoning_opaque for Copilot multi-turn reasoning
              ...(reasoningOpaque
                ? { copilot: { reasoningOpaque } }
                : {}),
              ...metadataExtractor?.buildMetadata(),
            };
            if (
              usage.completionTokensDetails.acceptedPredictionTokens != null
            ) {
              providerMetadata[providerOptionsName].acceptedPredictionTokens =
                usage.completionTokensDetails.acceptedPredictionTokens;
            }
            if (
              usage.completionTokensDetails.rejectedPredictionTokens != null
            ) {
              providerMetadata[providerOptionsName].rejectedPredictionTokens =
                usage.completionTokensDetails.rejectedPredictionTokens;
            }

            controller.enqueue({
              type: 'finish',
              finishReason,
              usage: {
                inputTokens: usage.promptTokens ?? undefined,
                outputTokens: usage.completionTokens ?? undefined,
                totalTokens: usage.totalTokens ?? undefined,
                reasoningTokens:
                  usage.completionTokensDetails.reasoningTokens ?? undefined,
                cachedInputTokens:
                  usage.promptTokensDetails.cachedTokens ?? undefined,
              },
              providerMetadata,
            });
          },
        }),
      ),
      request: { body },
      response: { headers: responseHeaders },
    };
  }
}

const openaiCompatibleTokenUsageSchema = z
  .object({
    prompt_tokens: z.number().nullish(),
    completion_tokens: z.number().nullish(),
    total_tokens: z.number().nullish(),
    prompt_tokens_details: z
      .object({
        cached_tokens: z.number().nullish(),
      })
      .nullish(),
    completion_tokens_details: z
      .object({
        reasoning_tokens: z.number().nullish(),
        accepted_prediction_tokens: z.number().nullish(),
        rejected_prediction_tokens: z.number().nullish(),
      })
      .nullish(),
  })
  .nullish();

// limited version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const OpenAICompatibleChatResponseSchema = z.object({
  id: z.string().nullish(),
  created: z.number().nullish(),
  model: z.string().nullish(),
  choices: z.array(
    z.object({
      message: z.object({
        role: z.literal('assistant').nullish(),
        content: z.string().nullish(),
        // Copilot-specific reasoning fields
        reasoning_text: z.string().nullish(),
        reasoning_opaque: z.string().nullish(),
        tool_calls: z
          .array(
            z.object({
              id: z.string().nullish(),
              function: z.object({
                name: z.string(),
                arguments: z.string(),
              }),
            }),
          )
          .nullish(),
      }),
      finish_reason: z.string().nullish(),
    }),
  ),
  usage: openaiCompatibleTokenUsageSchema,
});

// limited version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const createOpenAICompatibleChatChunkSchema = <
  ERROR_SCHEMA extends z.core.$ZodType,
>(
  errorSchema: ERROR_SCHEMA,
) =>
  z.union([
    z.object({
      id: z.string().nullish(),
      created: z.number().nullish(),
      model: z.string().nullish(),
      choices: z.array(
        z.object({
          delta: z
            .object({
              role: z.enum(['assistant']).nullish(),
              content: z.string().nullish(),
              // Copilot-specific reasoning fields
              reasoning_text: z.string().nullish(),
              reasoning_opaque: z.string().nullish(),
              tool_calls: z
                .array(
                  z.object({
                    index: z.number(),
                    id: z.string().nullish(),
                    function: z.object({
                      name: z.string().nullish(),
                      arguments: z.string().nullish(),
                    }),
                  }),
                )
                .nullish(),
            })
            .nullish(),
          finish_reason: z.string().nullish(),
        }),
      ),
      usage: openaiCompatibleTokenUsageSchema,
    }),
    errorSchema,
  ]);
@@ -0,0 +1,30 @@
import { z } from 'zod/v4';

export type OpenAICompatibleChatModelId = string;

export const openaiCompatibleProviderOptions = z.object({
  /**
   * A unique identifier representing your end-user, which can help the provider to
   * monitor and detect abuse.
   */
  user: z.string().optional(),

  /**
   * Reasoning effort for reasoning models. Defaults to `medium`.
   */
  reasoningEffort: z.string().optional(),

  /**
   * Controls the verbosity of the generated text. Defaults to `medium`.
   */
  textVerbosity: z.string().optional(),

  /**
   * Copilot thinking_budget used for Anthropic models.
   */
  thinking_budget: z.number().optional(),
});

export type OpenAICompatibleProviderOptions = z.infer<
  typeof openaiCompatibleProviderOptions
>;
@@ -0,0 +1,48 @@
import type { SharedV2ProviderMetadata } from '@ai-sdk/provider';

/**
Extracts provider-specific metadata from API responses.
Used to standardize metadata handling across different LLM providers while allowing
provider-specific metadata to be captured.
*/
export type MetadataExtractor = {
  /**
   * Extracts provider metadata from a complete, non-streaming response.
   *
   * @param parsedBody - The parsed response JSON body from the provider's API.
   *
   * @returns Provider-specific metadata or undefined if no metadata is available.
   *          The metadata should be under a key indicating the provider id.
   */
  extractMetadata: ({
    parsedBody,
  }: {
    parsedBody: unknown;
  }) => Promise<SharedV2ProviderMetadata | undefined>;

  /**
   * Creates an extractor for handling streaming responses. The returned object provides
   * methods to process individual chunks and build the final metadata from the accumulated
   * stream data.
   *
   * @returns An object with methods to process chunks and build metadata from a stream
   */
  createStreamExtractor: () => {
    /**
     * Process an individual chunk from the stream. Called for each chunk in the response stream
     * to accumulate metadata throughout the streaming process.
     *
     * @param parsedChunk - The parsed JSON response chunk from the provider's API
     */
    processChunk(parsedChunk: unknown): void;

    /**
     * Builds the metadata object after all chunks have been processed.
     * Called at the end of the stream to generate the complete provider metadata.
     *
     * @returns Provider-specific metadata or undefined if no metadata is available.
     *          The metadata should be under a key indicating the provider id.
     */
    buildMetadata(): SharedV2ProviderMetadata | undefined;
  };
};
@@ -0,0 +1,92 @@
import {
  type LanguageModelV2CallOptions,
  type LanguageModelV2CallWarning,
  UnsupportedFunctionalityError,
} from '@ai-sdk/provider';

export function prepareTools({
  tools,
  toolChoice,
}: {
  tools: LanguageModelV2CallOptions['tools'];
  toolChoice?: LanguageModelV2CallOptions['toolChoice'];
}): {
  tools:
    | undefined
    | Array<{
        type: 'function';
        function: {
          name: string;
          description: string | undefined;
          parameters: unknown;
        };
      }>;
  toolChoice:
    | { type: 'function'; function: { name: string } }
    | 'auto'
    | 'none'
    | 'required'
    | undefined;
  toolWarnings: LanguageModelV2CallWarning[];
} {
  // when the tools array is empty, change it to undefined to prevent errors:
  tools = tools?.length ? tools : undefined;

  const toolWarnings: LanguageModelV2CallWarning[] = [];

  if (tools == null) {
    return { tools: undefined, toolChoice: undefined, toolWarnings };
  }

  const openaiCompatTools: Array<{
    type: 'function';
    function: {
      name: string;
      description: string | undefined;
      parameters: unknown;
    };
  }> = [];

  for (const tool of tools) {
    if (tool.type === 'provider-defined') {
      toolWarnings.push({ type: 'unsupported-tool', tool });
    } else {
      openaiCompatTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema,
        },
      });
    }
  }

  if (toolChoice == null) {
    return { tools: openaiCompatTools, toolChoice: undefined, toolWarnings };
  }

  const type = toolChoice.type;

  switch (type) {
    case 'auto':
    case 'none':
    case 'required':
      return { tools: openaiCompatTools, toolChoice: type, toolWarnings };
    case 'tool':
      return {
        tools: openaiCompatTools,
        toolChoice: {
          type: 'function',
          function: { name: toolChoice.toolName },
        },
        toolWarnings,
      };
    default: {
      const _exhaustiveCheck: never = type;
      throw new UnsupportedFunctionalityError({
        functionality: `tool choice type: ${_exhaustiveCheck}`,
      });
    }
  }
}
@@ -1,6 +1,6 @@
 import type { LanguageModelV2 } from "@ai-sdk/provider"
-import { OpenAICompatibleChatLanguageModel } from "@ai-sdk/openai-compatible"
 import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils"
+import { OpenAICompatibleChatLanguageModel } from "./chat/openai-compatible-chat-language-model"
 import { OpenAIResponsesLanguageModel } from "./responses/openai-responses-language-model"

 // Import the version or define it
packages/opencode/src/provider/sdk/copilot/index.ts (new file, +2)
@@ -0,0 +1,2 @@
export { createOpenaiCompatible, openaiCompatible } from "./copilot-provider"
export type { OpenaiCompatibleProvider, OpenaiCompatibleProviderSettings } from "./copilot-provider"
@@ -0,0 +1,30 @@
import { z, type ZodType } from 'zod/v4';

export const openaiCompatibleErrorDataSchema = z.object({
  error: z.object({
    message: z.string(),

    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
});

export type OpenAICompatibleErrorData = z.infer<
  typeof openaiCompatibleErrorDataSchema
>;

export type ProviderErrorStructure<T> = {
  errorSchema: ZodType<T>;
  errorToMessage: (error: T) => string;
  isRetryable?: (response: Response, error?: T) => boolean;
};

export const defaultOpenAICompatibleErrorStructure: ProviderErrorStructure<OpenAICompatibleErrorData> =
  {
    errorSchema: openaiCompatibleErrorDataSchema,
    errorToMessage: data => data.error.message,
  };
@@ -183,7 +183,7 @@ export async function convertToOpenAIResponsesInput({

       case "reasoning": {
         const providerOptions = await parseProviderOptions({
-          provider: "openai",
+          provider: "copilot",
           providerOptions: part.providerOptions,
           schema: openaiResponsesReasoningProviderOptionsSchema,
         })
@@ -194,7 +194,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     }

     const openaiOptions = await parseProviderOptions({
-      provider: "openai",
+      provider: "copilot",
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema,
     })
@@ -1,2 +0,0 @@
-export { createOpenaiCompatible, openaiCompatible } from "./openai-compatible-provider"
-export type { OpenaiCompatibleProvider, OpenaiCompatibleProviderSettings } from "./openai-compatible-provider"
@@ -20,6 +20,7 @@ export namespace ProviderTransform {
   function sdkKey(npm: string): string | undefined {
     switch (npm) {
+      case "@ai-sdk/github-copilot":
+        return "copilot"
       case "@ai-sdk/openai":
       case "@ai-sdk/azure":
         return "openai"
@@ -179,6 +180,9 @@ export namespace ProviderTransform {
       openaiCompatible: {
         cache_control: { type: "ephemeral" },
       },
+      copilot: {
+        copilot_cache_control: { type: "ephemeral" },
+      },
     }

     for (const msg of unique([...system, ...final])) {
@@ -353,6 +357,15 @@
         return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))

       case "@ai-sdk/github-copilot":
+        if (model.id.includes("gemini")) {
+          // currently github copilot only returns thinking
+          return {}
+        }
+        if (model.id.includes("claude")) {
+          return {
+            thinking: { thinking_budget: 4000 },
+          }
+        }
         const copilotEfforts = iife(() => {
           if (id.includes("5.1-codex-max") || id.includes("5.2")) return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
           return WIDELY_SUPPORTED_EFFORTS
@@ -148,14 +148,15 @@ export namespace LLM {
     },
   )

-  const maxOutputTokens = isCodex
-    ? undefined
-    : ProviderTransform.maxOutputTokens(
-        input.model.api.npm,
-        params.options,
-        input.model.limit.output,
-        OUTPUT_TOKEN_MAX,
-      )
+  const maxOutputTokens =
+    isCodex || provider.id.includes("github-copilot")
+      ? undefined
+      : ProviderTransform.maxOutputTokens(
+          input.model.api.npm,
+          params.options,
+          input.model.limit.output,
+          OUTPUT_TOKEN_MAX,
+        )

   const tools = await resolveTools(input)

@@ -0,0 +1,478 @@
import { convertToOpenAICompatibleChatMessages as convertToCopilotMessages } from "@/provider/sdk/copilot/chat/convert-to-openai-compatible-chat-messages"
import { describe, test, expect } from "bun:test"

describe("user messages", () => {
  test("should convert messages with only a text part to a string content", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [{ type: "text", text: "Hello" }],
      },
    ])

    expect(result).toEqual([{ role: "user", content: "Hello" }])
  })

  test("should convert messages with image parts", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [
          { type: "text", text: "Hello" },
          {
            type: "file",
            data: Buffer.from([0, 1, 2, 3]).toString("base64"),
            mediaType: "image/png",
          },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "user",
        content: [
          { type: "text", text: "Hello" },
          {
            type: "image_url",
            image_url: { url: "data:image/png;base64,AAECAw==" },
          },
        ],
      },
    ])
  })

  test("should convert messages with image parts from Uint8Array", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [
          { type: "text", text: "Hi" },
          {
            type: "file",
            data: new Uint8Array([0, 1, 2, 3]),
            mediaType: "image/png",
          },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "user",
        content: [
          { type: "text", text: "Hi" },
          {
            type: "image_url",
            image_url: { url: "data:image/png;base64,AAECAw==" },
          },
        ],
      },
    ])
  })

  test("should handle URL-based images", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [
          {
            type: "file",
            data: new URL("https://example.com/image.jpg"),
            mediaType: "image/*",
          },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "user",
        content: [
          {
            type: "image_url",
            image_url: { url: "https://example.com/image.jpg" },
          },
        ],
      },
    ])
  })

  test("should handle multiple text parts without flattening", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [
          { type: "text", text: "Part 1" },
          { type: "text", text: "Part 2" },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "user",
        content: [
          { type: "text", text: "Part 1" },
          { type: "text", text: "Part 2" },
        ],
      },
    ])
  })
})

describe("assistant messages", () => {
  test("should convert assistant text messages", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [{ type: "text", text: "Hello back!" }],
      },
    ])

    expect(result).toEqual([
      {
        role: "assistant",
        content: "Hello back!",
        tool_calls: undefined,
        reasoning_text: undefined,
        reasoning_opaque: undefined,
      },
    ])
  })

  test("should handle assistant message with null content when only tool calls", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          {
            type: "tool-call",
            toolCallId: "call1",
            toolName: "calculator",
            input: { a: 1, b: 2 },
          },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "assistant",
        content: null,
        tool_calls: [
          {
            id: "call1",
            type: "function",
            function: {
              name: "calculator",
              arguments: JSON.stringify({ a: 1, b: 2 }),
            },
          },
        ],
        reasoning_text: undefined,
        reasoning_opaque: undefined,
      },
    ])
  })

  test("should concatenate multiple text parts", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          { type: "text", text: "First part. " },
          { type: "text", text: "Second part." },
        ],
      },
    ])

    expect(result[0].content).toBe("First part. Second part.")
  })
})

describe("tool calls", () => {
  test("should stringify arguments to tool calls", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          {
            type: "tool-call",
            input: { foo: "bar123" },
            toolCallId: "quux",
            toolName: "thwomp",
          },
        ],
      },
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "quux",
            toolName: "thwomp",
            output: { type: "json", value: { oof: "321rab" } },
          },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "assistant",
        content: null,
        tool_calls: [
          {
            id: "quux",
            type: "function",
            function: {
              name: "thwomp",
              arguments: JSON.stringify({ foo: "bar123" }),
            },
          },
        ],
        reasoning_text: undefined,
        reasoning_opaque: undefined,
      },
      {
        role: "tool",
        tool_call_id: "quux",
        content: JSON.stringify({ oof: "321rab" }),
      },
    ])
  })

  test("should handle text output type in tool results", () => {
    const result = convertToCopilotMessages([
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call-1",
            toolName: "getWeather",
            output: { type: "text", value: "It is sunny today" },
          },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "tool",
        tool_call_id: "call-1",
        content: "It is sunny today",
      },
    ])
  })

  test("should handle multiple tool results as separate messages", () => {
    const result = convertToCopilotMessages([
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call1",
            toolName: "api1",
            output: { type: "text", value: "Result 1" },
          },
          {
            type: "tool-result",
            toolCallId: "call2",
            toolName: "api2",
            output: { type: "text", value: "Result 2" },
          },
        ],
      },
    ])

    expect(result).toHaveLength(2)
    expect(result[0]).toEqual({
      role: "tool",
      tool_call_id: "call1",
      content: "Result 1",
    })
    expect(result[1]).toEqual({
      role: "tool",
      tool_call_id: "call2",
      content: "Result 2",
    })
  })

  test("should handle text plus multiple tool calls", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          { type: "text", text: "Checking... " },
          {
            type: "tool-call",
            toolCallId: "call1",
            toolName: "searchTool",
            input: { query: "Weather" },
          },
          { type: "text", text: "Almost there..." },
          {
            type: "tool-call",
            toolCallId: "call2",
            toolName: "mapsTool",
            input: { location: "Paris" },
          },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "assistant",
        content: "Checking... Almost there...",
        tool_calls: [
          {
            id: "call1",
            type: "function",
            function: {
              name: "searchTool",
              arguments: JSON.stringify({ query: "Weather" }),
            },
          },
          {
            id: "call2",
            type: "function",
            function: {
              name: "mapsTool",
              arguments: JSON.stringify({ location: "Paris" }),
            },
          },
        ],
        reasoning_text: undefined,
        reasoning_opaque: undefined,
      },
    ])
  })
})

describe("reasoning (copilot-specific)", () => {
  test("should include reasoning_text from reasoning part", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Let me think about this..." },
          { type: "text", text: "The answer is 42." },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "assistant",
        content: "The answer is 42.",
        tool_calls: undefined,
        reasoning_text: "Let me think about this...",
        reasoning_opaque: undefined,
      },
    ])
  })

  test("should include reasoning_opaque from providerOptions", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "Thinking...",
            providerOptions: {
              copilot: { reasoningOpaque: "opaque-signature-123" },
            },
          },
          { type: "text", text: "Done!" },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "assistant",
        content: "Done!",
        tool_calls: undefined,
        reasoning_text: "Thinking...",
        reasoning_opaque: "opaque-signature-123",
      },
    ])
  })

  test("should handle reasoning-only assistant message", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "Just thinking, no response yet",
            providerOptions: {
              copilot: { reasoningOpaque: "sig-abc" },
            },
          },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: "assistant",
        content: null,
        tool_calls: undefined,
        reasoning_text: "Just thinking, no response yet",
        reasoning_opaque: "sig-abc",
      },
    ])
  })
})

describe("full conversation", () => {
  test("should convert a multi-turn conversation with reasoning", () => {
    const result = convertToCopilotMessages([
      {
        role: "system",
        content: "You are a helpful assistant.",
      },
      {
        role: "user",
        content: [{ type: "text", text: "What is 2+2?" }],
      },
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "Let me calculate 2+2...",
            providerOptions: {
              copilot: { reasoningOpaque: "sig-abc" },
            },
          },
          { type: "text", text: "2+2 equals 4." },
        ],
      },
      {
        role: "user",
        content: [{ type: "text", text: "What about 3+3?" }],
      },
    ])

    expect(result).toHaveLength(4)

    const systemMsg = result[0]
    expect(systemMsg.role).toBe("system")

    // Assistant message should have reasoning fields
    const assistantMsg = result[2] as {
      reasoning_text?: string
      reasoning_opaque?: string
    }
    expect(assistantMsg.reasoning_text).toBe("Let me calculate 2+2...")
    expect(assistantMsg.reasoning_opaque).toBe("sig-abc")
  })
})
@@ -0,0 +1,555 @@
import { OpenAICompatibleChatLanguageModel } from "@/provider/sdk/copilot/chat/openai-compatible-chat-language-model"
import { describe, test, expect, mock } from "bun:test"
import type { LanguageModelV2Prompt } from "@ai-sdk/provider"

async function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promise<T[]> {
  const reader = stream.getReader()
  const result: T[] = []
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    result.push(value)
  }
  return result
}

const TEST_PROMPT: LanguageModelV2Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]

// Fixtures from copilot_test.exs
const FIXTURES = {
  basicText: [
    `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gemini-2.0-flash-001","choices":[{"index":0,"delta":{"role":"assistant","content":"Hello"},"finish_reason":null}]}`,
    `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gemini-2.0-flash-001","choices":[{"index":0,"delta":{"content":" world"},"finish_reason":null}]}`,
    `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gemini-2.0-flash-001","choices":[{"index":0,"delta":{"content":"!"},"finish_reason":"stop"}]}`,
    `data: [DONE]`,
  ],

  reasoningWithToolCalls: [
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Understanding Dayzee's Purpose**\\n\\nI'm starting to get a better handle on \`dayzee\`.\\n\\n"}}],"created":1764940861,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Assessing Dayzee's Functionality**\\n\\nI've reviewed the files.\\n\\n"}}],"created":1764940862,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"filePath\\":\\"/README.md\\"}","name":"read_file"},"id":"call_abc123","index":0,"type":"function"}],"reasoning_opaque":"4CUQ6696CwSXOdQ5rtvDimqA91tBzfmga4ieRbmZ5P67T2NLW3"}}],"created":1764940862,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
    `data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"filePath\\":\\"/mix.exs\\"}","name":"read_file"},"id":"call_def456","index":1,"type":"function"}]}}],"created":1764940862,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":53,"prompt_tokens":19581,"prompt_tokens_details":{"cached_tokens":17068},"total_tokens":19768,"reasoning_tokens":134},"model":"gemini-3-pro-preview"}`,
    `data: [DONE]`,
  ],

  reasoningWithOpaqueAtEnd: [
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Analyzing the Inquiry's Nature**\\n\\nI'm currently parsing the user's question.\\n\\n"}}],"created":1765201729,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Reconciling User's Input**\\n\\nI'm grappling with the context.\\n\\n"}}],"created":1765201730,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
    `data: {"choices":[{"index":0,"delta":{"content":"I am Tidewave, a highly skilled AI coding agent.\\n\\n","role":"assistant"}}],"created":1765201730,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
    `data: {"choices":[{"finish_reason":"stop","index":0,"delta":{"content":"How can I help you?","role":"assistant","reasoning_opaque":"/PMlTqxqSJZnUBDHgnnJKLVI4eZQ"}}],"created":1765201730,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":59,"prompt_tokens":5778,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":5932,"reasoning_tokens":95},"model":"gemini-3-pro-preview"}`,
    `data: [DONE]`,
  ],

  // Case where reasoning_opaque and content come in the SAME chunk
  reasoningWithOpaqueAndContentSameChunk: [
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Understanding the Query's Nature**\\n\\nI'm currently grappling with the user's philosophical query.\\n\\n"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Framing the Response's Core**\\n\\nNow, I'm structuring my response.\\n\\n"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
    `data: {"choices":[{"index":0,"delta":{"content":"Of course. I'm thinking right now.","role":"assistant","reasoning_opaque":"ExXaGwW7jBo39OXRe9EPoFGN1rOtLJBx"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
    `data: {"choices":[{"finish_reason":"stop","index":0,"delta":{"content":" What's on your mind?","role":"assistant"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":78,"prompt_tokens":3767,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":3915,"reasoning_tokens":70},"model":"gemini-2.5-pro"}`,
    `data: [DONE]`,
  ],

  // Case where reasoning_opaque and content come in same chunk, followed by tool calls
  reasoningWithOpaqueContentAndToolCalls: [
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Analyzing the Structure**\\n\\nI'm currently trying to get a handle on the project's layout. My initial focus is on the file structure itself, specifically the directory organization. I'm hoping this will illuminate how different components interact. I'll need to identify the key modules and their dependencies.\\n\\n\\n"}}],"created":1766066995,"id":"MQtEafqbFYTZsbwPwuCVoAg","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
    `data: {"choices":[{"index":0,"delta":{"content":"Okay, I need to check out the project's file structure.","role":"assistant","reasoning_opaque":"WHOd3dYFnxEBOsKUXjbX6c2rJa0fS214FHbsj+A3Q+i63SFo7H/92RsownAzyo0h2qEy3cOcrvAatsMx51eCKiMSqt4dYWZhd5YVSgF0CehkpDbWBP/SoRqLU1dhCmUJV/6b5uYFBOzKLBGNadyhI7T1gWFlXntwc6SNjH6DujnFPeVr+L8DdOoUJGJrw2aOfm9NtkXA6wZh9t7dt+831yIIImjD9MHczuXoXj8K7tyLpIJ9KlVXMhnO4IKSYNdKRtoHlGTmudAp5MgH/vLWb6oSsL+ZJl/OdF3WBOeanGhYNoByCRDSvR7anAR/9m5zf9yUax+u/nFg+gzmhFacnzZGtSmcvJ4/4HWKNtUkRASTKeN94DXB8j1ptB/i6ldaMAz2ZyU+sbjPWI8aI4fKJ2MuO01u3uE87xVwpWiM+0rahIzJsllI5edwOaOFtF4tnlCTQafbxHwCZR62uON2E+IjGzW80MzyfYrbLBJKS5zTeHCgPYQSNaKzPfpzkQvdwo3JUnJYcEHgGeKzkq5sbvS5qitCYI7Xue0V98S6/KnUSPnDQBjNnas2i6BqJV2vuCEU/Y3ucrlKVbuRIFCZXCyLzrsGeRLRKlrf5S/HDAQ04IOPQVQhBPvhX0nDjhZB"}}],"created":1766066995,"id":"MQtEafqbFYTZsbwPwuCVoAg","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
    `data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{}","name":"list_project_files"},"id":"call_MHxqRDd5WVo3NU8wUXRaMmc0MFE","index":0,"type":"function"}]}}],"created":1766066995,"id":"MQtEafqbFYTZsbwPwuCVoAg","usage":{"completion_tokens":19,"prompt_tokens":3767,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":3797,"reasoning_tokens":11},"model":"gemini-2.5-pro"}`,
    `data: [DONE]`,
  ],

  // Case where reasoning goes directly to tool_calls with NO content;
  // reasoning_opaque and tool_calls come in the same chunk
  reasoningDirectlyToToolCalls: [
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Executing and Analyzing HTML**\\n\\nI've successfully captured the HTML snapshot using the \`browser_eval\` tool, giving me a solid understanding of the page structure. Now, I'm shifting focus to Elixir code execution with \`project_eval\` to assess my ability to work within the project's environment.\\n\\n\\n"}}],"created":1766068643,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
    `data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Testing Project Contexts**\\n\\nI've got the HTML body snapshot from \`browser_eval\`, which is a helpful reference. Next, I'm testing my ability to run Elixir code in the project with \`project_eval\`. I'm starting with a simple sum: \`1 + 1\`. This will confirm I'm set up to interact with the project's codebase.\\n\\n\\n"}}],"created":1766068644,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
    `data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"code\\":\\"1 + 1\\"}","name":"project_eval"},"id":"call_MHw3RDhmT1J5Z3B6WlhpVjlveTc","index":0,"type":"function"}],"reasoning_opaque":"ytGNWFf2doK38peANDvm7whkLPKrd+Fv6/k34zEPBF6Qwitj4bTZT0FBXleydLb6"}}],"created":1766068644,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":12,"prompt_tokens":8677,"prompt_tokens_details":{"cached_tokens":3692},"total_tokens":8768,"reasoning_tokens":79},"model":"gemini-3-pro-preview"}`,
    `data: [DONE]`,
  ],
}

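// Rough shape of one parsed SSE delta in the fixtures above, inferred from the
// JSON payloads rather than taken from an official Copilot type; the name is
// hypothetical:
type CopilotDeltaSketch = {
  role?: "assistant"
  content?: string | null
  reasoning_text?: string // streamed thinking text, may span several chunks
  reasoning_opaque?: string // opaque signature; arrives alone, with content, or with tool_calls
  tool_calls?: Array<{
    index: number
    id: string
    type: "function"
    function: { name: string; arguments: string }
  }>
}
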
function createMockFetch(chunks: string[]) {
  return mock(async () => {
    const body = new ReadableStream({
      start(controller) {
        for (const chunk of chunks) {
          controller.enqueue(new TextEncoder().encode(chunk + "\n\n"))
        }
        controller.close()
      },
    })

    return new Response(body, {
      status: 200,
      headers: { "Content-Type": "text/event-stream" },
    })
  })
}

function createModel(fetchFn: ReturnType<typeof mock>) {
  return new OpenAICompatibleChatLanguageModel("test-model", {
    provider: "copilot.chat",
    url: () => "https://api.test.com/chat/completions",
    headers: () => ({ Authorization: "Bearer test-token" }),
    fetch: fetchFn as any,
  })
}

describe("doStream", () => {
  test("should stream text deltas", async () => {
    const mockFetch = createMockFetch(FIXTURES.basicText)
    const model = createModel(mockFetch)

    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })

    const parts = await convertReadableStreamToArray(stream)

    // Filter to just the key events
    const textParts = parts.filter(
      (p) => p.type === "text-start" || p.type === "text-delta" || p.type === "text-end" || p.type === "finish",
    )

    expect(textParts).toMatchObject([
      { type: "text-start", id: "txt-0" },
      { type: "text-delta", id: "txt-0", delta: "Hello" },
      { type: "text-delta", id: "txt-0", delta: " world" },
      { type: "text-delta", id: "txt-0", delta: "!" },
      { type: "text-end", id: "txt-0" },
      { type: "finish", finishReason: "stop" },
    ])
  })

  test("should stream reasoning with tool calls and capture reasoning_opaque", async () => {
    const mockFetch = createMockFetch(FIXTURES.reasoningWithToolCalls)
    const model = createModel(mockFetch)

    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })

    const parts = await convertReadableStreamToArray(stream)

    // Check reasoning parts
    const reasoningParts = parts.filter(
      (p) => p.type === "reasoning-start" || p.type === "reasoning-delta" || p.type === "reasoning-end",
    )

    expect(reasoningParts[0]).toEqual({
      type: "reasoning-start",
      id: "reasoning-0",
    })

    expect(reasoningParts[1]).toMatchObject({
      type: "reasoning-delta",
      id: "reasoning-0",
    })
    expect((reasoningParts[1] as { delta: string }).delta).toContain("**Understanding Dayzee's Purpose**")

    expect(reasoningParts[2]).toMatchObject({
      type: "reasoning-delta",
      id: "reasoning-0",
    })
    expect((reasoningParts[2] as { delta: string }).delta).toContain("**Assessing Dayzee's Functionality**")

    // reasoning_opaque should be in reasoning-end providerMetadata
    const reasoningEnd = reasoningParts.find((p) => p.type === "reasoning-end")
    expect(reasoningEnd).toMatchObject({
      type: "reasoning-end",
      id: "reasoning-0",
      providerMetadata: {
        copilot: {
          reasoningOpaque: "4CUQ6696CwSXOdQ5rtvDimqA91tBzfmga4ieRbmZ5P67T2NLW3",
        },
      },
    })

    // Check tool calls
    const toolParts = parts.filter(
      (p) => p.type === "tool-input-start" || p.type === "tool-call" || p.type === "tool-input-end",
    )

    expect(toolParts).toContainEqual({
      type: "tool-input-start",
      id: "call_abc123",
      toolName: "read_file",
    })

    expect(toolParts).toContainEqual(
      expect.objectContaining({
        type: "tool-call",
        toolCallId: "call_abc123",
        toolName: "read_file",
      }),
    )

    expect(toolParts).toContainEqual({
      type: "tool-input-start",
      id: "call_def456",
      toolName: "read_file",
    })

    // Check finish
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: "tool-calls",
      usage: {
        inputTokens: 19581,
        outputTokens: 53,
      },
    })
  })

  test("should handle reasoning_opaque that comes at end with text in between", async () => {
    const mockFetch = createMockFetch(FIXTURES.reasoningWithOpaqueAtEnd)
    const model = createModel(mockFetch)

    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })

    const parts = await convertReadableStreamToArray(stream)

    // Check that reasoning comes first
    const reasoningStart = parts.findIndex((p) => p.type === "reasoning-start")
    const textStart = parts.findIndex((p) => p.type === "text-start")
    expect(reasoningStart).toBeLessThan(textStart)

    // Check reasoning deltas
    const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
    expect(reasoningDeltas).toHaveLength(2)
    expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Analyzing the Inquiry's Nature**")
    expect((reasoningDeltas[1] as { delta: string }).delta).toContain("**Reconciling User's Input**")

    // Check text deltas
    const textDeltas = parts.filter((p) => p.type === "text-delta")
    expect(textDeltas).toHaveLength(2)
    expect((textDeltas[0] as { delta: string }).delta).toContain("I am Tidewave")
    expect((textDeltas[1] as { delta: string }).delta).toContain("How can I help you?")

    // reasoning-end should be emitted before text-start
    const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
    const textStartIndex = parts.findIndex((p) => p.type === "text-start")
    expect(reasoningEndIndex).toBeGreaterThan(-1)
    expect(reasoningEndIndex).toBeLessThan(textStartIndex)

    // In this fixture, reasoning_opaque comes AFTER content has started (in chunk 4),
    // so it arrives too late to be attached to reasoning-end. But it should still
    // be captured and included in the finish event's providerMetadata.
    const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
    expect(reasoningEnd).toMatchObject({
      type: "reasoning-end",
      id: "reasoning-0",
    })

    // reasoning_opaque should be in the finish event's providerMetadata
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: "stop",
      usage: {
        inputTokens: 5778,
        outputTokens: 59,
      },
      providerMetadata: {
        copilot: {
          reasoningOpaque: "/PMlTqxqSJZnUBDHgnnJKLVI4eZQ",
        },
      },
    })
  })

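  // A minimal sketch, not the model's actual implementation: the tests above
  // and below assert that reasoning_opaque attaches to reasoning-end when it
  // arrives while reasoning is still open, and otherwise surfaces on the
  // finish event's providerMetadata. The helper just states that routing rule;
  // its name and shape are hypothetical.
  function routeReasoningOpaqueSketch(reasoningOpen: boolean, opaque?: string) {
    if (opaque === undefined) return {}
    const providerMetadata = { copilot: { reasoningOpaque: opaque } }
    return reasoningOpen ? { reasoningEnd: providerMetadata } : { finish: providerMetadata }
  }
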
test("should handle reasoning_opaque and content in the same chunk", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.reasoningWithOpaqueAndContentSameChunk)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
// The critical test: reasoning-end should come BEFORE text-start
|
||||
const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
|
||||
const textStartIndex = parts.findIndex((p) => p.type === "text-start")
|
||||
expect(reasoningEndIndex).toBeGreaterThan(-1)
|
||||
expect(textStartIndex).toBeGreaterThan(-1)
|
||||
expect(reasoningEndIndex).toBeLessThan(textStartIndex)
|
||||
|
||||
// Check reasoning deltas
|
||||
const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
|
||||
expect(reasoningDeltas).toHaveLength(2)
|
||||
expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Understanding the Query's Nature**")
|
||||
expect((reasoningDeltas[1] as { delta: string }).delta).toContain("**Framing the Response's Core**")
|
||||
|
||||
// reasoning_opaque should be in reasoning-end even though it came with content
|
||||
const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
|
||||
expect(reasoningEnd).toMatchObject({
|
||||
type: "reasoning-end",
|
||||
id: "reasoning-0",
|
||||
providerMetadata: {
|
||||
copilot: {
|
||||
reasoningOpaque: "ExXaGwW7jBo39OXRe9EPoFGN1rOtLJBx",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Check text deltas
|
||||
const textDeltas = parts.filter((p) => p.type === "text-delta")
|
||||
expect(textDeltas).toHaveLength(2)
|
||||
expect((textDeltas[0] as { delta: string }).delta).toContain("Of course. I'm thinking right now.")
|
||||
expect((textDeltas[1] as { delta: string }).delta).toContain("What's on your mind?")
|
||||
|
||||
// Check finish
|
||||
const finish = parts.find((p) => p.type === "finish")
|
||||
expect(finish).toMatchObject({
|
||||
type: "finish",
|
||||
finishReason: "stop",
|
||||
})
|
||||
})
|
||||
|
||||
test("should handle reasoning_opaque and content followed by tool calls", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.reasoningWithOpaqueContentAndToolCalls)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
// Check that reasoning comes first, then text, then tool calls
|
||||
const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
|
||||
const textStartIndex = parts.findIndex((p) => p.type === "text-start")
|
||||
const toolStartIndex = parts.findIndex((p) => p.type === "tool-input-start")
|
||||
|
||||
expect(reasoningEndIndex).toBeGreaterThan(-1)
|
||||
expect(textStartIndex).toBeGreaterThan(-1)
|
||||
expect(toolStartIndex).toBeGreaterThan(-1)
|
||||
expect(reasoningEndIndex).toBeLessThan(textStartIndex)
|
||||
expect(textStartIndex).toBeLessThan(toolStartIndex)
|
||||
|
||||
// Check reasoning content
|
||||
const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
|
||||
expect(reasoningDeltas).toHaveLength(1)
|
||||
expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Analyzing the Structure**")
|
||||
|
||||
// reasoning_opaque should be in reasoning-end (comes with content in same chunk)
|
||||
const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
|
||||
expect(reasoningEnd).toMatchObject({
|
||||
type: "reasoning-end",
|
||||
id: "reasoning-0",
|
||||
providerMetadata: {
|
||||
copilot: {
|
||||
reasoningOpaque: expect.stringContaining("WHOd3dYFnxEBOsKUXjbX6c2rJa0fS214"),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Check text content
|
||||
const textDeltas = parts.filter((p) => p.type === "text-delta")
|
||||
expect(textDeltas).toHaveLength(1)
|
||||
expect((textDeltas[0] as { delta: string }).delta).toContain("Okay, I need to check out the project's file structure.")
|
||||
|
||||
// Check tool call
|
||||
const toolParts = parts.filter(
|
||||
(p) => p.type === "tool-input-start" || p.type === "tool-call" || p.type === "tool-input-end",
|
||||
)
|
||||
|
||||
expect(toolParts).toContainEqual({
|
||||
type: "tool-input-start",
|
||||
id: "call_MHxqRDd5WVo3NU8wUXRaMmc0MFE",
|
||||
toolName: "list_project_files",
|
||||
})
|
||||
|
||||
expect(toolParts).toContainEqual(
|
||||
expect.objectContaining({
|
||||
type: "tool-call",
|
||||
toolCallId: "call_MHxqRDd5WVo3NU8wUXRaMmc0MFE",
|
||||
toolName: "list_project_files",
|
||||
}),
|
||||
)
|
||||
|
||||
// Check finish
|
||||
const finish = parts.find((p) => p.type === "finish")
|
||||
expect(finish).toMatchObject({
|
||||
type: "finish",
|
||||
finishReason: "tool-calls",
|
||||
usage: {
|
||||
inputTokens: 3767,
|
||||
outputTokens: 19,
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("should emit reasoning-end before tool-input-start when reasoning goes directly to tool calls", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.reasoningDirectlyToToolCalls)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
// Critical check: reasoning-end MUST come before tool-input-start
|
||||
const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
|
||||
const toolStartIndex = parts.findIndex((p) => p.type === "tool-input-start")
|
||||
|
||||
expect(reasoningEndIndex).toBeGreaterThan(-1)
|
||||
expect(toolStartIndex).toBeGreaterThan(-1)
|
||||
expect(reasoningEndIndex).toBeLessThan(toolStartIndex)
|
||||
|
||||
// Check reasoning parts
|
||||
const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
|
||||
expect(reasoningDeltas).toHaveLength(2)
|
||||
expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Executing and Analyzing HTML**")
|
||||
expect((reasoningDeltas[1] as { delta: string }).delta).toContain("**Testing Project Contexts**")
|
||||
|
||||
// reasoning_opaque should be in reasoning-end providerMetadata
|
||||
const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
|
||||
expect(reasoningEnd).toMatchObject({
|
||||
type: "reasoning-end",
|
||||
id: "reasoning-0",
|
||||
providerMetadata: {
|
||||
copilot: {
|
||||
reasoningOpaque: "ytGNWFf2doK38peANDvm7whkLPKrd+Fv6/k34zEPBF6Qwitj4bTZT0FBXleydLb6",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// No text parts should exist
|
||||
const textParts = parts.filter((p) => p.type === "text-start" || p.type === "text-delta" || p.type === "text-end")
|
||||
expect(textParts).toHaveLength(0)
|
||||
|
||||
// Check tool call
|
||||
const toolCall = parts.find((p) => p.type === "tool-call")
|
||||
expect(toolCall).toMatchObject({
|
||||
type: "tool-call",
|
||||
toolCallId: "call_MHw3RDhmT1J5Z3B6WlhpVjlveTc",
|
||||
toolName: "project_eval",
|
||||
})
|
||||
|
||||
// Check finish
|
||||
const finish = parts.find((p) => p.type === "finish")
|
||||
expect(finish).toMatchObject({
|
||||
type: "finish",
|
||||
finishReason: "tool-calls",
|
||||
})
|
||||
})
|
||||
|
||||
test("should include response metadata from first chunk", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.basicText)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
const metadata = parts.find((p) => p.type === "response-metadata")
|
||||
expect(metadata).toMatchObject({
|
||||
type: "response-metadata",
|
||||
id: "chatcmpl-123",
|
||||
modelId: "gemini-2.0-flash-001",
|
||||
})
|
||||
})
|
||||
|
||||
test("should emit stream-start with warnings", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.basicText)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
const streamStart = parts.find((p) => p.type === "stream-start")
|
||||
expect(streamStart).toEqual({
|
||||
type: "stream-start",
|
||||
warnings: [],
|
||||
})
|
||||
})
|
||||
|
||||
test("should include raw chunks when requested", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.basicText)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: true,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
const rawChunks = parts.filter((p) => p.type === "raw")
|
||||
expect(rawChunks.length).toBeGreaterThan(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe("request body", () => {
|
||||
test("should send tools in OpenAI format", async () => {
|
||||
let capturedBody: unknown
|
||||
const mockFetch = mock(async (_url: string, init?: RequestInit) => {
|
||||
capturedBody = JSON.parse(init?.body as string)
|
||||
return new Response(
|
||||
new ReadableStream({
|
||||
start(controller) {
|
||||
controller.enqueue(new TextEncoder().encode(`data: [DONE]\n\n`))
|
||||
controller.close()
|
||||
},
|
||||
}),
|
||||
{ status: 200, headers: { "Content-Type": "text/event-stream" } },
|
||||
)
|
||||
})
|
||||
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
tools: [
|
||||
{
|
||||
type: "function",
|
||||
name: "get_weather",
|
||||
description: "Get the weather for a location",
|
||||
inputSchema: {
|
||||
type: "object",
|
||||
properties: {
|
||||
location: { type: "string" },
|
||||
},
|
||||
required: ["location"],
|
||||
},
|
||||
},
|
||||
],
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
expect((capturedBody as { tools: unknown[] }).tools).toEqual([
|
||||
{
|
||||
type: "function",
|
||||
function: {
|
||||
name: "get_weather",
|
||||
description: "Get the weather for a location",
|
||||
parameters: {
|
||||
type: "object",
|
||||
properties: {
|
||||
location: { type: "string" },
|
||||
},
|
||||
required: ["location"],
|
||||
},
|
||||
},
|
||||
},
|
||||
])
|
||||
})
|
||||
})
|
||||
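// A minimal sketch of the tool mapping the test above pins down: the V2 tool's
// inputSchema becomes the OpenAI function.parameters field. The parameter type
// is a structural assumption and the helper name is hypothetical; the real
// conversion happens inside the SDK's request-body construction.
function toOpenAIToolSketch(tool: { name: string; description?: string; inputSchema: unknown }) {
  return {
    type: "function" as const,
    function: {
      name: tool.name,
      description: tool.description,
      parameters: tool.inputSchema, // inputSchema maps directly onto function.parameters
    },
  }
}
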
@@ -1101,21 +1101,21 @@ describe("ProviderTransform.message - providerOptions key remapping", () => {
     expect(result[0].providerOptions?.openai).toBeUndefined()
   })
 
-  test("openai with github-copilot npm remaps providerID to 'openai'", () => {
+  test("copilot remaps providerID to 'copilot' key", () => {
     const model = createModel("github-copilot", "@ai-sdk/github-copilot")
     const msgs = [
       {
         role: "user",
         content: "Hello",
         providerOptions: {
-          "github-copilot": { someOption: "value" },
+          "copilot": { someOption: "value" },
         },
       },
     ] as any[]
 
     const result = ProviderTransform.message(msgs, model, {})
 
-    expect(result[0].providerOptions?.openai).toEqual({ someOption: "value" })
+    expect(result[0].providerOptions?.copilot).toEqual({ someOption: "value" })
     expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
   })