feat: migrate github copilot sdk to v6

@@ -1,16 +1,16 @@
 import {
-  type LanguageModelV2Prompt,
-  type SharedV2ProviderMetadata,
+  type LanguageModelV3Prompt,
+  type SharedV3ProviderOptions,
   UnsupportedFunctionalityError,
 } from "@ai-sdk/provider"
 import type { OpenAICompatibleChatPrompt } from "./openai-compatible-api-types"
 import { convertToBase64 } from "@ai-sdk/provider-utils"
 
-function getOpenAIMetadata(message: { providerOptions?: SharedV2ProviderMetadata }) {
+function getOpenAIMetadata(message: { providerOptions?: SharedV3ProviderOptions }) {
   return message?.providerOptions?.copilot ?? {}
 }
 
-export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Prompt): OpenAICompatibleChatPrompt {
+export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV3Prompt): OpenAICompatibleChatPrompt {
   const messages: OpenAICompatibleChatPrompt = []
   for (const { role, content, ...message } of prompt) {
     const metadata = getOpenAIMetadata({ ...message })
@@ -127,6 +127,9 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Pro
 
       case "tool": {
         for (const toolResponse of content) {
+          if (toolResponse.type === "tool-approval-response") {
+            continue
+          }
           const output = toolResponse.output
 
           let contentValue: string
@@ -135,6 +138,9 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Pro
             case "error-text":
               contentValue = output.value
               break
+            case "execution-denied":
+              contentValue = output.reason ?? "Tool execution denied."
+              break
             case "content":
             case "json":
             case "error-json":
@@ -1,6 +1,8 @@
-import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
+import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"
 
-export function mapOpenAICompatibleFinishReason(finishReason: string | null | undefined): LanguageModelV2FinishReason {
+export function mapOpenAICompatibleFinishReason(
+  finishReason: string | null | undefined,
+): LanguageModelV3FinishReason["unified"] {
   switch (finishReason) {
     case "stop":
       return "stop"
@@ -12,6 +14,6 @@ export function mapOpenAICompatibleFinishReason(finishReason: string | null | un
     case "tool_calls":
       return "tool-calls"
     default:
-      return "unknown"
+      return "other"
   }
 }
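Illustrative sketch, not part of the diff: in v2 this mapper returned the finish reason directly, while in v3 it returns only the "unified" member and each call site pairs it with the provider's raw string. A minimal consuming pattern, assuming a parsed chat-completion `choice` object:

// Sketch only; mirrors the wrapping pattern the call sites in this commit use.
const finishReason = {
  unified: mapOpenAICompatibleFinishReason(choice.finish_reason), // "stop", "tool-calls", "other", ...
  raw: choice.finish_reason ?? undefined, // provider's original value, kept for diagnostics
}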
@@ -1,12 +1,12 @@
 import {
   APICallError,
   InvalidResponseDataError,
-  type LanguageModelV2,
-  type LanguageModelV2CallWarning,
-  type LanguageModelV2Content,
-  type LanguageModelV2FinishReason,
-  type LanguageModelV2StreamPart,
-  type SharedV2ProviderMetadata,
+  type LanguageModelV3,
+  type LanguageModelV3CallOptions,
+  type LanguageModelV3Content,
+  type LanguageModelV3StreamPart,
+  type SharedV3ProviderMetadata,
+  type SharedV3Warning,
 } from "@ai-sdk/provider"
 import {
   combineHeaders,
@@ -47,11 +47,11 @@ export type OpenAICompatibleChatConfig = {
   /**
    * The supported URLs for the model.
    */
-  supportedUrls?: () => LanguageModelV2["supportedUrls"]
+  supportedUrls?: () => LanguageModelV3["supportedUrls"]
 }
 
-export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
-  readonly specificationVersion = "v2"
+export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
+  readonly specificationVersion = "v3"
 
   readonly supportsStructuredOutputs: boolean
 
@@ -98,8 +98,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
     seed,
     toolChoice,
     tools,
-  }: Parameters<LanguageModelV2["doGenerate"]>[0]) {
-    const warnings: LanguageModelV2CallWarning[] = []
+  }: LanguageModelV3CallOptions) {
+    const warnings: SharedV3Warning[] = []
 
     // Parse provider options
     const compatibleOptions = Object.assign(
@@ -116,13 +116,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
     )
 
     if (topK != null) {
-      warnings.push({ type: "unsupported-setting", setting: "topK" })
+      warnings.push({ type: "unsupported", feature: "topK" })
     }
 
     if (responseFormat?.type === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
       warnings.push({
-        type: "unsupported-setting",
-        setting: "responseFormat",
+        type: "unsupported",
+        feature: "responseFormat",
         details: "JSON response format schema is only supported with structuredOutputs",
       })
     }
@@ -189,9 +189,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
     }
   }
 
-  async doGenerate(
-    options: Parameters<LanguageModelV2["doGenerate"]>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
+  async doGenerate(options: LanguageModelV3CallOptions) {
     const { args, warnings } = await this.getArgs({ ...options })
 
     const body = JSON.stringify(args)
@@ -214,7 +212,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
     })
 
     const choice = responseBody.choices[0]
-    const content: Array<LanguageModelV2Content> = []
+    const content: Array<LanguageModelV3Content> = []
 
     // text content:
     const text = choice.message.content
@@ -257,7 +255,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
     }
 
     // provider metadata:
-    const providerMetadata: SharedV2ProviderMetadata = {
+    const providerMetadata: SharedV3ProviderMetadata = {
      [this.providerOptionsName]: {},
      ...(await this.config.metadataExtractor?.extractMetadata?.({
        parsedBody: rawResponse,
@@ -275,13 +273,23 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
 
     return {
       content,
-      finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
+      finishReason: {
+        unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
+        raw: choice.finish_reason ?? undefined,
+      },
       usage: {
-        inputTokens: responseBody.usage?.prompt_tokens ?? undefined,
-        outputTokens: responseBody.usage?.completion_tokens ?? undefined,
-        totalTokens: responseBody.usage?.total_tokens ?? undefined,
-        reasoningTokens: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
-        cachedInputTokens: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
+        inputTokens: {
+          total: responseBody.usage?.prompt_tokens ?? undefined,
+          noCache: undefined,
+          cacheRead: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
+          cacheWrite: undefined,
+        },
+        outputTokens: {
+          total: responseBody.usage?.completion_tokens ?? undefined,
+          text: undefined,
+          reasoning: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
+        },
+        raw: responseBody.usage ?? undefined,
       },
       providerMetadata,
       request: { body },
@@ -294,9 +302,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
     }
   }
 
-  async doStream(
-    options: Parameters<LanguageModelV2["doStream"]>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
+  async doStream(options: LanguageModelV3CallOptions) {
     const { args, warnings } = await this.getArgs({ ...options })
 
     const body = {
@@ -332,7 +338,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
       hasFinished: boolean
     }> = []
 
-    let finishReason: LanguageModelV2FinishReason = "unknown"
+    let finishReason: {
+      unified: ReturnType<typeof mapOpenAICompatibleFinishReason>
+      raw: string | undefined
+    } = {
+      unified: "other",
+      raw: undefined,
+    }
     const usage: {
       completionTokens: number | undefined
       completionTokensDetails: {
@@ -366,7 +378,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
 
     return {
       stream: response.pipeThrough(
-        new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV2StreamPart>({
+        new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV3StreamPart>({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings })
          },
@@ -380,7 +392,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
 
          // handle failed chunk parsing / validation:
          if (!chunk.success) {
-            finishReason = "error"
+            finishReason = {
+              unified: "error",
+              raw: undefined,
+            }
            controller.enqueue({ type: "error", error: chunk.error })
            return
          }
@@ -390,7 +405,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
 
          // handle error chunks:
          if ("error" in value) {
-            finishReason = "error"
+            finishReason = {
+              unified: "error",
+              raw: undefined,
+            }
            controller.enqueue({ type: "error", error: value.error.message })
            return
          }
@@ -435,7 +453,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
          const choice = value.choices[0]
 
          if (choice?.finish_reason != null) {
-            finishReason = mapOpenAICompatibleFinishReason(choice.finish_reason)
+            finishReason = {
+              unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
+              raw: choice.finish_reason ?? undefined,
+            }
          }
 
          if (choice?.delta == null) {
@@ -652,7 +673,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
            })
          }
 
-          const providerMetadata: SharedV2ProviderMetadata = {
+          const providerMetadata: SharedV3ProviderMetadata = {
            [providerOptionsName]: {},
            // Include reasoning_opaque for Copilot multi-turn reasoning
            ...(reasoningOpaque ? { copilot: { reasoningOpaque } } : {}),
@@ -671,11 +692,25 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
            type: "finish",
            finishReason,
            usage: {
-              inputTokens: usage.promptTokens ?? undefined,
-              outputTokens: usage.completionTokens ?? undefined,
-              totalTokens: usage.totalTokens ?? undefined,
-              reasoningTokens: usage.completionTokensDetails.reasoningTokens ?? undefined,
-              cachedInputTokens: usage.promptTokensDetails.cachedTokens ?? undefined,
+              inputTokens: {
+                total: usage.promptTokens,
+                noCache:
+                  usage.promptTokens != undefined && usage.promptTokensDetails.cachedTokens != undefined
+                    ? usage.promptTokens - usage.promptTokensDetails.cachedTokens
+                    : undefined,
+                cacheRead: usage.promptTokensDetails.cachedTokens,
+                cacheWrite: undefined,
+              },
+              outputTokens: {
+                total: usage.completionTokens,
+                text: undefined,
+                reasoning: usage.completionTokensDetails.reasoningTokens,
+              },
+              raw: {
+                prompt_tokens: usage.promptTokens ?? null,
+                completion_tokens: usage.completionTokens ?? null,
+                total_tokens: usage.totalTokens ?? null,
+              },
            },
            providerMetadata,
          })
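Illustrative sketch, not part of the diff: the v3 usage report replaces the flat v2 fields with nested inputTokens/outputTokens objects plus a raw passthrough. A self-contained example of the mapping, using a hypothetical OpenAI-style usage payload (field names follow the diff above):

// Hypothetical usage block, shaped like an OpenAI chat completion response.
const u = {
  prompt_tokens: 100,
  completion_tokens: 40,
  total_tokens: 140,
  prompt_tokens_details: { cached_tokens: 60 },
  completion_tokens_details: { reasoning_tokens: 10 },
}

const usageV3 = {
  inputTokens: {
    total: u.prompt_tokens, // 100
    noCache: u.prompt_tokens - u.prompt_tokens_details.cached_tokens, // 40 uncached
    cacheRead: u.prompt_tokens_details.cached_tokens, // 60
    cacheWrite: undefined, // not reported by this API
  },
  outputTokens: {
    total: u.completion_tokens, // 40
    text: undefined, // no per-channel split available
    reasoning: u.completion_tokens_details.reasoning_tokens, // 10
  },
  raw: u, // original payload preserved verbatim
}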
@@ -1,4 +1,4 @@
-import type { SharedV2ProviderMetadata } from "@ai-sdk/provider"
+import type { SharedV3ProviderMetadata } from "@ai-sdk/provider"
 
 /**
  Extracts provider-specific metadata from API responses.
@@ -14,7 +14,7 @@ export type MetadataExtractor = {
   * @returns Provider-specific metadata or undefined if no metadata is available.
   *          The metadata should be under a key indicating the provider id.
   */
-  extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV2ProviderMetadata | undefined>
+  extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV3ProviderMetadata | undefined>
 
  /**
   * Creates an extractor for handling streaming responses. The returned object provides
@@ -39,6 +39,6 @@ export type MetadataExtractor = {
    * @returns Provider-specific metadata or undefined if no metadata is available.
    *          The metadata should be under a key indicating the provider id.
    */
-    buildMetadata(): SharedV2ProviderMetadata | undefined
+    buildMetadata(): SharedV3ProviderMetadata | undefined
  }
 }
@@ -1,15 +1,11 @@
-import {
-  type LanguageModelV2CallOptions,
-  type LanguageModelV2CallWarning,
-  UnsupportedFunctionalityError,
-} from "@ai-sdk/provider"
+import { type LanguageModelV3CallOptions, type SharedV3Warning, UnsupportedFunctionalityError } from "@ai-sdk/provider"
 
 export function prepareTools({
   tools,
   toolChoice,
 }: {
-  tools: LanguageModelV2CallOptions["tools"]
-  toolChoice?: LanguageModelV2CallOptions["toolChoice"]
+  tools: LanguageModelV3CallOptions["tools"]
+  toolChoice?: LanguageModelV3CallOptions["toolChoice"]
 }): {
   tools:
     | undefined
@@ -22,12 +18,12 @@ export function prepareTools({
         }
       }>
   toolChoice: { type: "function"; function: { name: string } } | "auto" | "none" | "required" | undefined
-  toolWarnings: LanguageModelV2CallWarning[]
+  toolWarnings: SharedV3Warning[]
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
   tools = tools?.length ? tools : undefined
 
-  const toolWarnings: LanguageModelV2CallWarning[] = []
+  const toolWarnings: SharedV3Warning[] = []
 
   if (tools == null) {
     return { tools: undefined, toolChoice: undefined, toolWarnings }
@@ -43,8 +39,8 @@ export function prepareTools({
   }> = []
 
   for (const tool of tools) {
-    if (tool.type === "provider-defined") {
-      toolWarnings.push({ type: "unsupported-tool", tool })
+    if (tool.type === "provider") {
+      toolWarnings.push({ type: "unsupported", feature: `tool type: ${tool.type}` })
     } else {
       openaiCompatTools.push({
         type: "function",
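Illustrative sketch, not part of the diff: the warning variant is renamed from "unsupported-setting"/"setting" to "unsupported"/"feature" at every call site. Before and after, in one place:

// v2 shape (removed by this commit):
warnings.push({ type: "unsupported-setting", setting: "topK" })
// v3 shape (added by this commit); `details` remains an optional free-text field:
warnings.push({ type: "unsupported", feature: "topK" })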
@@ -1,7 +1,7 @@
 import {
-  type LanguageModelV2CallWarning,
-  type LanguageModelV2Prompt,
-  type LanguageModelV2ToolCallPart,
+  type LanguageModelV3Prompt,
+  type LanguageModelV3ToolCallPart,
+  type SharedV3Warning,
   UnsupportedFunctionalityError,
 } from "@ai-sdk/provider"
 import { convertToBase64, parseProviderOptions } from "@ai-sdk/provider-utils"
@@ -25,17 +25,18 @@ export async function convertToOpenAIResponsesInput({
   store,
   hasLocalShellTool = false,
 }: {
-  prompt: LanguageModelV2Prompt
+  prompt: LanguageModelV3Prompt
   systemMessageMode: "system" | "developer" | "remove"
   fileIdPrefixes?: readonly string[]
   store: boolean
   hasLocalShellTool?: boolean
 }): Promise<{
   input: OpenAIResponsesInput
-  warnings: Array<LanguageModelV2CallWarning>
+  warnings: Array<SharedV3Warning>
 }> {
   const input: OpenAIResponsesInput = []
-  const warnings: Array<LanguageModelV2CallWarning> = []
+  const warnings: Array<SharedV3Warning> = []
+  const processedApprovalIds = new Set<string>()
 
   for (const { role, content } of prompt) {
     switch (role) {
@@ -118,7 +119,7 @@ export async function convertToOpenAIResponsesInput({
 
       case "assistant": {
        const reasoningMessages: Record<string, OpenAIResponsesReasoning> = {}
-        const toolCallParts: Record<string, LanguageModelV2ToolCallPart> = {}
+        const toolCallParts: Record<string, LanguageModelV3ToolCallPart> = {}
 
        for (const part of content) {
          switch (part.type) {
@@ -251,8 +252,36 @@ export async function convertToOpenAIResponsesInput({
 
      case "tool": {
        for (const part of content) {
+          if (part.type === "tool-approval-response") {
+            if (processedApprovalIds.has(part.approvalId)) {
+              continue
+            }
+            processedApprovalIds.add(part.approvalId)
+
+            if (store) {
+              input.push({
+                type: "item_reference",
+                id: part.approvalId,
+              })
+            }
+
+            input.push({
+              type: "mcp_approval_response",
+              approval_request_id: part.approvalId,
+              approve: part.approved,
+            })
+            continue
+          }
          const output = part.output
 
+          if (output.type === "execution-denied") {
+            const approvalId = (output.providerOptions?.openai as { approvalId?: string } | undefined)?.approvalId
+
+            if (approvalId) {
+              continue
+            }
+          }
+
          if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
            input.push({
              type: "local_shell_call_output",
@@ -268,6 +297,9 @@ export async function convertToOpenAIResponsesInput({
            case "error-text":
              contentValue = output.value
              break
+            case "execution-denied":
+              contentValue = output.reason ?? "Tool execution denied."
+              break
            case "content":
            case "json":
            case "error-json":
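Illustrative sketch, not part of the diff: condensed control flow of the new tool-approval-response handling above. Each approval id is forwarded at most once, an item_reference is emitted only when `store` is enabled, and the mcp_approval_response item is always emitted:

// `part` is a tool-approval-response; `input` and `processedApprovalIds` as above.
if (!processedApprovalIds.has(part.approvalId)) {
  processedApprovalIds.add(part.approvalId) // dedupe across the whole prompt
  if (store) {
    input.push({ type: "item_reference", id: part.approvalId }) // link to the stored request
  }
  input.push({
    type: "mcp_approval_response",
    approval_request_id: part.approvalId,
    approve: part.approved,
  })
}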
@@ -1,4 +1,4 @@
-import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
+import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"
 
 export function mapOpenAIResponseFinishReason({
   finishReason,
@@ -7,7 +7,7 @@ export function mapOpenAIResponseFinishReason({
   finishReason: string | null | undefined
   // flag that checks if there have been client-side tool calls (not executed by openai)
   hasFunctionCall: boolean
-}): LanguageModelV2FinishReason {
+}): LanguageModelV3FinishReason["unified"] {
   switch (finishReason) {
     case undefined:
     case null:
@@ -17,6 +17,6 @@ export function mapOpenAIResponseFinishReason({
     case "content_filter":
       return "content-filter"
     default:
-      return hasFunctionCall ? "tool-calls" : "unknown"
+      return hasFunctionCall ? "tool-calls" : "other"
   }
 }
@@ -13,6 +13,7 @@ export type OpenAIResponsesInputItem =
   | OpenAIResponsesLocalShellCallOutput
   | OpenAIResponsesReasoning
   | OpenAIResponsesItemReference
+  | OpenAIResponsesMcpApprovalResponse
 
 export type OpenAIResponsesIncludeValue =
   | "web_search_call.action.sources"
@@ -93,6 +94,12 @@ export type OpenAIResponsesItemReference = {
   id: string
 }
 
+export type OpenAIResponsesMcpApprovalResponse = {
+  type: "mcp_approval_response"
+  approval_request_id: string
+  approve: boolean
+}
+
 /**
  * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
  */
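Illustrative sketch, not part of the diff: with the widened union, an approval decision is now a typed Responses API input item. The id below is hypothetical:

const approval: OpenAIResponsesMcpApprovalResponse = {
  type: "mcp_approval_response",
  approval_request_id: "appr_123", // hypothetical id, for illustration only
  approve: true,
}
const items: OpenAIResponsesInputItem[] = [approval] // accepted via the new union member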
@@ -1,13 +1,13 @@
 import {
   APICallError,
-  type LanguageModelV2,
-  type LanguageModelV2CallWarning,
-  type LanguageModelV2Content,
-  type LanguageModelV2FinishReason,
-  type LanguageModelV2ProviderDefinedTool,
-  type LanguageModelV2StreamPart,
-  type LanguageModelV2Usage,
-  type SharedV2ProviderMetadata,
+  type JSONValue,
+  type LanguageModelV3,
+  type LanguageModelV3CallOptions,
+  type LanguageModelV3Content,
+  type LanguageModelV3ProviderTool,
+  type LanguageModelV3StreamPart,
+  type SharedV3ProviderMetadata,
+  type SharedV3Warning,
 } from "@ai-sdk/provider"
 import {
   combineHeaders,
@@ -128,8 +128,8 @@ const LOGPROBS_SCHEMA = z.array(
   }),
 )
 
-export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
-  readonly specificationVersion = "v2"
+export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
+  readonly specificationVersion = "v3"
 
   readonly modelId: OpenAIResponsesModelId
 
@@ -163,34 +163,34 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     tools,
     toolChoice,
     responseFormat,
-  }: Parameters<LanguageModelV2["doGenerate"]>[0]) {
-    const warnings: LanguageModelV2CallWarning[] = []
+  }: LanguageModelV3CallOptions) {
+    const warnings: SharedV3Warning[] = []
     const modelConfig = getResponsesModelConfig(this.modelId)
 
     if (topK != null) {
-      warnings.push({ type: "unsupported-setting", setting: "topK" })
+      warnings.push({ type: "unsupported", feature: "topK" })
     }
 
     if (seed != null) {
-      warnings.push({ type: "unsupported-setting", setting: "seed" })
+      warnings.push({ type: "unsupported", feature: "seed" })
     }
 
     if (presencePenalty != null) {
       warnings.push({
-        type: "unsupported-setting",
-        setting: "presencePenalty",
+        type: "unsupported",
+        feature: "presencePenalty",
       })
     }
 
     if (frequencyPenalty != null) {
       warnings.push({
-        type: "unsupported-setting",
-        setting: "frequencyPenalty",
+        type: "unsupported",
+        feature: "frequencyPenalty",
       })
     }
 
     if (stopSequences != null) {
-      warnings.push({ type: "unsupported-setting", setting: "stopSequences" })
+      warnings.push({ type: "unsupported", feature: "stopSequences" })
     }
 
     const openaiOptions = await parseProviderOptions({
@@ -218,7 +218,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     }
 
     function hasOpenAITool(id: string) {
-      return tools?.find((tool) => tool.type === "provider-defined" && tool.id === id) != null
+      return tools?.find((tool) => tool.type === "provider" && tool.id === id) != null
     }
 
     // when logprobs are requested, automatically include them:
@@ -237,9 +237,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     const webSearchToolName = (
       tools?.find(
         (tool) =>
-          tool.type === "provider-defined" &&
-          (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"),
-      ) as LanguageModelV2ProviderDefinedTool | undefined
+          tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"),
+      ) as LanguageModelV3ProviderTool | undefined
     )?.name
 
     if (webSearchToolName) {
@@ -315,8 +314,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
      if (baseArgs.temperature != null) {
        baseArgs.temperature = undefined
        warnings.push({
-          type: "unsupported-setting",
-          setting: "temperature",
+          type: "unsupported",
+          feature: "temperature",
          details: "temperature is not supported for reasoning models",
        })
      }
@@ -324,24 +323,24 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
      if (baseArgs.top_p != null) {
        baseArgs.top_p = undefined
        warnings.push({
-          type: "unsupported-setting",
-          setting: "topP",
+          type: "unsupported",
+          feature: "topP",
          details: "topP is not supported for reasoning models",
        })
      }
    } else {
      if (openaiOptions?.reasoningEffort != null) {
        warnings.push({
-          type: "unsupported-setting",
-          setting: "reasoningEffort",
+          type: "unsupported",
+          feature: "reasoningEffort",
          details: "reasoningEffort is not supported for non-reasoning models",
        })
      }
 
      if (openaiOptions?.reasoningSummary != null) {
        warnings.push({
-          type: "unsupported-setting",
-          setting: "reasoningSummary",
+          type: "unsupported",
+          feature: "reasoningSummary",
          details: "reasoningSummary is not supported for non-reasoning models",
        })
      }
@@ -350,8 +349,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
    // Validate flex processing support
    if (openaiOptions?.serviceTier === "flex" && !modelConfig.supportsFlexProcessing) {
      warnings.push({
-        type: "unsupported-setting",
-        setting: "serviceTier",
+        type: "unsupported",
+        feature: "serviceTier",
        details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
      })
      // Remove from args if not supported
@@ -361,8 +360,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
    // Validate priority processing support
    if (openaiOptions?.serviceTier === "priority" && !modelConfig.supportsPriorityProcessing) {
      warnings.push({
-        type: "unsupported-setting",
-        setting: "serviceTier",
+        type: "unsupported",
+        feature: "serviceTier",
        details:
          "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
      })
@@ -391,9 +390,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
    }
  }
 
-  async doGenerate(
-    options: Parameters<LanguageModelV2["doGenerate"]>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
+  async doGenerate(options: LanguageModelV3CallOptions) {
    const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
    const url = this.config.url({
      path: "/responses",
@@ -508,7 +505,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
      })
    }
 
-    const content: Array<LanguageModelV2Content> = []
+    const content: Array<LanguageModelV3Content> = []
    const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
 
    // flag that checks if there have been client-side tool calls (not executed by openai)
@@ -554,7 +551,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
            result: {
              result: part.result,
            } satisfies z.infer<typeof imageGenerationOutputSchema>,
-            providerExecuted: true,
          })
 
          break
@@ -648,7 +644,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
            toolCallId: part.id,
            toolName: webSearchToolName ?? "web_search",
            result: { status: part.status },
-            providerExecuted: true,
          })
 
          break
@@ -671,7 +666,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
              type: "computer_use_tool_result",
              status: part.status || "completed",
            },
-            providerExecuted: true,
          })
          break
        }
@@ -693,14 +687,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
            queries: part.queries,
            results:
              part.results?.map((result) => ({
-                attributes: result.attributes,
+                attributes: result.attributes as Record<string, JSONValue>,
                fileId: result.file_id,
                filename: result.filename,
                score: result.score,
                text: result.text,
              })) ?? null,
          } satisfies z.infer<typeof fileSearchOutputSchema>,
-          providerExecuted: true,
        })
        break
      }
@@ -724,14 +717,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
          result: {
            outputs: part.outputs,
          } satisfies z.infer<typeof codeInterpreterOutputSchema>,
-          providerExecuted: true,
        })
        break
      }
    }
  }
 
-    const providerMetadata: SharedV2ProviderMetadata = {
+    const providerMetadata: SharedV3ProviderMetadata = {
      openai: { responseId: response.id },
    }
 
@@ -745,16 +737,29 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
 
    return {
      content,
-      finishReason: mapOpenAIResponseFinishReason({
-        finishReason: response.incomplete_details?.reason,
-        hasFunctionCall,
-      }),
+      finishReason: {
+        unified: mapOpenAIResponseFinishReason({
+          finishReason: response.incomplete_details?.reason,
+          hasFunctionCall,
+        }),
+        raw: response.incomplete_details?.reason,
+      },
      usage: {
-        inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens,
-        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: response.usage.output_tokens_details?.reasoning_tokens ?? undefined,
-        cachedInputTokens: response.usage.input_tokens_details?.cached_tokens ?? undefined,
+        inputTokens: {
+          total: response.usage.input_tokens,
+          noCache:
+            response.usage.input_tokens_details?.cached_tokens != null
+              ? response.usage.input_tokens - response.usage.input_tokens_details.cached_tokens
+              : undefined,
+          cacheRead: response.usage.input_tokens_details?.cached_tokens ?? undefined,
+          cacheWrite: undefined,
+        },
+        outputTokens: {
+          total: response.usage.output_tokens,
+          text: undefined,
+          reasoning: response.usage.output_tokens_details?.reasoning_tokens ?? undefined,
+        },
+        raw: response.usage,
      },
      request: { body },
      response: {
@@ -769,9 +774,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
    }
  }
 
-  async doStream(
-    options: Parameters<LanguageModelV2["doStream"]>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
+  async doStream(options: LanguageModelV3CallOptions) {
    const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
 
    const { responseHeaders, value: response } = await postJsonToApi({
@@ -792,11 +795,25 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
 
    const self = this
 
-    let finishReason: LanguageModelV2FinishReason = "unknown"
-    const usage: LanguageModelV2Usage = {
+    let finishReason: {
+      unified: ReturnType<typeof mapOpenAIResponseFinishReason>
+      raw: string | undefined
+    } = {
+      unified: "other",
+      raw: undefined,
+    }
+    const usage: {
+      inputTokens: number | undefined
+      outputTokens: number | undefined
+      totalTokens: number | undefined
+      reasoningTokens: number | undefined
+      cachedInputTokens: number | undefined
+    } = {
      inputTokens: undefined,
      outputTokens: undefined,
      totalTokens: undefined,
+      reasoningTokens: undefined,
+      cachedInputTokens: undefined,
    }
    const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
    let responseId: string | null = null
@@ -837,7 +854,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
 
    return {
      stream: response.pipeThrough(
-        new TransformStream<ParseResult<z.infer<typeof openaiResponsesChunkSchema>>, LanguageModelV2StreamPart>({
+        new TransformStream<ParseResult<z.infer<typeof openaiResponsesChunkSchema>>, LanguageModelV3StreamPart>({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings })
          },
@@ -849,7 +866,10 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
 
          // handle failed chunk parsing / validation:
          if (!chunk.success) {
-            finishReason = "error"
+            finishReason = {
+              unified: "error",
+              raw: undefined,
+            }
            controller.enqueue({ type: "error", error: chunk.error })
            return
          }
@@ -999,7 +1019,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
              toolCallId: value.item.id,
              toolName: "web_search",
              result: { status: value.item.status },
-              providerExecuted: true,
            })
          } else if (value.item.type === "computer_call") {
            ongoingToolCalls[value.output_index] = undefined
@@ -1025,7 +1044,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
                type: "computer_use_tool_result",
                status: value.item.status || "completed",
              },
-              providerExecuted: true,
            })
          } else if (value.item.type === "file_search_call") {
            ongoingToolCalls[value.output_index] = undefined
@@ -1038,14 +1056,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
              queries: value.item.queries,
              results:
                value.item.results?.map((result) => ({
-                  attributes: result.attributes,
+                  attributes: result.attributes as Record<string, JSONValue>,
                  fileId: result.file_id,
                  filename: result.filename,
                  score: result.score,
                  text: result.text,
                })) ?? null,
            } satisfies z.infer<typeof fileSearchOutputSchema>,
-            providerExecuted: true,
          })
        } else if (value.item.type === "code_interpreter_call") {
          ongoingToolCalls[value.output_index] = undefined
@@ -1057,7 +1074,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
            result: {
              outputs: value.item.outputs,
            } satisfies z.infer<typeof codeInterpreterOutputSchema>,
-            providerExecuted: true,
          })
        } else if (value.item.type === "image_generation_call") {
          controller.enqueue({
@@ -1067,7 +1083,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
            result: {
              result: value.item.result,
            } satisfies z.infer<typeof imageGenerationOutputSchema>,
-            providerExecuted: true,
          })
        } else if (value.item.type === "local_shell_call") {
          ongoingToolCalls[value.output_index] = undefined
@@ -1137,7 +1152,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
            result: {
              result: value.partial_image_b64,
            } satisfies z.infer<typeof imageGenerationOutputSchema>,
-            providerExecuted: true,
          })
        } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
          const toolCall = ongoingToolCalls[value.output_index]
@@ -1244,10 +1258,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
              })
            }
          } else if (isResponseFinishedChunk(value)) {
-            finishReason = mapOpenAIResponseFinishReason({
-              finishReason: value.response.incomplete_details?.reason,
-              hasFunctionCall,
-            })
+            finishReason = {
+              unified: mapOpenAIResponseFinishReason({
+                finishReason: value.response.incomplete_details?.reason,
+                hasFunctionCall,
+              }),
+              raw: value.response.incomplete_details?.reason ?? undefined,
+            }
            usage.inputTokens = value.response.usage.input_tokens
            usage.outputTokens = value.response.usage.output_tokens
            usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens
@@ -1287,7 +1304,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
            currentTextId = null
          }
 
-          const providerMetadata: SharedV2ProviderMetadata = {
+          const providerMetadata: SharedV3ProviderMetadata = {
            openai: {
              responseId,
            },
@@ -1304,7 +1321,27 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
          controller.enqueue({
            type: "finish",
            finishReason,
-            usage,
+            usage: {
+              inputTokens: {
+                total: usage.inputTokens,
+                noCache:
+                  usage.inputTokens != null && usage.cachedInputTokens != null
+                    ? usage.inputTokens - usage.cachedInputTokens
+                    : undefined,
+                cacheRead: usage.cachedInputTokens,
+                cacheWrite: undefined,
+              },
+              outputTokens: {
+                total: usage.outputTokens,
+                text: undefined,
+                reasoning: usage.reasoningTokens,
+              },
+              raw: {
+                input_tokens: usage.inputTokens,
+                output_tokens: usage.outputTokens,
+                total_tokens: usage.totalTokens,
+              },
+            },
            providerMetadata,
          })
        },
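Illustrative sketch, not part of the diff: both the generate and stream paths above derive noCache by subtraction, and only when both operands are known, since the provider may omit cache details entirely:

// Hypothetical totals, for illustration only.
const inputTotal: number | undefined = 1200
const cacheRead: number | undefined = 800
// 1200 - 800 = 400 uncached input tokens; undefined if either value is missing.
const noCache = inputTotal != null && cacheRead != null ? inputTotal - cacheRead : undefined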
@@ -1,8 +1,4 @@
-import {
-  type LanguageModelV2CallOptions,
-  type LanguageModelV2CallWarning,
-  UnsupportedFunctionalityError,
-} from "@ai-sdk/provider"
+import { type LanguageModelV3CallOptions, type SharedV3Warning, UnsupportedFunctionalityError } from "@ai-sdk/provider"
 import { codeInterpreterArgsSchema } from "./tool/code-interpreter"
 import { fileSearchArgsSchema } from "./tool/file-search"
 import { webSearchArgsSchema } from "./tool/web-search"
@@ -15,8 +11,8 @@ export function prepareResponsesTools({
   toolChoice,
   strictJsonSchema,
 }: {
-  tools: LanguageModelV2CallOptions["tools"]
-  toolChoice?: LanguageModelV2CallOptions["toolChoice"]
+  tools: LanguageModelV3CallOptions["tools"]
+  toolChoice?: LanguageModelV3CallOptions["toolChoice"]
   strictJsonSchema: boolean
 }): {
   tools?: Array<OpenAIResponsesTool>
@@ -30,12 +26,12 @@ export function prepareResponsesTools({
     | { type: "function"; name: string }
     | { type: "code_interpreter" }
     | { type: "image_generation" }
-  toolWarnings: LanguageModelV2CallWarning[]
+  toolWarnings: SharedV3Warning[]
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
   tools = tools?.length ? tools : undefined
 
-  const toolWarnings: LanguageModelV2CallWarning[] = []
+  const toolWarnings: SharedV3Warning[] = []
 
   if (tools == null) {
     return { tools: undefined, toolChoice: undefined, toolWarnings }
@@ -54,7 +50,7 @@ export function prepareResponsesTools({
          strict: strictJsonSchema,
        })
        break
-      case "provider-defined": {
+      case "provider": {
        switch (tool.id) {
          case "openai.file_search": {
            const args = fileSearchArgsSchema.parse(tool.args)
@@ -138,7 +134,7 @@ export function prepareResponsesTools({
            break
          }
          default:
-            toolWarnings.push({ type: "unsupported-tool", tool })
+            toolWarnings.push({ type: "unsupported", feature: "tool type" })
            break
        }
      }
@@ -1,6 +1,6 @@
 import { OpenAICompatibleChatLanguageModel } from "@/provider/sdk/copilot/chat/openai-compatible-chat-language-model"
 import { describe, test, expect, mock } from "bun:test"
-import type { LanguageModelV2Prompt } from "@ai-sdk/provider"
+import type { LanguageModelV3Prompt } from "@ai-sdk/provider"
 
 async function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promise<T[]> {
   const reader = stream.getReader()
@@ -13,7 +13,7 @@ async function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promi
   return result
 }
 
-const TEST_PROMPT: LanguageModelV2Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
+const TEST_PROMPT: LanguageModelV3Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
 
 // Fixtures from copilot_test.exs
 const FIXTURES = {
@@ -123,7 +123,7 @@ describe("doStream", () => {
       { type: "text-delta", id: "txt-0", delta: " world" },
       { type: "text-delta", id: "txt-0", delta: "!" },
       { type: "text-end", id: "txt-0" },
-      { type: "finish", finishReason: "stop" },
+      { type: "finish", finishReason: { unified: "stop" } },
     ])
   })
 
@@ -201,10 +201,10 @@ describe("doStream", () => {
     const finish = parts.find((p) => p.type === "finish")
     expect(finish).toMatchObject({
       type: "finish",
-      finishReason: "tool-calls",
+      finishReason: { unified: "tool-calls" },
       usage: {
-        inputTokens: 19581,
-        outputTokens: 53,
+        inputTokens: { total: 19581 },
+        outputTokens: { total: 53 },
       },
     })
   })
@@ -256,10 +256,10 @@ describe("doStream", () => {
     const finish = parts.find((p) => p.type === "finish")
     expect(finish).toMatchObject({
       type: "finish",
-      finishReason: "stop",
+      finishReason: { unified: "stop" },
       usage: {
-        inputTokens: 5778,
-        outputTokens: 59,
+        inputTokens: { total: 5778 },
+        outputTokens: { total: 59 },
       },
       providerMetadata: {
         copilot: {
@@ -315,7 +315,7 @@ describe("doStream", () => {
     const finish = parts.find((p) => p.type === "finish")
     expect(finish).toMatchObject({
       type: "finish",
-      finishReason: "stop",
+      finishReason: { unified: "stop" },
     })
   })
 
@@ -388,10 +388,10 @@ describe("doStream", () => {
     const finish = parts.find((p) => p.type === "finish")
     expect(finish).toMatchObject({
       type: "finish",
-      finishReason: "tool-calls",
+      finishReason: { unified: "tool-calls" },
       usage: {
-        inputTokens: 3767,
-        outputTokens: 19,
+        inputTokens: { total: 3767 },
+        outputTokens: { total: 19 },
       },
     })
   })
@@ -449,7 +449,7 @@ describe("doStream", () => {
     const finish = parts.find((p) => p.type === "finish")
     expect(finish).toMatchObject({
       type: "finish",
-      finishReason: "tool-calls",
+      finishReason: { unified: "tool-calls" },
     })
   })
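Illustrative sketch, not part of the diff: because toMatchObject asserts a subset, the updated tests can check only the "unified" member and the token totals while ignoring raw, noCache, and the other new fields. A self-contained example in the same bun:test style:

import { test, expect } from "bun:test"

test("toMatchObject checks only the listed fields", () => {
  // Hypothetical finish part, shaped like the v3 stream output above.
  const finish = {
    type: "finish",
    finishReason: { unified: "stop", raw: "stop" },
    usage: { inputTokens: { total: 5778, cacheRead: 0 }, outputTokens: { total: 59 } },
  }
  expect(finish).toMatchObject({
    type: "finish",
    finishReason: { unified: "stop" }, // raw may be present but is not asserted
    usage: { inputTokens: { total: 5778 }, outputTokens: { total: 59 } },
  })
})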