mirror of
https://github.com/anomalyco/opencode.git
synced 2026-02-01 22:48:16 +00:00
get codex working in copilot (#4914)
Co-authored-by: OpeOginni <107570612+OpeOginni@users.noreply.github.com> Co-authored-by: GitHub Action <action@github.com>
This commit is contained in:
16
bun.lock
16
bun.lock
@@ -224,6 +224,8 @@
|
||||
"@ai-sdk/mcp": "0.0.8",
|
||||
"@ai-sdk/openai": "2.0.71",
|
||||
"@ai-sdk/openai-compatible": "1.0.27",
|
||||
"@ai-sdk/provider": "2.0.0",
|
||||
"@ai-sdk/provider-utils": "3.0.18",
|
||||
"@clack/prompts": "1.0.0-alpha.1",
|
||||
"@hono/standard-validator": "0.1.5",
|
||||
"@hono/zod-validator": "catalog:",
|
||||
@@ -503,7 +505,7 @@
|
||||
|
||||
"@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],
|
||||
|
||||
"@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
|
||||
"@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ypv1xXMsgGcNKUP+hglKqtdDuMg68nWHucPPAhIENrbFAI+xCHiqPVN8Zllxyv1TNZwGWUghPxJXU+Mqps0YRQ=="],
|
||||
|
||||
"@alloc/quick-lru": ["@alloc/quick-lru@5.2.0", "", {}, "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw=="],
|
||||
|
||||
@@ -3737,20 +3739,22 @@
|
||||
|
||||
"@ai-sdk/amazon-bedrock/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="],
|
||||
|
||||
"@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
|
||||
|
||||
"@ai-sdk/azure/@ai-sdk/openai": ["@ai-sdk/openai@2.0.71", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-tg+gj+R0z/On9P4V7hy7/7o04cQPjKGayMCL3gzWD/aNGjAKkhEnaocuNDidSnghizt8g2zJn16cAuAolnW+qQ=="],
|
||||
|
||||
"@ai-sdk/azure/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="],
|
||||
|
||||
"@ai-sdk/gateway/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="],
|
||||
|
||||
"@ai-sdk/google/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ypv1xXMsgGcNKUP+hglKqtdDuMg68nWHucPPAhIENrbFAI+xCHiqPVN8Zllxyv1TNZwGWUghPxJXU+Mqps0YRQ=="],
|
||||
|
||||
"@ai-sdk/google-vertex/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.50", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-21PaHfoLmouOXXNINTsZJsMw+wE5oLR2He/1kq/sKokTVKyq7ObGT1LDk6ahwxaz/GoaNaGankMh+EgVcdv2Cw=="],
|
||||
|
||||
"@ai-sdk/google-vertex/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ypv1xXMsgGcNKUP+hglKqtdDuMg68nWHucPPAhIENrbFAI+xCHiqPVN8Zllxyv1TNZwGWUghPxJXU+Mqps0YRQ=="],
|
||||
|
||||
"@ai-sdk/mcp/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="],
|
||||
|
||||
"@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
|
||||
|
||||
"@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
|
||||
|
||||
"@astrojs/cloudflare/vite": ["vite@6.4.1", "", { "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.4", "picomatch": "^4.0.2", "postcss": "^8.5.3", "rollup": "^4.34.9", "tinyglobby": "^0.2.13" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", "jiti": ">=1.21.0", "less": "*", "lightningcss": "^1.21.0", "sass": "*", "sass-embedded": "*", "stylus": "*", "sugarss": "*", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["@types/node", "jiti", "less", "lightningcss", "sass", "sass-embedded", "stylus", "sugarss", "terser", "tsx", "yaml"], "bin": { "vite": "bin/vite.js" } }, "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g=="],
|
||||
|
||||
"@astrojs/markdown-remark/@astrojs/internal-helpers": ["@astrojs/internal-helpers@0.6.1", "", {}, "sha512-l5Pqf6uZu31aG+3Lv8nl/3s4DbUzdlxTWDof4pEpto6GUJNhhCbelVi9dEyurOVyqaelwmS9oSyOWOENSfgo9A=="],
|
||||
@@ -4619,8 +4623,6 @@
|
||||
|
||||
"jsonwebtoken/jws/jwa": ["jwa@1.4.2", "", { "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw=="],
|
||||
|
||||
"opencode/@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ypv1xXMsgGcNKUP+hglKqtdDuMg68nWHucPPAhIENrbFAI+xCHiqPVN8Zllxyv1TNZwGWUghPxJXU+Mqps0YRQ=="],
|
||||
|
||||
"opencode/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="],
|
||||
|
||||
"opencode/@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="],
|
||||
|
||||
6
flake.lock
generated
6
flake.lock
generated
@@ -2,11 +2,11 @@
|
||||
"nodes": {
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1764384123,
|
||||
"narHash": "sha256-UoliURDJFaOolycBZYrjzd9Cc66zULEyHqGFH3QHEq0=",
|
||||
"lastModified": 1764445028,
|
||||
"narHash": "sha256-ik6H/0Zl+qHYDKTXFPpzuVHSZE+uvVz2XQuQd1IVXzo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "59b6c96beacc898566c9be1052ae806f3835f87d",
|
||||
"rev": "a09378c0108815dbf3961a0e085936f4146ec415",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
{
|
||||
"nodeModules": "sha256-+PJZG5jNxBGkxblpnNa4lvfBi9YEvHaGQRE0+avNwHY="
|
||||
"nodeModules": "sha256-jLrT8GVq0Fh34tN1MPgJpPKd9SGhOauaBl8f1oZ/XgI="
|
||||
}
|
||||
|
||||
@@ -50,6 +50,8 @@
|
||||
"@ai-sdk/mcp": "0.0.8",
|
||||
"@ai-sdk/openai": "2.0.71",
|
||||
"@ai-sdk/openai-compatible": "1.0.27",
|
||||
"@ai-sdk/provider": "2.0.0",
|
||||
"@ai-sdk/provider-utils": "3.0.18",
|
||||
"@clack/prompts": "1.0.0-alpha.1",
|
||||
"@hono/standard-validator": "0.1.5",
|
||||
"@hono/zod-validator": "catalog:",
|
||||
|
||||
@@ -843,6 +843,7 @@ export function Prompt(props: PromptProps) {
|
||||
justifyContent={status().type === "retry" ? "space-between" : "flex-start"}
|
||||
>
|
||||
<box flexShrink={0} flexDirection="row" gap={1}>
|
||||
{/* @ts-ignore // SpinnerOptions doesn't support marginLeft */}
|
||||
<spinner marginLeft={1} color={spinnerDef().color} frames={spinnerDef().frames} interval={40} />
|
||||
<box flexDirection="row" gap={1} flexShrink={0}>
|
||||
{(() => {
|
||||
|
||||
@@ -23,6 +23,7 @@ import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
|
||||
import { createOpenAI } from "@ai-sdk/openai"
|
||||
import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
|
||||
import { createOpenRouter } from "@openrouter/ai-sdk-provider"
|
||||
import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/openai-compatible/src"
|
||||
|
||||
export namespace Provider {
|
||||
const log = Log.create({ service: "provider" })
|
||||
@@ -37,6 +38,8 @@ export namespace Provider {
|
||||
"@ai-sdk/openai": createOpenAI,
|
||||
"@ai-sdk/openai-compatible": createOpenAICompatible,
|
||||
"@openrouter/ai-sdk-provider": createOpenRouter,
|
||||
// @ts-ignore (TODO: kill this code so we dont have to maintain it)
|
||||
"@ai-sdk/github-copilot": createGitHubCopilotOpenAICompatible,
|
||||
}
|
||||
|
||||
type CustomLoader = (provider: ModelsDev.Provider) => Promise<{
|
||||
@@ -87,6 +90,30 @@ export namespace Provider {
|
||||
options: {},
|
||||
}
|
||||
},
|
||||
"github-copilot": async () => {
|
||||
return {
|
||||
autoload: false,
|
||||
async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
|
||||
if (modelID.includes("gpt-5")) {
|
||||
return sdk.responses(modelID)
|
||||
}
|
||||
return sdk.chat(modelID)
|
||||
},
|
||||
options: {},
|
||||
}
|
||||
},
|
||||
"github-copilot-enterprise": async () => {
|
||||
return {
|
||||
autoload: false,
|
||||
async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
|
||||
if (modelID.includes("gpt-5")) {
|
||||
return sdk.responses(modelID)
|
||||
}
|
||||
return sdk.chat(modelID)
|
||||
},
|
||||
options: {},
|
||||
}
|
||||
},
|
||||
azure: async () => {
|
||||
return {
|
||||
autoload: false,
|
||||
@@ -428,15 +455,6 @@ export namespace Provider {
|
||||
}
|
||||
}
|
||||
|
||||
// load custom
|
||||
for (const [providerID, fn] of Object.entries(CUSTOM_LOADERS)) {
|
||||
if (disabled.has(providerID)) continue
|
||||
const result = await fn(database[providerID])
|
||||
if (result && (result.autoload || providers[providerID])) {
|
||||
mergeProvider(providerID, result.options ?? {}, "custom", result.getModel)
|
||||
}
|
||||
}
|
||||
|
||||
for (const plugin of await Plugin.list()) {
|
||||
if (!plugin.auth) continue
|
||||
const providerID = plugin.auth.provider
|
||||
@@ -478,6 +496,14 @@ export namespace Provider {
|
||||
}
|
||||
}
|
||||
|
||||
for (const [providerID, fn] of Object.entries(CUSTOM_LOADERS)) {
|
||||
if (disabled.has(providerID)) continue
|
||||
const result = await fn(database[providerID])
|
||||
if (result && (result.autoload || providers[providerID])) {
|
||||
mergeProvider(providerID, result.options ?? {}, "custom", result.getModel)
|
||||
}
|
||||
}
|
||||
|
||||
// load config
|
||||
for (const [providerID, provider] of configProviders) {
|
||||
mergeProvider(providerID, provider.options ?? {}, "config")
|
||||
@@ -489,6 +515,10 @@ export namespace Provider {
|
||||
continue
|
||||
}
|
||||
|
||||
if (providerID === "github-copilot") {
|
||||
provider.info.npm = "@ai-sdk/github-copilot"
|
||||
}
|
||||
|
||||
const configProvider = config.provider?.[providerID]
|
||||
const filteredModels = Object.fromEntries(
|
||||
Object.entries(provider.info.models)
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
This is a temporary package used primarily for GitHub Copilot compatibility.
|
||||
|
||||
Avoid making changes to these files unless you intend the change to affect only the Copilot provider.
|
||||
|
||||
Also, this package should ONLY be used for the Copilot provider.
|
||||
@@ -0,0 +1,2 @@
|
||||
export { createOpenaiCompatible, openaiCompatible } from "./openai-compatible-provider"
|
||||
export type { OpenaiCompatibleProvider, OpenaiCompatibleProviderSettings } from "./openai-compatible-provider"
|
||||
@@ -0,0 +1,100 @@
|
||||
import type { LanguageModelV2 } from "@ai-sdk/provider"
|
||||
import { OpenAICompatibleChatLanguageModel } from "@ai-sdk/openai-compatible"
|
||||
import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils"
|
||||
import { OpenAIResponsesLanguageModel } from "./responses/openai-responses-language-model"
|
||||
|
||||
// Version string appended to the User-Agent suffix of outgoing requests.
// NOTE(review): hard-coded here rather than imported from package.json — confirm it is kept in sync.
const VERSION = "0.1.0"

// Model IDs are free-form strings; the set of Copilot model names is not enumerable here.
export type OpenaiCompatibleModelId = string
|
||||
|
||||
/**
 * Configuration for `createOpenaiCompatible`.
 * All fields are optional; defaults target the public OpenAI API.
 */
export interface OpenaiCompatibleProviderSettings {
  /**
   * API key for authenticating requests.
   * When set, it is sent as an `Authorization: Bearer …` header.
   */
  apiKey?: string

  /**
   * Base URL for the OpenAI Compatible API calls.
   * Defaults to `https://api.openai.com/v1`; a trailing slash is stripped.
   */
  baseURL?: string

  /**
   * Name of the provider, used as the `provider` prefix on created models
   * (e.g. `<name>.chat`). Defaults to `openai-compatible`.
   */
  name?: string

  /**
   * Custom headers to include in the requests.
   * On key collision these override the default Authorization header.
   */
  headers?: Record<string, string>

  /**
   * Custom fetch implementation (e.g. for proxying or testing).
   */
  fetch?: FetchFunction
}
|
||||
|
||||
/**
 * An OpenAI-compatible provider instance.
 * Calling it directly is equivalent to calling `chat(modelId)`.
 */
export interface OpenaiCompatibleProvider {
  (modelId: OpenaiCompatibleModelId): LanguageModelV2

  /** Chat Completions-style language model. */
  chat(modelId: OpenaiCompatibleModelId): LanguageModelV2

  /** Responses API-style language model. */
  responses(modelId: OpenaiCompatibleModelId): LanguageModelV2

  /** AI SDK entry point; resolves to the chat model. */
  languageModel(modelId: OpenaiCompatibleModelId): LanguageModelV2

  // embeddingModel(modelId: any): EmbeddingModelV2

  // imageModel(modelId: any): ImageModelV2
}
|
||||
|
||||
/**
|
||||
* Create an OpenAI Compatible provider instance.
|
||||
*/
|
||||
export function createOpenaiCompatible(options: OpenaiCompatibleProviderSettings = {}): OpenaiCompatibleProvider {
|
||||
const baseURL = withoutTrailingSlash(options.baseURL ?? "https://api.openai.com/v1")
|
||||
|
||||
if (!baseURL) {
|
||||
throw new Error("baseURL is required")
|
||||
}
|
||||
|
||||
// Merge headers: defaults first, then user overrides
|
||||
const headers = {
|
||||
// Default OpenAI Compatible headers (can be overridden by user)
|
||||
...(options.apiKey && { Authorization: `Bearer ${options.apiKey}` }),
|
||||
...options.headers,
|
||||
}
|
||||
|
||||
const getHeaders = () => withUserAgentSuffix(headers, `ai-sdk/openai-compatible/${VERSION}`)
|
||||
|
||||
const createChatModel = (modelId: OpenaiCompatibleModelId) => {
|
||||
return new OpenAICompatibleChatLanguageModel(modelId, {
|
||||
provider: `${options.name ?? "openai-compatible"}.chat`,
|
||||
headers: getHeaders,
|
||||
url: ({ path }) => `${baseURL}${path}`,
|
||||
fetch: options.fetch,
|
||||
})
|
||||
}
|
||||
|
||||
const createResponsesModel = (modelId: OpenaiCompatibleModelId) => {
|
||||
return new OpenAIResponsesLanguageModel(modelId, {
|
||||
provider: `${options.name ?? "openai-compatible"}.responses`,
|
||||
headers: getHeaders,
|
||||
url: ({ path }) => `${baseURL}${path}`,
|
||||
fetch: options.fetch,
|
||||
})
|
||||
}
|
||||
|
||||
const createLanguageModel = (modelId: OpenaiCompatibleModelId) => createChatModel(modelId)
|
||||
|
||||
const provider = function (modelId: OpenaiCompatibleModelId) {
|
||||
return createChatModel(modelId)
|
||||
}
|
||||
|
||||
provider.languageModel = createLanguageModel
|
||||
provider.chat = createChatModel
|
||||
provider.responses = createResponsesModel
|
||||
|
||||
return provider as OpenaiCompatibleProvider
|
||||
}
|
||||
|
||||
// Default OpenAI Compatible provider instance (public OpenAI base URL, no API key).
export const openaiCompatible = createOpenaiCompatible()
|
||||
@@ -0,0 +1,303 @@
|
||||
import {
|
||||
type LanguageModelV2CallWarning,
|
||||
type LanguageModelV2Prompt,
|
||||
type LanguageModelV2ToolCallPart,
|
||||
UnsupportedFunctionalityError,
|
||||
} from "@ai-sdk/provider"
|
||||
import { convertToBase64, parseProviderOptions } from "@ai-sdk/provider-utils"
|
||||
import { z } from "zod/v4"
|
||||
import type { OpenAIResponsesInput, OpenAIResponsesReasoning } from "./openai-responses-api-types"
|
||||
import { localShellInputSchema, localShellOutputSchema } from "./tool/local-shell"
|
||||
|
||||
/**
|
||||
* Check if a string is a file ID based on the given prefixes
|
||||
* Returns false if prefixes is undefined (disables file ID detection)
|
||||
*/
|
||||
function isFileId(data: string, prefixes?: readonly string[]): boolean {
|
||||
if (!prefixes) return false
|
||||
return prefixes.some((prefix) => data.startsWith(prefix))
|
||||
}
|
||||
|
||||
/**
 * Convert an AI SDK prompt into the Responses API `input` item list.
 *
 * Walks the prompt messages (system / user / assistant / tool) and emits the
 * corresponding Responses API input items, collecting non-fatal issues as
 * warnings instead of throwing.
 *
 * @param prompt - AI SDK prompt to convert.
 * @param systemMessageMode - How system messages are forwarded: as `system`,
 *   as `developer`, or dropped entirely (`remove`, with a warning).
 * @param fileIdPrefixes - Prefixes identifying file IDs (see `isFileId`);
 *   when undefined, file data is always inlined as base64.
 * @param store - Whether the conversation is stored server-side; when true,
 *   reasoning and provider-executed tool results are sent as item references.
 * @param hasLocalShellTool - Whether the `local_shell` built-in tool is
 *   active, enabling local-shell call/output items.
 * @returns The converted `input` array plus any warnings produced.
 */
export async function convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode,
  fileIdPrefixes,
  store,
  hasLocalShellTool = false,
}: {
  prompt: LanguageModelV2Prompt
  systemMessageMode: "system" | "developer" | "remove"
  fileIdPrefixes?: readonly string[]
  store: boolean
  hasLocalShellTool?: boolean
}): Promise<{
  input: OpenAIResponsesInput
  warnings: Array<LanguageModelV2CallWarning>
}> {
  const input: OpenAIResponsesInput = []
  const warnings: Array<LanguageModelV2CallWarning> = []

  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        switch (systemMessageMode) {
          case "system": {
            input.push({ role: "system", content })
            break
          }
          case "developer": {
            input.push({ role: "developer", content })
            break
          }
          case "remove": {
            warnings.push({
              type: "other",
              message: "system messages are removed for this model",
            })
            break
          }
          default: {
            // Exhaustiveness check: fails to compile if a new mode is added unhandled.
            const _exhaustiveCheck: never = systemMessageMode
            throw new Error(`Unsupported system message mode: ${_exhaustiveCheck}`)
          }
        }
        break
      }

      case "user": {
        input.push({
          role: "user",
          content: content.map((part, index) => {
            switch (part.type) {
              case "text": {
                return { type: "input_text", text: part.text }
              }
              case "file": {
                // Images: prefer a URL, then a file ID, then inline base64 data.
                if (part.mediaType.startsWith("image/")) {
                  // "image/*" carries no concrete subtype; default to JPEG.
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType

                  return {
                    type: "input_image",
                    ...(part.data instanceof URL
                      ? { image_url: part.data.toString() }
                      : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes)
                        ? { file_id: part.data }
                        : {
                            image_url: `data:${mediaType};base64,${convertToBase64(part.data)}`,
                          }),
                    detail: part.providerOptions?.openai?.imageDetail,
                  }
                } else if (part.mediaType === "application/pdf") {
                  if (part.data instanceof URL) {
                    return {
                      type: "input_file",
                      file_url: part.data.toString(),
                    }
                  }
                  return {
                    type: "input_file",
                    ...(typeof part.data === "string" && isFileId(part.data, fileIdPrefixes)
                      ? { file_id: part.data }
                      : {
                          filename: part.filename ?? `part-${index}.pdf`,
                          file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`,
                        }),
                  }
                } else {
                  // Only images and PDFs are supported as user file parts.
                  throw new UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`,
                  })
                }
              }
            }
          }),
        })

        break
      }

      case "assistant": {
        // Reasoning items already emitted, keyed by reasoning item ID.
        const reasoningMessages: Record<string, OpenAIResponsesReasoning> = {}
        // NOTE(review): collected but never read in this function — confirm it is still needed.
        const toolCallParts: Record<string, LanguageModelV2ToolCallPart> = {}

        for (const part of content) {
          switch (part.type) {
            case "text": {
              input.push({
                role: "assistant",
                content: [{ type: "output_text", text: part.text }],
                id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
              })
              break
            }
            case "tool-call": {
              toolCallParts[part.toolCallId] = part

              // Provider-executed calls are replayed via their results, not re-sent here.
              if (part.providerExecuted) {
                break
              }

              if (hasLocalShellTool && part.toolName === "local_shell") {
                const parsedInput = localShellInputSchema.parse(part.input)
                input.push({
                  type: "local_shell_call",
                  call_id: part.toolCallId,
                  id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
                  action: {
                    type: "exec",
                    command: parsedInput.action.command,
                    timeout_ms: parsedInput.action.timeoutMs,
                    user: parsedInput.action.user,
                    working_directory: parsedInput.action.workingDirectory,
                    env: parsedInput.action.env,
                  },
                })

                break
              }

              input.push({
                type: "function_call",
                call_id: part.toolCallId,
                name: part.toolName,
                arguments: JSON.stringify(part.input),
                id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
              })
              break
            }

            // assistant tool result parts are from provider-executed tools:
            case "tool-result": {
              if (store) {
                // use item references to refer to tool results from built-in tools
                input.push({ type: "item_reference", id: part.toolCallId })
              } else {
                warnings.push({
                  type: "other",
                  message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`,
                })
              }

              break
            }

            case "reasoning": {
              const providerOptions = await parseProviderOptions({
                provider: "openai",
                providerOptions: part.providerOptions,
                schema: openaiResponsesReasoningProviderOptionsSchema,
              })

              const reasoningId = providerOptions?.itemId

              if (reasoningId != null) {
                const reasoningMessage = reasoningMessages[reasoningId]

                if (store) {
                  if (reasoningMessage === undefined) {
                    // use item references to refer to reasoning (single reference)
                    input.push({ type: "item_reference", id: reasoningId })

                    // store unused reasoning message to mark id as used
                    reasoningMessages[reasoningId] = {
                      type: "reasoning",
                      id: reasoningId,
                      summary: [],
                    }
                  }
                } else {
                  const summaryParts: Array<{
                    type: "summary_text"
                    text: string
                  }> = []

                  if (part.text.length > 0) {
                    summaryParts.push({
                      type: "summary_text",
                      text: part.text,
                    })
                  } else if (reasoningMessage !== undefined) {
                    warnings.push({
                      type: "other",
                      message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`,
                    })
                  }

                  if (reasoningMessage === undefined) {
                    // First part for this ID: emit a new reasoning item (kept by
                    // reference so later parts can append to its summary).
                    reasoningMessages[reasoningId] = {
                      type: "reasoning",
                      id: reasoningId,
                      encrypted_content: providerOptions?.reasoningEncryptedContent,
                      summary: summaryParts,
                    }
                    input.push(reasoningMessages[reasoningId])
                  } else {
                    reasoningMessage.summary.push(...summaryParts)
                  }
                }
              } else {
                warnings.push({
                  type: "other",
                  message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`,
                })
              }
              break
            }
          }
        }

        break
      }

      case "tool": {
        for (const part of content) {
          const output = part.output

          if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
            input.push({
              type: "local_shell_call_output",
              call_id: part.toolCallId,
              output: localShellOutputSchema.parse(output.value).output,
            })
            // NOTE(review): this `break` exits the whole tool-part loop, dropping any
            // later parts in the same message — confirm `continue` was not intended.
            break
          }

          let contentValue: string
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value
              break
            case "content":
            case "json":
            case "error-json":
              contentValue = JSON.stringify(output.value)
              break
          }

          input.push({
            type: "function_call_output",
            call_id: part.toolCallId,
            output: contentValue,
          })
        }

        break
      }

      default: {
        // Exhaustiveness check over message roles.
        const _exhaustiveCheck: never = role
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`)
      }
    }
  }

  return { input, warnings }
}
|
||||
|
||||
// Provider options carried on reasoning parts: the Responses API item ID and,
// when store=false, the encrypted reasoning content to replay.
const openaiResponsesReasoningProviderOptionsSchema = z.object({
  itemId: z.string().nullish(),
  reasoningEncryptedContent: z.string().nullish(),
})

export type OpenAIResponsesReasoningProviderOptions = z.infer<typeof openaiResponsesReasoningProviderOptionsSchema>
|
||||
@@ -0,0 +1,22 @@
|
||||
import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
|
||||
|
||||
export function mapOpenAIResponseFinishReason({
|
||||
finishReason,
|
||||
hasFunctionCall,
|
||||
}: {
|
||||
finishReason: string | null | undefined
|
||||
// flag that checks if there have been client-side tool calls (not executed by openai)
|
||||
hasFunctionCall: boolean
|
||||
}): LanguageModelV2FinishReason {
|
||||
switch (finishReason) {
|
||||
case undefined:
|
||||
case null:
|
||||
return hasFunctionCall ? "tool-calls" : "stop"
|
||||
case "max_output_tokens":
|
||||
return "length"
|
||||
case "content_filter":
|
||||
return "content-filter"
|
||||
default:
|
||||
return hasFunctionCall ? "tool-calls" : "unknown"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
import type { FetchFunction } from "@ai-sdk/provider-utils"
|
||||
|
||||
/**
 * Shared configuration consumed by the Responses language model.
 */
export type OpenAIConfig = {
  // Provider identifier, e.g. "openai-compatible.responses".
  provider: string
  // Builds the full request URL for a given model and API path.
  url: (options: { modelId: string; path: string }) => string
  // Returns request headers; evaluated per request so values can rotate.
  headers: () => Record<string, string | undefined>
  // Optional custom fetch implementation.
  fetch?: FetchFunction
  // Optional ID generator — presumably for generated item/call IDs; TODO confirm usage.
  generateId?: () => string
  /**
   * File ID prefixes used to identify file IDs in Responses API.
   * When undefined, all file data is treated as base64 content.
   *
   * Examples:
   * - OpenAI: ['file-'] for IDs like 'file-abc123'
   * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
   */
  fileIdPrefixes?: readonly string[]
}
|
||||
@@ -0,0 +1,22 @@
|
||||
import { z } from "zod/v4"
|
||||
import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"
|
||||
|
||||
/**
 * Shape of an OpenAI-style error response body.
 */
export const openaiErrorDataSchema = z.object({
  error: z.object({
    message: z.string(),

    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
})

export type OpenAIErrorData = z.infer<typeof openaiErrorDataSchema>

// Turns failed HTTP responses into Errors using the schema above.
// NOTE(review): annotated `any` — presumably to avoid leaking provider-utils
// handler types across package boundaries; confirm before tightening.
export const openaiFailedResponseHandler: any = createJsonErrorResponseHandler({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message,
})
|
||||
@@ -0,0 +1,207 @@
|
||||
import type { JSONSchema7 } from "@ai-sdk/provider"
|
||||
|
||||
// The Responses API request `input` is an ordered list of items.
export type OpenAIResponsesInput = Array<OpenAIResponsesInputItem>

// Union of every item kind this client can place in `input`.
export type OpenAIResponsesInputItem =
  | OpenAIResponsesSystemMessage
  | OpenAIResponsesUserMessage
  | OpenAIResponsesAssistantMessage
  | OpenAIResponsesFunctionCall
  | OpenAIResponsesFunctionCallOutput
  | OpenAIResponsesComputerCall
  | OpenAIResponsesLocalShellCall
  | OpenAIResponsesLocalShellCallOutput
  | OpenAIResponsesReasoning
  | OpenAIResponsesItemReference

// Values accepted by the Responses API `include` request parameter.
export type OpenAIResponsesIncludeValue =
  | "web_search_call.action.sources"
  | "code_interpreter_call.outputs"
  | "computer_call_output.output.image_url"
  | "file_search_call.results"
  | "message.input_image.image_url"
  | "message.output_text.logprobs"
  | "reasoning.encrypted_content"

export type OpenAIResponsesIncludeOptions = Array<OpenAIResponsesIncludeValue> | undefined | null
|
||||
|
||||
// System-level instructions; the role is chosen by systemMessageMode.
export type OpenAIResponsesSystemMessage = {
  role: "system" | "developer"
  content: string
}

// User message: text plus image/file parts, each given by URL, file ID,
// or inline (base64) data.
export type OpenAIResponsesUserMessage = {
  role: "user"
  content: Array<
    | { type: "input_text"; text: string }
    | { type: "input_image"; image_url: string }
    | { type: "input_image"; file_id: string }
    | { type: "input_file"; file_url: string }
    | { type: "input_file"; filename: string; file_data: string }
    | { type: "input_file"; file_id: string }
  >
}

// Prior assistant output replayed into the conversation; `id` is the
// original response item ID when known.
export type OpenAIResponsesAssistantMessage = {
  role: "assistant"
  content: Array<{ type: "output_text"; text: string }>
  id?: string
}

// A client-side tool invocation; `arguments` is JSON-encoded.
export type OpenAIResponsesFunctionCall = {
  type: "function_call"
  call_id: string
  name: string
  arguments: string
  id?: string
}

// Result of a client-side tool invocation, matched by `call_id`.
export type OpenAIResponsesFunctionCallOutput = {
  type: "function_call_output"
  call_id: string
  output: string
}

export type OpenAIResponsesComputerCall = {
  type: "computer_call"
  id: string
  status?: string
}

// Invocation of the built-in local_shell tool (exec action).
export type OpenAIResponsesLocalShellCall = {
  type: "local_shell_call"
  id: string
  call_id: string
  action: {
    type: "exec"
    command: string[]
    timeout_ms?: number
    user?: string
    working_directory?: string
    env?: Record<string, string>
  }
}

// Output of a local_shell call, matched by `call_id`.
export type OpenAIResponsesLocalShellCallOutput = {
  type: "local_shell_call_output"
  call_id: string
  output: string
}

// Reference to a server-stored item by ID (used when store=true).
export type OpenAIResponsesItemReference = {
  type: "item_reference"
  id: string
}
|
||||
|
||||
/**
 * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
 */
export type OpenAIResponsesFileSearchToolComparisonFilter = {
  /**
   * The key to compare against the value.
   */
  key: string

  /**
   * Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
   */
  type: "eq" | "ne" | "gt" | "gte" | "lt" | "lte"

  /**
   * The value to compare against the attribute key; supports string, number, or boolean types.
   */
  value: string | number | boolean
}

/**
 * Combine multiple filters using `and` or `or`.
 */
export type OpenAIResponsesFileSearchToolCompoundFilter = {
  /**
   * Type of operation: `and` or `or`.
   */
  type: "and" | "or"

  /**
   * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter (nested).
   */
  filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>
}
|
||||
|
||||
export type OpenAIResponsesTool =
|
||||
| {
|
||||
type: "function"
|
||||
name: string
|
||||
description: string | undefined
|
||||
parameters: JSONSchema7
|
||||
strict: boolean | undefined
|
||||
}
|
||||
| {
|
||||
type: "web_search"
|
||||
filters: { allowed_domains: string[] | undefined } | undefined
|
||||
search_context_size: "low" | "medium" | "high" | undefined
|
||||
user_location:
|
||||
| {
|
||||
type: "approximate"
|
||||
city?: string
|
||||
country?: string
|
||||
region?: string
|
||||
timezone?: string
|
||||
}
|
||||
| undefined
|
||||
}
|
||||
| {
|
||||
type: "web_search_preview"
|
||||
search_context_size: "low" | "medium" | "high" | undefined
|
||||
user_location:
|
||||
| {
|
||||
type: "approximate"
|
||||
city?: string
|
||||
country?: string
|
||||
region?: string
|
||||
timezone?: string
|
||||
}
|
||||
| undefined
|
||||
}
|
||||
| {
|
||||
type: "code_interpreter"
|
||||
container: string | { type: "auto"; file_ids: string[] | undefined }
|
||||
}
|
||||
| {
|
||||
type: "file_search"
|
||||
vector_store_ids: string[]
|
||||
max_num_results: number | undefined
|
||||
ranking_options: { ranker?: string; score_threshold?: number } | undefined
|
||||
filters: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter | undefined
|
||||
}
|
||||
| {
|
||||
type: "image_generation"
|
||||
background: "auto" | "opaque" | "transparent" | undefined
|
||||
input_fidelity: "low" | "high" | undefined
|
||||
input_image_mask:
|
||||
| {
|
||||
file_id: string | undefined
|
||||
image_url: string | undefined
|
||||
}
|
||||
| undefined
|
||||
model: string | undefined
|
||||
moderation: "auto" | undefined
|
||||
output_compression: number | undefined
|
||||
output_format: "png" | "jpeg" | "webp" | undefined
|
||||
partial_images: number | undefined
|
||||
quality: "auto" | "low" | "medium" | "high" | undefined
|
||||
size: "auto" | "1024x1024" | "1024x1536" | "1536x1024" | undefined
|
||||
}
|
||||
| {
|
||||
type: "local_shell"
|
||||
}
|
||||
|
||||
export type OpenAIResponsesReasoning = {
|
||||
type: "reasoning"
|
||||
id: string
|
||||
encrypted_content?: string | null
|
||||
summary: Array<{
|
||||
type: "summary_text"
|
||||
text: string
|
||||
}>
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,177 @@
|
||||
import {
|
||||
type LanguageModelV2CallOptions,
|
||||
type LanguageModelV2CallWarning,
|
||||
UnsupportedFunctionalityError,
|
||||
} from "@ai-sdk/provider"
|
||||
import { codeInterpreterArgsSchema } from "./tool/code-interpreter"
|
||||
import { fileSearchArgsSchema } from "./tool/file-search"
|
||||
import { webSearchArgsSchema } from "./tool/web-search"
|
||||
import { webSearchPreviewArgsSchema } from "./tool/web-search-preview"
|
||||
import { imageGenerationArgsSchema } from "./tool/image-generation"
|
||||
import type { OpenAIResponsesTool } from "./openai-responses-api-types"
|
||||
|
||||
export function prepareResponsesTools({
|
||||
tools,
|
||||
toolChoice,
|
||||
strictJsonSchema,
|
||||
}: {
|
||||
tools: LanguageModelV2CallOptions["tools"]
|
||||
toolChoice?: LanguageModelV2CallOptions["toolChoice"]
|
||||
strictJsonSchema: boolean
|
||||
}): {
|
||||
tools?: Array<OpenAIResponsesTool>
|
||||
toolChoice?:
|
||||
| "auto"
|
||||
| "none"
|
||||
| "required"
|
||||
| { type: "file_search" }
|
||||
| { type: "web_search_preview" }
|
||||
| { type: "web_search" }
|
||||
| { type: "function"; name: string }
|
||||
| { type: "code_interpreter" }
|
||||
| { type: "image_generation" }
|
||||
toolWarnings: LanguageModelV2CallWarning[]
|
||||
} {
|
||||
// when the tools array is empty, change it to undefined to prevent errors:
|
||||
tools = tools?.length ? tools : undefined
|
||||
|
||||
const toolWarnings: LanguageModelV2CallWarning[] = []
|
||||
|
||||
if (tools == null) {
|
||||
return { tools: undefined, toolChoice: undefined, toolWarnings }
|
||||
}
|
||||
|
||||
const openaiTools: Array<OpenAIResponsesTool> = []
|
||||
|
||||
for (const tool of tools) {
|
||||
switch (tool.type) {
|
||||
case "function":
|
||||
openaiTools.push({
|
||||
type: "function",
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
parameters: tool.inputSchema,
|
||||
strict: strictJsonSchema,
|
||||
})
|
||||
break
|
||||
case "provider-defined": {
|
||||
switch (tool.id) {
|
||||
case "openai.file_search": {
|
||||
const args = fileSearchArgsSchema.parse(tool.args)
|
||||
|
||||
openaiTools.push({
|
||||
type: "file_search",
|
||||
vector_store_ids: args.vectorStoreIds,
|
||||
max_num_results: args.maxNumResults,
|
||||
ranking_options: args.ranking
|
||||
? {
|
||||
ranker: args.ranking.ranker,
|
||||
score_threshold: args.ranking.scoreThreshold,
|
||||
}
|
||||
: undefined,
|
||||
filters: args.filters,
|
||||
})
|
||||
|
||||
break
|
||||
}
|
||||
case "openai.local_shell": {
|
||||
openaiTools.push({
|
||||
type: "local_shell",
|
||||
})
|
||||
break
|
||||
}
|
||||
case "openai.web_search_preview": {
|
||||
const args = webSearchPreviewArgsSchema.parse(tool.args)
|
||||
openaiTools.push({
|
||||
type: "web_search_preview",
|
||||
search_context_size: args.searchContextSize,
|
||||
user_location: args.userLocation,
|
||||
})
|
||||
break
|
||||
}
|
||||
case "openai.web_search": {
|
||||
const args = webSearchArgsSchema.parse(tool.args)
|
||||
openaiTools.push({
|
||||
type: "web_search",
|
||||
filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : undefined,
|
||||
search_context_size: args.searchContextSize,
|
||||
user_location: args.userLocation,
|
||||
})
|
||||
break
|
||||
}
|
||||
case "openai.code_interpreter": {
|
||||
const args = codeInterpreterArgsSchema.parse(tool.args)
|
||||
openaiTools.push({
|
||||
type: "code_interpreter",
|
||||
container:
|
||||
args.container == null
|
||||
? { type: "auto", file_ids: undefined }
|
||||
: typeof args.container === "string"
|
||||
? args.container
|
||||
: { type: "auto", file_ids: args.container.fileIds },
|
||||
})
|
||||
break
|
||||
}
|
||||
case "openai.image_generation": {
|
||||
const args = imageGenerationArgsSchema.parse(tool.args)
|
||||
openaiTools.push({
|
||||
type: "image_generation",
|
||||
background: args.background,
|
||||
input_fidelity: args.inputFidelity,
|
||||
input_image_mask: args.inputImageMask
|
||||
? {
|
||||
file_id: args.inputImageMask.fileId,
|
||||
image_url: args.inputImageMask.imageUrl,
|
||||
}
|
||||
: undefined,
|
||||
model: args.model,
|
||||
moderation: args.moderation,
|
||||
partial_images: args.partialImages,
|
||||
quality: args.quality,
|
||||
output_compression: args.outputCompression,
|
||||
output_format: args.outputFormat,
|
||||
size: args.size,
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
default:
|
||||
toolWarnings.push({ type: "unsupported-tool", tool })
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (toolChoice == null) {
|
||||
return { tools: openaiTools, toolChoice: undefined, toolWarnings }
|
||||
}
|
||||
|
||||
const type = toolChoice.type
|
||||
|
||||
switch (type) {
|
||||
case "auto":
|
||||
case "none":
|
||||
case "required":
|
||||
return { tools: openaiTools, toolChoice: type, toolWarnings }
|
||||
case "tool":
|
||||
return {
|
||||
tools: openaiTools,
|
||||
toolChoice:
|
||||
toolChoice.toolName === "code_interpreter" ||
|
||||
toolChoice.toolName === "file_search" ||
|
||||
toolChoice.toolName === "image_generation" ||
|
||||
toolChoice.toolName === "web_search_preview" ||
|
||||
toolChoice.toolName === "web_search"
|
||||
? { type: toolChoice.toolName }
|
||||
: { type: "function", name: toolChoice.toolName },
|
||||
toolWarnings,
|
||||
}
|
||||
default: {
|
||||
const _exhaustiveCheck: never = type
|
||||
throw new UnsupportedFunctionalityError({
|
||||
functionality: `tool choice type: ${_exhaustiveCheck}`,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
/** Model identifier accepted by the OpenAI Responses API (free-form string). */
export type OpenAIResponsesModelId = string
|
||||
@@ -0,0 +1,88 @@
|
||||
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
|
||||
import { z } from "zod/v4"
|
||||
|
||||
/** Runtime validation for code-interpreter tool-call input streamed from the API. */
export const codeInterpreterInputSchema = z.object({
  code: z.string().nullish(),
  containerId: z.string(),
})

/** Runtime validation for code-interpreter results (log and/or image outputs). */
export const codeInterpreterOutputSchema = z.object({
  outputs: z
    .array(
      z.discriminatedUnion("type", [
        z.object({ type: z.literal("logs"), logs: z.string() }),
        z.object({ type: z.literal("image"), url: z.string() }),
      ]),
    )
    .nullish(),
})

/** Runtime validation for user-provided tool configuration (`container`). */
export const codeInterpreterArgsSchema = z.object({
  container: z
    .union([
      z.string(),
      z.object({
        fileIds: z.array(z.string()).optional(),
      }),
    ])
    .optional(),
})

type CodeInterpreterArgs = {
  /**
   * The code interpreter container.
   * Can be a container ID
   * or an object that specifies uploaded file IDs to make available to your code.
   */
  container?: string | { fileIds?: string[] }
}

/**
 * Factory for the `openai.code_interpreter` provider-defined tool.
 * Type parameters: tool-call input, tool result, and user-facing args.
 */
export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
  {
    /**
     * The code to run, or null if not available.
     */
    code?: string | null

    /**
     * The ID of the container used to run the code.
     */
    containerId: string
  },
  {
    /**
     * The outputs generated by the code interpreter, such as logs or images.
     * Can be null if no outputs are available.
     */
    outputs?: Array<
      | {
          type: "logs"

          /**
           * The logs output from the code interpreter.
           */
          logs: string
        }
      | {
          type: "image"

          /**
           * The URL of the image output from the code interpreter.
           */
          url: string
        }
    > | null
  },
  CodeInterpreterArgs
>({
  id: "openai.code_interpreter",
  name: "code_interpreter",
  inputSchema: codeInterpreterInputSchema,
  outputSchema: codeInterpreterOutputSchema,
})
|
||||
|
||||
export const codeInterpreter = (
|
||||
args: CodeInterpreterArgs = {}, // default
|
||||
) => {
|
||||
return codeInterpreterToolFactory(args)
|
||||
}
|
||||
@@ -0,0 +1,128 @@
|
||||
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
|
||||
import type {
|
||||
OpenAIResponsesFileSearchToolComparisonFilter,
|
||||
OpenAIResponsesFileSearchToolCompoundFilter,
|
||||
} from "../openai-responses-api-types"
|
||||
import { z } from "zod/v4"
|
||||
|
||||
const comparisonFilterSchema = z.object({
|
||||
key: z.string(),
|
||||
type: z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
|
||||
value: z.union([z.string(), z.number(), z.boolean()]),
|
||||
})
|
||||
|
||||
const compoundFilterSchema: z.ZodType<any> = z.object({
|
||||
type: z.enum(["and", "or"]),
|
||||
filters: z.array(z.union([comparisonFilterSchema, z.lazy(() => compoundFilterSchema)])),
|
||||
})
|
||||
|
||||
/** Runtime validation for user-provided file-search tool configuration. */
export const fileSearchArgsSchema = z.object({
  vectorStoreIds: z.array(z.string()),
  maxNumResults: z.number().optional(),
  ranking: z
    .object({
      ranker: z.string().optional(),
      scoreThreshold: z.number().optional(),
    })
    .optional(),
  filters: z.union([comparisonFilterSchema, compoundFilterSchema]).optional(),
})

/** Runtime validation for file-search results returned by the API. */
export const fileSearchOutputSchema = z.object({
  queries: z.array(z.string()),
  results: z
    .array(
      z.object({
        attributes: z.record(z.string(), z.unknown()),
        fileId: z.string(),
        filename: z.string(),
        score: z.number(),
        text: z.string(),
      }),
    )
    .nullable(),
})
|
||||
|
||||
/**
 * The `openai.file_search` provider-defined tool: searches the configured
 * vector stores and returns scored text chunks.
 * Type parameters: tool-call input (none), tool result, user-facing args.
 */
export const fileSearch = createProviderDefinedToolFactoryWithOutputSchema<
  {},
  {
    /**
     * The search query to execute.
     */
    queries: string[]

    /**
     * The results of the file search tool call.
     */
    results:
      | null
      | {
          /**
           * Set of 16 key-value pairs that can be attached to an object.
           * This can be useful for storing additional information about the object
           * in a structured format, and querying for objects via API or the dashboard.
           * Keys are strings with a maximum length of 64 characters.
           * Values are strings with a maximum length of 512 characters, booleans, or numbers.
           */
          attributes: Record<string, unknown>

          /**
           * The unique ID of the file.
           */
          fileId: string

          /**
           * The name of the file.
           */
          filename: string

          /**
           * The relevance score of the file - a value between 0 and 1.
           */
          score: number

          /**
           * The text that was retrieved from the file.
           */
          text: string
        }[]
  },
  {
    /**
     * List of vector store IDs to search through.
     */
    vectorStoreIds: string[]

    /**
     * Maximum number of search results to return. Defaults to 10.
     */
    maxNumResults?: number

    /**
     * Ranking options for the search.
     */
    ranking?: {
      /**
       * The ranker to use for the file search.
       */
      ranker?: string

      /**
       * The score threshold for the file search, a number between 0 and 1.
       * Numbers closer to 1 will attempt to return only the most relevant results,
       * but may return fewer results.
       */
      scoreThreshold?: number
    }

    /**
     * A filter to apply.
     */
    filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter
  }
>({
  id: "openai.file_search",
  name: "file_search",
  inputSchema: z.object({}),
  outputSchema: fileSearchOutputSchema,
})
|
||||
@@ -0,0 +1,115 @@
|
||||
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
|
||||
import { z } from "zod/v4"
|
||||
|
||||
/** Runtime validation for user-provided image-generation tool configuration. */
export const imageGenerationArgsSchema = z
  .object({
    background: z.enum(["auto", "opaque", "transparent"]).optional(),
    inputFidelity: z.enum(["low", "high"]).optional(),
    inputImageMask: z
      .object({
        fileId: z.string().optional(),
        imageUrl: z.string().optional(),
      })
      .optional(),
    model: z.string().optional(),
    moderation: z.enum(["auto"]).optional(),
    outputCompression: z.number().int().min(0).max(100).optional(),
    outputFormat: z.enum(["png", "jpeg", "webp"]).optional(),
    partialImages: z.number().int().min(0).max(3).optional(),
    quality: z.enum(["auto", "low", "medium", "high"]).optional(),
    size: z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional(),
  })
  .strict() // reject unknown keys so configuration typos fail fast

/** Runtime validation for the tool result (base64-encoded image). */
export const imageGenerationOutputSchema = z.object({
  result: z.string(),
})

type ImageGenerationArgs = {
  /**
   * Background type for the generated image. Default is 'auto'.
   */
  background?: "auto" | "opaque" | "transparent"

  /**
   * Input fidelity for the generated image. Default is 'low'.
   */
  inputFidelity?: "low" | "high"

  /**
   * Optional mask for inpainting.
   * Contains image_url (string, optional) and file_id (string, optional).
   */
  inputImageMask?: {
    /**
     * File ID for the mask image.
     */
    fileId?: string

    /**
     * Base64-encoded mask image.
     */
    imageUrl?: string
  }

  /**
   * The image generation model to use. Default: gpt-image-1.
   */
  model?: string

  /**
   * Moderation level for the generated image. Default: auto.
   */
  moderation?: "auto"

  /**
   * Compression level for the output image. Default: 100.
   */
  outputCompression?: number

  /**
   * The output format of the generated image. One of png, webp, or jpeg.
   * Default: png
   */
  outputFormat?: "png" | "jpeg" | "webp"

  /**
   * Number of partial images to generate in streaming mode, from 0 (default value) to 3.
   */
  partialImages?: number

  /**
   * The quality of the generated image.
   * One of low, medium, high, or auto. Default: auto.
   */
  quality?: "auto" | "low" | "medium" | "high"

  /**
   * The size of the generated image.
   * One of 1024x1024, 1024x1536, 1536x1024, or auto.
   * Default: auto.
   */
  size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024"
}

/**
 * Factory for the `openai.image_generation` provider-defined tool.
 * Type parameters: tool-call input (none), tool result, user-facing args.
 */
const imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
  {},
  {
    /**
     * The generated image encoded in base64.
     */
    result: string
  },
  ImageGenerationArgs
>({
  id: "openai.image_generation",
  name: "image_generation",
  inputSchema: z.object({}),
  outputSchema: imageGenerationOutputSchema,
})
|
||||
|
||||
export const imageGeneration = (
|
||||
args: ImageGenerationArgs = {}, // default
|
||||
) => {
|
||||
return imageGenerationToolFactory(args)
|
||||
}
|
||||
@@ -0,0 +1,65 @@
|
||||
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
|
||||
import { z } from "zod/v4"
|
||||
|
||||
/** Runtime validation for local-shell tool-call input (the requested exec action). */
export const localShellInputSchema = z.object({
  action: z.object({
    type: z.literal("exec"),
    command: z.array(z.string()),
    timeoutMs: z.number().optional(),
    user: z.string().optional(),
    workingDirectory: z.string().optional(),
    env: z.record(z.string(), z.string()).optional(),
  }),
})

/** Runtime validation for the output reported back for a local-shell call. */
export const localShellOutputSchema = z.object({
  output: z.string(),
})

/**
 * The `openai.local_shell` provider-defined tool: the model requests shell
 * commands whose output is returned to it. Takes no user-facing args (`{}`).
 */
export const localShell = createProviderDefinedToolFactoryWithOutputSchema<
  {
    /**
     * Execute a shell command on the server.
     */
    action: {
      type: "exec"

      /**
       * The command to run.
       */
      command: string[]

      /**
       * Optional timeout in milliseconds for the command.
       */
      timeoutMs?: number

      /**
       * Optional user to run the command as.
       */
      user?: string

      /**
       * Optional working directory to run the command in.
       */
      workingDirectory?: string

      /**
       * Environment variables to set for the command.
       */
      env?: Record<string, string>
    }
  },
  {
    /**
     * The output of local shell tool call.
     */
    output: string
  },
  {}
>({
  id: "openai.local_shell",
  name: "local_shell",
  inputSchema: localShellInputSchema,
  outputSchema: localShellOutputSchema,
})
|
||||
@@ -0,0 +1,104 @@
|
||||
import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
|
||||
import { z } from "zod/v4"
|
||||
|
||||
// Args validation schema
/** Runtime validation for user-provided web-search-preview tool configuration. */
export const webSearchPreviewArgsSchema = z.object({
  /**
   * Search context size to use for the web search.
   * - high: Most comprehensive context, highest cost, slower response
   * - medium: Balanced context, cost, and latency (default)
   * - low: Least context, lowest cost, fastest response
   */
  searchContextSize: z.enum(["low", "medium", "high"]).optional(),

  /**
   * User location information to provide geographically relevant search results.
   */
  userLocation: z
    .object({
      /**
       * Type of location (always 'approximate')
       */
      type: z.literal("approximate"),
      /**
       * Two-letter ISO country code (e.g., 'US', 'GB')
       */
      country: z.string().optional(),
      /**
       * City name (free text, e.g., 'Minneapolis')
       */
      city: z.string().optional(),
      /**
       * Region name (free text, e.g., 'Minnesota')
       */
      region: z.string().optional(),
      /**
       * IANA timezone (e.g., 'America/Chicago')
       */
      timezone: z.string().optional(),
    })
    .optional(),
})

/**
 * The `openai.web_search_preview` provider-defined tool (preview variant of
 * web search). Type parameters: tool-call input, user-facing args.
 */
export const webSearchPreview = createProviderDefinedToolFactory<
  {
    // Web search doesn't take input parameters - it's controlled by the prompt
    // NOTE(review): the input type parameter is empty, yet `inputSchema` below
    // parses an optional `action` — confirm whether the generic should match.
  },
  {
    /**
     * Search context size to use for the web search.
     * - high: Most comprehensive context, highest cost, slower response
     * - medium: Balanced context, cost, and latency (default)
     * - low: Least context, lowest cost, fastest response
     */
    searchContextSize?: "low" | "medium" | "high"

    /**
     * User location information to provide geographically relevant search results.
     */
    userLocation?: {
      /**
       * Type of location (always 'approximate')
       */
      type: "approximate"
      /**
       * Two-letter ISO country code (e.g., 'US', 'GB')
       */
      country?: string
      /**
       * City name (free text, e.g., 'Minneapolis')
       */
      city?: string
      /**
       * Region name (free text, e.g., 'Minnesota')
       */
      region?: string
      /**
       * IANA timezone (e.g., 'America/Chicago')
       */
      timezone?: string
    }
  }
>({
  id: "openai.web_search_preview",
  name: "web_search_preview",
  inputSchema: z.object({
    action: z
      .discriminatedUnion("type", [
        z.object({
          type: z.literal("search"),
          query: z.string().nullish(),
        }),
        z.object({
          type: z.literal("open_page"),
          url: z.string(),
        }),
        z.object({
          type: z.literal("find"),
          url: z.string(),
          pattern: z.string(),
        }),
      ])
      .nullish(),
  }),
})
|
||||
@@ -0,0 +1,103 @@
|
||||
import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
|
||||
import { z } from "zod/v4"
|
||||
|
||||
/** Runtime validation for user-provided web-search tool configuration. */
export const webSearchArgsSchema = z.object({
  filters: z
    .object({
      allowedDomains: z.array(z.string()).optional(),
    })
    .optional(),

  searchContextSize: z.enum(["low", "medium", "high"]).optional(),

  userLocation: z
    .object({
      type: z.literal("approximate"),
      country: z.string().optional(),
      city: z.string().optional(),
      region: z.string().optional(),
      timezone: z.string().optional(),
    })
    .optional(),
})

/**
 * Factory for the `openai.web_search` provider-defined tool.
 * Type parameters: tool-call input, user-facing args.
 */
export const webSearchToolFactory = createProviderDefinedToolFactory<
  {
    // Web search doesn't take input parameters - it's controlled by the prompt
    // NOTE(review): the input type parameter is empty, yet `inputSchema` below
    // parses an optional `action` — confirm whether the generic should match.
  },
  {
    /**
     * Filters for the search.
     */
    filters?: {
      /**
       * Allowed domains for the search.
       * If not provided, all domains are allowed.
       * Subdomains of the provided domains are allowed as well.
       */
      allowedDomains?: string[]
    }

    /**
     * Search context size to use for the web search.
     * - high: Most comprehensive context, highest cost, slower response
     * - medium: Balanced context, cost, and latency (default)
     * - low: Least context, lowest cost, fastest response
     */
    searchContextSize?: "low" | "medium" | "high"

    /**
     * User location information to provide geographically relevant search results.
     */
    userLocation?: {
      /**
       * Type of location (always 'approximate')
       */
      type: "approximate"
      /**
       * Two-letter ISO country code (e.g., 'US', 'GB')
       */
      country?: string
      /**
       * City name (free text, e.g., 'Minneapolis')
       */
      city?: string
      /**
       * Region name (free text, e.g., 'Minnesota')
       */
      region?: string
      /**
       * IANA timezone (e.g., 'America/Chicago')
       */
      timezone?: string
    }
  }
>({
  id: "openai.web_search",
  name: "web_search",
  inputSchema: z.object({
    action: z
      .discriminatedUnion("type", [
        z.object({
          type: z.literal("search"),
          query: z.string().nullish(),
        }),
        z.object({
          type: z.literal("open_page"),
          url: z.string(),
        }),
        z.object({
          type: z.literal("find"),
          url: z.string(),
          pattern: z.string(),
        }),
      ])
      .nullish(),
  }),
})
|
||||
|
||||
export const webSearch = (
|
||||
args: Parameters<typeof webSearchToolFactory>[0] = {}, // default
|
||||
) => {
|
||||
return webSearchToolFactory(args)
|
||||
}
|
||||
@@ -24,4 +24,4 @@
|
||||
"typescript": "catalog:",
|
||||
"@typescript/native-preview": "catalog:"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,4 +26,4 @@
|
||||
"publishConfig": {
|
||||
"directory": "dist"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user