Compare commits

..

15 Commits

Author SHA1 Message Date
Kit Langton
8a5a6852ca fix(format): set stdin/stdout/stderr to ignore for formatter subprocesses
ChildProcess.make defaults to "pipe" for all stdio streams. Formatters
that wait on stdin EOF or write enough output to fill pipe buffers would
hang. Matches the original Process.spawn behavior.
2026-03-27 19:43:39 -04:00
Kit Langton
09ce30c433 fix(format): restore format test dependencies 2026-03-27 16:58:11 -04:00
Kit Langton
c713781c88 Merge branch 'dev' into kit/format-child-process-spawner 2026-03-27 16:52:31 -04:00
Kit Langton
7f9eda6201 refactor(format): use ChildProcessSpawner instead of Process.spawn
Replace raw Process.spawn with Effect's ChildProcessSpawner in the
Format service. formatFile now returns an Effect instead of a Promise,
composing natively with the layer.
2026-03-27 16:40:47 -04:00
James Long
4b9660b211 refactor(core): move more responsibility to workspace routing (#19455) 2026-03-27 16:33:56 -04:00
Kit Langton
e5f0e813b6 refactor(session): effectify Session service (#19449) 2026-03-27 16:25:47 -04:00
Aiden Cline
c33d9996f0 feat: AI SDK v6 support (#18433) 2026-03-27 15:24:30 -05:00
Sebastian
7a7643c86a no theme override in dev (#19456) 2026-03-27 20:21:15 +00:00
Aiden Cline
6f5b70e681 tweak: add additional overflow error patterns (#19446) 2026-03-27 15:19:51 -05:00
Sebastian
ff13524a53 fix flaky plugin tests (no mock.module for bun) (#19445) 2026-03-27 20:55:03 +01:00
Kit Langton
e973bbf54a fix(app): default file tree to closed with minimum width (#19426) 2026-03-27 14:11:50 -04:00
Kit Langton
d36b38e4a6 fix(desktop-electron): match dev dock icon inset on macOS (#19429) 2026-03-27 17:32:05 +00:00
Burak Yigit Kaya
bdd7829c68 fix(app): resize layout viewport when mobile keyboard appears (#15841) 2026-03-27 11:39:13 -05:00
Shoubhit Dash
a93374c48f fix(ui): make streamed markdown feel more continuous (#19404) 2026-03-27 22:06:47 +05:30
Adam
af2ccc94eb chore(app): more spacing controls 2026-03-27 11:22:28 -05:00
62 changed files with 2228 additions and 1818 deletions

View File

@@ -1,6 +1,5 @@
{
"$schema": "https://opencode.ai/tui.json",
"theme": "smoke-theme",
"plugin": [
[
"./plugins/tui-smoke.tsx",

246
bun.lock
View File

@@ -142,9 +142,9 @@
"name": "@opencode-ai/console-function",
"version": "1.3.3",
"dependencies": {
"@ai-sdk/anthropic": "2.0.0",
"@ai-sdk/openai": "2.0.2",
"@ai-sdk/openai-compatible": "1.0.1",
"@ai-sdk/anthropic": "3.0.64",
"@ai-sdk/openai": "3.0.48",
"@ai-sdk/openai-compatible": "2.0.37",
"@hono/zod-validator": "catalog:",
"@openauthjs/openauth": "0.0.0-20250322224806",
"@opencode-ai/console-core": "workspace:*",
@@ -305,25 +305,25 @@
"@actions/core": "1.11.1",
"@actions/github": "6.0.1",
"@agentclientprotocol/sdk": "0.14.1",
"@ai-sdk/amazon-bedrock": "3.0.82",
"@ai-sdk/anthropic": "2.0.65",
"@ai-sdk/azure": "2.0.91",
"@ai-sdk/cerebras": "1.0.36",
"@ai-sdk/cohere": "2.0.22",
"@ai-sdk/deepinfra": "1.0.36",
"@ai-sdk/gateway": "2.0.30",
"@ai-sdk/google": "2.0.54",
"@ai-sdk/google-vertex": "3.0.106",
"@ai-sdk/groq": "2.0.34",
"@ai-sdk/mistral": "2.0.27",
"@ai-sdk/openai": "2.0.89",
"@ai-sdk/openai-compatible": "1.0.32",
"@ai-sdk/perplexity": "2.0.23",
"@ai-sdk/provider": "2.0.1",
"@ai-sdk/provider-utils": "3.0.21",
"@ai-sdk/togetherai": "1.0.34",
"@ai-sdk/vercel": "1.0.33",
"@ai-sdk/xai": "2.0.51",
"@ai-sdk/amazon-bedrock": "4.0.83",
"@ai-sdk/anthropic": "3.0.64",
"@ai-sdk/azure": "3.0.49",
"@ai-sdk/cerebras": "2.0.41",
"@ai-sdk/cohere": "3.0.27",
"@ai-sdk/deepinfra": "2.0.41",
"@ai-sdk/gateway": "3.0.80",
"@ai-sdk/google": "3.0.53",
"@ai-sdk/google-vertex": "4.0.95",
"@ai-sdk/groq": "3.0.31",
"@ai-sdk/mistral": "3.0.27",
"@ai-sdk/openai": "3.0.48",
"@ai-sdk/openai-compatible": "2.0.37",
"@ai-sdk/perplexity": "3.0.26",
"@ai-sdk/provider": "3.0.8",
"@ai-sdk/provider-utils": "4.0.21",
"@ai-sdk/togetherai": "2.0.41",
"@ai-sdk/vercel": "2.0.39",
"@ai-sdk/xai": "3.0.74",
"@aws-sdk/credential-providers": "3.993.0",
"@clack/prompts": "1.0.0-alpha.1",
"@effect/platform-node": "catalog:",
@@ -337,7 +337,7 @@
"@opencode-ai/script": "workspace:*",
"@opencode-ai/sdk": "workspace:*",
"@opencode-ai/util": "workspace:*",
"@openrouter/ai-sdk-provider": "1.5.4",
"@openrouter/ai-sdk-provider": "2.3.3",
"@opentui/core": "0.1.90",
"@opentui/solid": "0.1.90",
"@parcel/watcher": "2.5.1",
@@ -347,7 +347,7 @@
"@standard-schema/spec": "1.0.0",
"@zip.js/zip.js": "2.7.62",
"ai": "catalog:",
"ai-gateway-provider": "2.3.1",
"ai-gateway-provider": "3.1.2",
"bonjour-service": "1.3.0",
"bun-pty": "0.4.8",
"chokidar": "4.0.3",
@@ -358,7 +358,7 @@
"drizzle-orm": "catalog:",
"effect": "catalog:",
"fuzzysort": "3.1.0",
"gitlab-ai-provider": "5.3.3",
"gitlab-ai-provider": "6.0.0",
"glob": "13.0.5",
"google-auth-library": "10.5.0",
"gray-matter": "4.0.3",
@@ -599,10 +599,10 @@
"tree-sitter-bash",
],
"patchedDependencies": {
"@openrouter/ai-sdk-provider@1.5.4": "patches/@openrouter%2Fai-sdk-provider@1.5.4.patch",
"solid-js@1.9.10": "patches/solid-js@1.9.10.patch",
"@ai-sdk/xai@2.0.51": "patches/@ai-sdk%2Fxai@2.0.51.patch",
"@standard-community/standard-openapi@0.2.9": "patches/@standard-community%2Fstandard-openapi@0.2.9.patch",
"@ai-sdk/anthropic@3.0.64": "patches/@ai-sdk%2Fanthropic@3.0.64.patch",
"@ai-sdk/provider-utils@4.0.21": "patches/@ai-sdk%2Fprovider-utils@4.0.21.patch",
},
"overrides": {
"@types/bun": "catalog:",
@@ -629,7 +629,7 @@
"@types/node": "22.13.9",
"@types/semver": "7.7.1",
"@typescript/native-preview": "7.0.0-dev.20251207.1",
"ai": "5.0.124",
"ai": "6.0.138",
"diff": "8.0.2",
"dompurify": "3.3.1",
"drizzle-kit": "1.0.0-beta.19-d95b7a4",
@@ -673,51 +673,51 @@
"@agentclientprotocol/sdk": ["@agentclientprotocol/sdk@0.14.1", "", { "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" } }, "sha512-b6r3PS3Nly+Wyw9U+0nOr47bV8tfS476EgyEMhoKvJCZLbgqoDFN7DJwkxL88RR0aiOqOYV1ZnESHqb+RmdH8w=="],
"@ai-sdk/amazon-bedrock": ["@ai-sdk/amazon-bedrock@3.0.82", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.65", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21", "@smithy/eventstream-codec": "^4.0.1", "@smithy/util-utf8": "^4.0.0", "aws4fetch": "^1.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-yb1EkRCMWex0tnpHPLGQxoJEiJvMGOizuxzlXFOpuGFiYgE679NsWE/F8pHwtoAWsqLlylgGAJvJDIJ8us8LEw=="],
"@ai-sdk/amazon-bedrock": ["@ai-sdk/amazon-bedrock@4.0.83", "", { "dependencies": { "@ai-sdk/anthropic": "3.0.64", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21", "@smithy/eventstream-codec": "^4.0.1", "@smithy/util-utf8": "^4.0.0", "aws4fetch": "^1.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-DoRpvIWGU/r83UeJAM9L93Lca8Kf/yP5fIhfEOltMPGP/PXrGe0BZaz0maLSRn8djJ6+HzWIsgu5ZI6bZqXEXg=="],
"@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-uyyaO4KhxoIKZztREqLPh+6/K3ZJx/rp72JKoUEL9/kC+vfQTThUfPnY/bUryUpcnawx8IY/tSoYNOi/8PCv7w=="],
"@ai-sdk/anthropic": ["@ai-sdk/anthropic@3.0.64", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-rwLi/Rsuj2pYniQXIrvClHvXDzgM4UQHHnvHTWEF14efnlKclG/1ghpNC+adsRujAbCTr6gRsSbDE2vEqriV7g=="],
"@ai-sdk/azure": ["@ai-sdk/azure@2.0.91", "", { "dependencies": { "@ai-sdk/openai": "2.0.89", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-9tznVSs6LGQNKKxb8pKd7CkBV9yk+a/ENpFicHCj2CmBUKefxzwJ9JbUqrlK3VF6dGZw3LXq0dWxt7/Yekaj1w=="],
"@ai-sdk/azure": ["@ai-sdk/azure@3.0.49", "", { "dependencies": { "@ai-sdk/openai": "3.0.48", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-wskgAL+OmrHG7by/iWIxEBQCEdc1mDudha/UZav46i0auzdFfsDB/k2rXZaC4/3nWSgMZkxr0W3ncyouEGX/eg=="],
"@ai-sdk/cerebras": ["@ai-sdk/cerebras@1.0.36", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-zoJYL33+ieyd86FSP0Whm86D79d1lKPR7wUzh1SZ1oTxwYmsGyvIrmMf2Ll0JA9Ds2Es6qik4VaFCrjwGYRTIQ=="],
"@ai-sdk/cerebras": ["@ai-sdk/cerebras@2.0.41", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-kDMEpjaRdRXIUi1EH8WHwLRahyDTYv9SAJnP6VCCeq8X+tVqZbMLCqqxSG5dRknrI65ucjvzQt+FiDKTAa7AHg=="],
"@ai-sdk/cohere": ["@ai-sdk/cohere@2.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-yJ9kP5cEDJwo8qpITq5TQFD8YNfNtW+HbyvWwrKMbFzmiMvIZuk95HIaFXE7PCTuZsqMA05yYu+qX/vQ3rNKjA=="],
"@ai-sdk/cohere": ["@ai-sdk/cohere@3.0.27", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-OqcCq2PiFY1dbK/0Ck45KuvE8jfdxRuuAE9Y5w46dAk6U+9vPOeg1CDcmR+ncqmrYrhRl3nmyDttyDahyjCzAw=="],
"@ai-sdk/deepgram": ["@ai-sdk/deepgram@1.0.24", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-E+wzGPSa/XHmajO3WtX8mtq0ewy04tsHSpU6/SGwqbiykwWba/emi7ayZ4ir89s5OzbAen2g7T9zZiEchMfkHQ=="],
"@ai-sdk/deepgram": ["@ai-sdk/deepgram@2.0.24", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-htT1Y7vBN0cRu/1pGnhx6DNH3xaNr0o0MjDkmii48X2+6S/WkOzVNtMjn7V3vLWEQIWNio5vw1hG/F43K8WLHA=="],
"@ai-sdk/deepinfra": ["@ai-sdk/deepinfra@1.0.36", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.33", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-LndvRktEgY2IFu4peDJMEXcjhHEEFtM0upLx/J64kCpFHCifalXpK4PPSX3PVndnn0bJzvamO5+fc0z2ooqBZw=="],
"@ai-sdk/deepinfra": ["@ai-sdk/deepinfra@2.0.41", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-y6RoOP7DGWmDSiSxrUSt5p18sbz+Ixe5lMVPmdE7x+Tr5rlrzvftyHhjWHfqlAtoYERZTGFbP6tPW1OfQcrb4A=="],
"@ai-sdk/deepseek": ["@ai-sdk/deepseek@1.0.35", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-Qvh2yxL5zJS9RO/Bf12pyYBIDmn+9GR1hT6e28IYWQWnt2Xq0h9XGps6XagLAv3VYYFg8c/ozkWVd4kXLZ25HA=="],
"@ai-sdk/deepseek": ["@ai-sdk/deepseek@2.0.24", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4vOEekW4TAYVHN0qgiwoUOQZhguGwZBiEw8LDeUmpWBm07QkLRAtxYCaSoMiA4hZZojao5mj6NRGEBW1CnDPtg=="],
"@ai-sdk/elevenlabs": ["@ai-sdk/elevenlabs@1.0.24", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ee2At5jgV+SqC6nrtPq20iH7N/aN+O36LrA4gkzVM4cmhM7bvQKVkOXhC1XxG+wsYG6UZi3Nekoi8MEjNWuRrw=="],
"@ai-sdk/elevenlabs": ["@ai-sdk/elevenlabs@2.0.24", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-K+1YprVMO8R6vTcNhqTqUWhOzX5V/hEY0pFx9KQL0/+MJjOgRi6DcOLoNBd7ONcjxYTyiFLRfk/0a/pHTtSgFA=="],
"@ai-sdk/fireworks": ["@ai-sdk/fireworks@1.0.35", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.34", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-inUq29XvSVDer6JIeOkwAmCFxOtHPU0OZEhwaWoe3PI59naHIW4RIFA9wppLLV5fJI9WQcAfDKy0ZHW9nV3UJw=="],
"@ai-sdk/fireworks": ["@ai-sdk/fireworks@2.0.40", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.35", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ARjygiBQtVSgNBp3Sag+Bkwn68ub+cZPC05UpRGG+VY8/Q896K2yU1j4I0+S1eU0BQW/9DKbRG04d9Ayi2DUmA=="],
"@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.30", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-5Nrkj8B4MzkkOfjjA+Cs5pamkbkK4lI11bx80QV7TFcen/hWA8wEC+UVzwuM5H2zpekoNMjvl6GonHnR62XIZw=="],
"@ai-sdk/gateway": ["@ai-sdk/gateway@3.0.80", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-uM7kpZB5l977lW7+2X1+klBUxIZQ78+1a9jHlaHFEzcOcmmslTl3sdP0QqfuuBcO0YBM2gwOiqVdp8i4TRQYcw=="],
"@ai-sdk/google": ["@ai-sdk/google@2.0.54", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-VKguP0x/PUYpdQyuA/uy5pDGJy6reL0X/yDKxHfL207aCUXpFIBmyMhVs4US39dkEVhtmIFSwXauY0Pt170JRw=="],
"@ai-sdk/google": ["@ai-sdk/google@3.0.53", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-uz8tIlkDgQJG9Js2Wh9JHzd4kI9+hYJqf9XXJLx60vyN5mRIqhr49iwR5zGP5Gl8odp2PeR3Gh2k+5bh3Z1HHw=="],
"@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.106", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.65", "@ai-sdk/google": "2.0.54", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-f9sA66bmhgJoTwa+pHWFSdYxPa0lgdQ/MgYNxZptzVyGptoziTf1a9EIXEL3jiCD0qIBAg+IhDAaYalbvZaDqQ=="],
"@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@4.0.95", "", { "dependencies": { "@ai-sdk/anthropic": "3.0.64", "@ai-sdk/google": "3.0.53", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-xL44fHlTtDM7RLkMTgyqMfkfthA38JS91bbMaHItObIhte1PAIY936ZV1PLl/Z9A/oBAXjHWbXo5xDoHzB7LEg=="],
"@ai-sdk/groq": ["@ai-sdk/groq@2.0.34", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-wfCYkVgmVjxNA32T57KbLabVnv9aFUflJ4urJ7eWgTwbnmGQHElCTu+rJ3ydxkXSqxOkXPwMOttDm7XNrvPjmg=="],
"@ai-sdk/groq": ["@ai-sdk/groq@3.0.31", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-XbbugpnFmXGu2TlXiq8KUJskP6/VVbuFcnFIGDzDIB/Chg6XHsNnqrTF80Zxkh0Pd3+NvbM+2Uqrtsndk6bDAg=="],
"@ai-sdk/mistral": ["@ai-sdk/mistral@2.0.27", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-gaptHgaXjMw3+eA0Q4FABcsj5nQNP6EpFaGUR+Pj5WJy7Kn6mApl975/x57224MfeJIShNpt8wFKK3tvh5ewKg=="],
"@ai-sdk/mistral": ["@ai-sdk/mistral@3.0.27", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ZXe7nZQgliDdjz5ufH5RKpHWxbN72AzmzzKGbF/z+0K9GN5tUCnftrQRvTRFHA5jAzTapcm2BEevmGLVbMkW+A=="],
"@ai-sdk/openai": ["@ai-sdk/openai@2.0.2", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-D4zYz2uR90aooKQvX1XnS00Z7PkbrcY+snUvPfm5bCabTG7bzLrVtD56nJ5bSaZG8lmuOMfXpyiEEArYLyWPpw=="],
"@ai-sdk/openai": ["@ai-sdk/openai@3.0.48", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ALmj/53EXpcRqMbGpPJPP4UOSWw0q4VGpnDo7YctvsynjkrKDmoneDG/1a7VQnSPYHnJp6tTRMf5ZdxZ5whulg=="],
"@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.1", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-luHVcU+yKzwv3ekKgbP3v+elUVxb2Rt+8c6w9qi7g2NYG2/pEL21oIrnaEnc6UtTZLLZX9EFBcpq2N1FQKDIMw=="],
"@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@2.0.37", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-+POSFVcgiu47BK64dhsI6OpcDC0/VAE2ZSaXdXGNNhpC/ava++uSRJYks0k2bpfY0wwCTgpAWZsXn/dG2Yppiw=="],
"@ai-sdk/perplexity": ["@ai-sdk/perplexity@2.0.23", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-aiaRvnc6mhQZKhTTSXPCjPH8Iqr5D/PfCN1hgVP/3RGTBbJtsd9HemIBSABeSdAKbsMH/PwJxgnqH75HEamcBA=="],
"@ai-sdk/perplexity": ["@ai-sdk/perplexity@3.0.26", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-dXzrVsLR5f6tr+U04jq4AXoRroGFBTvODnLgss0SWbzNjGGQg3XqtQ9j7rCLo6o8qbYGuAHvqUrIpUCuiscuFg=="],
"@ai-sdk/provider": ["@ai-sdk/provider@2.0.1", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-KCUwswvsC5VsW2PWFqF8eJgSCu5Ysj7m1TxiHTVA6g7k360bk0RNQENT8KTMAYEs+8fWPD3Uu4dEmzGHc+jGng=="],
"@ai-sdk/provider": ["@ai-sdk/provider@3.0.8", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ=="],
"@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.21", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-veuMwTLxsgh31Jjn0SnBABnM1f7ebHhRWcV2ZuY3hP3iJDCZ8VXBaYqcHXoOQDqUXTCas08sKQcHyWK+zl882Q=="],
"@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.21", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-MtFUYI1/8mgDvRmaBDjbLJPFFrMG777AvSgyIFQtZHIMzm88R/12vYBBpnk7pfiWLFE1DSZzY4WDYzGbKAcmiw=="],
"@ai-sdk/togetherai": ["@ai-sdk/togetherai@1.0.34", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-jjJmJms6kdEc4nC3MDGFJfhV8F1ifY4nolV2dbnT7BM4ab+Wkskc0GwCsJ7G7WdRMk7xDbFh4he3DPL8KJ/cyA=="],
"@ai-sdk/togetherai": ["@ai-sdk/togetherai@2.0.41", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-k3p9e3k0/gpDDyTtvafsK4HYR4D/aUQW/kzCwWo1+CzdBU84i4L14gWISC/mv6tgSicMXHcEUd521fPufQwNlg=="],
"@ai-sdk/vercel": ["@ai-sdk/vercel@1.0.33", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-Qwjm+HdwKasu7L9bDUryBMGKDMscIEzMUkjw/33uGdJpktzyNW13YaNIObOZ2HkskqDMIQJSd4Ao2BBT8fEYLw=="],
"@ai-sdk/vercel": ["@ai-sdk/vercel@2.0.39", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-8eu3ljJpkCTP4ppcyYB+NcBrkcBoSOFthCSgk5VnjaxnDaOJFaxnPwfddM7wx3RwMk2CiK1O61Px/LlqNc7QkQ=="],
"@ai-sdk/xai": ["@ai-sdk/xai@2.0.51", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.30", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-AI3le03qiegkZvn9hpnpDwez49lOvQLj4QUBT8H41SMbrdTYOxn3ktTwrsSu90cNDdzKGMvoH0u2GHju1EdnCg=="],
"@ai-sdk/xai": ["@ai-sdk/xai@3.0.74", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HDDLsT+QrzE3c2QZLRV/HKAwMtXDb0PMDdk1PYUXLJ3r9Qv76zGKGyvJLX7Pu6c8TOHD1mwLrOVYrsTpC/eTMw=="],
"@alloc/quick-lru": ["@alloc/quick-lru@5.2.0", "", {}, "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw=="],
@@ -1455,9 +1455,7 @@
"@opencode-ai/web": ["@opencode-ai/web@workspace:packages/web"],
"@openrouter/ai-sdk-provider": ["@openrouter/ai-sdk-provider@1.5.4", "", { "dependencies": { "@openrouter/sdk": "^0.1.27" }, "peerDependencies": { "ai": "^5.0.0", "zod": "^3.24.1 || ^v4" } }, "sha512-xrSQPUIH8n9zuyYZR0XK7Ba0h2KsjJcMkxnwaYfmv13pKs3sDkjPzVPPhlhzqBGddHb5cFEwJ9VFuFeDcxCDSw=="],
"@openrouter/sdk": ["@openrouter/sdk@0.1.27", "", { "dependencies": { "zod": "^3.25.0 || ^4.0.0" } }, "sha512-RH//L10bSmc81q25zAZudiI4kNkLgxF2E+WU42vghp3N6TEvZ6F0jK7uT3tOxkEn91gzmMw9YVmDENy7SJsajQ=="],
"@openrouter/ai-sdk-provider": ["@openrouter/ai-sdk-provider@2.3.3", "", { "peerDependencies": { "ai": "^6.0.0", "zod": "^3.25.0 || ^4.0.0" } }, "sha512-4fVteGkVedc7fGoA9+qJs4tpYwALezMq14m2Sjub3KmyRlksCbK+WJf67NPdGem8+NZrV2tAN42A1NU3+SiV3w=="],
"@opentelemetry/api": ["@opentelemetry/api@1.9.0", "", {}, "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg=="],
@@ -2271,9 +2269,9 @@
"agentkeepalive": ["agentkeepalive@4.6.0", "", { "dependencies": { "humanize-ms": "^1.2.1" } }, "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ=="],
"ai": ["ai@5.0.124", "", { "dependencies": { "@ai-sdk/gateway": "2.0.30", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-Li6Jw9F9qsvFJXZPBfxj38ddP2iURCnMs96f9Q3OeQzrDVcl1hvtwSEAuxA/qmfh6SDV2ERqFUOFzigvr0697g=="],
"ai": ["ai@6.0.138", "", { "dependencies": { "@ai-sdk/gateway": "3.0.80", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-49OfPe0f5uxJ6jUdA5BBXjIinP6+ZdYfAtpF2aEH64GA5wPcxH2rf/TBUQQ0bbamBz/D+TLMV18xilZqOC+zaA=="],
"ai-gateway-provider": ["ai-gateway-provider@2.3.1", "", { "dependencies": { "@ai-sdk/provider": "^2.0.0", "@ai-sdk/provider-utils": "^3.0.19", "ai": "^5.0.116" }, "optionalDependencies": { "@ai-sdk/amazon-bedrock": "^3.0.71", "@ai-sdk/anthropic": "^2.0.56", "@ai-sdk/azure": "^2.0.90", "@ai-sdk/cerebras": "^1.0.33", "@ai-sdk/cohere": "^2.0.21", "@ai-sdk/deepgram": "^1.0.21", "@ai-sdk/deepseek": "^1.0.32", "@ai-sdk/elevenlabs": "^1.0.21", "@ai-sdk/fireworks": "^1.0.30", "@ai-sdk/google": "^2.0.51", "@ai-sdk/google-vertex": "3.0.90", "@ai-sdk/groq": "^2.0.33", "@ai-sdk/mistral": "^2.0.26", "@ai-sdk/openai": "^2.0.88", "@ai-sdk/perplexity": "^2.0.22", "@ai-sdk/xai": "^2.0.42", "@openrouter/ai-sdk-provider": "^1.5.3" }, "peerDependencies": { "@ai-sdk/openai-compatible": "^1.0.29" } }, "sha512-PqI6TVNEDNwr7kOhy7XUGnA8XJB1SpeA9aLqGjr0CyWkKgH+y+ofPm8MZGZ74DOwVejDF+POZq0Qs9jKEKUeYg=="],
"ai-gateway-provider": ["ai-gateway-provider@3.1.2", "", { "optionalDependencies": { "@ai-sdk/amazon-bedrock": "^4.0.62", "@ai-sdk/anthropic": "^3.0.46", "@ai-sdk/azure": "^3.0.31", "@ai-sdk/cerebras": "^2.0.34", "@ai-sdk/cohere": "^3.0.21", "@ai-sdk/deepgram": "^2.0.20", "@ai-sdk/deepseek": "^2.0.20", "@ai-sdk/elevenlabs": "^2.0.20", "@ai-sdk/fireworks": "^2.0.34", "@ai-sdk/google": "^3.0.30", "@ai-sdk/google-vertex": "^4.0.61", "@ai-sdk/groq": "^3.0.24", "@ai-sdk/mistral": "^3.0.20", "@ai-sdk/openai": "^3.0.30", "@ai-sdk/perplexity": "^3.0.19", "@ai-sdk/xai": "^3.0.57", "@openrouter/ai-sdk-provider": "^2.2.3" }, "peerDependencies": { "@ai-sdk/openai-compatible": "^2.0.0", "@ai-sdk/provider": "^3.0.0", "@ai-sdk/provider-utils": "^4.0.0", "ai": "^6.0.0" } }, "sha512-krGNnJSoO/gJ7Hbe5nQDlsBpDUGIBGtMQTRUaW7s1MylsfvLduba0TLWzQaGtOmNRkP0pGhtGlwsnS6FNQMlyw=="],
"ajv": ["ajv@8.18.0", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A=="],
@@ -3049,7 +3047,7 @@
"github-slugger": ["github-slugger@2.0.0", "", {}, "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="],
"gitlab-ai-provider": ["gitlab-ai-provider@5.3.3", "", { "dependencies": { "@anthropic-ai/sdk": "^0.71.0", "@anycable/core": "^0.9.2", "graphql-request": "^6.1.0", "isomorphic-ws": "^5.0.0", "openai": "^6.16.0", "socket.io-client": "^4.8.1", "vscode-jsonrpc": "^8.2.1", "zod": "^3.25.76" }, "peerDependencies": { "@ai-sdk/provider": ">=2.0.0", "@ai-sdk/provider-utils": ">=3.0.0" } }, "sha512-k0kRUoAhDvoRC28hQW4sPp+A3cfpT5c/oL9Ng10S0oBiF2Tci1AtsX1iclJM5Os8C1nIIAXBW8LMr0GY7rwcGA=="],
"gitlab-ai-provider": ["gitlab-ai-provider@6.0.0", "", { "dependencies": { "@anthropic-ai/sdk": "^0.71.0", "@anycable/core": "^0.9.2", "graphql-request": "^6.1.0", "isomorphic-ws": "^5.0.0", "openai": "^6.16.0", "socket.io-client": "^4.8.1", "vscode-jsonrpc": "^8.2.1", "zod": "^3.25.76" }, "peerDependencies": { "@ai-sdk/provider": ">=3.0.0", "@ai-sdk/provider-utils": ">=4.0.0" } }, "sha512-683GcJdrer/GhnljkbVcGsndCEhvGB8f9fUdCxQBlkuyt8rzf0G9DpSh+iMBYp9HpcSvYmYG0Qv5ks9dLrNxwQ=="],
"glob": ["glob@13.0.5", "", { "dependencies": { "minimatch": "^10.2.1", "minipass": "^7.1.2", "path-scurry": "^2.0.0" } }, "sha512-BzXxZg24Ibra1pbQ/zE7Kys4Ua1ks7Bn6pKLkVPZ9FZe4JQS6/Q7ef3LG1H+k7lUf5l4T3PLSyYyYJVYUvfgTw=="],
@@ -4799,63 +4797,21 @@
"@actions/http-client/undici": ["undici@6.23.0", "", {}, "sha512-VfQPToRA5FZs/qJxLIinmU59u0r7LXqoJkCzinq3ckNJp3vKEh7jTWN589YQ5+aoAC/TGRLyJLCPKcLQbM8r9g=="],
"@ai-sdk/amazon-bedrock/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.65", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HqTPP59mLQ9U6jXQcx6EORkdc5FyZu34Sitkg6jNpyMYcRjStvfx4+NWq/qaR+OTwBFcccv8hvVii0CYkH2Lag=="],
"@ai-sdk/amazon-bedrock/@smithy/eventstream-codec": ["@smithy/eventstream-codec@4.2.11", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@smithy/types": "^4.13.0", "@smithy/util-hex-encoding": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-Sf39Ml0iVX+ba/bgMPxaXWAAFmHqYLTmbjAPfLPLY8CrYkRDEqZdUsKC1OwVMCdJXfAt0v4j49GIJ8DoSYAe6w=="],
"@ai-sdk/anthropic/@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],
"@ai-sdk/amazon-bedrock/@smithy/util-utf8": ["@smithy/util-utf8@4.2.2", "", { "dependencies": { "@smithy/util-buffer-from": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw=="],
"@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
"@ai-sdk/deepgram/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.19", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg=="],
"@ai-sdk/azure/@ai-sdk/openai": ["@ai-sdk/openai@2.0.89", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw=="],
"@ai-sdk/deepseek/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.19", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg=="],
"@ai-sdk/azure/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/elevenlabs/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.19", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg=="],
"@ai-sdk/cerebras/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="],
"@ai-sdk/fireworks/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@2.0.35", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-g3wA57IAQFb+3j4YuFndgkUdXyRETZVvbfAWM+UX7bZSxA3xjes0v3XKgIdKdekPtDGsh4ZX2byHD0gJIMPfiA=="],
"@ai-sdk/cerebras/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/fireworks/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.19", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg=="],
"@ai-sdk/cohere/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/deepgram/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
"@ai-sdk/deepinfra/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.33", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-2KMcR2xAul3u5dGZD7gONgbIki3Hg7Ey+sFu7gsiJ4U2iRU0GDV3ccNq79dTuAEXPDFcOWCUpW8A8jXc0kxJxQ=="],
"@ai-sdk/deepseek/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
"@ai-sdk/elevenlabs/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
"@ai-sdk/fireworks/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.34", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-AnGoxVNZ/E3EU4lW12rrufI6riqL2cEv4jk3OrjJ/i54XwR0CJU1V26jXAwxb+Pc+uZmYG++HM+gzXxPQZkMNQ=="],
"@ai-sdk/fireworks/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
"@ai-sdk/gateway/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/google-vertex/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.65", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HqTPP59mLQ9U6jXQcx6EORkdc5FyZu34Sitkg6jNpyMYcRjStvfx4+NWq/qaR+OTwBFcccv8hvVii0CYkH2Lag=="],
"@ai-sdk/groq/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/mistral/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/openai/@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],
"@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
"@ai-sdk/openai-compatible/@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],
"@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
"@ai-sdk/perplexity/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/togetherai/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="],
"@ai-sdk/togetherai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/vercel/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="],
"@ai-sdk/vercel/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/xai/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.30", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-thubwhRtv9uicAxSWwNpinM7hiL/0CkhL/ymPaHuKvI494J7HIzn8KQZQ2ymRz284WTIZnI7VMyyejxW4RMM6w=="],
"@ai-sdk/xai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@astrojs/check/yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],
@@ -5329,16 +5285,6 @@
"accepts/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
"ai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"ai-gateway-provider/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.65", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HqTPP59mLQ9U6jXQcx6EORkdc5FyZu34Sitkg6jNpyMYcRjStvfx4+NWq/qaR+OTwBFcccv8hvVii0CYkH2Lag=="],
"ai-gateway-provider/@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.90", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.56", "@ai-sdk/google": "2.0.46", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-C9MLe1KZGg1ZbupV2osygHtL5qngyCDA6ATatunyfTbIe8TXKG8HGni/3O6ifbnI5qxTidIn150Ox7eIFZVMYg=="],
"ai-gateway-provider/@ai-sdk/openai": ["@ai-sdk/openai@2.0.89", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw=="],
"ai-gateway-provider/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.34", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-AnGoxVNZ/E3EU4lW12rrufI6riqL2cEv4jk3OrjJ/i54XwR0CJU1V26jXAwxb+Pc+uZmYG++HM+gzXxPQZkMNQ=="],
"ajv-keywords/ajv": ["ajv@6.14.0", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw=="],
"ansi-align/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
@@ -5557,12 +5503,6 @@
"nypm/tinyexec": ["tinyexec@1.0.2", "", {}, "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg=="],
"opencode/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.65", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HqTPP59mLQ9U6jXQcx6EORkdc5FyZu34Sitkg6jNpyMYcRjStvfx4+NWq/qaR+OTwBFcccv8hvVii0CYkH2Lag=="],
"opencode/@ai-sdk/openai": ["@ai-sdk/openai@2.0.89", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw=="],
"opencode/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="],
"opencode-gitlab-auth/open": ["open@10.2.0", "", { "dependencies": { "default-browser": "^5.2.1", "define-lazy-prop": "^3.0.0", "is-inside-container": "^1.0.0", "wsl-utils": "^0.1.0" } }, "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA=="],
"opencode-poe-auth/open": ["open@10.2.0", "", { "dependencies": { "default-browser": "^5.2.1", "define-lazy-prop": "^3.0.0", "is-inside-container": "^1.0.0", "wsl-utils": "^0.1.0" } }, "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA=="],
@@ -5739,16 +5679,6 @@
"@actions/github/@octokit/plugin-rest-endpoint-methods/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],
"@ai-sdk/anthropic/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/anthropic/@ai-sdk/provider-utils/zod-to-json-schema": ["zod-to-json-schema@3.25.1", "", { "peerDependencies": { "zod": "^3.25 || ^4" } }, "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA=="],
"@ai-sdk/azure/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/cerebras/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/cohere/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/deepgram/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/deepseek/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
@@ -5757,28 +5687,6 @@
"@ai-sdk/fireworks/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/gateway/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/groq/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/mistral/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/openai-compatible/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/openai-compatible/@ai-sdk/provider-utils/zod-to-json-schema": ["zod-to-json-schema@3.25.1", "", { "peerDependencies": { "zod": "^3.25 || ^4" } }, "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA=="],
"@ai-sdk/openai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/openai/@ai-sdk/provider-utils/zod-to-json-schema": ["zod-to-json-schema@3.25.1", "", { "peerDependencies": { "zod": "^3.25 || ^4" } }, "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA=="],
"@ai-sdk/perplexity/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/togetherai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/vercel/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@ai-sdk/xai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"@astrojs/check/yargs/cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],
"@astrojs/check/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
@@ -6211,20 +6119,6 @@
"accepts/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="],
"ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.56", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-XHJKu0Yvfu9SPzRfsAFESa+9T7f2YJY6TxykKMfRsAwpeWAiX/Gbx5J5uM15AzYC3Rw8tVP3oH+j7jEivENirQ=="],
"ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/google": ["@ai-sdk/google@2.0.46", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-8PK6u4sGE/kXebd7ZkTp+0aya4kNqzoqpS5m7cHY2NfTK6fhPc6GNvE+MZIZIoHQTp5ed86wGBdeBPpFaaUtyg=="],
"ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],
"ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.19", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-W41Wc9/jbUVXVwCN/7bWa4IKe8MtxO3EyA0Hfhx6grnmiYlCvpI8neSYWFE0zScXJkgA/YK3BRybzgyiXuu6JA=="],
"ai-gateway-provider/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"ai-gateway-provider/@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
"ai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"ajv-keywords/ajv/json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="],
"ansi-align/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
@@ -6321,10 +6215,6 @@
"opencode-poe-auth/open/wsl-utils": ["wsl-utils@0.1.0", "", { "dependencies": { "is-wsl": "^3.1.0" } }, "sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw=="],
"opencode/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"opencode/@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
"opencontrol/@modelcontextprotocol/sdk/express": ["express@5.2.1", "", { "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.1", "content-disposition": "^1.0.0", "content-type": "^1.0.5", "cookie": "^0.7.1", "cookie-signature": "^1.2.1", "debug": "^4.4.0", "depd": "^2.0.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "finalhandler": "^2.1.0", "fresh": "^2.0.0", "http-errors": "^2.0.0", "merge-descriptors": "^2.0.0", "mime-types": "^3.0.0", "on-finished": "^2.4.1", "once": "^1.4.0", "parseurl": "^1.3.3", "proxy-addr": "^2.0.7", "qs": "^6.14.0", "range-parser": "^1.2.1", "router": "^2.2.0", "send": "^1.1.0", "serve-static": "^2.2.0", "statuses": "^2.0.1", "type-is": "^2.0.1", "vary": "^1.1.2" } }, "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw=="],
"opencontrol/@modelcontextprotocol/sdk/express-rate-limit": ["express-rate-limit@7.5.1", "", { "peerDependencies": { "express": ">= 4.11" } }, "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw=="],
@@ -6581,12 +6471,6 @@
"@solidjs/start/shiki/@shikijs/engine-javascript/oniguruma-to-es": ["oniguruma-to-es@2.3.0", "", { "dependencies": { "emoji-regex-xs": "^1.0.0", "regex": "^5.1.1", "regex-recursion": "^5.1.1" } }, "sha512-bwALDxriqfKGfUufKGGepCzu9x7nJQuoRoAFp4AnwehhC2crqrDIAP/uN2qdlsAvSMpeRC3+Yzhqc7hLmle5+g=="],
"ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"ai-gateway-provider/@ai-sdk/openai-compatible/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"ai-gateway-provider/@ai-sdk/openai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"ansi-align/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
"app-builder-lib/@electron/get/fs-extra/universalify": ["universalify@0.1.2", "", {}, "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="],
@@ -6639,10 +6523,6 @@
"js-beautify/glob/path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="],
"opencode/@ai-sdk/openai-compatible/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"opencode/@ai-sdk/openai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
"opencontrol/@modelcontextprotocol/sdk/express/accepts": ["accepts@2.0.0", "", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="],
"opencontrol/@modelcontextprotocol/sdk/express/body-parser": ["body-parser@2.2.2", "", { "dependencies": { "bytes": "^3.1.2", "content-type": "^1.0.5", "debug": "^4.4.3", "http-errors": "^2.0.0", "iconv-lite": "^0.7.0", "on-finished": "^2.4.1", "qs": "^6.14.1", "raw-body": "^3.0.1", "type-is": "^2.0.1" } }, "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA=="],

View File

@@ -46,7 +46,7 @@
"drizzle-kit": "1.0.0-beta.19-d95b7a4",
"drizzle-orm": "1.0.0-beta.19-d95b7a4",
"effect": "4.0.0-beta.37",
"ai": "5.0.124",
"ai": "6.0.138",
"hono": "4.10.7",
"hono-openapi": "1.1.2",
"fuzzysort": "3.1.0",
@@ -113,8 +113,8 @@
},
"patchedDependencies": {
"@standard-community/standard-openapi@0.2.9": "patches/@standard-community%2Fstandard-openapi@0.2.9.patch",
"@openrouter/ai-sdk-provider@1.5.4": "patches/@openrouter%2Fai-sdk-provider@1.5.4.patch",
"@ai-sdk/xai@2.0.51": "patches/@ai-sdk%2Fxai@2.0.51.patch",
"solid-js@1.9.10": "patches/solid-js@1.9.10.patch"
"solid-js@1.9.10": "patches/solid-js@1.9.10.patch",
"@ai-sdk/provider-utils@4.0.21": "patches/@ai-sdk%2Fprovider-utils@4.0.21.patch",
"@ai-sdk/anthropic@3.0.64": "patches/@ai-sdk%2Fanthropic@3.0.64.patch"
}
}

View File

@@ -2,7 +2,7 @@
<html lang="en" style="background-color: var(--background-base)">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta name="viewport" content="width=device-width, initial-scale=1, interactive-widget=resizes-content" />
<title>OpenCode</title>
<link rel="icon" type="image/png" href="/favicon-96x96-v3.png" sizes="96x96" />
<link rel="icon" type="image/svg+xml" href="/favicon-v3.svg" />

View File

@@ -13,7 +13,8 @@ import { createScrollPersistence, type SessionScroll } from "./layout-scroll"
import { createPathHelpers } from "./file/path"
const AVATAR_COLOR_KEYS = ["pink", "mint", "orange", "purple", "cyan", "lime"] as const
const DEFAULT_PANEL_WIDTH = 344
const DEFAULT_SIDEBAR_WIDTH = 344
const DEFAULT_FILE_TREE_WIDTH = 200
const DEFAULT_SESSION_WIDTH = 600
const DEFAULT_TERMINAL_HEIGHT = 280
export type AvatarColorKey = (typeof AVATAR_COLOR_KEYS)[number]
@@ -161,11 +162,11 @@ export const { use: useLayout, provider: LayoutProvider } = createSimpleContext(
if (!isRecord(fileTree)) return fileTree
if (fileTree.tab === "changes" || fileTree.tab === "all") return fileTree
const width = typeof fileTree.width === "number" ? fileTree.width : DEFAULT_PANEL_WIDTH
const width = typeof fileTree.width === "number" ? fileTree.width : DEFAULT_FILE_TREE_WIDTH
return {
...fileTree,
opened: true,
width: width === 260 ? DEFAULT_PANEL_WIDTH : width,
width: width === 260 ? DEFAULT_FILE_TREE_WIDTH : width,
tab: "changes",
}
})()
@@ -230,7 +231,7 @@ export const { use: useLayout, provider: LayoutProvider } = createSimpleContext(
createStore({
sidebar: {
opened: false,
width: DEFAULT_PANEL_WIDTH,
width: DEFAULT_SIDEBAR_WIDTH,
workspaces: {} as Record<string, boolean>,
workspacesDefault: false,
},
@@ -243,8 +244,8 @@ export const { use: useLayout, provider: LayoutProvider } = createSimpleContext(
panelOpened: true,
},
fileTree: {
opened: true,
width: DEFAULT_PANEL_WIDTH,
opened: false,
width: DEFAULT_FILE_TREE_WIDTH,
tab: "changes" as "changes" | "all",
},
session: {
@@ -628,32 +629,32 @@ export const { use: useLayout, provider: LayoutProvider } = createSimpleContext(
},
fileTree: {
opened: createMemo(() => store.fileTree?.opened ?? true),
width: createMemo(() => store.fileTree?.width ?? DEFAULT_PANEL_WIDTH),
width: createMemo(() => store.fileTree?.width ?? DEFAULT_FILE_TREE_WIDTH),
tab: createMemo(() => store.fileTree?.tab ?? "changes"),
setTab(tab: "changes" | "all") {
if (!store.fileTree) {
setStore("fileTree", { opened: true, width: DEFAULT_PANEL_WIDTH, tab })
setStore("fileTree", { opened: true, width: DEFAULT_FILE_TREE_WIDTH, tab })
return
}
setStore("fileTree", "tab", tab)
},
open() {
if (!store.fileTree) {
setStore("fileTree", { opened: true, width: DEFAULT_PANEL_WIDTH, tab: "changes" })
setStore("fileTree", { opened: true, width: DEFAULT_FILE_TREE_WIDTH, tab: "changes" })
return
}
setStore("fileTree", "opened", true)
},
close() {
if (!store.fileTree) {
setStore("fileTree", { opened: false, width: DEFAULT_PANEL_WIDTH, tab: "changes" })
setStore("fileTree", { opened: false, width: DEFAULT_FILE_TREE_WIDTH, tab: "changes" })
return
}
setStore("fileTree", "opened", false)
},
toggle() {
if (!store.fileTree) {
setStore("fileTree", { opened: true, width: DEFAULT_PANEL_WIDTH, tab: "changes" })
setStore("fileTree", { opened: true, width: DEFAULT_FILE_TREE_WIDTH, tab: "changes" })
return
}
setStore("fileTree", "opened", (x) => !x)

View File

@@ -1640,6 +1640,15 @@ export default function Page() {
consumePendingMessage: layout.pendingMessage.consume,
})
createEffect(
on(
() => params.id,
(id) => {
if (!id) requestAnimationFrame(() => inputRef?.focus())
},
),
)
onMount(() => {
document.addEventListener("keydown", handleKeyDown)
})

View File

@@ -17,9 +17,9 @@
"@typescript/native-preview": "catalog:"
},
"dependencies": {
"@ai-sdk/anthropic": "2.0.0",
"@ai-sdk/openai": "2.0.2",
"@ai-sdk/openai-compatible": "1.0.1",
"@ai-sdk/anthropic": "3.0.64",
"@ai-sdk/openai": "3.0.48",
"@ai-sdk/openai-compatible": "2.0.37",
"@hono/zod-validator": "catalog:",
"@opencode-ai/console-core": "workspace:*",
"@opencode-ai/console-resource": "workspace:*",

View File

@@ -9,3 +9,6 @@ Here's the process I've been using to create icons:
The Image2Icon step is necessary as the `icon.icns` generated by `app-icon.png` does not apply the shadow/padding expected by macOS,
so app icons appear larger than expected.
For unpackaged Electron on macOS, `app.dock.setIcon()` should use a PNG. Keep `dock.png` in each channel folder synced with the
extracted `icon_128x128@2x.png` from that channel's `icon.icns` so the dev Dock icon matches the packaged app inset.

Binary file not shown.

After

Width:  |  Height:  |  Size: 33 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 49 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 38 KiB

View File

@@ -50,7 +50,8 @@ export function setTitlebar(win: BrowserWindow, theme: Partial<TitlebarTheme> =
export function setDockIcon() {
if (process.platform !== "darwin") return
app.dock?.setIcon(nativeImage.createFromPath(join(iconsDir(), "128x128@2x.png")))
const icon = nativeImage.createFromPath(join(iconsDir(), "dock.png"))
if (!icon.isEmpty()) app.dock?.setIcon(icon)
}
export function createMainWindow(globals: Globals) {

View File

@@ -68,25 +68,25 @@
"@actions/core": "1.11.1",
"@actions/github": "6.0.1",
"@agentclientprotocol/sdk": "0.14.1",
"@ai-sdk/amazon-bedrock": "3.0.82",
"@ai-sdk/anthropic": "2.0.65",
"@ai-sdk/azure": "2.0.91",
"@ai-sdk/cerebras": "1.0.36",
"@ai-sdk/cohere": "2.0.22",
"@ai-sdk/deepinfra": "1.0.36",
"@ai-sdk/gateway": "2.0.30",
"@ai-sdk/google": "2.0.54",
"@ai-sdk/google-vertex": "3.0.106",
"@ai-sdk/groq": "2.0.34",
"@ai-sdk/mistral": "2.0.27",
"@ai-sdk/openai": "2.0.89",
"@ai-sdk/openai-compatible": "1.0.32",
"@ai-sdk/perplexity": "2.0.23",
"@ai-sdk/provider": "2.0.1",
"@ai-sdk/provider-utils": "3.0.21",
"@ai-sdk/togetherai": "1.0.34",
"@ai-sdk/vercel": "1.0.33",
"@ai-sdk/xai": "2.0.51",
"@ai-sdk/amazon-bedrock": "4.0.83",
"@ai-sdk/anthropic": "3.0.64",
"@ai-sdk/azure": "3.0.49",
"@ai-sdk/cerebras": "2.0.41",
"@ai-sdk/cohere": "3.0.27",
"@ai-sdk/deepinfra": "2.0.41",
"@ai-sdk/gateway": "3.0.80",
"@ai-sdk/google": "3.0.53",
"@ai-sdk/google-vertex": "4.0.95",
"@ai-sdk/groq": "3.0.31",
"@ai-sdk/mistral": "3.0.27",
"@ai-sdk/openai": "3.0.48",
"@ai-sdk/openai-compatible": "2.0.37",
"@ai-sdk/perplexity": "3.0.26",
"@ai-sdk/provider": "3.0.8",
"@ai-sdk/provider-utils": "4.0.21",
"@ai-sdk/togetherai": "2.0.41",
"@ai-sdk/vercel": "2.0.39",
"@ai-sdk/xai": "3.0.74",
"@aws-sdk/credential-providers": "3.993.0",
"@clack/prompts": "1.0.0-alpha.1",
"@effect/platform-node": "catalog:",
@@ -100,7 +100,7 @@
"@opencode-ai/script": "workspace:*",
"@opencode-ai/sdk": "workspace:*",
"@opencode-ai/util": "workspace:*",
"@openrouter/ai-sdk-provider": "1.5.4",
"@openrouter/ai-sdk-provider": "2.3.3",
"@opentui/core": "0.1.90",
"@opentui/solid": "0.1.90",
"@parcel/watcher": "2.5.1",
@@ -110,7 +110,7 @@
"@standard-schema/spec": "1.0.0",
"@zip.js/zip.js": "2.7.62",
"ai": "catalog:",
"ai-gateway-provider": "2.3.1",
"ai-gateway-provider": "3.1.2",
"bonjour-service": "1.3.0",
"bun-pty": "0.4.8",
"chokidar": "4.0.3",
@@ -121,7 +121,7 @@
"drizzle-orm": "catalog:",
"effect": "catalog:",
"fuzzysort": "3.1.0",
"gitlab-ai-provider": "5.3.3",
"gitlab-ai-provider": "6.0.0",
"glob": "13.0.5",
"google-auth-library": "10.5.0",
"gray-matter": "4.0.3",

View File

@@ -212,8 +212,81 @@ Fully migrated (single namespace, InstanceState where needed, flattened facade):
Still open and likely worth migrating:
- [ ] `Session`
- [ ] `SessionProcessor`
- [ ] `SessionPrompt`
- [ ] `SessionCompaction`
- [ ] `Provider`
- [x] `Session` — `session/index.ts`
- [ ] `SessionProcessor` — blocked by AI SDK v6 PR (#18433)
- [ ] `SessionPrompt` — blocked by AI SDK v6 PR (#18433)
- [ ] `SessionCompaction` — blocked by AI SDK v6 PR (#18433)
- [ ] `Provider` — blocked by AI SDK v6 PR (#18433)
Other services not yet migrated:
- [ ] `SessionSummary` — `session/summary.ts`
- [ ] `SessionTodo` — `session/todo.ts`
- [ ] `SessionRevert` — `session/revert.ts`
- [ ] `Instruction` — `session/instruction.ts`
- [ ] `ShareNext` — `share/share-next.ts`
- [ ] `SyncEvent` — `sync/index.ts`
- [ ] `Storage` — `storage/storage.ts`
- [ ] `Workspace` — `control-plane/workspace.ts`
## Tool interface → Effect
Once individual tools are effectified, change `Tool.Info` (`tool/tool.ts`) so `init` and `execute` return `Effect` instead of `Promise`. This lets tool implementations compose natively with the Effect pipeline rather than being wrapped in `Effect.promise()` at the call site. Requires:
1. Migrate each tool to return Effects
2. Update `Tool.define()` factory to work with Effects
3. Update `SessionPrompt` to `yield*` tool results instead of `await`ing — blocked by AI SDK v6 PR (#18433)
Individual tools, ordered by value:
- [ ] `apply_patch.ts` — HIGH: multi-step orchestration, error accumulation, Bus events
- [ ] `read.ts` — HIGH: streaming I/O, readline, binary detection → FileSystem + Stream
- [ ] `edit.ts` — HIGH: multi-step diff/format/publish pipeline, FileWatcher lock
- [ ] `grep.ts` — MEDIUM: spawns ripgrep → ChildProcessSpawner, timeout handling
- [ ] `write.ts` — MEDIUM: permission checks, diagnostics polling, Bus events
- [ ] `codesearch.ts` — MEDIUM: HTTP + SSE + manual timeout → HttpClient + Effect.timeout
- [ ] `webfetch.ts` — MEDIUM: fetch with UA retry, size limits → HttpClient
- [ ] `websearch.ts` — MEDIUM: MCP over HTTP → HttpClient
- [ ] `batch.ts` — MEDIUM: parallel execution, per-call error recovery → Effect.all
- [ ] `task.ts` — MEDIUM: task state management
- [ ] `glob.ts` — LOW: simple async generator
- [ ] `lsp.ts` — LOW: dispatch switch over LSP operations
- [ ] `skill.ts` — LOW: skill tool adapter
- [ ] `plan.ts` — LOW: plan file operations
## Effect service adoption in already-migrated code
Some services are effectified but still use raw `Filesystem.*` or `Process.spawn` instead of the Effect equivalents. These are low-hanging fruit — the layers already exist, they just need the dependency swap.
### `Filesystem.*` → `AppFileSystem.Service` (yield in layer)
- [ ] `file/index.ts` — 11 calls (the File service itself)
- [ ] `config/config.ts` — 7 calls
- [ ] `auth/index.ts` — 3 calls
- [ ] `skill/index.ts` — 3 calls
- [ ] `file/time.ts` — 1 call
### `Process.spawn` → `ChildProcessSpawner` (yield in layer)
- [ ] `format/index.ts` — 1 call
## Filesystem consolidation
`util/filesystem.ts` (raw fs wrapper) is used by **64 files**. The effectified `AppFileSystem` service (`filesystem/index.ts`) exists but only has **8 consumers**. As services and tools are effectified, they should switch from `Filesystem.*` to yielding `AppFileSystem.Service` — this happens naturally during each migration, not as a separate effort.
Similarly, **28 files** still import raw `fs` or `fs/promises` directly. These should migrate to `AppFileSystem` or `Filesystem.*` as they're touched.
Current raw fs users that will convert during tool migration:
- `tool/read.ts` — fs.createReadStream, readline
- `tool/apply_patch.ts` — fs/promises
- `tool/bash.ts` — fs/promises
- `file/ripgrep.ts` — fs/promises
- `storage/storage.ts` — fs/promises
- `patch/index.ts` — fs, fs/promises
## Primitives & utilities
- [ ] `util/lock.ts` — reader-writer lock → Effect Semaphore/Permit
- [ ] `util/flock.ts` — file-based distributed lock with heartbeat → Effect.repeat + addFinalizer
- [ ] `util/process.ts` — child process spawn wrapper → return Effect instead of Promise
- [ ] `util/lazy.ts` — replace uses in Effect code with Effect.cached; keep for sync-only code

View File

@@ -32,15 +32,7 @@ export const WorktreeAdaptor: Adaptor = {
const config = Config.parse(info)
await Worktree.remove({ directory: config.directory })
},
async fetch(info, input: RequestInfo | URL, init?: RequestInit) {
const { Server } = await import("../../server/server")
const config = Config.parse(info)
const url = input instanceof Request || input instanceof URL ? input : new URL(input, "http://opencode.internal")
const headers = new Headers(init?.headers ?? (input instanceof Request ? input.headers : undefined))
headers.set("x-opencode-directory", config.directory)
const request = new Request(url, { ...init, headers })
return Server.Default().fetch(request)
async fetch(_info, _input: RequestInfo | URL, _init?: RequestInit) {
throw new Error("fetch not implemented")
},
}

View File

@@ -1,64 +0,0 @@
import type { MiddlewareHandler } from "hono"
import { Flag } from "../flag/flag"
import { getAdaptor } from "./adaptors"
import { WorkspaceID } from "./schema"
import { Workspace } from "./workspace"
import { InstanceRoutes } from "../server/instance"
import { lazy } from "../util/lazy"
type Rule = { method?: string; path: string; exact?: boolean; action: "local" | "forward" }
const RULES: Array<Rule> = [
{ path: "/session/status", action: "forward" },
{ method: "GET", path: "/session", action: "local" },
]
function local(method: string, path: string) {
for (const rule of RULES) {
if (rule.method && rule.method !== method) continue
const match = rule.exact ? path === rule.path : path === rule.path || path.startsWith(rule.path + "/")
if (match) return rule.action === "local"
}
return false
}
const routes = lazy(() => InstanceRoutes())
export const WorkspaceRouterMiddleware: MiddlewareHandler = async (c) => {
if (!Flag.OPENCODE_EXPERIMENTAL_WORKSPACES) {
return routes().fetch(c.req.raw, c.env)
}
const url = new URL(c.req.url)
const raw = url.searchParams.get("workspace")
if (!raw) {
return routes().fetch(c.req.raw, c.env)
}
if (local(c.req.method, url.pathname)) {
return routes().fetch(c.req.raw, c.env)
}
const workspaceID = WorkspaceID.make(raw)
const workspace = await Workspace.get(workspaceID)
if (!workspace) {
return new Response(`Workspace not found: ${workspaceID}`, {
status: 500,
headers: {
"content-type": "text/plain; charset=utf-8",
},
})
}
const adaptor = await getAdaptor(workspace.type)
const headers = new Headers(c.req.raw.headers)
headers.delete("x-opencode-workspace")
return adaptor.fetch(workspace, `${url.pathname}${url.search}`, {
method: c.req.method,
body: c.req.method === "GET" || c.req.method === "HEAD" ? undefined : await c.req.raw.arrayBuffer(),
signal: c.req.raw.signal,
headers,
})
}

View File

@@ -1,4 +1,6 @@
import { Effect, Layer, ServiceMap } from "effect"
import { ChildProcess, ChildProcessSpawner } from "effect/unstable/process"
import * as CrossSpawnSpawner from "@/effect/cross-spawn-spawner"
import { InstanceState } from "@/effect/instance-state"
import { makeRuntime } from "@/effect/run-service"
import path from "path"
@@ -6,7 +8,6 @@ import { mergeDeep } from "remeda"
import z from "zod"
import { Config } from "../config/config"
import { Instance } from "../project/instance"
import { Process } from "../util/process"
import { Log } from "../util/log"
import * as Formatter from "./formatter"
@@ -36,6 +37,7 @@ export namespace Format {
Service,
Effect.gen(function* () {
const config = yield* Config.Service
const spawner = yield* ChildProcessSpawner.ChildProcessSpawner
const state = yield* InstanceState.make(
Effect.fn("Format.state")(function* (_ctx) {
@@ -98,38 +100,48 @@ export namespace Format {
return checks.filter((x) => x.enabled).map((x) => x.item)
}
async function formatFile(filepath: string) {
log.info("formatting", { file: filepath })
const ext = path.extname(filepath)
function formatFile(filepath: string) {
return Effect.gen(function* () {
log.info("formatting", { file: filepath })
const ext = path.extname(filepath)
for (const item of await getFormatter(ext)) {
log.info("running", { command: item.command })
try {
const proc = Process.spawn(
item.command.map((x) => x.replace("$FILE", filepath)),
{
cwd: Instance.directory,
env: { ...process.env, ...item.environment },
stdout: "ignore",
stderr: "ignore",
},
)
const exit = await proc.exited
if (exit !== 0) {
for (const item of yield* Effect.promise(() => getFormatter(ext))) {
log.info("running", { command: item.command })
const cmd = item.command.map((x) => x.replace("$FILE", filepath))
const code = yield* spawner
.spawn(
ChildProcess.make(cmd[0]!, cmd.slice(1), {
cwd: Instance.directory,
env: item.environment,
extendEnv: true,
stdin: "ignore",
stdout: "ignore",
stderr: "ignore",
}),
)
.pipe(
Effect.flatMap((handle) => handle.exitCode),
Effect.scoped,
Effect.catch(() =>
Effect.sync(() => {
log.error("failed to format file", {
error: "spawn failed",
command: item.command,
...item.environment,
file: filepath,
})
return ChildProcessSpawner.ExitCode(1)
}),
),
)
if (code !== 0) {
log.error("failed", {
command: item.command,
...item.environment,
})
}
} catch (error) {
log.error("failed to format file", {
error,
command: item.command,
...item.environment,
file: filepath,
})
}
}
})
}
log.info("init")
@@ -162,14 +174,14 @@ export namespace Format {
const file = Effect.fn("Format.file")(function* (filepath: string) {
const { formatFile } = yield* InstanceState.get(state)
yield* Effect.promise(() => formatFile(filepath))
yield* formatFile(filepath)
})
return Service.of({ init, status, file })
}),
)
export const defaultLayer = layer.pipe(Layer.provide(Config.defaultLayer))
export const defaultLayer = layer.pipe(Layer.provide(Config.defaultLayer), Layer.provide(CrossSpawnSpawner.defaultLayer))
const { runPromise } = makeRuntime(Service, defaultLayer)

View File

@@ -23,6 +23,9 @@ export namespace ProviderError {
/request entity too large/i, // HTTP 413
/context length is only \d+ tokens/i, // vLLM
/input length.*exceeds.*context length/i, // vLLM
/prompt too long; exceeded (?:max )?context length/i, // Ollama explicit overflow error
/too large for model with \d+ maximum context length/i, // Mistral
/model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error text
]
function isOpenAiErrorRetryable(e: APICallError) {

View File

@@ -9,6 +9,7 @@ import { BunProc } from "../bun"
import { Hash } from "../util/hash"
import { Plugin } from "../plugin"
import { NamedError } from "@opencode-ai/util/error"
import { type LanguageModelV3 } from "@ai-sdk/provider"
import { ModelsDev } from "./models"
import { Auth } from "../auth"
import { Env } from "../env"
@@ -28,7 +29,7 @@ import { createVertex } from "@ai-sdk/google-vertex"
import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
import { createOpenAI } from "@ai-sdk/openai"
import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
import { createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider"
import { createOpenRouter } from "@openrouter/ai-sdk-provider"
import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/copilot"
import { createXai } from "@ai-sdk/xai"
import { createMistral } from "@ai-sdk/mistral"
@@ -109,7 +110,11 @@ export namespace Provider {
})
}
const BUNDLED_PROVIDERS: Record<string, (options: any) => SDK> = {
type BundledSDK = {
languageModel(modelId: string): LanguageModelV3
}
const BUNDLED_PROVIDERS: Record<string, (options: any) => BundledSDK> = {
"@ai-sdk/amazon-bedrock": createAmazonBedrock,
"@ai-sdk/anthropic": createAnthropic,
"@ai-sdk/azure": createAzure,
@@ -130,7 +135,6 @@ export namespace Provider {
"@ai-sdk/perplexity": createPerplexity,
"@ai-sdk/vercel": createVercel,
"gitlab-ai-provider": createGitLab,
// @ts-ignore (TODO: kill this code so we dont have to maintain it)
"@ai-sdk/github-copilot": createGitHubCopilotOpenAICompatible,
}
@@ -591,7 +595,12 @@ export namespace Provider {
if (!result.models.length) {
log.info("gitlab model discovery skipped: no models found", {
project: result.project ? { id: result.project.id, path: result.project.pathWithNamespace } : null,
project: result.project
? {
id: result.project.id,
path: result.project.pathWithNamespace,
}
: null,
})
return {}
}
@@ -619,8 +628,20 @@ export namespace Provider {
reasoning: true,
attachment: true,
toolcall: true,
input: { text: true, audio: false, image: true, video: false, pdf: true },
output: { text: true, audio: false, image: false, video: false, pdf: false },
input: {
text: true,
audio: false,
image: true,
video: false,
pdf: true,
},
output: {
text: true,
audio: false,
image: false,
video: false,
pdf: false,
},
interleaved: false,
},
release_date: "",
@@ -930,17 +951,17 @@ export namespace Provider {
}
const providers: Record<ProviderID, Info> = {} as Record<ProviderID, Info>
const languages = new Map<string, LanguageModelV2>()
const languages = new Map<string, LanguageModelV3>()
const modelLoaders: {
[providerID: string]: CustomModelLoader
} = {}
const varsLoaders: {
[providerID: string]: CustomVarsLoader
} = {}
const sdk = new Map<string, BundledSDK>()
const discoveryLoaders: {
[providerID: string]: CustomDiscoverModels
} = {}
const sdk = new Map<string, SDK>()
log.info("init")
@@ -1232,7 +1253,13 @@ export namespace Provider {
...model.headers,
}
const key = Hash.fast(JSON.stringify({ providerID: model.providerID, npm: model.api.npm, options }))
const key = Hash.fast(
JSON.stringify({
providerID: model.providerID,
npm: model.api.npm,
options,
}),
)
const existing = s.sdk.get(key)
if (existing) return existing
@@ -1285,7 +1312,10 @@ export namespace Provider {
const bundledFn = BUNDLED_PROVIDERS[model.api.npm]
if (bundledFn) {
log.info("using bundled provider", { providerID: model.providerID, pkg: model.api.npm })
log.info("using bundled provider", {
providerID: model.providerID,
pkg: model.api.npm,
})
const loaded = bundledFn({
name: model.providerID,
...options,
@@ -1325,7 +1355,10 @@ export namespace Provider {
const provider = s.providers[providerID]
if (!provider) {
const availableProviders = Object.keys(s.providers)
const matches = fuzzysort.go(providerID, availableProviders, { limit: 3, threshold: -10000 })
const matches = fuzzysort.go(providerID, availableProviders, {
limit: 3,
threshold: -10000,
})
const suggestions = matches.map((m) => m.target)
throw new ModelNotFoundError({ providerID, modelID, suggestions })
}
@@ -1333,14 +1366,17 @@ export namespace Provider {
const info = provider.models[modelID]
if (!info) {
const availableModels = Object.keys(provider.models)
const matches = fuzzysort.go(modelID, availableModels, { limit: 3, threshold: -10000 })
const matches = fuzzysort.go(modelID, availableModels, {
limit: 3,
threshold: -10000,
})
const suggestions = matches.map((m) => m.target)
throw new ModelNotFoundError({ providerID, modelID, suggestions })
}
return info
}
export async function getLanguage(model: Model): Promise<LanguageModelV2> {
export async function getLanguage(model: Model): Promise<LanguageModelV3> {
const s = await state()
const key = `${model.providerID}/${model.id}`
if (s.models.has(key)) return s.models.get(key)!
@@ -1350,7 +1386,10 @@ export namespace Provider {
try {
const language = s.modelLoaders[model.providerID]
? await s.modelLoaders[model.providerID](sdk, model.api.id, { ...provider.options, ...model.options })
? await s.modelLoaders[model.providerID](sdk, model.api.id, {
...provider.options,
...model.options,
})
: sdk.languageModel(model.api.id)
s.models.set(key, language)
return language
@@ -1457,9 +1496,9 @@ export namespace Provider {
if (cfg.model) return parseModel(cfg.model)
const providers = await list()
const recent = (await Filesystem.readJson<{ recent?: { providerID: ProviderID; modelID: ModelID }[] }>(
path.join(Global.Path.state, "model.json"),
)
const recent = (await Filesystem.readJson<{
recent?: { providerID: ProviderID; modelID: ModelID }[]
}>(path.join(Global.Path.state, "model.json"))
.then((x) => (Array.isArray(x.recent) ? x.recent : []))
.catch(() => [])) as { providerID: ProviderID; modelID: ModelID }[]
for (const entry of recent) {

View File

@@ -1,16 +1,16 @@
import {
type LanguageModelV2Prompt,
type SharedV2ProviderMetadata,
type LanguageModelV3Prompt,
type SharedV3ProviderOptions,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import type { OpenAICompatibleChatPrompt } from "./openai-compatible-api-types"
import { convertToBase64 } from "@ai-sdk/provider-utils"
function getOpenAIMetadata(message: { providerOptions?: SharedV2ProviderMetadata }) {
function getOpenAIMetadata(message: { providerOptions?: SharedV3ProviderOptions }) {
return message?.providerOptions?.copilot ?? {}
}
export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Prompt): OpenAICompatibleChatPrompt {
export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV3Prompt): OpenAICompatibleChatPrompt {
const messages: OpenAICompatibleChatPrompt = []
for (const { role, content, ...message } of prompt) {
const metadata = getOpenAIMetadata({ ...message })
@@ -127,6 +127,9 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Pro
case "tool": {
for (const toolResponse of content) {
if (toolResponse.type === "tool-approval-response") {
continue
}
const output = toolResponse.output
let contentValue: string
@@ -135,6 +138,9 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Pro
case "error-text":
contentValue = output.value
break
case "execution-denied":
contentValue = output.reason ?? "Tool execution denied."
break
case "content":
case "json":
case "error-json":

View File

@@ -1,6 +1,8 @@
import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"
export function mapOpenAICompatibleFinishReason(finishReason: string | null | undefined): LanguageModelV2FinishReason {
export function mapOpenAICompatibleFinishReason(
finishReason: string | null | undefined,
): LanguageModelV3FinishReason["unified"] {
switch (finishReason) {
case "stop":
return "stop"
@@ -12,6 +14,6 @@ export function mapOpenAICompatibleFinishReason(finishReason: string | null | un
case "tool_calls":
return "tool-calls"
default:
return "unknown"
return "other"
}
}

View File

@@ -1,12 +1,12 @@
import {
APICallError,
InvalidResponseDataError,
type LanguageModelV2,
type LanguageModelV2CallWarning,
type LanguageModelV2Content,
type LanguageModelV2FinishReason,
type LanguageModelV2StreamPart,
type SharedV2ProviderMetadata,
type LanguageModelV3,
type LanguageModelV3CallOptions,
type LanguageModelV3Content,
type LanguageModelV3StreamPart,
type SharedV3ProviderMetadata,
type SharedV3Warning,
} from "@ai-sdk/provider"
import {
combineHeaders,
@@ -47,11 +47,11 @@ export type OpenAICompatibleChatConfig = {
/**
* The supported URLs for the model.
*/
supportedUrls?: () => LanguageModelV2["supportedUrls"]
supportedUrls?: () => LanguageModelV3["supportedUrls"]
}
export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2"
export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
readonly specificationVersion = "v3"
readonly supportsStructuredOutputs: boolean
@@ -98,8 +98,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
seed,
toolChoice,
tools,
}: Parameters<LanguageModelV2["doGenerate"]>[0]) {
const warnings: LanguageModelV2CallWarning[] = []
}: LanguageModelV3CallOptions) {
const warnings: SharedV3Warning[] = []
// Parse provider options
const compatibleOptions = Object.assign(
@@ -116,13 +116,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
)
if (topK != null) {
warnings.push({ type: "unsupported-setting", setting: "topK" })
warnings.push({ type: "unsupported", feature: "topK" })
}
if (responseFormat?.type === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
warnings.push({
type: "unsupported-setting",
setting: "responseFormat",
type: "unsupported",
feature: "responseFormat",
details: "JSON response format schema is only supported with structuredOutputs",
})
}
@@ -189,9 +189,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
}
}
async doGenerate(
options: Parameters<LanguageModelV2["doGenerate"]>[0],
): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
async doGenerate(options: LanguageModelV3CallOptions) {
const { args, warnings } = await this.getArgs({ ...options })
const body = JSON.stringify(args)
@@ -214,7 +212,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
})
const choice = responseBody.choices[0]
const content: Array<LanguageModelV2Content> = []
const content: Array<LanguageModelV3Content> = []
// text content:
const text = choice.message.content
@@ -257,7 +255,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
}
// provider metadata:
const providerMetadata: SharedV2ProviderMetadata = {
const providerMetadata: SharedV3ProviderMetadata = {
[this.providerOptionsName]: {},
...(await this.config.metadataExtractor?.extractMetadata?.({
parsedBody: rawResponse,
@@ -275,13 +273,23 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
return {
content,
finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
finishReason: {
unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
raw: choice.finish_reason ?? undefined,
},
usage: {
inputTokens: responseBody.usage?.prompt_tokens ?? undefined,
outputTokens: responseBody.usage?.completion_tokens ?? undefined,
totalTokens: responseBody.usage?.total_tokens ?? undefined,
reasoningTokens: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
cachedInputTokens: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
inputTokens: {
total: responseBody.usage?.prompt_tokens ?? undefined,
noCache: undefined,
cacheRead: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
cacheWrite: undefined,
},
outputTokens: {
total: responseBody.usage?.completion_tokens ?? undefined,
text: undefined,
reasoning: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
},
raw: responseBody.usage ?? undefined,
},
providerMetadata,
request: { body },
@@ -294,9 +302,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
}
}
async doStream(
options: Parameters<LanguageModelV2["doStream"]>[0],
): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
async doStream(options: LanguageModelV3CallOptions) {
const { args, warnings } = await this.getArgs({ ...options })
const body = {
@@ -332,7 +338,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
hasFinished: boolean
}> = []
let finishReason: LanguageModelV2FinishReason = "unknown"
let finishReason: {
unified: ReturnType<typeof mapOpenAICompatibleFinishReason>
raw: string | undefined
} = {
unified: "other",
raw: undefined,
}
const usage: {
completionTokens: number | undefined
completionTokensDetails: {
@@ -366,7 +378,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
return {
stream: response.pipeThrough(
new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV2StreamPart>({
new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV3StreamPart>({
start(controller) {
controller.enqueue({ type: "stream-start", warnings })
},
@@ -380,7 +392,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
// handle failed chunk parsing / validation:
if (!chunk.success) {
finishReason = "error"
finishReason = {
unified: "error",
raw: undefined,
}
controller.enqueue({ type: "error", error: chunk.error })
return
}
@@ -390,7 +405,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
// handle error chunks:
if ("error" in value) {
finishReason = "error"
finishReason = {
unified: "error",
raw: undefined,
}
controller.enqueue({ type: "error", error: value.error.message })
return
}
@@ -435,7 +453,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
const choice = value.choices[0]
if (choice?.finish_reason != null) {
finishReason = mapOpenAICompatibleFinishReason(choice.finish_reason)
finishReason = {
unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
raw: choice.finish_reason ?? undefined,
}
}
if (choice?.delta == null) {
@@ -652,7 +673,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
})
}
const providerMetadata: SharedV2ProviderMetadata = {
const providerMetadata: SharedV3ProviderMetadata = {
[providerOptionsName]: {},
// Include reasoning_opaque for Copilot multi-turn reasoning
...(reasoningOpaque ? { copilot: { reasoningOpaque } } : {}),
@@ -671,11 +692,25 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
type: "finish",
finishReason,
usage: {
inputTokens: usage.promptTokens ?? undefined,
outputTokens: usage.completionTokens ?? undefined,
totalTokens: usage.totalTokens ?? undefined,
reasoningTokens: usage.completionTokensDetails.reasoningTokens ?? undefined,
cachedInputTokens: usage.promptTokensDetails.cachedTokens ?? undefined,
inputTokens: {
total: usage.promptTokens,
noCache:
usage.promptTokens != undefined && usage.promptTokensDetails.cachedTokens != undefined
? usage.promptTokens - usage.promptTokensDetails.cachedTokens
: undefined,
cacheRead: usage.promptTokensDetails.cachedTokens,
cacheWrite: undefined,
},
outputTokens: {
total: usage.completionTokens,
text: undefined,
reasoning: usage.completionTokensDetails.reasoningTokens,
},
raw: {
prompt_tokens: usage.promptTokens ?? null,
completion_tokens: usage.completionTokens ?? null,
total_tokens: usage.totalTokens ?? null,
},
},
providerMetadata,
})

View File

@@ -1,4 +1,4 @@
import type { SharedV2ProviderMetadata } from "@ai-sdk/provider"
import type { SharedV3ProviderMetadata } from "@ai-sdk/provider"
/**
Extracts provider-specific metadata from API responses.
@@ -14,7 +14,7 @@ export type MetadataExtractor = {
* @returns Provider-specific metadata or undefined if no metadata is available.
* The metadata should be under a key indicating the provider id.
*/
extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV2ProviderMetadata | undefined>
extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV3ProviderMetadata | undefined>
/**
* Creates an extractor for handling streaming responses. The returned object provides
@@ -39,6 +39,6 @@ export type MetadataExtractor = {
* @returns Provider-specific metadata or undefined if no metadata is available.
* The metadata should be under a key indicating the provider id.
*/
buildMetadata(): SharedV2ProviderMetadata | undefined
buildMetadata(): SharedV3ProviderMetadata | undefined
}
}

View File

@@ -1,15 +1,11 @@
import {
type LanguageModelV2CallOptions,
type LanguageModelV2CallWarning,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import { type LanguageModelV3CallOptions, type SharedV3Warning, UnsupportedFunctionalityError } from "@ai-sdk/provider"
export function prepareTools({
tools,
toolChoice,
}: {
tools: LanguageModelV2CallOptions["tools"]
toolChoice?: LanguageModelV2CallOptions["toolChoice"]
tools: LanguageModelV3CallOptions["tools"]
toolChoice?: LanguageModelV3CallOptions["toolChoice"]
}): {
tools:
| undefined
@@ -22,12 +18,12 @@ export function prepareTools({
}
}>
toolChoice: { type: "function"; function: { name: string } } | "auto" | "none" | "required" | undefined
toolWarnings: LanguageModelV2CallWarning[]
toolWarnings: SharedV3Warning[]
} {
// when the tools array is empty, change it to undefined to prevent errors:
tools = tools?.length ? tools : undefined
const toolWarnings: LanguageModelV2CallWarning[] = []
const toolWarnings: SharedV3Warning[] = []
if (tools == null) {
return { tools: undefined, toolChoice: undefined, toolWarnings }
@@ -43,8 +39,8 @@ export function prepareTools({
}> = []
for (const tool of tools) {
if (tool.type === "provider-defined") {
toolWarnings.push({ type: "unsupported-tool", tool })
if (tool.type === "provider") {
toolWarnings.push({ type: "unsupported", feature: `tool type: ${tool.type}` })
} else {
openaiCompatTools.push({
type: "function",

View File

@@ -1,4 +1,4 @@
import type { LanguageModelV2 } from "@ai-sdk/provider"
import type { LanguageModelV3 } from "@ai-sdk/provider"
import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils"
import { OpenAICompatibleChatLanguageModel } from "./chat/openai-compatible-chat-language-model"
import { OpenAIResponsesLanguageModel } from "./responses/openai-responses-language-model"
@@ -36,10 +36,10 @@ export interface OpenaiCompatibleProviderSettings {
}
export interface OpenaiCompatibleProvider {
(modelId: OpenaiCompatibleModelId): LanguageModelV2
chat(modelId: OpenaiCompatibleModelId): LanguageModelV2
responses(modelId: OpenaiCompatibleModelId): LanguageModelV2
languageModel(modelId: OpenaiCompatibleModelId): LanguageModelV2
(modelId: OpenaiCompatibleModelId): LanguageModelV3
chat(modelId: OpenaiCompatibleModelId): LanguageModelV3
responses(modelId: OpenaiCompatibleModelId): LanguageModelV3
languageModel(modelId: OpenaiCompatibleModelId): LanguageModelV3
// embeddingModel(modelId: any): EmbeddingModelV2

View File

@@ -1,7 +1,7 @@
import {
type LanguageModelV2CallWarning,
type LanguageModelV2Prompt,
type LanguageModelV2ToolCallPart,
type LanguageModelV3Prompt,
type LanguageModelV3ToolCallPart,
type SharedV3Warning,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import { convertToBase64, parseProviderOptions } from "@ai-sdk/provider-utils"
@@ -25,17 +25,18 @@ export async function convertToOpenAIResponsesInput({
store,
hasLocalShellTool = false,
}: {
prompt: LanguageModelV2Prompt
prompt: LanguageModelV3Prompt
systemMessageMode: "system" | "developer" | "remove"
fileIdPrefixes?: readonly string[]
store: boolean
hasLocalShellTool?: boolean
}): Promise<{
input: OpenAIResponsesInput
warnings: Array<LanguageModelV2CallWarning>
warnings: Array<SharedV3Warning>
}> {
const input: OpenAIResponsesInput = []
const warnings: Array<LanguageModelV2CallWarning> = []
const warnings: Array<SharedV3Warning> = []
const processedApprovalIds = new Set<string>()
for (const { role, content } of prompt) {
switch (role) {
@@ -118,7 +119,7 @@ export async function convertToOpenAIResponsesInput({
case "assistant": {
const reasoningMessages: Record<string, OpenAIResponsesReasoning> = {}
const toolCallParts: Record<string, LanguageModelV2ToolCallPart> = {}
const toolCallParts: Record<string, LanguageModelV3ToolCallPart> = {}
for (const part of content) {
switch (part.type) {
@@ -251,8 +252,36 @@ export async function convertToOpenAIResponsesInput({
case "tool": {
for (const part of content) {
if (part.type === "tool-approval-response") {
if (processedApprovalIds.has(part.approvalId)) {
continue
}
processedApprovalIds.add(part.approvalId)
if (store) {
input.push({
type: "item_reference",
id: part.approvalId,
})
}
input.push({
type: "mcp_approval_response",
approval_request_id: part.approvalId,
approve: part.approved,
})
continue
}
const output = part.output
if (output.type === "execution-denied") {
const approvalId = (output.providerOptions?.openai as { approvalId?: string } | undefined)?.approvalId
if (approvalId) {
continue
}
}
if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
input.push({
type: "local_shell_call_output",
@@ -268,6 +297,9 @@ export async function convertToOpenAIResponsesInput({
case "error-text":
contentValue = output.value
break
case "execution-denied":
contentValue = output.reason ?? "Tool execution denied."
break
case "content":
case "json":
case "error-json":

View File

@@ -1,4 +1,4 @@
import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"
export function mapOpenAIResponseFinishReason({
finishReason,
@@ -7,7 +7,7 @@ export function mapOpenAIResponseFinishReason({
finishReason: string | null | undefined
// flag that checks if there have been client-side tool calls (not executed by openai)
hasFunctionCall: boolean
}): LanguageModelV2FinishReason {
}): LanguageModelV3FinishReason["unified"] {
switch (finishReason) {
case undefined:
case null:
@@ -17,6 +17,6 @@ export function mapOpenAIResponseFinishReason({
case "content_filter":
return "content-filter"
default:
return hasFunctionCall ? "tool-calls" : "unknown"
return hasFunctionCall ? "tool-calls" : "other"
}
}

View File

@@ -13,6 +13,7 @@ export type OpenAIResponsesInputItem =
| OpenAIResponsesLocalShellCallOutput
| OpenAIResponsesReasoning
| OpenAIResponsesItemReference
| OpenAIResponsesMcpApprovalResponse
export type OpenAIResponsesIncludeValue =
| "web_search_call.action.sources"
@@ -93,6 +94,12 @@ export type OpenAIResponsesItemReference = {
id: string
}
export type OpenAIResponsesMcpApprovalResponse = {
type: "mcp_approval_response"
approval_request_id: string
approve: boolean
}
/**
* A filter used to compare a specified attribute key to a given value using a defined comparison operation.
*/

View File

@@ -1,13 +1,13 @@
import {
APICallError,
type LanguageModelV2,
type LanguageModelV2CallWarning,
type LanguageModelV2Content,
type LanguageModelV2FinishReason,
type LanguageModelV2ProviderDefinedTool,
type LanguageModelV2StreamPart,
type LanguageModelV2Usage,
type SharedV2ProviderMetadata,
type JSONValue,
type LanguageModelV3,
type LanguageModelV3CallOptions,
type LanguageModelV3Content,
type LanguageModelV3ProviderTool,
type LanguageModelV3StreamPart,
type SharedV3ProviderMetadata,
type SharedV3Warning,
} from "@ai-sdk/provider"
import {
combineHeaders,
@@ -128,8 +128,8 @@ const LOGPROBS_SCHEMA = z.array(
}),
)
export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2"
export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
readonly specificationVersion = "v3"
readonly modelId: OpenAIResponsesModelId
@@ -163,34 +163,34 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
tools,
toolChoice,
responseFormat,
}: Parameters<LanguageModelV2["doGenerate"]>[0]) {
const warnings: LanguageModelV2CallWarning[] = []
}: LanguageModelV3CallOptions) {
const warnings: SharedV3Warning[] = []
const modelConfig = getResponsesModelConfig(this.modelId)
if (topK != null) {
warnings.push({ type: "unsupported-setting", setting: "topK" })
warnings.push({ type: "unsupported", feature: "topK" })
}
if (seed != null) {
warnings.push({ type: "unsupported-setting", setting: "seed" })
warnings.push({ type: "unsupported", feature: "seed" })
}
if (presencePenalty != null) {
warnings.push({
type: "unsupported-setting",
setting: "presencePenalty",
type: "unsupported",
feature: "presencePenalty",
})
}
if (frequencyPenalty != null) {
warnings.push({
type: "unsupported-setting",
setting: "frequencyPenalty",
type: "unsupported",
feature: "frequencyPenalty",
})
}
if (stopSequences != null) {
warnings.push({ type: "unsupported-setting", setting: "stopSequences" })
warnings.push({ type: "unsupported", feature: "stopSequences" })
}
const openaiOptions = await parseProviderOptions({
@@ -218,7 +218,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
}
function hasOpenAITool(id: string) {
return tools?.find((tool) => tool.type === "provider-defined" && tool.id === id) != null
return tools?.find((tool) => tool.type === "provider" && tool.id === id) != null
}
// when logprobs are requested, automatically include them:
@@ -237,9 +237,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
const webSearchToolName = (
tools?.find(
(tool) =>
tool.type === "provider-defined" &&
(tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"),
) as LanguageModelV2ProviderDefinedTool | undefined
tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"),
) as LanguageModelV3ProviderTool | undefined
)?.name
if (webSearchToolName) {
@@ -315,8 +314,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
if (baseArgs.temperature != null) {
baseArgs.temperature = undefined
warnings.push({
type: "unsupported-setting",
setting: "temperature",
type: "unsupported",
feature: "temperature",
details: "temperature is not supported for reasoning models",
})
}
@@ -324,24 +323,24 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
if (baseArgs.top_p != null) {
baseArgs.top_p = undefined
warnings.push({
type: "unsupported-setting",
setting: "topP",
type: "unsupported",
feature: "topP",
details: "topP is not supported for reasoning models",
})
}
} else {
if (openaiOptions?.reasoningEffort != null) {
warnings.push({
type: "unsupported-setting",
setting: "reasoningEffort",
type: "unsupported",
feature: "reasoningEffort",
details: "reasoningEffort is not supported for non-reasoning models",
})
}
if (openaiOptions?.reasoningSummary != null) {
warnings.push({
type: "unsupported-setting",
setting: "reasoningSummary",
type: "unsupported",
feature: "reasoningSummary",
details: "reasoningSummary is not supported for non-reasoning models",
})
}
@@ -350,8 +349,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
// Validate flex processing support
if (openaiOptions?.serviceTier === "flex" && !modelConfig.supportsFlexProcessing) {
warnings.push({
type: "unsupported-setting",
setting: "serviceTier",
type: "unsupported",
feature: "serviceTier",
details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
})
// Remove from args if not supported
@@ -361,8 +360,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
// Validate priority processing support
if (openaiOptions?.serviceTier === "priority" && !modelConfig.supportsPriorityProcessing) {
warnings.push({
type: "unsupported-setting",
setting: "serviceTier",
type: "unsupported",
feature: "serviceTier",
details:
"priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
})
@@ -391,9 +390,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
}
}
async doGenerate(
options: Parameters<LanguageModelV2["doGenerate"]>[0],
): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
async doGenerate(options: LanguageModelV3CallOptions) {
const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
const url = this.config.url({
path: "/responses",
@@ -508,7 +505,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
})
}
const content: Array<LanguageModelV2Content> = []
const content: Array<LanguageModelV3Content> = []
const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
// flag that checks if there have been client-side tool calls (not executed by openai)
@@ -554,7 +551,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
result: part.result,
} satisfies z.infer<typeof imageGenerationOutputSchema>,
providerExecuted: true,
})
break
@@ -648,7 +644,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
toolCallId: part.id,
toolName: webSearchToolName ?? "web_search",
result: { status: part.status },
providerExecuted: true,
})
break
@@ -671,7 +666,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
type: "computer_use_tool_result",
status: part.status || "completed",
},
providerExecuted: true,
})
break
}
@@ -693,14 +687,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
queries: part.queries,
results:
part.results?.map((result) => ({
attributes: result.attributes,
attributes: result.attributes as Record<string, JSONValue>,
fileId: result.file_id,
filename: result.filename,
score: result.score,
text: result.text,
})) ?? null,
} satisfies z.infer<typeof fileSearchOutputSchema>,
providerExecuted: true,
})
break
}
@@ -724,14 +717,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
outputs: part.outputs,
} satisfies z.infer<typeof codeInterpreterOutputSchema>,
providerExecuted: true,
})
break
}
}
}
const providerMetadata: SharedV2ProviderMetadata = {
const providerMetadata: SharedV3ProviderMetadata = {
openai: { responseId: response.id },
}
@@ -745,16 +737,29 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
return {
content,
finishReason: mapOpenAIResponseFinishReason({
finishReason: response.incomplete_details?.reason,
hasFunctionCall,
}),
finishReason: {
unified: mapOpenAIResponseFinishReason({
finishReason: response.incomplete_details?.reason,
hasFunctionCall,
}),
raw: response.incomplete_details?.reason,
},
usage: {
inputTokens: response.usage.input_tokens,
outputTokens: response.usage.output_tokens,
totalTokens: response.usage.input_tokens + response.usage.output_tokens,
reasoningTokens: response.usage.output_tokens_details?.reasoning_tokens ?? undefined,
cachedInputTokens: response.usage.input_tokens_details?.cached_tokens ?? undefined,
inputTokens: {
total: response.usage.input_tokens,
noCache:
response.usage.input_tokens_details?.cached_tokens != null
? response.usage.input_tokens - response.usage.input_tokens_details.cached_tokens
: undefined,
cacheRead: response.usage.input_tokens_details?.cached_tokens ?? undefined,
cacheWrite: undefined,
},
outputTokens: {
total: response.usage.output_tokens,
text: undefined,
reasoning: response.usage.output_tokens_details?.reasoning_tokens ?? undefined,
},
raw: response.usage,
},
request: { body },
response: {
@@ -769,9 +774,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
}
}
async doStream(
options: Parameters<LanguageModelV2["doStream"]>[0],
): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
async doStream(options: LanguageModelV3CallOptions) {
const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
const { responseHeaders, value: response } = await postJsonToApi({
@@ -792,11 +795,25 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
const self = this
let finishReason: LanguageModelV2FinishReason = "unknown"
const usage: LanguageModelV2Usage = {
let finishReason: {
unified: ReturnType<typeof mapOpenAIResponseFinishReason>
raw: string | undefined
} = {
unified: "other",
raw: undefined,
}
const usage: {
inputTokens: number | undefined
outputTokens: number | undefined
totalTokens: number | undefined
reasoningTokens: number | undefined
cachedInputTokens: number | undefined
} = {
inputTokens: undefined,
outputTokens: undefined,
totalTokens: undefined,
reasoningTokens: undefined,
cachedInputTokens: undefined,
}
const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
let responseId: string | null = null
@@ -837,7 +854,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
return {
stream: response.pipeThrough(
new TransformStream<ParseResult<z.infer<typeof openaiResponsesChunkSchema>>, LanguageModelV2StreamPart>({
new TransformStream<ParseResult<z.infer<typeof openaiResponsesChunkSchema>>, LanguageModelV3StreamPart>({
start(controller) {
controller.enqueue({ type: "stream-start", warnings })
},
@@ -849,7 +866,10 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
// handle failed chunk parsing / validation:
if (!chunk.success) {
finishReason = "error"
finishReason = {
unified: "error",
raw: undefined,
}
controller.enqueue({ type: "error", error: chunk.error })
return
}
@@ -999,7 +1019,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
toolCallId: value.item.id,
toolName: "web_search",
result: { status: value.item.status },
providerExecuted: true,
})
} else if (value.item.type === "computer_call") {
ongoingToolCalls[value.output_index] = undefined
@@ -1025,7 +1044,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
type: "computer_use_tool_result",
status: value.item.status || "completed",
},
providerExecuted: true,
})
} else if (value.item.type === "file_search_call") {
ongoingToolCalls[value.output_index] = undefined
@@ -1038,14 +1056,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
queries: value.item.queries,
results:
value.item.results?.map((result) => ({
attributes: result.attributes,
attributes: result.attributes as Record<string, JSONValue>,
fileId: result.file_id,
filename: result.filename,
score: result.score,
text: result.text,
})) ?? null,
} satisfies z.infer<typeof fileSearchOutputSchema>,
providerExecuted: true,
})
} else if (value.item.type === "code_interpreter_call") {
ongoingToolCalls[value.output_index] = undefined
@@ -1057,7 +1074,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
outputs: value.item.outputs,
} satisfies z.infer<typeof codeInterpreterOutputSchema>,
providerExecuted: true,
})
} else if (value.item.type === "image_generation_call") {
controller.enqueue({
@@ -1067,7 +1083,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
result: value.item.result,
} satisfies z.infer<typeof imageGenerationOutputSchema>,
providerExecuted: true,
})
} else if (value.item.type === "local_shell_call") {
ongoingToolCalls[value.output_index] = undefined
@@ -1137,7 +1152,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
result: value.partial_image_b64,
} satisfies z.infer<typeof imageGenerationOutputSchema>,
providerExecuted: true,
})
} else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
const toolCall = ongoingToolCalls[value.output_index]
@@ -1244,10 +1258,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
})
}
} else if (isResponseFinishedChunk(value)) {
finishReason = mapOpenAIResponseFinishReason({
finishReason: value.response.incomplete_details?.reason,
hasFunctionCall,
})
finishReason = {
unified: mapOpenAIResponseFinishReason({
finishReason: value.response.incomplete_details?.reason,
hasFunctionCall,
}),
raw: value.response.incomplete_details?.reason ?? undefined,
}
usage.inputTokens = value.response.usage.input_tokens
usage.outputTokens = value.response.usage.output_tokens
usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens
@@ -1287,7 +1304,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
currentTextId = null
}
const providerMetadata: SharedV2ProviderMetadata = {
const providerMetadata: SharedV3ProviderMetadata = {
openai: {
responseId,
},
@@ -1304,7 +1321,27 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
controller.enqueue({
type: "finish",
finishReason,
usage,
usage: {
inputTokens: {
total: usage.inputTokens,
noCache:
usage.inputTokens != null && usage.cachedInputTokens != null
? usage.inputTokens - usage.cachedInputTokens
: undefined,
cacheRead: usage.cachedInputTokens,
cacheWrite: undefined,
},
outputTokens: {
total: usage.outputTokens,
text: undefined,
reasoning: usage.reasoningTokens,
},
raw: {
input_tokens: usage.inputTokens,
output_tokens: usage.outputTokens,
total_tokens: usage.totalTokens,
},
},
providerMetadata,
})
},

View File

@@ -1,8 +1,4 @@
import {
type LanguageModelV2CallOptions,
type LanguageModelV2CallWarning,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import { type LanguageModelV3CallOptions, type SharedV3Warning, UnsupportedFunctionalityError } from "@ai-sdk/provider"
import { codeInterpreterArgsSchema } from "./tool/code-interpreter"
import { fileSearchArgsSchema } from "./tool/file-search"
import { webSearchArgsSchema } from "./tool/web-search"
@@ -15,8 +11,8 @@ export function prepareResponsesTools({
toolChoice,
strictJsonSchema,
}: {
tools: LanguageModelV2CallOptions["tools"]
toolChoice?: LanguageModelV2CallOptions["toolChoice"]
tools: LanguageModelV3CallOptions["tools"]
toolChoice?: LanguageModelV3CallOptions["toolChoice"]
strictJsonSchema: boolean
}): {
tools?: Array<OpenAIResponsesTool>
@@ -30,12 +26,12 @@ export function prepareResponsesTools({
| { type: "function"; name: string }
| { type: "code_interpreter" }
| { type: "image_generation" }
toolWarnings: LanguageModelV2CallWarning[]
toolWarnings: SharedV3Warning[]
} {
// when the tools array is empty, change it to undefined to prevent errors:
tools = tools?.length ? tools : undefined
const toolWarnings: LanguageModelV2CallWarning[] = []
const toolWarnings: SharedV3Warning[] = []
if (tools == null) {
return { tools: undefined, toolChoice: undefined, toolWarnings }
@@ -54,7 +50,7 @@ export function prepareResponsesTools({
strict: strictJsonSchema,
})
break
case "provider-defined": {
case "provider": {
switch (tool.id) {
case "openai.file_search": {
const args = fileSearchArgsSchema.parse(tool.args)
@@ -138,7 +134,7 @@ export function prepareResponsesTools({
break
}
default:
toolWarnings.push({ type: "unsupported-tool", tool })
toolWarnings.push({ type: "unsupported", feature: "tool type" })
break
}
}

View File

@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
export const codeInterpreterInputSchema = z.object({
@@ -37,7 +37,7 @@ type CodeInterpreterArgs = {
container?: string | { fileIds?: string[] }
}
export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
export const codeInterpreterToolFactory = createProviderToolFactoryWithOutputSchema<
{
/**
* The code to run, or null if not available.
@@ -76,7 +76,6 @@ export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOu
CodeInterpreterArgs
>({
id: "openai.code_interpreter",
name: "code_interpreter",
inputSchema: codeInterpreterInputSchema,
outputSchema: codeInterpreterOutputSchema,
})

View File

@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import type {
OpenAIResponsesFileSearchToolComparisonFilter,
OpenAIResponsesFileSearchToolCompoundFilter,
@@ -43,7 +43,7 @@ export const fileSearchOutputSchema = z.object({
.nullable(),
})
export const fileSearch = createProviderDefinedToolFactoryWithOutputSchema<
export const fileSearch = createProviderToolFactoryWithOutputSchema<
{},
{
/**
@@ -122,7 +122,6 @@ export const fileSearch = createProviderDefinedToolFactoryWithOutputSchema<
}
>({
id: "openai.file_search",
name: "file_search",
inputSchema: z.object({}),
outputSchema: fileSearchOutputSchema,
})

View File

@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
export const imageGenerationArgsSchema = z
@@ -92,7 +92,7 @@ type ImageGenerationArgs = {
size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024"
}
const imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
const imageGenerationToolFactory = createProviderToolFactoryWithOutputSchema<
{},
{
/**
@@ -103,7 +103,6 @@ const imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSch
ImageGenerationArgs
>({
id: "openai.image_generation",
name: "image_generation",
inputSchema: z.object({}),
outputSchema: imageGenerationOutputSchema,
})

View File

@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
export const localShellInputSchema = z.object({
@@ -16,7 +16,7 @@ export const localShellOutputSchema = z.object({
output: z.string(),
})
export const localShell = createProviderDefinedToolFactoryWithOutputSchema<
export const localShell = createProviderToolFactoryWithOutputSchema<
{
/**
* Execute a shell command on the server.
@@ -59,7 +59,6 @@ export const localShell = createProviderDefinedToolFactoryWithOutputSchema<
{}
>({
id: "openai.local_shell",
name: "local_shell",
inputSchema: localShellInputSchema,
outputSchema: localShellOutputSchema,
})

View File

@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
import { createProviderToolFactory } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Args validation schema
@@ -40,7 +40,7 @@ export const webSearchPreviewArgsSchema = z.object({
.optional(),
})
export const webSearchPreview = createProviderDefinedToolFactory<
export const webSearchPreview = createProviderToolFactory<
{
// Web search doesn't take input parameters - it's controlled by the prompt
},
@@ -81,7 +81,6 @@ export const webSearchPreview = createProviderDefinedToolFactory<
}
>({
id: "openai.web_search_preview",
name: "web_search_preview",
inputSchema: z.object({
action: z
.discriminatedUnion("type", [

View File

@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
import { createProviderToolFactory } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
export const webSearchArgsSchema = z.object({
@@ -21,7 +21,7 @@ export const webSearchArgsSchema = z.object({
.optional(),
})
export const webSearchToolFactory = createProviderDefinedToolFactory<
export const webSearchToolFactory = createProviderToolFactory<
{
// Web search doesn't take input parameters - it's controlled by the prompt
},
@@ -74,7 +74,6 @@ export const webSearchToolFactory = createProviderDefinedToolFactory<
}
>({
id: "openai.web_search",
name: "web_search",
inputSchema: z.object({
action: z
.discriminatedUnion("type", [

View File

@@ -25,8 +25,9 @@ export namespace ProviderTransform {
switch (npm) {
case "@ai-sdk/github-copilot":
return "copilot"
case "@ai-sdk/openai":
case "@ai-sdk/azure":
return "azure"
case "@ai-sdk/openai":
return "openai"
case "@ai-sdk/amazon-bedrock":
return "bedrock"
@@ -34,6 +35,7 @@ export namespace ProviderTransform {
case "@ai-sdk/google-vertex/anthropic":
return "anthropic"
case "@ai-sdk/google-vertex":
return "vertex"
case "@ai-sdk/google":
return "google"
case "@ai-sdk/gateway":
@@ -72,17 +74,29 @@ export namespace ProviderTransform {
}
if (model.api.id.includes("claude")) {
const scrub = (id: string) => id.replace(/[^a-zA-Z0-9_-]/g, "_")
return msgs.map((msg) => {
if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
msg.content = msg.content.map((part) => {
if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
return {
...part,
toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
if (msg.role === "assistant" && Array.isArray(msg.content)) {
return {
...msg,
content: msg.content.map((part) => {
if (part.type === "tool-call" || part.type === "tool-result") {
return { ...part, toolCallId: scrub(part.toolCallId) }
}
}
return part
})
return part
}),
}
}
if (msg.role === "tool" && Array.isArray(msg.content)) {
return {
...msg,
content: msg.content.map((part) => {
if (part.type === "tool-result") {
return { ...part, toolCallId: scrub(part.toolCallId) }
}
return part
}),
}
}
return msg
})
@@ -92,29 +106,33 @@ export namespace ProviderTransform {
model.api.id.toLowerCase().includes("mistral") ||
model.api.id.toLocaleLowerCase().includes("devstral")
) {
const scrub = (id: string) => {
return id
.replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
.substring(0, 9) // Take first 9 characters
.padEnd(9, "0") // Pad with zeros if less than 9 characters
}
const result: ModelMessage[] = []
for (let i = 0; i < msgs.length; i++) {
const msg = msgs[i]
const nextMsg = msgs[i + 1]
if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
if (msg.role === "assistant" && Array.isArray(msg.content)) {
msg.content = msg.content.map((part) => {
if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
// Mistral requires alphanumeric tool call IDs with exactly 9 characters
const normalizedId = part.toolCallId
.replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
.substring(0, 9) // Take first 9 characters
.padEnd(9, "0") // Pad with zeros if less than 9 characters
return {
...part,
toolCallId: normalizedId,
}
if (part.type === "tool-call" || part.type === "tool-result") {
return { ...part, toolCallId: scrub(part.toolCallId) }
}
return part
})
}
if (msg.role === "tool" && Array.isArray(msg.content)) {
msg.content = msg.content.map((part) => {
if (part.type === "tool-result") {
return { ...part, toolCallId: scrub(part.toolCallId) }
}
return part
})
}
result.push(msg)
// Fix message sequence: tool messages cannot be followed by user messages
@@ -202,7 +220,12 @@ export namespace ProviderTransform {
if (shouldUseContentOptions) {
const lastContent = msg.content[msg.content.length - 1]
if (lastContent && typeof lastContent === "object") {
if (
lastContent &&
typeof lastContent === "object" &&
lastContent.type !== "tool-approval-request" &&
lastContent.type !== "tool-approval-response"
) {
lastContent.providerOptions = mergeDeep(lastContent.providerOptions ?? {}, providerOptions)
continue
}
@@ -284,7 +307,12 @@ export namespace ProviderTransform {
return {
...msg,
providerOptions: remap(msg.providerOptions),
content: msg.content.map((part) => ({ ...part, providerOptions: remap(part.providerOptions) })),
content: msg.content.map((part) => {
if (part.type === "tool-approval-request" || part.type === "tool-approval-response") {
return { ...part }
}
return { ...part, providerOptions: remap(part.providerOptions) }
}),
} as typeof msg
})
}

View File

@@ -14,7 +14,6 @@ import { Global } from "../global"
import { LSP } from "../lsp"
import { Command } from "../command"
import { Flag } from "../flag/flag"
import { Filesystem } from "@/util/filesystem"
import { QuestionRoutes } from "./routes/question"
import { PermissionRoutes } from "./routes/permission"
import { ProjectRoutes } from "./routes/project"
@@ -26,7 +25,6 @@ import { ConfigRoutes } from "./routes/config"
import { ExperimentalRoutes } from "./routes/experimental"
import { ProviderRoutes } from "./routes/provider"
import { EventRoutes } from "./routes/event"
import { InstanceBootstrap } from "../project/bootstrap"
import { errorHandler } from "./middleware"
const log = Log.create({ service: "server" })
@@ -45,26 +43,6 @@ const csp = (hash = "") =>
export const InstanceRoutes = (app?: Hono) =>
(app ?? new Hono())
.onError(errorHandler(log))
.use(async (c, next) => {
const raw = c.req.query("directory") || c.req.header("x-opencode-directory") || process.cwd()
const directory = Filesystem.resolve(
(() => {
try {
return decodeURIComponent(raw)
} catch {
return raw
}
})(),
)
return Instance.provide({
directory,
init: InstanceBootstrap,
async fn() {
return next()
},
})
})
.route("/project", ProjectRoutes())
.route("/pty", PtyRoutes())
.route("/config", ConfigRoutes())

View File

@@ -0,0 +1,99 @@
import type { MiddlewareHandler } from "hono"
import { getAdaptor } from "@/control-plane/adaptors"
import { WorkspaceID } from "@/control-plane/schema"
import { Workspace } from "@/control-plane/workspace"
import { lazy } from "@/util/lazy"
import { Filesystem } from "@/util/filesystem"
import { Instance } from "@/project/instance"
import { InstanceBootstrap } from "@/project/bootstrap"
import { InstanceRoutes } from "./instance"
type Rule = { method?: string; path: string; exact?: boolean; action: "local" | "forward" }
const RULES: Array<Rule> = [
{ path: "/session/status", action: "forward" },
{ method: "GET", path: "/session", action: "local" },
]
function local(method: string, path: string) {
for (const rule of RULES) {
if (rule.method && rule.method !== method) continue
const match = rule.exact ? path === rule.path : path === rule.path || path.startsWith(rule.path + "/")
if (match) return rule.action === "local"
}
return false
}
const routes = lazy(() => InstanceRoutes())
export const WorkspaceRouterMiddleware: MiddlewareHandler = async (c) => {
const raw = c.req.query("directory") || c.req.header("x-opencode-directory") || process.cwd()
const directory = Filesystem.resolve(
(() => {
try {
return decodeURIComponent(raw)
} catch {
return raw
}
})(),
)
const url = new URL(c.req.url)
const workspaceParam = url.searchParams.get("workspace")
// TODO: If session is being routed, force it to lookup the
// project/workspace
// If no workspace is provided we use the "project" workspace
if (!workspaceParam) {
return Instance.provide({
directory,
init: InstanceBootstrap,
async fn() {
return routes().fetch(c.req.raw, c.env)
},
})
}
const workspaceID = WorkspaceID.make(workspaceParam)
const workspace = await Workspace.get(workspaceID)
if (!workspace) {
return new Response(`Workspace not found: ${workspaceID}`, {
status: 500,
headers: {
"content-type": "text/plain; charset=utf-8",
},
})
}
// Handle local workspaces directly so we can pass env to `fetch`,
// necessary for websocket upgrades
if (workspace.type === "worktree") {
return Instance.provide({
directory: workspace.directory!,
init: InstanceBootstrap,
async fn() {
return routes().fetch(c.req.raw, c.env)
},
})
}
// Remote workspaces
if (local(c.req.method, url.pathname)) {
// No instance provided because we are serving cached data; there
// is no instance to work with
return routes().fetch(c.req.raw, c.env)
}
const adaptor = await getAdaptor(workspace.type)
const headers = new Headers(c.req.raw.headers)
headers.delete("x-opencode-workspace")
return adaptor.fetch(workspace, `${url.pathname}${url.search}`, {
method: c.req.method,
body: c.req.method === "GET" || c.req.method === "HEAD" ? undefined : await c.req.raw.arrayBuffer(),
signal: c.req.raw.signal,
headers,
})
}

View File

@@ -8,7 +8,7 @@ import z from "zod"
import { Auth } from "../auth"
import { Flag } from "../flag/flag"
import { ProviderID } from "../provider/schema"
import { WorkspaceRouterMiddleware } from "../control-plane/workspace-router-middleware"
import { WorkspaceRouterMiddleware } from "./router"
import { websocket } from "hono/bun"
import { errors } from "./error"
import { GlobalRoutes } from "./routes/global"

View File

@@ -215,7 +215,7 @@ When constructing the summary, try to stick to this template:
tools: {},
system: [],
messages: [
...MessageV2.toModelMessages(msgs, model, { stripMedia: true }),
...(await MessageV2.toModelMessages(msgs, model, { stripMedia: true })),
{
role: "user",
content: [

View File

@@ -33,6 +33,8 @@ import { Permission } from "@/permission"
import { Global } from "@/global"
import type { LanguageModelV2Usage } from "@ai-sdk/provider"
import { iife } from "@/util/iife"
import { Effect, Layer, Scope, ServiceMap } from "effect"
import { makeRuntime } from "@/effect/run-service"
export namespace Session {
const log = Log.create({ service: "session" })
@@ -233,6 +235,473 @@ export namespace Session {
),
}
export function plan(input: { slug: string; time: { created: number } }) {
const base = Instance.project.vcs
? path.join(Instance.worktree, ".opencode", "plans")
: path.join(Global.Path.data, "plans")
return path.join(base, [input.time.created, input.slug].join("-") + ".md")
}
export const getUsage = (input: {
model: Provider.Model
usage: LanguageModelV2Usage
metadata?: ProviderMetadata
}) => {
const safe = (value: number) => {
if (!Number.isFinite(value)) return 0
return value
}
const inputTokens = safe(input.usage.inputTokens ?? 0)
const outputTokens = safe(input.usage.outputTokens ?? 0)
const reasoningTokens = safe(input.usage.reasoningTokens ?? 0)
const cacheReadInputTokens = safe(input.usage.cachedInputTokens ?? 0)
const cacheWriteInputTokens = safe(
(input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
// @ts-expect-error
input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
// @ts-expect-error
input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
0) as number,
)
// OpenRouter provides inputTokens as the total count of input tokens (including cached).
// AFAIK other providers (OpenRouter/OpenAI/Gemini etc.) do it the same way e.g. vercel/ai#8794 (comment)
// Anthropic does it differently though - inputTokens doesn't include cached tokens.
// It looks like OpenCode's cost calculation assumes all providers return inputTokens the same way Anthropic does (I'm guessing getUsage logic was originally implemented with anthropic), so it's causing incorrect cost calculation for OpenRouter and others.
const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
const adjustedInputTokens = safe(
excludesCachedTokens ? inputTokens : inputTokens - cacheReadInputTokens - cacheWriteInputTokens,
)
const total = iife(() => {
// Anthropic doesn't provide total_tokens, also ai sdk will vastly undercount if we
// don't compute from components
if (
input.model.api.npm === "@ai-sdk/anthropic" ||
input.model.api.npm === "@ai-sdk/amazon-bedrock" ||
input.model.api.npm === "@ai-sdk/google-vertex/anthropic"
) {
return adjustedInputTokens + outputTokens + cacheReadInputTokens + cacheWriteInputTokens
}
return input.usage.totalTokens
})
const tokens = {
total,
input: adjustedInputTokens,
output: outputTokens,
reasoning: reasoningTokens,
cache: {
write: cacheWriteInputTokens,
read: cacheReadInputTokens,
},
}
const costInfo =
input.model.cost?.experimentalOver200K && tokens.input + tokens.cache.read > 200_000
? input.model.cost.experimentalOver200K
: input.model.cost
return {
cost: safe(
new Decimal(0)
.add(new Decimal(tokens.input).mul(costInfo?.input ?? 0).div(1_000_000))
.add(new Decimal(tokens.output).mul(costInfo?.output ?? 0).div(1_000_000))
.add(new Decimal(tokens.cache.read).mul(costInfo?.cache?.read ?? 0).div(1_000_000))
.add(new Decimal(tokens.cache.write).mul(costInfo?.cache?.write ?? 0).div(1_000_000))
// TODO: update models.dev to have better pricing model, for now:
// charge reasoning tokens at the same rate as output tokens
.add(new Decimal(tokens.reasoning).mul(costInfo?.output ?? 0).div(1_000_000))
.toNumber(),
),
tokens,
}
}
export class BusyError extends Error {
constructor(public readonly sessionID: string) {
super(`Session ${sessionID} is busy`)
}
}
export interface Interface {
readonly create: (input?: {
parentID?: SessionID
title?: string
permission?: Permission.Ruleset
workspaceID?: WorkspaceID
}) => Effect.Effect<Info>
readonly fork: (input: { sessionID: SessionID; messageID?: MessageID }) => Effect.Effect<Info>
readonly touch: (sessionID: SessionID) => Effect.Effect<void>
readonly get: (id: SessionID) => Effect.Effect<Info>
readonly share: (id: SessionID) => Effect.Effect<{ url: string }>
readonly unshare: (id: SessionID) => Effect.Effect<void>
readonly setTitle: (input: { sessionID: SessionID; title: string }) => Effect.Effect<void>
readonly setArchived: (input: { sessionID: SessionID; time?: number }) => Effect.Effect<void>
readonly setPermission: (input: { sessionID: SessionID; permission: Permission.Ruleset }) => Effect.Effect<void>
readonly setRevert: (input: {
sessionID: SessionID
revert: Info["revert"]
summary: Info["summary"]
}) => Effect.Effect<void>
readonly clearRevert: (sessionID: SessionID) => Effect.Effect<void>
readonly setSummary: (input: { sessionID: SessionID; summary: Info["summary"] }) => Effect.Effect<void>
readonly diff: (sessionID: SessionID) => Effect.Effect<Snapshot.FileDiff[]>
readonly messages: (input: { sessionID: SessionID; limit?: number }) => Effect.Effect<MessageV2.WithParts[]>
readonly children: (parentID: SessionID) => Effect.Effect<Info[]>
readonly remove: (sessionID: SessionID) => Effect.Effect<void>
readonly updateMessage: (msg: MessageV2.Info) => Effect.Effect<MessageV2.Info>
readonly removeMessage: (input: { sessionID: SessionID; messageID: MessageID }) => Effect.Effect<MessageID>
readonly removePart: (input: {
sessionID: SessionID
messageID: MessageID
partID: PartID
}) => Effect.Effect<PartID>
readonly updatePart: (part: MessageV2.Part) => Effect.Effect<MessageV2.Part>
readonly updatePartDelta: (input: {
sessionID: SessionID
messageID: MessageID
partID: PartID
field: string
delta: string
}) => Effect.Effect<void>
readonly initialize: (input: {
sessionID: SessionID
modelID: ModelID
providerID: ProviderID
messageID: MessageID
}) => Effect.Effect<void>
}
export class Service extends ServiceMap.Service<Service, Interface>()("@opencode/Session") {}
type Patch = z.infer<typeof Event.Updated.schema>["info"]
const db = <T>(fn: (d: Parameters<typeof Database.use>[0] extends (trx: infer D) => any ? D : never) => T) =>
Effect.sync(() => Database.use(fn))
export const layer: Layer.Layer<Service, never, Bus.Service | Config.Service> = Layer.effect(
Service,
Effect.gen(function* () {
const bus = yield* Bus.Service
const config = yield* Config.Service
const scope = yield* Scope.Scope
const createNext = Effect.fn("Session.createNext")(function* (input: {
id?: SessionID
title?: string
parentID?: SessionID
workspaceID?: WorkspaceID
directory: string
permission?: Permission.Ruleset
}) {
const result: Info = {
id: SessionID.descending(input.id),
slug: Slug.create(),
version: Installation.VERSION,
projectID: Instance.project.id,
directory: input.directory,
workspaceID: input.workspaceID,
parentID: input.parentID,
title: input.title ?? createDefaultTitle(!!input.parentID),
permission: input.permission,
time: {
created: Date.now(),
updated: Date.now(),
},
}
log.info("created", result)
yield* Effect.sync(() => SyncEvent.run(Event.Created, { sessionID: result.id, info: result }))
const cfg = yield* config.get()
if (!result.parentID && (Flag.OPENCODE_AUTO_SHARE || cfg.share === "auto")) {
yield* share(result.id).pipe(Effect.ignore, Effect.forkIn(scope))
}
if (!Flag.OPENCODE_EXPERIMENTAL_WORKSPACES) {
// This only exist for backwards compatibility. We should not be
// manually publishing this event; it is a sync event now
yield* bus.publish(Event.Updated, {
sessionID: result.id,
info: result,
})
}
return result
})
const get = Effect.fn("Session.get")(function* (id: SessionID) {
const row = yield* db((d) => d.select().from(SessionTable).where(eq(SessionTable.id, id)).get())
if (!row) throw new NotFoundError({ message: `Session not found: ${id}` })
return fromRow(row)
})
const share = Effect.fn("Session.share")(function* (id: SessionID) {
const cfg = yield* config.get()
if (cfg.share === "disabled") throw new Error("Sharing is disabled in configuration")
const result = yield* Effect.promise(async () => {
const { ShareNext } = await import("@/share/share-next")
return ShareNext.create(id)
})
yield* Effect.sync(() => SyncEvent.run(Event.Updated, { sessionID: id, info: { share: { url: result.url } } }))
return result
})
const unshare = Effect.fn("Session.unshare")(function* (id: SessionID) {
yield* Effect.promise(async () => {
const { ShareNext } = await import("@/share/share-next")
await ShareNext.remove(id)
})
yield* Effect.sync(() => SyncEvent.run(Event.Updated, { sessionID: id, info: { share: { url: null } } }))
})
const children = Effect.fn("Session.children")(function* (parentID: SessionID) {
const project = Instance.project
const rows = yield* db((d) =>
d
.select()
.from(SessionTable)
.where(and(eq(SessionTable.project_id, project.id), eq(SessionTable.parent_id, parentID)))
.all(),
)
return rows.map(fromRow)
})
const remove: (sessionID: SessionID) => Effect.Effect<void> = Effect.fnUntraced(function* (sessionID: SessionID) {
try {
const session = yield* get(sessionID)
const kids = yield* children(sessionID)
for (const child of kids) {
yield* remove(child.id)
}
yield* unshare(sessionID).pipe(Effect.ignore)
yield* Effect.sync(() => {
SyncEvent.run(Event.Deleted, { sessionID, info: session })
SyncEvent.remove(sessionID)
})
} catch (e) {
log.error(e)
}
})
const updateMessage = Effect.fn("Session.updateMessage")(function* (msg: MessageV2.Info) {
yield* Effect.sync(() =>
SyncEvent.run(MessageV2.Event.Updated, {
sessionID: msg.sessionID,
info: msg,
}),
)
return msg
})
const updatePart = Effect.fn("Session.updatePart")(function* (part: MessageV2.Part) {
yield* Effect.sync(() =>
SyncEvent.run(MessageV2.Event.PartUpdated, {
sessionID: part.sessionID,
part: structuredClone(part),
time: Date.now(),
}),
)
return part
})
const create = Effect.fn("Session.create")(function* (input?: {
parentID?: SessionID
title?: string
permission?: Permission.Ruleset
workspaceID?: WorkspaceID
}) {
return yield* createNext({
parentID: input?.parentID,
directory: Instance.directory,
title: input?.title,
permission: input?.permission,
workspaceID: input?.workspaceID,
})
})
const fork = Effect.fn("Session.fork")(function* (input: { sessionID: SessionID; messageID?: MessageID }) {
const original = yield* get(input.sessionID)
const title = getForkedTitle(original.title)
const session = yield* createNext({
directory: Instance.directory,
workspaceID: original.workspaceID,
title,
})
const msgs = yield* messages({ sessionID: input.sessionID })
const idMap = new Map<string, MessageID>()
for (const msg of msgs) {
if (input.messageID && msg.info.id >= input.messageID) break
const newID = MessageID.ascending()
idMap.set(msg.info.id, newID)
const parentID = msg.info.role === "assistant" && msg.info.parentID ? idMap.get(msg.info.parentID) : undefined
const cloned = yield* updateMessage({
...msg.info,
sessionID: session.id,
id: newID,
...(parentID && { parentID }),
})
for (const part of msg.parts) {
yield* updatePart({
...part,
id: PartID.ascending(),
messageID: cloned.id,
sessionID: session.id,
})
}
}
return session
})
const patch = (sessionID: SessionID, info: Patch) =>
Effect.sync(() => SyncEvent.run(Event.Updated, { sessionID, info }))
const touch = Effect.fn("Session.touch")(function* (sessionID: SessionID) {
yield* patch(sessionID, { time: { updated: Date.now() } })
})
const setTitle = Effect.fn("Session.setTitle")(function* (input: { sessionID: SessionID; title: string }) {
yield* patch(input.sessionID, { title: input.title })
})
const setArchived = Effect.fn("Session.setArchived")(function* (input: { sessionID: SessionID; time?: number }) {
yield* patch(input.sessionID, { time: { archived: input.time } })
})
const setPermission = Effect.fn("Session.setPermission")(function* (input: {
sessionID: SessionID
permission: Permission.Ruleset
}) {
yield* patch(input.sessionID, { permission: input.permission, time: { updated: Date.now() } })
})
const setRevert = Effect.fn("Session.setRevert")(function* (input: {
sessionID: SessionID
revert: Info["revert"]
summary: Info["summary"]
}) {
yield* patch(input.sessionID, { summary: input.summary, time: { updated: Date.now() }, revert: input.revert })
})
const clearRevert = Effect.fn("Session.clearRevert")(function* (sessionID: SessionID) {
yield* patch(sessionID, { time: { updated: Date.now() }, revert: null })
})
const setSummary = Effect.fn("Session.setSummary")(function* (input: {
sessionID: SessionID
summary: Info["summary"]
}) {
yield* patch(input.sessionID, { time: { updated: Date.now() }, summary: input.summary })
})
const diff = Effect.fn("Session.diff")(function* (sessionID: SessionID) {
return yield* Effect.tryPromise(() => Storage.read<Snapshot.FileDiff[]>(["session_diff", sessionID])).pipe(
Effect.orElseSucceed(() => [] as Snapshot.FileDiff[]),
)
})
const messages = Effect.fn("Session.messages")(function* (input: { sessionID: SessionID; limit?: number }) {
return yield* Effect.promise(async () => {
const result = [] as MessageV2.WithParts[]
for await (const msg of MessageV2.stream(input.sessionID)) {
if (input.limit && result.length >= input.limit) break
result.push(msg)
}
result.reverse()
return result
})
})
const removeMessage = Effect.fn("Session.removeMessage")(function* (input: {
sessionID: SessionID
messageID: MessageID
}) {
yield* Effect.sync(() =>
SyncEvent.run(MessageV2.Event.Removed, {
sessionID: input.sessionID,
messageID: input.messageID,
}),
)
return input.messageID
})
const removePart = Effect.fn("Session.removePart")(function* (input: {
sessionID: SessionID
messageID: MessageID
partID: PartID
}) {
yield* Effect.sync(() =>
SyncEvent.run(MessageV2.Event.PartRemoved, {
sessionID: input.sessionID,
messageID: input.messageID,
partID: input.partID,
}),
)
return input.partID
})
const updatePartDelta = Effect.fn("Session.updatePartDelta")(function* (input: {
sessionID: SessionID
messageID: MessageID
partID: PartID
field: string
delta: string
}) {
yield* bus.publish(MessageV2.Event.PartDelta, input)
})
const initialize = Effect.fn("Session.initialize")(function* (input: {
sessionID: SessionID
modelID: ModelID
providerID: ProviderID
messageID: MessageID
}) {
yield* Effect.promise(() =>
SessionPrompt.command({
sessionID: input.sessionID,
messageID: input.messageID,
model: input.providerID + "/" + input.modelID,
command: Command.Default.INIT,
arguments: "",
}),
)
})
return Service.of({
create,
fork,
touch,
get,
share,
unshare,
setTitle,
setArchived,
setPermission,
setRevert,
clearRevert,
setSummary,
diff,
messages,
children,
remove,
updateMessage,
removeMessage,
removePart,
updatePart,
updatePartDelta,
initialize,
})
}),
)
export const defaultLayer = layer.pipe(Layer.provide(Bus.layer), Layer.provide(Config.defaultLayer))
const { runPromise } = makeRuntime(Service, defaultLayer)
export const create = fn(
z
.object({
@@ -242,244 +711,46 @@ export namespace Session {
workspaceID: WorkspaceID.zod.optional(),
})
.optional(),
async (input) => {
return createNext({
parentID: input?.parentID,
directory: Instance.directory,
title: input?.title,
permission: input?.permission,
workspaceID: input?.workspaceID,
})
},
(input) => runPromise((svc) => svc.create(input)),
)
export const fork = fn(
z.object({
sessionID: SessionID.zod,
messageID: MessageID.zod.optional(),
}),
async (input) => {
const original = await get(input.sessionID)
if (!original) throw new Error("session not found")
const title = getForkedTitle(original.title)
const session = await createNext({
directory: Instance.directory,
workspaceID: original.workspaceID,
title,
})
const msgs = await messages({ sessionID: input.sessionID })
const idMap = new Map<string, MessageID>()
for (const msg of msgs) {
if (input.messageID && msg.info.id >= input.messageID) break
const newID = MessageID.ascending()
idMap.set(msg.info.id, newID)
const parentID = msg.info.role === "assistant" && msg.info.parentID ? idMap.get(msg.info.parentID) : undefined
const cloned = await updateMessage({
...msg.info,
sessionID: session.id,
id: newID,
...(parentID && { parentID }),
})
for (const part of msg.parts) {
await updatePart({
...part,
id: PartID.ascending(),
messageID: cloned.id,
sessionID: session.id,
})
}
}
return session
},
export const fork = fn(z.object({ sessionID: SessionID.zod, messageID: MessageID.zod.optional() }), (input) =>
runPromise((svc) => svc.fork(input)),
)
export const touch = fn(SessionID.zod, async (sessionID) => {
const time = Date.now()
SyncEvent.run(Event.Updated, { sessionID, info: { time: { updated: time } } })
})
export const touch = fn(SessionID.zod, (id) => runPromise((svc) => svc.touch(id)))
export const get = fn(SessionID.zod, (id) => runPromise((svc) => svc.get(id)))
export const share = fn(SessionID.zod, (id) => runPromise((svc) => svc.share(id)))
export const unshare = fn(SessionID.zod, (id) => runPromise((svc) => svc.unshare(id)))
export async function createNext(input: {
id?: SessionID
title?: string
parentID?: SessionID
workspaceID?: WorkspaceID
directory: string
permission?: Permission.Ruleset
}) {
const result: Info = {
id: SessionID.descending(input.id),
slug: Slug.create(),
version: Installation.VERSION,
projectID: Instance.project.id,
directory: input.directory,
workspaceID: input.workspaceID,
parentID: input.parentID,
title: input.title ?? createDefaultTitle(!!input.parentID),
permission: input.permission,
time: {
created: Date.now(),
updated: Date.now(),
},
}
log.info("created", result)
SyncEvent.run(Event.Created, { sessionID: result.id, info: result })
const cfg = await Config.get()
if (!result.parentID && (Flag.OPENCODE_AUTO_SHARE || cfg.share === "auto")) {
share(result.id).catch(() => {
// Silently ignore sharing errors during session creation
})
}
if (!Flag.OPENCODE_EXPERIMENTAL_WORKSPACES) {
// This only exist for backwards compatibility. We should not be
// manually publishing this event; it is a sync event now
Bus.publish(Event.Updated, {
sessionID: result.id,
info: result,
})
}
return result
}
export function plan(input: { slug: string; time: { created: number } }) {
const base = Instance.project.vcs
? path.join(Instance.worktree, ".opencode", "plans")
: path.join(Global.Path.data, "plans")
return path.join(base, [input.time.created, input.slug].join("-") + ".md")
}
export const get = fn(SessionID.zod, async (id) => {
const row = Database.use((db) => db.select().from(SessionTable).where(eq(SessionTable.id, id)).get())
if (!row) throw new NotFoundError({ message: `Session not found: ${id}` })
return fromRow(row)
})
export const share = fn(SessionID.zod, async (id) => {
const cfg = await Config.get()
if (cfg.share === "disabled") {
throw new Error("Sharing is disabled in configuration")
}
const { ShareNext } = await import("@/share/share-next")
const share = await ShareNext.create(id)
SyncEvent.run(Event.Updated, { sessionID: id, info: { share: { url: share.url } } })
return share
})
export const unshare = fn(SessionID.zod, async (id) => {
// Use ShareNext to remove the share (same as share function uses ShareNext to create)
const { ShareNext } = await import("@/share/share-next")
await ShareNext.remove(id)
SyncEvent.run(Event.Updated, { sessionID: id, info: { share: { url: null } } })
})
export const setTitle = fn(
z.object({
sessionID: SessionID.zod,
title: z.string(),
}),
async (input) => {
SyncEvent.run(Event.Updated, { sessionID: input.sessionID, info: { title: input.title } })
},
export const setTitle = fn(z.object({ sessionID: SessionID.zod, title: z.string() }), (input) =>
runPromise((svc) => svc.setTitle(input)),
)
export const setArchived = fn(
z.object({
sessionID: SessionID.zod,
time: z.number().optional(),
}),
async (input) => {
SyncEvent.run(Event.Updated, { sessionID: input.sessionID, info: { time: { archived: input.time } } })
},
export const setArchived = fn(z.object({ sessionID: SessionID.zod, time: z.number().optional() }), (input) =>
runPromise((svc) => svc.setArchived(input)),
)
export const setPermission = fn(
z.object({
sessionID: SessionID.zod,
permission: Permission.Ruleset,
}),
async (input) => {
SyncEvent.run(Event.Updated, {
sessionID: input.sessionID,
info: { permission: input.permission, time: { updated: Date.now() } },
})
},
export const setPermission = fn(z.object({ sessionID: SessionID.zod, permission: Permission.Ruleset }), (input) =>
runPromise((svc) => svc.setPermission(input)),
)
export const setRevert = fn(
z.object({
sessionID: SessionID.zod,
revert: Info.shape.revert,
summary: Info.shape.summary,
}),
async (input) => {
SyncEvent.run(Event.Updated, {
sessionID: input.sessionID,
info: {
summary: input.summary,
time: { updated: Date.now() },
revert: input.revert,
},
})
},
z.object({ sessionID: SessionID.zod, revert: Info.shape.revert, summary: Info.shape.summary }),
(input) =>
runPromise((svc) => svc.setRevert({ sessionID: input.sessionID, revert: input.revert, summary: input.summary })),
)
export const clearRevert = fn(SessionID.zod, async (sessionID) => {
SyncEvent.run(Event.Updated, {
sessionID,
info: {
time: { updated: Date.now() },
revert: null,
},
})
})
export const clearRevert = fn(SessionID.zod, (id) => runPromise((svc) => svc.clearRevert(id)))
export const setSummary = fn(
z.object({
sessionID: SessionID.zod,
summary: Info.shape.summary,
}),
async (input) => {
SyncEvent.run(Event.Updated, {
sessionID: input.sessionID,
info: {
time: { updated: Date.now() },
summary: input.summary,
},
})
},
export const setSummary = fn(z.object({ sessionID: SessionID.zod, summary: Info.shape.summary }), (input) =>
runPromise((svc) => svc.setSummary({ sessionID: input.sessionID, summary: input.summary })),
)
export const diff = fn(SessionID.zod, async (sessionID) => {
try {
return await Storage.read<Snapshot.FileDiff[]>(["session_diff", sessionID])
} catch {
return []
}
})
export const diff = fn(SessionID.zod, (id) => runPromise((svc) => svc.diff(id)))
export const messages = fn(
z.object({
sessionID: SessionID.zod,
limit: z.number().optional(),
}),
async (input) => {
const result = [] as MessageV2.WithParts[]
for await (const msg of MessageV2.stream(input.sessionID)) {
if (input.limit && result.length >= input.limit) break
result.push(msg)
}
result.reverse()
return result
},
export const messages = fn(z.object({ sessionID: SessionID.zod, limit: z.number().optional() }), (input) =>
runPromise((svc) => svc.messages(input)),
)
export function* list(input?: {
@@ -594,84 +865,20 @@ export namespace Session {
}
}
export const children = fn(SessionID.zod, async (parentID) => {
const project = Instance.project
const rows = Database.use((db) =>
db
.select()
.from(SessionTable)
.where(and(eq(SessionTable.project_id, project.id), eq(SessionTable.parent_id, parentID)))
.all(),
)
return rows.map(fromRow)
})
export const children = fn(SessionID.zod, (id) => runPromise((svc) => svc.children(id)))
export const remove = fn(SessionID.zod, (id) => runPromise((svc) => svc.remove(id)))
export const updateMessage = fn(MessageV2.Info, (msg) => runPromise((svc) => svc.updateMessage(msg)))
export const remove = fn(SessionID.zod, async (sessionID) => {
try {
const session = await get(sessionID)
for (const child of await children(sessionID)) {
await remove(child.id)
}
await unshare(sessionID).catch(() => {})
SyncEvent.run(Event.Deleted, { sessionID, info: session })
// Eagerly remove event sourcing data to free up space
SyncEvent.remove(sessionID)
} catch (e) {
log.error(e)
}
})
export const updateMessage = fn(MessageV2.Info, async (msg) => {
SyncEvent.run(MessageV2.Event.Updated, {
sessionID: msg.sessionID,
info: msg,
})
return msg
})
export const removeMessage = fn(
z.object({
sessionID: SessionID.zod,
messageID: MessageID.zod,
}),
async (input) => {
SyncEvent.run(MessageV2.Event.Removed, {
sessionID: input.sessionID,
messageID: input.messageID,
})
return input.messageID
},
export const removeMessage = fn(z.object({ sessionID: SessionID.zod, messageID: MessageID.zod }), (input) =>
runPromise((svc) => svc.removeMessage(input)),
)
export const removePart = fn(
z.object({
sessionID: SessionID.zod,
messageID: MessageID.zod,
partID: PartID.zod,
}),
async (input) => {
SyncEvent.run(MessageV2.Event.PartRemoved, {
sessionID: input.sessionID,
messageID: input.messageID,
partID: input.partID,
})
return input.partID
},
z.object({ sessionID: SessionID.zod, messageID: MessageID.zod, partID: PartID.zod }),
(input) => runPromise((svc) => svc.removePart(input)),
)
const UpdatePartInput = MessageV2.Part
export const updatePart = fn(UpdatePartInput, async (part) => {
SyncEvent.run(MessageV2.Event.PartUpdated, {
sessionID: part.sessionID,
part: structuredClone(part),
time: Date.now(),
})
return part
})
export const updatePart = fn(MessageV2.Part, (part) => runPromise((svc) => svc.updatePart(part)))
export const updatePartDelta = fn(
z.object({
@@ -681,111 +888,11 @@ export namespace Session {
field: z.string(),
delta: z.string(),
}),
async (input) => {
Bus.publish(MessageV2.Event.PartDelta, input)
},
(input) => runPromise((svc) => svc.updatePartDelta(input)),
)
export const getUsage = fn(
z.object({
model: z.custom<Provider.Model>(),
usage: z.custom<LanguageModelV2Usage>(),
metadata: z.custom<ProviderMetadata>().optional(),
}),
(input) => {
const safe = (value: number) => {
if (!Number.isFinite(value)) return 0
return value
}
const inputTokens = safe(input.usage.inputTokens ?? 0)
const outputTokens = safe(input.usage.outputTokens ?? 0)
const reasoningTokens = safe(input.usage.reasoningTokens ?? 0)
const cacheReadInputTokens = safe(input.usage.cachedInputTokens ?? 0)
const cacheWriteInputTokens = safe(
(input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
// @ts-expect-error
input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
// @ts-expect-error
input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
0) as number,
)
// OpenRouter provides inputTokens as the total count of input tokens (including cached).
// AFAIK other providers (OpenRouter/OpenAI/Gemini etc.) do it the same way e.g. vercel/ai#8794 (comment)
// Anthropic does it differently though - inputTokens doesn't include cached tokens.
// It looks like OpenCode's cost calculation assumes all providers return inputTokens the same way Anthropic does (I'm guessing getUsage logic was originally implemented with anthropic), so it's causing incorrect cost calculation for OpenRouter and others.
const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
const adjustedInputTokens = safe(
excludesCachedTokens ? inputTokens : inputTokens - cacheReadInputTokens - cacheWriteInputTokens,
)
const total = iife(() => {
// Anthropic doesn't provide total_tokens, also ai sdk will vastly undercount if we
// don't compute from components
if (
input.model.api.npm === "@ai-sdk/anthropic" ||
input.model.api.npm === "@ai-sdk/amazon-bedrock" ||
input.model.api.npm === "@ai-sdk/google-vertex/anthropic"
) {
return adjustedInputTokens + outputTokens + cacheReadInputTokens + cacheWriteInputTokens
}
return input.usage.totalTokens
})
const tokens = {
total,
input: adjustedInputTokens,
output: outputTokens,
reasoning: reasoningTokens,
cache: {
write: cacheWriteInputTokens,
read: cacheReadInputTokens,
},
}
const costInfo =
input.model.cost?.experimentalOver200K && tokens.input + tokens.cache.read > 200_000
? input.model.cost.experimentalOver200K
: input.model.cost
return {
cost: safe(
new Decimal(0)
.add(new Decimal(tokens.input).mul(costInfo?.input ?? 0).div(1_000_000))
.add(new Decimal(tokens.output).mul(costInfo?.output ?? 0).div(1_000_000))
.add(new Decimal(tokens.cache.read).mul(costInfo?.cache?.read ?? 0).div(1_000_000))
.add(new Decimal(tokens.cache.write).mul(costInfo?.cache?.write ?? 0).div(1_000_000))
// TODO: update models.dev to have better pricing model, for now:
// charge reasoning tokens at the same rate as output tokens
.add(new Decimal(tokens.reasoning).mul(costInfo?.output ?? 0).div(1_000_000))
.toNumber(),
),
tokens,
}
},
)
export class BusyError extends Error {
constructor(public readonly sessionID: string) {
super(`Session ${sessionID} is busy`)
}
}
export const initialize = fn(
z.object({
sessionID: SessionID.zod,
modelID: ModelID.zod,
providerID: ProviderID.zod,
messageID: MessageID.zod,
}),
async (input) => {
await SessionPrompt.command({
sessionID: input.sessionID,
messageID: input.messageID,
model: input.providerID + "/" + input.modelID,
command: Command.Default.INIT,
arguments: "",
})
},
z.object({ sessionID: SessionID.zod, modelID: ModelID.zod, providerID: ProviderID.zod, messageID: MessageID.zod }),
(input) => runPromise((svc) => svc.initialize(input)),
)
}

View File

@@ -1,16 +1,6 @@
import { Installation } from "@/installation"
import { Provider } from "@/provider/provider"
import { Log } from "@/util/log"
import {
streamText,
wrapLanguageModel,
type ModelMessage,
type StreamTextResult,
type Tool,
type ToolSet,
tool,
jsonSchema,
} from "ai"
import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool, jsonSchema } from "ai"
import { mergeDeep, pipe } from "remeda"
import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
import { ProviderTransform } from "@/provider/transform"
@@ -23,6 +13,7 @@ import { SystemPrompt } from "./system"
import { Flag } from "@/flag/flag"
import { Permission } from "@/permission"
import { Auth } from "@/auth"
import { Installation } from "@/installation"
export namespace LLM {
const log = Log.create({ service: "llm" })
@@ -43,8 +34,6 @@ export namespace LLM {
toolChoice?: "auto" | "required" | "none"
}
export type StreamOutput = StreamTextResult<ToolSet, unknown>
export async function stream(input: StreamInput) {
const l = log
.clone()
@@ -273,8 +262,10 @@ export namespace LLM {
model: language,
middleware: [
{
specificationVersion: "v3" as const,
async transformParams(args) {
if (args.type === "stream") {
// TODO: verify that LanguageModelV3Prompt is still compat here!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// @ts-expect-error
args.params.prompt = ProviderTransform.message(args.params.prompt, input.model, options)
}

View File

@@ -573,11 +573,11 @@ export namespace MessageV2 {
}))
}
export function toModelMessages(
export async function toModelMessages(
input: WithParts[],
model: Provider.Model,
options?: { stripMedia?: boolean },
): ModelMessage[] {
): Promise<ModelMessage[]> {
const result: UIMessage[] = []
const toolNames = new Set<string>()
// Track media from tool results that need to be injected as user messages
@@ -601,7 +601,8 @@ export namespace MessageV2 {
return false
})()
const toModelOutput = (output: unknown) => {
const toModelOutput = (options: { toolCallId: string; input: unknown; output: unknown }) => {
const output = options.output
if (typeof output === "string") {
return { type: "text", value: output }
}
@@ -799,7 +800,7 @@ export namespace MessageV2 {
const tools = Object.fromEntries(Array.from(toolNames).map((toolName) => [toolName, { toModelOutput }]))
return convertToModelMessages(
return await convertToModelMessages(
result.filter((msg) => msg.parts.some((part) => part.type !== "step-start")),
{
//@ts-expect-error (convertToModelMessages expects a ToolSet but only actually needs tools[name]?.toModelOutput)
@@ -871,7 +872,13 @@ export namespace MessageV2 {
db.select().from(PartTable).where(eq(PartTable.message_id, message_id)).orderBy(PartTable.id).all(),
)
return rows.map(
(row) => ({ ...row.data, id: row.id, sessionID: row.session_id, messageID: row.message_id }) as MessageV2.Part,
(row) =>
({
...row.data,
id: row.id,
sessionID: row.session_id,
messageID: row.message_id,
}) as MessageV2.Part,
)
})

View File

@@ -11,7 +11,7 @@ import { Session } from "."
import { Agent } from "../agent/agent"
import { Provider } from "../provider/provider"
import { ModelID, ProviderID } from "../provider/schema"
import { type Tool as AITool, tool, jsonSchema, type ToolCallOptions, asSchema } from "ai"
import { type Tool as AITool, tool, jsonSchema, type ToolExecutionOptions, asSchema } from "ai"
import { SessionCompaction } from "./compaction"
import { Instance } from "../project/instance"
import { Bus } from "../bus"
@@ -321,7 +321,13 @@ export namespace SessionPrompt {
if (!lastUser) throw new Error("No user message found in stream. This should never happen.")
if (
lastAssistant?.finish &&
!["tool-calls", "unknown"].includes(lastAssistant.finish) &&
![
"tool-calls",
// in v6 unknown became other but other existed in v5 too and was distinctly different
// I think there are certain providers that used to have bad stop reasons, not rlly sure which
// ones if any still have this?
// "unknown",
].includes(lastAssistant.finish) &&
lastUser.id < lastAssistant.id
) {
log.info("exiting loop", { sessionID })
@@ -692,7 +698,7 @@ export namespace SessionPrompt {
sessionID,
system,
messages: [
...MessageV2.toModelMessages(msgs, model),
...(await MessageV2.toModelMessages(msgs, model)),
...(isLastStep
? [
{
@@ -775,7 +781,7 @@ export namespace SessionPrompt {
using _ = log.time("resolveTools")
const tools: Record<string, AITool> = {}
const context = (args: any, options: ToolCallOptions): Tool.Context => ({
const context = (args: any, options: ToolExecutionOptions): Tool.Context => ({
sessionID: input.session.id,
abort: options.abortSignal!,
messageID: input.processor.message.id,
@@ -861,7 +867,8 @@ export namespace SessionPrompt {
const execute = item.execute
if (!execute) continue
const transformed = ProviderTransform.schema(input.model, asSchema(item.inputSchema).jsonSchema)
const schema = await asSchema(item.inputSchema).jsonSchema
const transformed = ProviderTransform.schema(input.model, schema)
item.inputSchema = jsonSchema(transformed)
// Wrap execute to add plugin hooks and format output
item.execute = async (args, opts) => {
@@ -974,10 +981,10 @@ export namespace SessionPrompt {
metadata: { valid: true },
}
},
toModelOutput(result) {
toModelOutput({ output }) {
return {
type: "text",
value: result.output,
value: output.output,
}
},
})
@@ -2010,28 +2017,28 @@ NOTE: At any point in time through this workflow you should feel free to ask the
(await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID))
)
})
const result = await LLM.stream({
agent,
user: firstRealUser.info as MessageV2.User,
system: [],
small: true,
tools: {},
model,
abort: new AbortController().signal,
sessionID: input.session.id,
retries: 2,
messages: [
{
role: "user",
content: "Generate a title for this conversation:\n",
},
...(hasOnlySubtaskParts
? [{ role: "user" as const, content: subtaskParts.map((p) => p.prompt).join("\n") }]
: MessageV2.toModelMessages(contextMessages, model)),
],
})
const text = await result.text.catch((err) => log.error("failed to generate title", { error: err }))
if (text) {
try {
const result = await LLM.stream({
agent,
user: firstRealUser.info as MessageV2.User,
system: [],
small: true,
tools: {},
model,
abort: new AbortController().signal,
sessionID: input.session.id,
retries: 2,
messages: [
{
role: "user",
content: "Generate a title for this conversation:\n",
},
...(hasOnlySubtaskParts
? [{ role: "user" as const, content: subtaskParts.map((p) => p.prompt).join("\n") }]
: await MessageV2.toModelMessages(contextMessages, model)),
],
})
const text = await result.text
const cleaned = text
.replace(/<think>[\s\S]*?<\/think>\s*/g, "")
.split("\n")
@@ -2044,6 +2051,8 @@ NOTE: At any point in time through this workflow you should feel free to ask the
if (NotFoundError.isInstance(err)) return
throw err
})
} catch (error) {
log.error("failed to generate title", { error })
}
}
}

View File

@@ -479,11 +479,11 @@ test("continues loading when a plugin is missing config metadata", async () => {
try {
await TuiPluginRuntime.init(createTuiPluginApi())
// bad plugin was skipped (no metadata entry)
expect(await fs.readFile(path.join(tmp.path, "bad.txt"), "utf8")).rejects.toThrow()
await expect(fs.readFile(path.join(tmp.path, "bad.txt"), "utf8")).rejects.toThrow()
// good plugin loaded fine
expect(await fs.readFile(tmp.extra.goodMarker, "utf8")).resolves.toBe("called")
await expect(fs.readFile(tmp.extra.goodMarker, "utf8")).resolves.toBe("called")
// bare string spec gets undefined options
expect(await fs.readFile(tmp.extra.bareMarker, "utf8")).resolves.toBe("undefined")
await expect(fs.readFile(tmp.extra.bareMarker, "utf8")).resolves.toBe("undefined")
} finally {
await TuiPluginRuntime.dispose()
cwd.mockRestore()

View File

@@ -1,7 +1,15 @@
import { describe, expect, mock, test } from "bun:test"
import { afterEach, describe, expect, mock, spyOn, test } from "bun:test"
import fs from "fs/promises"
import path from "path"
import { tmpdir } from "../../fixture/fixture"
import * as App from "../../../src/cli/cmd/tui/app"
import { Rpc } from "../../../src/util/rpc"
import { UI } from "../../../src/cli/ui"
import * as Timeout from "../../../src/util/timeout"
import * as Network from "../../../src/cli/network"
import * as Win32 from "../../../src/cli/cmd/tui/win32"
import { TuiConfig } from "../../../src/config/tui"
import { Instance } from "../../../src/project/instance"
const stop = new Error("stop")
const seen = {
@@ -9,81 +17,43 @@ const seen = {
inst: [] as string[],
}
mock.module("../../../src/cli/cmd/tui/app", () => ({
tui: async (input: { directory: string }) => {
seen.tui.push(input.directory)
function setup() {
// Intentionally avoid mock.module() here: Bun keeps module overrides in cache
// and mock.restore() does not reset mock.module values. If this switches back
// to module mocks, later suites can see mocked @/config/tui and fail (e.g.
// plugin-loader tests expecting real TuiConfig.waitForDependencies). See:
// https://github.com/oven-sh/bun/issues/7823 and #12823.
spyOn(App, "tui").mockImplementation(async (input) => {
if (input.directory) seen.tui.push(input.directory)
throw stop
},
}))
mock.module("@/util/rpc", () => ({
Rpc: {
client: () => ({
call: async () => ({ url: "http://127.0.0.1" }),
on: () => {},
}),
},
}))
mock.module("@/cli/ui", () => ({
UI: {
error: () => {},
},
}))
mock.module("@/util/log", () => ({
Log: {
init: async () => {},
create: () => ({
error: () => {},
info: () => {},
warn: () => {},
debug: () => {},
time: () => ({ stop: () => {} }),
}),
Default: {
error: () => {},
info: () => {},
warn: () => {},
debug: () => {},
},
},
}))
mock.module("@/util/timeout", () => ({
withTimeout: <T>(input: Promise<T>) => input,
}))
mock.module("@/cli/network", () => ({
withNetworkOptions: <T>(input: T) => input,
resolveNetworkOptions: async () => ({
})
spyOn(Rpc, "client").mockImplementation(() => ({
call: async () => ({ url: "http://127.0.0.1" }) as never,
on: () => () => {},
}))
spyOn(UI, "error").mockImplementation(() => {})
spyOn(Timeout, "withTimeout").mockImplementation((input) => input)
spyOn(Network, "resolveNetworkOptions").mockResolvedValue({
mdns: false,
port: 0,
hostname: "127.0.0.1",
}),
}))
mock.module("../../../src/cli/cmd/tui/win32", () => ({
win32DisableProcessedInput: () => {},
win32InstallCtrlCGuard: () => undefined,
}))
mock.module("@/config/tui", () => ({
TuiConfig: {
get: () => ({}),
},
}))
mock.module("@/project/instance", () => ({
Instance: {
provide: async (input: { directory: string; fn: () => Promise<unknown> | unknown }) => {
seen.inst.push(input.directory)
return input.fn()
},
},
}))
mdnsDomain: "opencode.local",
cors: [],
})
spyOn(Win32, "win32DisableProcessedInput").mockImplementation(() => {})
spyOn(Win32, "win32InstallCtrlCGuard").mockReturnValue(undefined)
spyOn(TuiConfig, "get").mockResolvedValue({})
spyOn(Instance, "provide").mockImplementation(async (input) => {
seen.inst.push(input.directory)
return input.fn()
})
}
describe("tui thread", () => {
afterEach(() => {
mock.restore()
})
async function call(project?: string) {
const { TuiThreadCommand } = await import("../../../src/cli/cmd/tui/thread")
const args: Parameters<NonNullable<typeof TuiThreadCommand.handler>>[0] = {
@@ -107,6 +77,7 @@ describe("tui thread", () => {
}
async function check(project?: string) {
setup()
await using tmp = await tmpdir({ git: true })
const cwd = process.cwd()
const pwd = process.env.PWD

View File

@@ -821,9 +821,12 @@ test("dedupes concurrent config dependency installs for the same dir", async ()
})
const online = spyOn(Network, "online").mockReturnValue(false)
const run = spyOn(BunProc, "run").mockImplementation(async (_cmd, opts) => {
calls += 1
start()
await gate
const hit = path.normalize(opts?.cwd ?? "") === path.normalize(dir)
if (hit) {
calls += 1
start()
await gate
}
const mod = path.join(opts?.cwd ?? "", "node_modules", "@opencode-ai", "plugin")
await fs.mkdir(mod, { recursive: true })
await Filesystem.write(
@@ -883,12 +886,16 @@ test("serializes config dependency installs across dirs", async () => {
const online = spyOn(Network, "online").mockReturnValue(false)
const run = spyOn(BunProc, "run").mockImplementation(async (_cmd, opts) => {
calls += 1
open += 1
peak = Math.max(peak, open)
if (calls === 1) {
start()
await gate
const cwd = path.normalize(opts?.cwd ?? "")
const hit = cwd === path.normalize(a) || cwd === path.normalize(b)
if (hit) {
calls += 1
open += 1
peak = Math.max(peak, open)
if (calls === 1) {
start()
await gate
}
}
const mod = path.join(opts?.cwd ?? "", "node_modules", "@opencode-ai", "plugin")
await fs.mkdir(mod, { recursive: true })
@@ -896,7 +903,9 @@ test("serializes config dependency installs across dirs", async () => {
path.join(mod, "package.json"),
JSON.stringify({ name: "@opencode-ai/plugin", version: "1.0.0" }),
)
open -= 1
if (hit) {
open -= 1
}
return {
code: 0,
stdout: Buffer.alloc(0),

View File

@@ -1,17 +1,13 @@
import { NodeChildProcessSpawner, NodeFileSystem, NodePath } from "@effect/platform-node"
import { NodeFileSystem } from "@effect/platform-node"
import { describe, expect } from "bun:test"
import { Effect, Layer } from "effect"
import { provideTmpdirInstance } from "../fixture/fixture"
import { testEffect } from "../lib/effect"
import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
import { Format } from "../../src/format"
import { Config } from "../../src/config/config"
import * as Formatter from "../../src/format/formatter"
const node = NodeChildProcessSpawner.layer.pipe(
Layer.provideMerge(Layer.mergeAll(NodeFileSystem.layer, NodePath.layer)),
)
const it = testEffect(Layer.mergeAll(Format.layer, node).pipe(Layer.provide(Config.defaultLayer)))
const it = testEffect(Layer.mergeAll(Format.defaultLayer, CrossSpawnSpawner.defaultLayer, NodeFileSystem.layer))
describe("Format", () => {
it.effect("status() returns built-in formatters when no config overrides", () =>

View File

@@ -1,6 +1,6 @@
import { OpenAICompatibleChatLanguageModel } from "@/provider/sdk/copilot/chat/openai-compatible-chat-language-model"
import { describe, test, expect, mock } from "bun:test"
import type { LanguageModelV2Prompt } from "@ai-sdk/provider"
import type { LanguageModelV3Prompt } from "@ai-sdk/provider"
async function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promise<T[]> {
const reader = stream.getReader()
@@ -13,7 +13,7 @@ async function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promi
return result
}
const TEST_PROMPT: LanguageModelV2Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
const TEST_PROMPT: LanguageModelV3Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
// Fixtures from copilot_test.exs
const FIXTURES = {
@@ -123,7 +123,7 @@ describe("doStream", () => {
{ type: "text-delta", id: "txt-0", delta: " world" },
{ type: "text-delta", id: "txt-0", delta: "!" },
{ type: "text-end", id: "txt-0" },
{ type: "finish", finishReason: "stop" },
{ type: "finish", finishReason: { unified: "stop" } },
])
})
@@ -201,10 +201,10 @@ describe("doStream", () => {
const finish = parts.find((p) => p.type === "finish")
expect(finish).toMatchObject({
type: "finish",
finishReason: "tool-calls",
finishReason: { unified: "tool-calls" },
usage: {
inputTokens: 19581,
outputTokens: 53,
inputTokens: { total: 19581 },
outputTokens: { total: 53 },
},
})
})
@@ -256,10 +256,10 @@ describe("doStream", () => {
const finish = parts.find((p) => p.type === "finish")
expect(finish).toMatchObject({
type: "finish",
finishReason: "stop",
finishReason: { unified: "stop" },
usage: {
inputTokens: 5778,
outputTokens: 59,
inputTokens: { total: 5778 },
outputTokens: { total: 59 },
},
providerMetadata: {
copilot: {
@@ -315,7 +315,7 @@ describe("doStream", () => {
const finish = parts.find((p) => p.type === "finish")
expect(finish).toMatchObject({
type: "finish",
finishReason: "stop",
finishReason: { unified: "stop" },
})
})
@@ -388,10 +388,10 @@ describe("doStream", () => {
const finish = parts.find((p) => p.type === "finish")
expect(finish).toMatchObject({
type: "finish",
finishReason: "tool-calls",
finishReason: { unified: "tool-calls" },
usage: {
inputTokens: 3767,
outputTokens: 19,
inputTokens: { total: 3767 },
outputTokens: { total: 19 },
},
})
})
@@ -449,7 +449,7 @@ describe("doStream", () => {
const finish = parts.find((p) => p.type === "finish")
expect(finish).toMatchObject({
type: "finish",
finishReason: "tool-calls",
finishReason: { unified: "tool-calls" },
})
})

View File

@@ -1,408 +1,412 @@
import { test, expect, describe } from "bun:test"
import path from "path"
// TODO: UNCOMMENT WHEN GITLAB SUPPORT IS COMPLETED
//
//
//
// import { test, expect, describe } from "bun:test"
// import path from "path"
import { ProviderID, ModelID } from "../../src/provider/schema"
import { tmpdir } from "../fixture/fixture"
import { Instance } from "../../src/project/instance"
import { Provider } from "../../src/provider/provider"
import { Env } from "../../src/env"
import { Global } from "../../src/global"
import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
// import { ProviderID, ModelID } from "../../src/provider/schema"
// import { tmpdir } from "../fixture/fixture"
// import { Instance } from "../../src/project/instance"
// import { Provider } from "../../src/provider/provider"
// import { Env } from "../../src/env"
// import { Global } from "../../src/global"
// import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
test("GitLab Duo: loads provider with API key from environment", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "test-gitlab-token")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
expect(providers[ProviderID.gitlab].key).toBe("test-gitlab-token")
},
})
})
// test("GitLab Duo: loads provider with API key from environment", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(
// path.join(dir, "opencode.json"),
// JSON.stringify({
// $schema: "https://opencode.ai/config.json",
// }),
// )
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "test-gitlab-token")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// expect(providers[ProviderID.gitlab].key).toBe("test-gitlab-token")
// },
// })
// })
test("GitLab Duo: config instanceUrl option sets baseURL", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
gitlab: {
options: {
instanceUrl: "https://gitlab.example.com",
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "test-token")
Env.set("GITLAB_INSTANCE_URL", "https://gitlab.example.com")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.example.com")
},
})
})
// test("GitLab Duo: config instanceUrl option sets baseURL", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(
// path.join(dir, "opencode.json"),
// JSON.stringify({
// $schema: "https://opencode.ai/config.json",
// provider: {
// gitlab: {
// options: {
// instanceUrl: "https://gitlab.example.com",
// },
// },
// },
// }),
// )
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "test-token")
// Env.set("GITLAB_INSTANCE_URL", "https://gitlab.example.com")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.example.com")
// },
// })
// })
test("GitLab Duo: loads with OAuth token from auth.json", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
}),
)
},
})
// test("GitLab Duo: loads with OAuth token from auth.json", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(
// path.join(dir, "opencode.json"),
// JSON.stringify({
// $schema: "https://opencode.ai/config.json",
// }),
// )
// },
// })
const authPath = path.join(Global.Path.data, "auth.json")
await Bun.write(
authPath,
JSON.stringify({
gitlab: {
type: "oauth",
access: "test-access-token",
refresh: "test-refresh-token",
expires: Date.now() + 3600000,
},
}),
)
// const authPath = path.join(Global.Path.data, "auth.json")
// await Bun.write(
// authPath,
// JSON.stringify({
// gitlab: {
// type: "oauth",
// access: "test-access-token",
// refresh: "test-refresh-token",
// expires: Date.now() + 3600000,
// },
// }),
// )
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
},
})
})
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// },
// })
// })
test("GitLab Duo: loads with Personal Access Token from auth.json", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
}),
)
},
})
// test("GitLab Duo: loads with Personal Access Token from auth.json", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(
// path.join(dir, "opencode.json"),
// JSON.stringify({
// $schema: "https://opencode.ai/config.json",
// }),
// )
// },
// })
const authPath2 = path.join(Global.Path.data, "auth.json")
await Bun.write(
authPath2,
JSON.stringify({
gitlab: {
type: "api",
key: "glpat-test-pat-token",
},
}),
)
// const authPath2 = path.join(Global.Path.data, "auth.json")
// await Bun.write(
// authPath2,
// JSON.stringify({
// gitlab: {
// type: "api",
// key: "glpat-test-pat-token",
// },
// }),
// )
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
expect(providers[ProviderID.gitlab].key).toBe("glpat-test-pat-token")
},
})
})
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// expect(providers[ProviderID.gitlab].key).toBe("glpat-test-pat-token")
// },
// })
// })
test("GitLab Duo: supports self-hosted instance configuration", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
gitlab: {
options: {
instanceUrl: "https://gitlab.company.internal",
apiKey: "glpat-internal-token",
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_INSTANCE_URL", "https://gitlab.company.internal")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.company.internal")
},
})
})
// test("GitLab Duo: supports self-hosted instance configuration", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(
// path.join(dir, "opencode.json"),
// JSON.stringify({
// $schema: "https://opencode.ai/config.json",
// provider: {
// gitlab: {
// options: {
// instanceUrl: "https://gitlab.company.internal",
// apiKey: "glpat-internal-token",
// },
// },
// },
// }),
// )
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_INSTANCE_URL", "https://gitlab.company.internal")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.company.internal")
// },
// })
// })
test("GitLab Duo: config apiKey takes precedence over environment variable", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
gitlab: {
options: {
apiKey: "config-token",
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "env-token")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
},
})
})
// test("GitLab Duo: config apiKey takes precedence over environment variable", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(
// path.join(dir, "opencode.json"),
// JSON.stringify({
// $schema: "https://opencode.ai/config.json",
// provider: {
// gitlab: {
// options: {
// apiKey: "config-token",
// },
// },
// },
// }),
// )
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "env-token")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// },
// })
// })
test("GitLab Duo: includes context-1m beta header in aiGatewayHeaders", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "test-token")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
expect(providers[ProviderID.gitlab].options?.aiGatewayHeaders?.["anthropic-beta"]).toContain(
"context-1m-2025-08-07",
)
},
})
})
// test("GitLab Duo: includes context-1m beta header in aiGatewayHeaders", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(
// path.join(dir, "opencode.json"),
// JSON.stringify({
// $schema: "https://opencode.ai/config.json",
// }),
// )
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "test-token")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// expect(providers[ProviderID.gitlab].options?.aiGatewayHeaders?.["anthropic-beta"]).toContain(
// "context-1m-2025-08-07",
// )
// },
// })
// })
test("GitLab Duo: supports feature flags configuration", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
provider: {
gitlab: {
options: {
featureFlags: {
duo_agent_platform_agentic_chat: true,
duo_agent_platform: true,
},
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "test-token")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
expect(providers[ProviderID.gitlab].options?.featureFlags).toBeDefined()
expect(providers[ProviderID.gitlab].options?.featureFlags?.duo_agent_platform_agentic_chat).toBe(true)
},
})
})
// test("GitLab Duo: supports feature flags configuration", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(
// path.join(dir, "opencode.json"),
// JSON.stringify({
// $schema: "https://opencode.ai/config.json",
// provider: {
// gitlab: {
// options: {
// featureFlags: {
// duo_agent_platform_agentic_chat: true,
// duo_agent_platform: true,
// },
// },
// },
// },
// }),
// )
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "test-token")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// expect(providers[ProviderID.gitlab].options?.featureFlags).toBeDefined()
// expect(providers[ProviderID.gitlab].options?.featureFlags?.duo_agent_platform_agentic_chat).toBe(true)
// },
// })
// })
test("GitLab Duo: has multiple agentic chat models available", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
}),
)
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "test-token")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
const models = Object.keys(providers[ProviderID.gitlab].models)
expect(models.length).toBeGreaterThan(0)
expect(models).toContain("duo-chat-haiku-4-5")
expect(models).toContain("duo-chat-sonnet-4-5")
expect(models).toContain("duo-chat-opus-4-5")
},
})
})
// test("GitLab Duo: has multiple agentic chat models available", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(
// path.join(dir, "opencode.json"),
// JSON.stringify({
// $schema: "https://opencode.ai/config.json",
// }),
// )
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "test-token")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// const models = Object.keys(providers[ProviderID.gitlab].models)
// expect(models.length).toBeGreaterThan(0)
// expect(models).toContain("duo-chat-haiku-4-5")
// expect(models).toContain("duo-chat-sonnet-4-5")
// expect(models).toContain("duo-chat-opus-4-5")
// },
// })
// })
describe("GitLab Duo: workflow model routing", () => {
test("duo-workflow-* model routes through workflowChat", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "test-token")
},
fn: async () => {
const providers = await Provider.list()
const gitlab = providers[ProviderID.gitlab]
expect(gitlab).toBeDefined()
gitlab.models["duo-workflow-sonnet-4-6"] = {
id: ModelID.make("duo-workflow-sonnet-4-6"),
providerID: ProviderID.make("gitlab"),
name: "Agent Platform (Claude Sonnet 4.6)",
family: "",
api: { id: "duo-workflow-sonnet-4-6", url: "https://gitlab.com", npm: "gitlab-ai-provider" },
status: "active",
headers: {},
options: { workflowRef: "claude_sonnet_4_6" },
cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
limit: { context: 200000, output: 64000 },
capabilities: {
temperature: false,
reasoning: true,
attachment: true,
toolcall: true,
input: { text: true, audio: false, image: true, video: false, pdf: true },
output: { text: true, audio: false, image: false, video: false, pdf: false },
interleaved: false,
},
release_date: "",
variants: {},
}
const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-workflow-sonnet-4-6"))
expect(model).toBeDefined()
expect(model.options?.workflowRef).toBe("claude_sonnet_4_6")
const language = await Provider.getLanguage(model)
expect(language).toBeDefined()
expect(language).toBeInstanceOf(GitLabWorkflowLanguageModel)
},
})
})
// describe("GitLab Duo: workflow model routing", () => {
// test("duo-workflow-* model routes through workflowChat", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "test-token")
// },
// fn: async () => {
// const providers = await Provider.list()
// const gitlab = providers[ProviderID.gitlab]
// expect(gitlab).toBeDefined()
// gitlab.models["duo-workflow-sonnet-4-6"] = {
// id: ModelID.make("duo-workflow-sonnet-4-6"),
// providerID: ProviderID.make("gitlab"),
// name: "Agent Platform (Claude Sonnet 4.6)",
// family: "",
// api: { id: "duo-workflow-sonnet-4-6", url: "https://gitlab.com", npm: "gitlab-ai-provider" },
// status: "active",
// headers: {},
// options: { workflowRef: "claude_sonnet_4_6" },
// cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
// limit: { context: 200000, output: 64000 },
// capabilities: {
// temperature: false,
// reasoning: true,
// attachment: true,
// toolcall: true,
// input: { text: true, audio: false, image: true, video: false, pdf: true },
// output: { text: true, audio: false, image: false, video: false, pdf: false },
// interleaved: false,
// },
// release_date: "",
// variants: {},
// }
// const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-workflow-sonnet-4-6"))
// expect(model).toBeDefined()
// expect(model.options?.workflowRef).toBe("claude_sonnet_4_6")
// const language = await Provider.getLanguage(model)
// expect(language).toBeDefined()
// expect(language).toBeInstanceOf(GitLabWorkflowLanguageModel)
// },
// })
// })
test("duo-chat-* model routes through agenticChat (not workflow)", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "test-token")
},
fn: async () => {
const providers = await Provider.list()
expect(providers[ProviderID.gitlab]).toBeDefined()
const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
expect(model).toBeDefined()
const language = await Provider.getLanguage(model)
expect(language).toBeDefined()
expect(language).not.toBeInstanceOf(GitLabWorkflowLanguageModel)
},
})
})
// test("duo-chat-* model routes through agenticChat (not workflow)", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "test-token")
// },
// fn: async () => {
// const providers = await Provider.list()
// expect(providers[ProviderID.gitlab]).toBeDefined()
// const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
// expect(model).toBeDefined()
// const language = await Provider.getLanguage(model)
// expect(language).toBeDefined()
// expect(language).not.toBeInstanceOf(GitLabWorkflowLanguageModel)
// },
// })
// })
test("model.options merged with provider.options in getLanguage", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "test-token")
},
fn: async () => {
const providers = await Provider.list()
const gitlab = providers[ProviderID.gitlab]
expect(gitlab.options?.featureFlags).toBeDefined()
const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
expect(model).toBeDefined()
expect(model.options).toBeDefined()
},
})
})
})
// test("model.options merged with provider.options in getLanguage", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "test-token")
// },
// fn: async () => {
// const providers = await Provider.list()
// const gitlab = providers[ProviderID.gitlab]
// expect(gitlab.options?.featureFlags).toBeDefined()
// const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
// expect(model).toBeDefined()
// expect(model.options).toBeDefined()
// },
// })
// })
// })
describe("GitLab Duo: static models", () => {
test("static duo-chat models always present regardless of discovery", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
},
})
await Instance.provide({
directory: tmp.path,
init: async () => {
Env.set("GITLAB_TOKEN", "test-token")
},
fn: async () => {
const providers = await Provider.list()
const models = Object.keys(providers[ProviderID.gitlab].models)
expect(models).toContain("duo-chat-haiku-4-5")
expect(models).toContain("duo-chat-sonnet-4-5")
expect(models).toContain("duo-chat-opus-4-5")
},
})
})
})
// describe("GitLab Duo: static models", () => {
// test("static duo-chat models always present regardless of discovery", async () => {
// await using tmp = await tmpdir({
// init: async (dir) => {
// await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
// },
// })
// await Instance.provide({
// directory: tmp.path,
// init: async () => {
// Env.set("GITLAB_TOKEN", "test-token")
// },
// fn: async () => {
// const providers = await Provider.list()
// const models = Object.keys(providers[ProviderID.gitlab].models)
// expect(models).toContain("duo-chat-haiku-4-5")
// expect(models).toContain("duo-chat-sonnet-4-5")
// expect(models).toContain("duo-chat-opus-4-5")
// },
// })
// })
// })

View File

@@ -3,7 +3,6 @@ import path from "path"
import { tool, type ModelMessage } from "ai"
import z from "zod"
import { LLM } from "../../src/session/llm"
import { Global } from "../../src/global"
import { Instance } from "../../src/project/instance"
import { Provider } from "../../src/provider/provider"
import { ProviderTransform } from "../../src/provider/transform"
@@ -535,6 +534,130 @@ describe("session.llm.stream", () => {
})
})
test("accepts user image attachments as data URLs for OpenAI models", async () => {
const server = state.server
if (!server) {
throw new Error("Server not initialized")
}
const source = await loadFixture("openai", "gpt-5.2")
const model = source.model
const chunks = [
{
type: "response.created",
response: {
id: "resp-data-url",
created_at: Math.floor(Date.now() / 1000),
model: model.id,
service_tier: null,
},
},
{
type: "response.output_text.delta",
item_id: "item-data-url",
delta: "Looks good",
logprobs: null,
},
{
type: "response.completed",
response: {
incomplete_details: null,
usage: {
input_tokens: 1,
input_tokens_details: null,
output_tokens: 1,
output_tokens_details: null,
},
service_tier: null,
},
},
]
const request = waitRequest("/responses", createEventResponse(chunks, true))
const image = `data:image/png;base64,${Buffer.from(
await Bun.file(path.join(import.meta.dir, "../tool/fixtures/large-image.png")).arrayBuffer(),
).toString("base64")}`
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
enabled_providers: ["openai"],
provider: {
openai: {
name: "OpenAI",
env: ["OPENAI_API_KEY"],
npm: "@ai-sdk/openai",
api: "https://api.openai.com/v1",
models: {
[model.id]: model,
},
options: {
apiKey: "test-openai-key",
baseURL: `${server.url.origin}/v1`,
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const resolved = await Provider.getModel(ProviderID.openai, ModelID.make(model.id))
const sessionID = SessionID.make("session-test-data-url")
const agent = {
name: "test",
mode: "primary",
options: {},
permission: [{ permission: "*", pattern: "*", action: "allow" }],
} satisfies Agent.Info
const user = {
id: MessageID.make("user-data-url"),
sessionID,
role: "user",
time: { created: Date.now() },
agent: agent.name,
model: { providerID: ProviderID.make("openai"), modelID: resolved.id },
} satisfies MessageV2.User
const stream = await LLM.stream({
user,
sessionID,
model: resolved,
agent,
system: ["You are a helpful assistant."],
abort: new AbortController().signal,
messages: [
{
role: "user",
content: [
{ type: "text", text: "Describe this image" },
{
type: "file",
mediaType: "image/png",
filename: "large-image.png",
data: image,
},
],
},
] as ModelMessage[],
tools: {},
})
for await (const _ of stream.fullStream) {
}
const capture = await request
expect(capture.url.pathname.endsWith("/responses")).toBe(true)
},
})
})
test("sends messages API payload for Anthropic models", async () => {
const server = state.server
if (!server) {
@@ -625,7 +748,7 @@ describe("session.llm.stream", () => {
role: "user",
time: { created: Date.now() },
agent: agent.name,
model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
model: { providerID: ProviderID.make("minimax"), modelID: ModelID.make("MiniMax-M2.7") },
} satisfies MessageV2.User
const stream = await LLM.stream({

View File

@@ -108,7 +108,7 @@ function basePart(messageID: string, id: string) {
}
describe("session.message-v2.toModelMessage", () => {
test("filters out messages with no parts", () => {
test("filters out messages with no parts", async () => {
const input: MessageV2.WithParts[] = [
{
info: userInfo("m-empty"),
@@ -126,7 +126,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "user",
content: [{ type: "text", text: "hello" }],
@@ -134,7 +134,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})
test("filters out messages with only ignored parts", () => {
test("filters out messages with only ignored parts", async () => {
const messageID = "m-user"
const input: MessageV2.WithParts[] = [
@@ -151,10 +151,10 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([])
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([])
})
test("includes synthetic text parts", () => {
test("includes synthetic text parts", async () => {
const messageID = "m-user"
const input: MessageV2.WithParts[] = [
@@ -182,7 +182,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "user",
content: [{ type: "text", text: "hello" }],
@@ -194,7 +194,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})
test("converts user text/file parts and injects compaction/subtask prompts", () => {
test("converts user text/file parts and injects compaction/subtask prompts", async () => {
const messageID = "m-user"
const input: MessageV2.WithParts[] = [
@@ -249,7 +249,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "user",
content: [
@@ -267,7 +267,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})
test("converts assistant tool completion into tool-call + tool-result messages with attachments", () => {
test("converts assistant tool completion into tool-call + tool-result messages with attachments", async () => {
const userID = "m-user"
const assistantID = "m-assistant"
@@ -319,7 +319,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "user",
content: [{ type: "text", text: "run tool" }],
@@ -359,7 +359,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})
test("omits provider metadata when assistant model differs", () => {
test("omits provider metadata when assistant model differs", async () => {
const userID = "m-user"
const assistantID = "m-assistant"
@@ -402,7 +402,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "user",
content: [{ type: "text", text: "run tool" }],
@@ -434,7 +434,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})
test("replaces compacted tool output with placeholder", () => {
test("replaces compacted tool output with placeholder", async () => {
const userID = "m-user"
const assistantID = "m-assistant"
@@ -470,7 +470,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "user",
content: [{ type: "text", text: "run tool" }],
@@ -501,7 +501,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})
test("converts assistant tool error into error-text tool result", () => {
test("converts assistant tool error into error-text tool result", async () => {
const userID = "m-user"
const assistantID = "m-assistant"
@@ -537,7 +537,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "user",
content: [{ type: "text", text: "run tool" }],
@@ -570,7 +570,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})
test("filters assistant messages with non-abort errors", () => {
test("filters assistant messages with non-abort errors", async () => {
const assistantID = "m-assistant"
const input: MessageV2.WithParts[] = [
@@ -590,10 +590,10 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([])
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([])
})
test("includes aborted assistant messages only when they have non-step-start/reasoning content", () => {
test("includes aborted assistant messages only when they have non-step-start/reasoning content", async () => {
const assistantID1 = "m-assistant-1"
const assistantID2 = "m-assistant-2"
@@ -633,7 +633,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "assistant",
content: [
@@ -644,7 +644,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})
test("splits assistant messages on step-start boundaries", () => {
test("splits assistant messages on step-start boundaries", async () => {
const assistantID = "m-assistant"
const input: MessageV2.WithParts[] = [
@@ -669,7 +669,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "assistant",
content: [{ type: "text", text: "first" }],
@@ -681,7 +681,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})
test("drops messages that only contain step-start parts", () => {
test("drops messages that only contain step-start parts", async () => {
const assistantID = "m-assistant"
const input: MessageV2.WithParts[] = [
@@ -696,10 +696,10 @@ describe("session.message-v2.toModelMessage", () => {
},
]
expect(MessageV2.toModelMessages(input, model)).toStrictEqual([])
expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([])
})
test("converts pending/running tool calls to error results to prevent dangling tool_use", () => {
test("converts pending/running tool calls to error results to prevent dangling tool_use", async () => {
const userID = "m-user"
const assistantID = "m-assistant"
@@ -743,7 +743,7 @@ describe("session.message-v2.toModelMessage", () => {
},
]
const result = MessageV2.toModelMessages(input, model)
const result = await MessageV2.toModelMessages(input, model)
expect(result).toStrictEqual([
{

View File

@@ -363,20 +363,25 @@ describe("structured-output.createStructuredOutputTool", () => {
expect(inputSchema.jsonSchema?.properties?.tags?.items?.type).toBe("string")
})
test("toModelOutput returns text value", () => {
test("toModelOutput returns text value", async () => {
const tool = SessionPrompt.createStructuredOutputTool({
schema: { type: "object" },
onSuccess: () => {},
})
expect(tool.toModelOutput).toBeDefined()
const modelOutput = tool.toModelOutput!({
output: "Test output",
title: "Test",
metadata: { valid: true },
})
const modelOutput = await Promise.resolve(
tool.toModelOutput!({
toolCallId: "test-call-id",
input: {},
output: {
output: "Test output",
},
}),
)
expect(modelOutput.type).toBe("text")
if (modelOutput.type !== "text") throw new Error("expected text model output")
expect(modelOutput.value).toBe("Test output")
})

View File

@@ -9,7 +9,8 @@
overflow: visible;
&.tool-collapsible {
gap: 8px;
--tool-content-gap: 8px;
gap: var(--tool-content-gap);
}
[data-slot="collapsible-trigger"] {

View File

@@ -636,14 +636,17 @@
}
[data-component="context-tool-group-list"] {
padding: 6px 0 4px 0;
padding-top: 6px;
padding-right: 0;
padding-bottom: 4px;
padding-left: 13px;
display: flex;
flex-direction: column;
gap: 2px;
gap: 8px;
[data-slot="context-tool-group-item"] {
min-width: 0;
padding: 6px 0;
padding: 0;
}
}
@@ -1154,8 +1157,8 @@
position: sticky;
top: var(--sticky-accordion-top, 0px);
z-index: 20;
height: 40px;
padding-bottom: 8px;
height: calc(32px + var(--tool-content-gap));
padding-bottom: var(--tool-content-gap);
background-color: var(--background-stronger);
}
}

View File

@@ -156,37 +156,75 @@ export type PartComponent = Component<MessagePartProps>
export const PART_MAPPING: Record<string, PartComponent | undefined> = {}
const TEXT_RENDER_THROTTLE_MS = 100
const TEXT_RENDER_PACE_MS = 24
const TEXT_RENDER_SNAP = /[\s.,!?;:)\]]/
function createThrottledValue(getValue: () => string) {
function step(size: number) {
if (size <= 12) return 2
if (size <= 48) return 4
if (size <= 96) return 8
return Math.min(24, Math.ceil(size / 8))
}
function next(text: string, start: number) {
const end = Math.min(text.length, start + step(text.length - start))
const max = Math.min(text.length, end + 8)
for (let i = end; i < max; i++) {
if (TEXT_RENDER_SNAP.test(text[i] ?? "")) return i + 1
}
return end
}
function createPacedValue(getValue: () => string, live?: () => boolean) {
const [value, setValue] = createSignal(getValue())
let shown = getValue()
let timeout: ReturnType<typeof setTimeout> | undefined
let last = 0
createEffect(() => {
const next = getValue()
const now = Date.now()
const clear = () => {
if (!timeout) return
clearTimeout(timeout)
timeout = undefined
}
const remaining = TEXT_RENDER_THROTTLE_MS - (now - last)
if (remaining <= 0) {
if (timeout) {
clearTimeout(timeout)
timeout = undefined
}
last = now
setValue(next)
const sync = (text: string) => {
shown = text
setValue(text)
}
const run = () => {
timeout = undefined
const text = getValue()
if (!live?.()) {
sync(text)
return
}
if (timeout) clearTimeout(timeout)
timeout = setTimeout(() => {
last = Date.now()
setValue(next)
timeout = undefined
}, remaining)
if (!text.startsWith(shown) || text.length <= shown.length) {
sync(text)
return
}
const end = next(text, shown.length)
sync(text.slice(0, end))
if (end < text.length) timeout = setTimeout(run, TEXT_RENDER_PACE_MS)
}
createEffect(() => {
const text = getValue()
if (!live?.()) {
clear()
sync(text)
return
}
if (!text.startsWith(shown) || text.length < shown.length) {
clear()
sync(text)
return
}
if (text.length === shown.length || timeout) return
timeout = setTimeout(run, TEXT_RENDER_PACE_MS)
})
onCleanup(() => {
if (timeout) clearTimeout(timeout)
clear()
})
return value
@@ -790,7 +828,7 @@ function ContextToolGroup(props: { parts: ToolPart[]; busy?: boolean }) {
const summary = createMemo(() => contextToolSummary(props.parts))
return (
<Collapsible open={open()} onOpenChange={setOpen} variant="ghost">
<Collapsible open={open()} onOpenChange={setOpen} variant="ghost" class="tool-collapsible">
<Collapsible.Trigger>
<div data-component="context-tool-group-trigger">
<span
@@ -1332,11 +1370,11 @@ PART_MAPPING["text"] = function TextPartDisplay(props) {
return items.filter((x) => !!x).join(" \u00B7 ")
})
const displayText = () => (part().text ?? "").trim()
const throttledText = createThrottledValue(displayText)
const streaming = createMemo(
() => props.message.role === "assistant" && typeof (props.message as AssistantMessage).time.completed !== "number",
)
const displayText = () => (part().text ?? "").trim()
const throttledText = createPacedValue(displayText, streaming)
const isLastTextPart = createMemo(() => {
const last = (data.store.part?.[props.message.id] ?? [])
.filter((item): item is TextPart => item?.type === "text" && !!item.text?.trim())
@@ -1395,11 +1433,11 @@ PART_MAPPING["text"] = function TextPartDisplay(props) {
PART_MAPPING["reasoning"] = function ReasoningPartDisplay(props) {
const part = () => props.part as ReasoningPart
const text = () => part().text.trim()
const throttledText = createThrottledValue(text)
const streaming = createMemo(
() => props.message.role === "assistant" && typeof (props.message as AssistantMessage).time.completed !== "number",
)
const text = () => part().text.trim()
const throttledText = createPacedValue(text, streaming)
return (
<Show when={throttledText()}>

View File

@@ -567,6 +567,7 @@ function compactionPart(): CompactionPart {
const MD = "markdown.css"
const MP = "message-part.css"
const ST = "session-turn.css"
const CL = "collapsible.css"
/**
* Source mapping for a CSS control.
@@ -1039,6 +1040,48 @@ const CSS_CONTROLS: CSSControl[] = [
},
// --- Tool parts ---
{
key: "tool-content-gap",
label: "Trigger/content gap",
group: "Tool Parts",
type: "range",
initial: "8",
selector: '[data-component="collapsible"].tool-collapsible',
property: "--tool-content-gap",
min: "0",
max: "24",
step: "1",
unit: "px",
source: { file: CL, anchor: "&.tool-collapsible {", prop: "--tool-content-gap", format: px },
},
{
key: "context-tool-gap",
label: "Explored tool gap",
group: "Explored Group",
type: "range",
initial: "14",
selector: '[data-component="context-tool-group-list"]',
property: "gap",
min: "0",
max: "40",
step: "1",
unit: "px",
source: { file: MP, anchor: '[data-component="context-tool-group-list"]', prop: "gap", format: px },
},
{
key: "context-tool-indent",
label: "Explored indent",
group: "Explored Group",
type: "range",
initial: "0",
selector: '[data-component="context-tool-group-list"]',
property: "padding-left",
min: "0",
max: "48",
step: "1",
unit: "px",
source: { file: MP, anchor: '[data-component="context-tool-group-list"]', prop: "padding-left", format: px },
},
{
key: "bash-max-height",
label: "Shell output max-height",
@@ -1099,8 +1142,9 @@ function Playground() {
const el = (root.querySelector(sample(ctrl)) ?? root.querySelector(ctrl.selector)) as HTMLElement | null
if (!el) continue
const styles = getComputedStyle(el)
// Use bracket access — getPropertyValue doesn't resolve shorthands
const raw = (styles as any)[ctrl.property] as string
const raw = ctrl.property.startsWith("--")
? styles.getPropertyValue(ctrl.property).trim()
: ((styles as any)[ctrl.property] as string)
if (!raw) continue
// Shorthands may return "24px 0px" — take the first value
const num = parseFloat(raw.split(" ")[0])

View File

@@ -0,0 +1,119 @@
--- a/dist/index.js
+++ b/dist/index.js
@@ -3155,15 +3155,6 @@
});
}
baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
- } else {
- if (topP != null && temperature != null) {
- warnings.push({
- type: "unsupported",
- feature: "topP",
- details: `topP is not supported when temperature is set. topP is ignored.`
- });
- baseArgs.top_p = void 0;
- }
}
if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
if (maxOutputTokens != null) {
@@ -5180,4 +5171,4 @@
createAnthropic,
forwardAnthropicContainerIdFromLastStep
});
-//# sourceMappingURL=index.js.map
\ No newline at end of file
+//# sourceMappingURL=index.js.map
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -3192,15 +3192,6 @@
});
}
baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
- } else {
- if (topP != null && temperature != null) {
- warnings.push({
- type: "unsupported",
- feature: "topP",
- details: `topP is not supported when temperature is set. topP is ignored.`
- });
- baseArgs.top_p = void 0;
- }
}
if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
if (maxOutputTokens != null) {
@@ -5256,4 +5247,4 @@
createAnthropic,
forwardAnthropicContainerIdFromLastStep
};
-//# sourceMappingURL=index.mjs.map
\ No newline at end of file
+//# sourceMappingURL=index.mjs.map
--- a/dist/internal/index.js
+++ b/dist/internal/index.js
@@ -3147,15 +3147,6 @@
});
}
baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
- } else {
- if (topP != null && temperature != null) {
- warnings.push({
- type: "unsupported",
- feature: "topP",
- details: `topP is not supported when temperature is set. topP is ignored.`
- });
- baseArgs.top_p = void 0;
- }
}
if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
if (maxOutputTokens != null) {
@@ -5080,4 +5071,4 @@
anthropicTools,
prepareTools
});
-//# sourceMappingURL=index.js.map
\ No newline at end of file
+//# sourceMappingURL=index.js.map
--- a/dist/internal/index.mjs
+++ b/dist/internal/index.mjs
@@ -3176,15 +3176,6 @@
});
}
baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
- } else {
- if (topP != null && temperature != null) {
- warnings.push({
- type: "unsupported",
- feature: "topP",
- details: `topP is not supported when temperature is set. topP is ignored.`
- });
- baseArgs.top_p = void 0;
- }
}
if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
if (maxOutputTokens != null) {
@@ -5148,4 +5139,4 @@
anthropicTools,
prepareTools
};
-//# sourceMappingURL=index.mjs.map
\ No newline at end of file
+//# sourceMappingURL=index.mjs.map
--- a/src/anthropic-messages-language-model.ts
+++ b/src/anthropic-messages-language-model.ts
@@ -534,16 +534,6 @@
// adjust max tokens to account for thinking:
baseArgs.max_tokens = maxTokens + (thinkingBudget ?? 0);
- } else {
- // Only check temperature/topP mutual exclusivity when thinking is not enabled
- if (topP != null && temperature != null) {
- warnings.push({
- type: 'unsupported',
- feature: 'topP',
- details: `topP is not supported when temperature is set. topP is ignored.`,
- });
- baseArgs.top_p = undefined;
- }
}
// limit to max output tokens for known models to enable model switching without breaking it:

View File

@@ -0,0 +1,61 @@
diff --git a/dist/index.js b/dist/index.js
index 9aa8e83684777e860d905ff7a6895995a7347a4f..820797581ac2a33e731e139da3ebc98b4d93fdcf 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -395,10 +395,13 @@ function validateDownloadUrl(url) {
message: `Invalid URL: ${url}`
});
}
+ if (parsed.protocol === "data:") {
+ return;
+ }
if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
throw new DownloadError({
url,
- message: `URL scheme must be http or https, got ${parsed.protocol}`
+ message: `URL scheme must be http, https, or data, got ${parsed.protocol}`
});
}
const hostname = parsed.hostname;
diff --git a/dist/index.mjs b/dist/index.mjs
index 095fdc188b1d7f227b42591c78ecb71fe2e2cf8b..ca5227d3b6e358aea8ecd85782a0a2b48130a2c9 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -299,10 +299,13 @@ function validateDownloadUrl(url) {
message: `Invalid URL: ${url}`
});
}
+ if (parsed.protocol === "data:") {
+ return;
+ }
if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
throw new DownloadError({
url,
- message: `URL scheme must be http or https, got ${parsed.protocol}`
+ message: `URL scheme must be http, https, or data, got ${parsed.protocol}`
});
}
const hostname = parsed.hostname;
diff --git a/src/validate-download-url.ts b/src/validate-download-url.ts
index 7c026ad6b400aef551ce3a424c343e1cedc60997..6a2f11398e58f80a8e11995ac1ce5f4d7c110561 100644
--- a/src/validate-download-url.ts
+++ b/src/validate-download-url.ts
@@ -18,11 +18,16 @@ export function validateDownloadUrl(url: string): void {
});
}
- // Only allow http and https protocols
+ // data: URLs are inline content and do not make network requests.
+ if (parsed.protocol === 'data:') {
+ return;
+ }
+
+ // Only allow http and https network protocols
if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:') {
throw new DownloadError({
url,
- message: `URL scheme must be http or https, got ${parsed.protocol}`,
+ message: `URL scheme must be http, https, or data, got ${parsed.protocol}`,
});
}

View File

@@ -1,108 +0,0 @@
diff --git a/dist/index.mjs b/dist/index.mjs
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -959,7 +959,7 @@
model: z4.string().nullish(),
object: z4.literal("response"),
output: z4.array(outputItemSchema),
- usage: xaiResponsesUsageSchema,
+ usage: xaiResponsesUsageSchema.nullish(),
status: z4.string()
});
var xaiResponsesChunkSchema = z4.union([
\ No newline at end of file
@@ -1143,6 +1143,18 @@
z4.object({
type: z4.literal("response.completed"),
response: xaiResponsesResponseSchema
+ }),
+ z4.object({
+ type: z4.literal("response.function_call_arguments.delta"),
+ item_id: z4.string(),
+ output_index: z4.number(),
+ delta: z4.string()
+ }),
+ z4.object({
+ type: z4.literal("response.function_call_arguments.done"),
+ item_id: z4.string(),
+ output_index: z4.number(),
+ arguments: z4.string()
})
]);
\ No newline at end of file
@@ -1940,6 +1952,9 @@
if (response2.status) {
finishReason = mapXaiResponsesFinishReason(response2.status);
}
+ if (seenToolCalls.size > 0 && finishReason !== "tool-calls") {
+ finishReason = "tool-calls";
+ }
return;
}
if (event.type === "response.output_item.added" || event.type === "response.output_item.done") {
\ No newline at end of file
@@ -2024,7 +2039,7 @@
}
}
} else if (part.type === "function_call") {
- if (!seenToolCalls.has(part.call_id)) {
+ if (event.type === "response.output_item.done" && !seenToolCalls.has(part.call_id)) {
seenToolCalls.add(part.call_id);
controller.enqueue({
type: "tool-input-start",
\ No newline at end of file
diff --git a/dist/index.js b/dist/index.js
--- a/dist/index.js
+++ b/dist/index.js
@@ -964,7 +964,7 @@
model: import_v44.z.string().nullish(),
object: import_v44.z.literal("response"),
output: import_v44.z.array(outputItemSchema),
- usage: xaiResponsesUsageSchema,
+ usage: xaiResponsesUsageSchema.nullish(),
status: import_v44.z.string()
});
var xaiResponsesChunkSchema = import_v44.z.union([
\ No newline at end of file
@@ -1148,6 +1148,18 @@
import_v44.z.object({
type: import_v44.z.literal("response.completed"),
response: xaiResponsesResponseSchema
+ }),
+ import_v44.z.object({
+ type: import_v44.z.literal("response.function_call_arguments.delta"),
+ item_id: import_v44.z.string(),
+ output_index: import_v44.z.number(),
+ delta: import_v44.z.string()
+ }),
+ import_v44.z.object({
+ type: import_v44.z.literal("response.function_call_arguments.done"),
+ item_id: import_v44.z.string(),
+ output_index: import_v44.z.number(),
+ arguments: import_v44.z.string()
})
]);
\ No newline at end of file
@@ -1935,6 +1947,9 @@
if (response2.status) {
finishReason = mapXaiResponsesFinishReason(response2.status);
}
+ if (seenToolCalls.size > 0 && finishReason !== "tool-calls") {
+ finishReason = "tool-calls";
+ }
return;
}
if (event.type === "response.output_item.added" || event.type === "response.output_item.done") {
\ No newline at end of file
@@ -2019,7 +2034,7 @@
}
}
} else if (part.type === "function_call") {
- if (!seenToolCalls.has(part.call_id)) {
+ if (event.type === "response.output_item.done" && !seenToolCalls.has(part.call_id)) {
seenToolCalls.add(part.call_id);
controller.enqueue({
type: "tool-input-start",
\ No newline at end of file

View File

@@ -1,128 +0,0 @@
diff --git a/dist/index.js b/dist/index.js
index f33510a50d11a2cb92a90ea70cc0ac84c89f29b9..e887a60352c0c08ab794b1e6821854dfeefd20cc 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -2110,7 +2110,12 @@ var OpenRouterChatLanguageModel = class {
if (reasoningStarted && !textStarted) {
controller.enqueue({
type: "reasoning-end",
- id: reasoningId || generateId()
+ id: reasoningId || generateId(),
+ providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+ openrouter: {
+ reasoning_details: accumulatedReasoningDetails
+ }
+ } : undefined
});
reasoningStarted = false;
}
@@ -2307,7 +2312,12 @@ var OpenRouterChatLanguageModel = class {
if (reasoningStarted) {
controller.enqueue({
type: "reasoning-end",
- id: reasoningId || generateId()
+ id: reasoningId || generateId(),
+ providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+ openrouter: {
+ reasoning_details: accumulatedReasoningDetails
+ }
+ } : undefined
});
}
if (textStarted) {
diff --git a/dist/index.mjs b/dist/index.mjs
index 8a688331b88b4af738ee4ca8062b5f24124d3d81..6310cb8b7c8d0a728d86e1eed09906c6b4c91ae2 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -2075,7 +2075,12 @@ var OpenRouterChatLanguageModel = class {
if (reasoningStarted && !textStarted) {
controller.enqueue({
type: "reasoning-end",
- id: reasoningId || generateId()
+ id: reasoningId || generateId(),
+ providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+ openrouter: {
+ reasoning_details: accumulatedReasoningDetails
+ }
+ } : undefined
});
reasoningStarted = false;
}
@@ -2272,7 +2277,12 @@ var OpenRouterChatLanguageModel = class {
if (reasoningStarted) {
controller.enqueue({
type: "reasoning-end",
- id: reasoningId || generateId()
+ id: reasoningId || generateId(),
+ providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+ openrouter: {
+ reasoning_details: accumulatedReasoningDetails
+ }
+ } : undefined
});
}
if (textStarted) {
diff --git a/dist/internal/index.js b/dist/internal/index.js
index d40fa66125941155ac13a4619503caba24d89f8a..8dd86d1b473f2fa31c1acd9881d72945b294a197 100644
--- a/dist/internal/index.js
+++ b/dist/internal/index.js
@@ -2064,7 +2064,12 @@ var OpenRouterChatLanguageModel = class {
if (reasoningStarted && !textStarted) {
controller.enqueue({
type: "reasoning-end",
- id: reasoningId || generateId()
+ id: reasoningId || generateId(),
+ providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+ openrouter: {
+ reasoning_details: accumulatedReasoningDetails
+ }
+ } : undefined
});
reasoningStarted = false;
}
@@ -2261,7 +2266,12 @@ var OpenRouterChatLanguageModel = class {
if (reasoningStarted) {
controller.enqueue({
type: "reasoning-end",
- id: reasoningId || generateId()
+ id: reasoningId || generateId(),
+ providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+ openrouter: {
+ reasoning_details: accumulatedReasoningDetails
+ }
+ } : undefined
});
}
if (textStarted) {
diff --git a/dist/internal/index.mjs b/dist/internal/index.mjs
index b0ed9d113549c5c55ea3b1e08abb3db6f92ae5a7..5695930a8e038facc071d58a4179a369a29be9c7 100644
--- a/dist/internal/index.mjs
+++ b/dist/internal/index.mjs
@@ -2030,7 +2030,12 @@ var OpenRouterChatLanguageModel = class {
if (reasoningStarted && !textStarted) {
controller.enqueue({
type: "reasoning-end",
- id: reasoningId || generateId()
+ id: reasoningId || generateId(),
+ providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+ openrouter: {
+ reasoning_details: accumulatedReasoningDetails
+ }
+ } : undefined
});
reasoningStarted = false;
}
@@ -2227,7 +2232,12 @@ var OpenRouterChatLanguageModel = class {
if (reasoningStarted) {
controller.enqueue({
type: "reasoning-end",
- id: reasoningId || generateId()
+ id: reasoningId || generateId(),
+ providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+ openrouter: {
+ reasoning_details: accumulatedReasoningDetails
+ }
+ } : undefined
});
}
if (textStarted) {