mirror of
https://github.com/anomalyco/opencode.git
synced 2026-03-31 19:14:39 +00:00
Compare commits
15 Commits
opencode/g
...
production
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
58f60629a1 | ||
|
|
39a47c9b8c | ||
|
|
ea88044f2e | ||
|
|
e6f6f7aff1 | ||
|
|
48e97b47af | ||
|
|
fe120e3cbf | ||
|
|
f2dd774660 | ||
|
|
e7ff0f17c8 | ||
|
|
2ed756c72c | ||
|
|
054f4be185 | ||
|
|
e3e1e9af50 | ||
|
|
c8389cf96d | ||
|
|
c5442d418d | ||
|
|
fa95a61c4e | ||
|
|
9f3c2bd861 |
38
bun.lock
38
bun.lock
@@ -26,7 +26,7 @@
|
||||
},
|
||||
"packages/app": {
|
||||
"name": "@opencode-ai/app",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@kobalte/core": "catalog:",
|
||||
"@opencode-ai/sdk": "workspace:*",
|
||||
@@ -79,7 +79,7 @@
|
||||
},
|
||||
"packages/console/app": {
|
||||
"name": "@opencode-ai/console-app",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@cloudflare/vite-plugin": "1.15.2",
|
||||
"@ibm/plex": "6.4.1",
|
||||
@@ -113,7 +113,7 @@
|
||||
},
|
||||
"packages/console/core": {
|
||||
"name": "@opencode-ai/console-core",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@aws-sdk/client-sts": "3.782.0",
|
||||
"@jsx-email/render": "1.1.1",
|
||||
@@ -140,7 +140,7 @@
|
||||
},
|
||||
"packages/console/function": {
|
||||
"name": "@opencode-ai/console-function",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@ai-sdk/anthropic": "3.0.64",
|
||||
"@ai-sdk/openai": "3.0.48",
|
||||
@@ -164,7 +164,7 @@
|
||||
},
|
||||
"packages/console/mail": {
|
||||
"name": "@opencode-ai/console-mail",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@jsx-email/all": "2.2.3",
|
||||
"@jsx-email/cli": "1.4.3",
|
||||
@@ -188,7 +188,7 @@
|
||||
},
|
||||
"packages/desktop": {
|
||||
"name": "@opencode-ai/desktop",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@opencode-ai/app": "workspace:*",
|
||||
"@opencode-ai/ui": "workspace:*",
|
||||
@@ -221,7 +221,7 @@
|
||||
},
|
||||
"packages/desktop-electron": {
|
||||
"name": "@opencode-ai/desktop-electron",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@opencode-ai/app": "workspace:*",
|
||||
"@opencode-ai/ui": "workspace:*",
|
||||
@@ -252,7 +252,7 @@
|
||||
},
|
||||
"packages/enterprise": {
|
||||
"name": "@opencode-ai/enterprise",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@opencode-ai/ui": "workspace:*",
|
||||
"@opencode-ai/util": "workspace:*",
|
||||
@@ -281,7 +281,7 @@
|
||||
},
|
||||
"packages/function": {
|
||||
"name": "@opencode-ai/function",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@octokit/auth-app": "8.0.1",
|
||||
"@octokit/rest": "catalog:",
|
||||
@@ -297,7 +297,7 @@
|
||||
},
|
||||
"packages/opencode": {
|
||||
"name": "opencode",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"bin": {
|
||||
"opencode": "./bin/opencode",
|
||||
},
|
||||
@@ -323,7 +323,7 @@
|
||||
"@ai-sdk/provider-utils": "4.0.21",
|
||||
"@ai-sdk/togetherai": "2.0.41",
|
||||
"@ai-sdk/vercel": "2.0.39",
|
||||
"@ai-sdk/xai": "3.0.74",
|
||||
"@ai-sdk/xai": "3.0.75",
|
||||
"@aws-sdk/credential-providers": "3.993.0",
|
||||
"@clack/prompts": "1.0.0-alpha.1",
|
||||
"@effect/platform-node": "catalog:",
|
||||
@@ -423,7 +423,7 @@
|
||||
},
|
||||
"packages/plugin": {
|
||||
"name": "@opencode-ai/plugin",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@opencode-ai/sdk": "workspace:*",
|
||||
"zod": "catalog:",
|
||||
@@ -457,7 +457,7 @@
|
||||
},
|
||||
"packages/sdk/js": {
|
||||
"name": "@opencode-ai/sdk",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"devDependencies": {
|
||||
"@hey-api/openapi-ts": "0.90.10",
|
||||
"@tsconfig/node22": "catalog:",
|
||||
@@ -468,7 +468,7 @@
|
||||
},
|
||||
"packages/slack": {
|
||||
"name": "@opencode-ai/slack",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@opencode-ai/sdk": "workspace:*",
|
||||
"@slack/bolt": "^3.17.1",
|
||||
@@ -503,7 +503,7 @@
|
||||
},
|
||||
"packages/ui": {
|
||||
"name": "@opencode-ai/ui",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@kobalte/core": "catalog:",
|
||||
"@opencode-ai/sdk": "workspace:*",
|
||||
@@ -550,7 +550,7 @@
|
||||
},
|
||||
"packages/util": {
|
||||
"name": "@opencode-ai/util",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"zod": "catalog:",
|
||||
},
|
||||
@@ -561,7 +561,7 @@
|
||||
},
|
||||
"packages/web": {
|
||||
"name": "@opencode-ai/web",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@astrojs/cloudflare": "12.6.3",
|
||||
"@astrojs/markdown-remark": "6.3.1",
|
||||
@@ -719,7 +719,7 @@
|
||||
|
||||
"@ai-sdk/vercel": ["@ai-sdk/vercel@2.0.39", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-8eu3ljJpkCTP4ppcyYB+NcBrkcBoSOFthCSgk5VnjaxnDaOJFaxnPwfddM7wx3RwMk2CiK1O61Px/LlqNc7QkQ=="],
|
||||
|
||||
"@ai-sdk/xai": ["@ai-sdk/xai@3.0.74", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HDDLsT+QrzE3c2QZLRV/HKAwMtXDb0PMDdk1PYUXLJ3r9Qv76zGKGyvJLX7Pu6c8TOHD1mwLrOVYrsTpC/eTMw=="],
|
||||
"@ai-sdk/xai": ["@ai-sdk/xai@3.0.75", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-V8UKK4fNpI9cnrtsZBvUp9O9J6Y9fTKBRoSLyEaNGPirACewixmLDbXsSgAeownPVWiWpK34bFysd+XouI5Ywg=="],
|
||||
|
||||
"@alloc/quick-lru": ["@alloc/quick-lru@5.2.0", "", {}, "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw=="],
|
||||
|
||||
@@ -5289,6 +5289,8 @@
|
||||
|
||||
"accepts/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
|
||||
|
||||
"ai-gateway-provider/@ai-sdk/xai": ["@ai-sdk/xai@3.0.74", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HDDLsT+QrzE3c2QZLRV/HKAwMtXDb0PMDdk1PYUXLJ3r9Qv76zGKGyvJLX7Pu6c8TOHD1mwLrOVYrsTpC/eTMw=="],
|
||||
|
||||
"ajv-keywords/ajv": ["ajv@6.14.0", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw=="],
|
||||
|
||||
"ansi-align/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
{
|
||||
"nodeModules": {
|
||||
"x86_64-linux": "sha256-0JgEA54d1ZZ0IWUmxCJP2fnQ2cpmLO25G+hafUbHFLw=",
|
||||
"aarch64-linux": "sha256-oB4Ptc+MH76MEw0DZodmCCz87qOmbzi26751ZM4DYyE=",
|
||||
"aarch64-darwin": "sha256-712rb7B5gTRz1uTx4cJQSrmq9DoBUe+UxbvawYV4XTE=",
|
||||
"x86_64-darwin": "sha256-GRCiEBDDEyVx1et04xqdIEQr3ykRMOBJoQy/xddSsCA="
|
||||
"x86_64-linux": "sha256-5w+DwEvUrCly9LHZuTa1yTSD45X56cGJG8sds/N29mU=",
|
||||
"aarch64-linux": "sha256-pLhyzajYinBlFyGWwPypyC8gHEU8S7fVXIs6aqgBmhg=",
|
||||
"aarch64-darwin": "sha256-vN0sXYs7pLtpq7U9SorR2z6st/wMfHA3dybOnwIh1pU=",
|
||||
"x86_64-darwin": "sha256-P8fgyBcZJmY5VbNxNer/EL4r/F28dNxaqheaqNZH488="
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@opencode-ai/app",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"description": "",
|
||||
"type": "module",
|
||||
"exports": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@opencode-ai/console-app",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"scripts": {
|
||||
|
||||
@@ -139,19 +139,16 @@ export async function handler(
|
||||
const startTimestamp = Date.now()
|
||||
const reqUrl = providerInfo.modifyUrl(providerInfo.api, isStream)
|
||||
const reqBody = JSON.stringify(
|
||||
providerInfo.modifyBody(
|
||||
{
|
||||
...createBodyConverter(opts.format, providerInfo.format)(body),
|
||||
model: providerInfo.model,
|
||||
...(providerInfo.payloadModifier ?? {}),
|
||||
...Object.fromEntries(
|
||||
Object.entries(providerInfo.payloadMappings ?? {})
|
||||
.map(([k, v]) => [k, input.request.headers.get(v)])
|
||||
.filter(([_k, v]) => !!v),
|
||||
),
|
||||
},
|
||||
authInfo?.workspaceID,
|
||||
),
|
||||
providerInfo.modifyBody({
|
||||
...createBodyConverter(opts.format, providerInfo.format)(body),
|
||||
model: providerInfo.model,
|
||||
...(providerInfo.payloadModifier ?? {}),
|
||||
...Object.fromEntries(
|
||||
Object.entries(providerInfo.payloadMappings ?? {})
|
||||
.map(([k, v]) => [k, input.request.headers.get(v)])
|
||||
.filter(([_k, v]) => !!v),
|
||||
),
|
||||
}),
|
||||
)
|
||||
logger.debug("REQUEST URL: " + reqUrl)
|
||||
logger.debug("REQUEST: " + reqBody.substring(0, 300) + "...")
|
||||
@@ -470,15 +467,17 @@ export async function handler(
|
||||
...(() => {
|
||||
const providerProps = zenData.providers[modelProvider.id]
|
||||
const format = providerProps.format
|
||||
const providerModel = modelProvider.model
|
||||
if (format === "anthropic") return anthropicHelper({ reqModel, providerModel })
|
||||
if (format === "google") return googleHelper({ reqModel, providerModel })
|
||||
if (format === "openai") return openaiHelper({ reqModel, providerModel })
|
||||
return oaCompatHelper({
|
||||
const opts = {
|
||||
reqModel,
|
||||
providerModel,
|
||||
providerModel: modelProvider.model,
|
||||
adjustCacheUsage: providerProps.adjustCacheUsage,
|
||||
})
|
||||
safetyIdentifier: modelProvider.safetyIdentifier ? ip : undefined,
|
||||
workspaceID: authInfo?.workspaceID,
|
||||
}
|
||||
if (format === "anthropic") return anthropicHelper(opts)
|
||||
if (format === "google") return googleHelper(opts)
|
||||
if (format === "openai") return openaiHelper(opts)
|
||||
return oaCompatHelper(opts)
|
||||
})(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,17 +21,18 @@ type Usage = {
|
||||
}
|
||||
}
|
||||
|
||||
export const oaCompatHelper: ProviderHelper = ({ adjustCacheUsage }) => ({
|
||||
export const oaCompatHelper: ProviderHelper = ({ adjustCacheUsage, safetyIdentifier }) => ({
|
||||
format: "oa-compat",
|
||||
modifyUrl: (providerApi: string) => providerApi + "/chat/completions",
|
||||
modifyHeaders: (headers: Headers, body: Record<string, any>, apiKey: string) => {
|
||||
headers.set("authorization", `Bearer ${apiKey}`)
|
||||
headers.set("x-session-affinity", headers.get("x-opencode-session") ?? "")
|
||||
},
|
||||
modifyBody: (body: Record<string, any>) => {
|
||||
modifyBody: (body: Record<string, any>, workspaceID?: string) => {
|
||||
return {
|
||||
...body,
|
||||
...(body.stream ? { stream_options: { include_usage: true } } : {}),
|
||||
...(safetyIdentifier ? { safety_identifier: safetyIdentifier } : {}),
|
||||
}
|
||||
},
|
||||
createBinaryStreamDecoder: () => undefined,
|
||||
|
||||
@@ -12,13 +12,13 @@ type Usage = {
|
||||
total_tokens?: number
|
||||
}
|
||||
|
||||
export const openaiHelper: ProviderHelper = () => ({
|
||||
export const openaiHelper: ProviderHelper = ({ workspaceID }) => ({
|
||||
format: "openai",
|
||||
modifyUrl: (providerApi: string) => providerApi + "/responses",
|
||||
modifyHeaders: (headers: Headers, body: Record<string, any>, apiKey: string) => {
|
||||
headers.set("authorization", `Bearer ${apiKey}`)
|
||||
},
|
||||
modifyBody: (body: Record<string, any>, workspaceID?: string) => ({
|
||||
modifyBody: (body: Record<string, any>) => ({
|
||||
...body,
|
||||
...(workspaceID ? { safety_identifier: workspaceID } : {}),
|
||||
}),
|
||||
|
||||
@@ -33,11 +33,17 @@ export type UsageInfo = {
|
||||
cacheWrite1hTokens?: number
|
||||
}
|
||||
|
||||
export type ProviderHelper = (input: { reqModel: string; providerModel: string; adjustCacheUsage?: boolean }) => {
|
||||
export type ProviderHelper = (input: {
|
||||
reqModel: string
|
||||
providerModel: string
|
||||
adjustCacheUsage?: boolean
|
||||
safetyIdentifier?: string
|
||||
workspaceID?: string
|
||||
}) => {
|
||||
format: ZenData.Format
|
||||
modifyUrl: (providerApi: string, isStream?: boolean) => string
|
||||
modifyHeaders: (headers: Headers, body: Record<string, any>, apiKey: string) => void
|
||||
modifyBody: (body: Record<string, any>, workspaceID?: string) => Record<string, any>
|
||||
modifyBody: (body: Record<string, any>) => Record<string, any>
|
||||
createBinaryStreamDecoder: () => ((chunk: Uint8Array) => Uint8Array | undefined) | undefined
|
||||
streamSeparator: string
|
||||
createUsageParser: () => {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"$schema": "https://json.schemastore.org/package.json",
|
||||
"name": "@opencode-ai/console-core",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
|
||||
@@ -37,6 +37,7 @@ export namespace ZenData {
|
||||
disabled: z.boolean().optional(),
|
||||
storeModel: z.string().optional(),
|
||||
payloadModifier: z.record(z.string(), z.any()).optional(),
|
||||
safetyIdentifier: z.boolean().optional(),
|
||||
}),
|
||||
),
|
||||
})
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@opencode-ai/console-function",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"$schema": "https://json.schemastore.org/package.json",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@opencode-ai/console-mail",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"dependencies": {
|
||||
"@jsx-email/all": "2.2.3",
|
||||
"@jsx-email/cli": "1.4.3",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "@opencode-ai/desktop-electron",
|
||||
"private": true,
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"homepage": "https://opencode.ai",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "@opencode-ai/desktop",
|
||||
"private": true,
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"scripts": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@opencode-ai/enterprise",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
id = "opencode"
|
||||
name = "OpenCode"
|
||||
description = "The open source coding agent."
|
||||
version = "1.3.7"
|
||||
version = "1.3.8"
|
||||
schema_version = 1
|
||||
authors = ["Anomaly"]
|
||||
repository = "https://github.com/anomalyco/opencode"
|
||||
@@ -11,26 +11,26 @@ name = "OpenCode"
|
||||
icon = "./icons/opencode.svg"
|
||||
|
||||
[agent_servers.opencode.targets.darwin-aarch64]
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.7/opencode-darwin-arm64.zip"
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.8/opencode-darwin-arm64.zip"
|
||||
cmd = "./opencode"
|
||||
args = ["acp"]
|
||||
|
||||
[agent_servers.opencode.targets.darwin-x86_64]
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.7/opencode-darwin-x64.zip"
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.8/opencode-darwin-x64.zip"
|
||||
cmd = "./opencode"
|
||||
args = ["acp"]
|
||||
|
||||
[agent_servers.opencode.targets.linux-aarch64]
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.7/opencode-linux-arm64.tar.gz"
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.8/opencode-linux-arm64.tar.gz"
|
||||
cmd = "./opencode"
|
||||
args = ["acp"]
|
||||
|
||||
[agent_servers.opencode.targets.linux-x86_64]
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.7/opencode-linux-x64.tar.gz"
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.8/opencode-linux-x64.tar.gz"
|
||||
cmd = "./opencode"
|
||||
args = ["acp"]
|
||||
|
||||
[agent_servers.opencode.targets.windows-x86_64]
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.7/opencode-windows-x64.zip"
|
||||
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.8/opencode-windows-x64.zip"
|
||||
cmd = "./opencode.exe"
|
||||
args = ["acp"]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@opencode-ai/function",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"$schema": "https://json.schemastore.org/package.json",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"$schema": "https://json.schemastore.org/package.json",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"name": "opencode",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
@@ -87,7 +87,7 @@
|
||||
"@ai-sdk/provider-utils": "4.0.21",
|
||||
"@ai-sdk/togetherai": "2.0.41",
|
||||
"@ai-sdk/vercel": "2.0.39",
|
||||
"@ai-sdk/xai": "3.0.74",
|
||||
"@ai-sdk/xai": "3.0.75",
|
||||
"@aws-sdk/credential-providers": "3.993.0",
|
||||
"@clack/prompts": "1.0.0-alpha.1",
|
||||
"@effect/platform-node": "catalog:",
|
||||
|
||||
@@ -84,12 +84,18 @@ export default plugin
|
||||
- TUI shape is `default export { id?, tui }`; including `server` is rejected.
|
||||
- A single module cannot export both `server` and `tui`.
|
||||
- `tui` signature is `(api, options, meta) => Promise<void>`.
|
||||
- If package `exports` contains `./tui`, the loader resolves that entrypoint. Otherwise it uses the resolved package target.
|
||||
- If package `exports` contains `./tui`, the loader resolves that entrypoint.
|
||||
- If package `exports` exists, loader only resolves `./tui` or `./server`; it never falls back to `exports["."]`.
|
||||
- For npm package specs, TUI does not use `package.json` `main` as a fallback entry.
|
||||
- `package.json` `main` is only used for server plugin entrypoint resolution.
|
||||
- If a package supports both server and TUI, use separate files and package `exports` (`./server` and `./tui`) so each target resolves to a target-only module.
|
||||
- File/path plugins must export a non-empty `id`.
|
||||
- npm plugins may omit `id`; package `name` is used.
|
||||
- Runtime identity is the resolved plugin id. Later plugins with the same id are rejected, including collisions with internal plugin ids.
|
||||
- If a path spec points at a directory, that directory must have `package.json` with `main`.
|
||||
- If a path spec points at a directory, server loading can use `package.json` `main`.
|
||||
- TUI path loading never uses `package.json` `main`.
|
||||
- Legacy compatibility: path specs like `./plugin` can resolve to `./plugin/index.ts` (or `index.js`) when `package.json` is missing.
|
||||
- The `./plugin -> ./plugin/index.*` fallback applies to both server and TUI v1 loading.
|
||||
- There is no directory auto-discovery for TUI plugins; they must be listed in `tui.json`.
|
||||
|
||||
## Package manifest and install
|
||||
|
||||
@@ -393,7 +393,7 @@ export namespace Agent {
|
||||
)
|
||||
|
||||
export const defaultLayer = layer.pipe(
|
||||
Layer.provide(Auth.layer),
|
||||
Layer.provide(Auth.defaultLayer),
|
||||
Layer.provide(Config.defaultLayer),
|
||||
Layer.provide(Skill.defaultLayer),
|
||||
)
|
||||
|
||||
@@ -3,7 +3,7 @@ import { Effect, Layer, Record, Result, Schema, ServiceMap } from "effect"
|
||||
import { makeRuntime } from "@/effect/run-service"
|
||||
import { zod } from "@/util/effect-zod"
|
||||
import { Global } from "../global"
|
||||
import { Filesystem } from "../util/filesystem"
|
||||
import { AppFileSystem } from "../filesystem"
|
||||
|
||||
export const OAUTH_DUMMY_KEY = "opencode-oauth-dummy-key"
|
||||
|
||||
@@ -53,17 +53,13 @@ export namespace Auth {
|
||||
export const layer = Layer.effect(
|
||||
Service,
|
||||
Effect.gen(function* () {
|
||||
const fsys = yield* AppFileSystem.Service
|
||||
const decode = Schema.decodeUnknownOption(Info)
|
||||
|
||||
const all = Effect.fn("Auth.all")(() =>
|
||||
Effect.tryPromise({
|
||||
try: async () => {
|
||||
const data = await Filesystem.readJson<Record<string, unknown>>(file).catch(() => ({}))
|
||||
return Record.filterMap(data, (value) => Result.fromOption(decode(value), () => undefined))
|
||||
},
|
||||
catch: fail("Failed to read auth data"),
|
||||
}),
|
||||
)
|
||||
const all = Effect.fn("Auth.all")(function* () {
|
||||
const data = (yield* fsys.readJson(file).pipe(Effect.orElseSucceed(() => ({})))) as Record<string, unknown>
|
||||
return Record.filterMap(data, (value) => Result.fromOption(decode(value), () => undefined))
|
||||
})
|
||||
|
||||
const get = Effect.fn("Auth.get")(function* (providerID: string) {
|
||||
return (yield* all())[providerID]
|
||||
@@ -74,10 +70,9 @@ export namespace Auth {
|
||||
const data = yield* all()
|
||||
if (norm !== key) delete data[key]
|
||||
delete data[norm + "/"]
|
||||
yield* Effect.tryPromise({
|
||||
try: () => Filesystem.writeJson(file, { ...data, [norm]: info }, 0o600),
|
||||
catch: fail("Failed to write auth data"),
|
||||
})
|
||||
yield* fsys
|
||||
.writeJson(file, { ...data, [norm]: info }, 0o600)
|
||||
.pipe(Effect.mapError(fail("Failed to write auth data")))
|
||||
})
|
||||
|
||||
const remove = Effect.fn("Auth.remove")(function* (key: string) {
|
||||
@@ -85,17 +80,16 @@ export namespace Auth {
|
||||
const data = yield* all()
|
||||
delete data[key]
|
||||
delete data[norm]
|
||||
yield* Effect.tryPromise({
|
||||
try: () => Filesystem.writeJson(file, data, 0o600),
|
||||
catch: fail("Failed to write auth data"),
|
||||
})
|
||||
yield* fsys.writeJson(file, data, 0o600).pipe(Effect.mapError(fail("Failed to write auth data")))
|
||||
})
|
||||
|
||||
return Service.of({ get, all, set, remove })
|
||||
}),
|
||||
)
|
||||
|
||||
const { runPromise } = makeRuntime(Service, layer)
|
||||
export const defaultLayer = layer.pipe(Layer.provide(AppFileSystem.defaultLayer))
|
||||
|
||||
const { runPromise } = makeRuntime(Service, defaultLayer)
|
||||
|
||||
export async function get(providerID: string) {
|
||||
return runPromise((service) => service.get(providerID))
|
||||
|
||||
@@ -18,17 +18,8 @@ import { Log } from "@/util/log"
|
||||
import { errorData, errorMessage } from "@/util/error"
|
||||
import { isRecord } from "@/util/record"
|
||||
import { Instance } from "@/project/instance"
|
||||
import {
|
||||
checkPluginCompatibility,
|
||||
isDeprecatedPlugin,
|
||||
pluginSource,
|
||||
readPluginId,
|
||||
readV1Plugin,
|
||||
resolvePluginEntrypoint,
|
||||
resolvePluginId,
|
||||
resolvePluginTarget,
|
||||
type PluginSource,
|
||||
} from "@/plugin/shared"
|
||||
import { pluginSource, readPluginId, readV1Plugin, resolvePluginId, type PluginSource } from "@/plugin/shared"
|
||||
import { PluginLoader } from "@/plugin/loader"
|
||||
import { PluginMeta } from "@/plugin/meta"
|
||||
import { installPlugin as installModulePlugin, patchPluginConfig, readPluginManifest } from "@/plugin/install"
|
||||
import { hasTheme, upsertTheme } from "../context/theme"
|
||||
@@ -36,13 +27,12 @@ import { Global } from "@/global"
|
||||
import { Filesystem } from "@/util/filesystem"
|
||||
import { Process } from "@/util/process"
|
||||
import { Flag } from "@/flag/flag"
|
||||
import { Installation } from "@/installation"
|
||||
import { INTERNAL_TUI_PLUGINS, type InternalTuiPlugin } from "./internal"
|
||||
import { setupSlots, Slot as View } from "./slots"
|
||||
import type { HostPluginApi, HostSlots } from "./slots"
|
||||
|
||||
type PluginLoad = {
|
||||
item?: Config.PluginSpec
|
||||
options: Config.PluginOptions | undefined
|
||||
spec: string
|
||||
target: string
|
||||
retry: boolean
|
||||
@@ -67,7 +57,6 @@ type PluginEntry = {
|
||||
meta: TuiPluginMeta
|
||||
themes: Record<string, PluginMeta.Theme>
|
||||
plugin: TuiPlugin
|
||||
options: Config.PluginOptions | undefined
|
||||
enabled: boolean
|
||||
scope?: PluginScope
|
||||
}
|
||||
@@ -78,13 +67,7 @@ type RuntimeState = {
|
||||
slots: HostSlots
|
||||
plugins: PluginEntry[]
|
||||
plugins_by_id: Map<string, PluginEntry>
|
||||
pending: Map<
|
||||
string,
|
||||
{
|
||||
item: Config.PluginSpec
|
||||
meta: TuiConfig.PluginMeta
|
||||
}
|
||||
>
|
||||
pending: Map<string, TuiConfig.PluginRecord>
|
||||
}
|
||||
|
||||
const log = Log.create({ service: "tui.plugin" })
|
||||
@@ -239,73 +222,76 @@ function createThemeInstaller(
|
||||
}
|
||||
}
|
||||
|
||||
async function loadExternalPlugin(
|
||||
item: Config.PluginSpec,
|
||||
meta: TuiConfig.PluginMeta | undefined,
|
||||
retry = false,
|
||||
): Promise<PluginLoad | undefined> {
|
||||
const spec = Config.pluginSpecifier(item)
|
||||
if (isDeprecatedPlugin(spec)) return
|
||||
log.info("loading tui plugin", { path: spec, retry })
|
||||
const resolved = await resolvePluginTarget(spec).catch((error) => {
|
||||
fail("failed to resolve tui plugin", { path: spec, retry, error })
|
||||
return
|
||||
})
|
||||
if (!resolved) return
|
||||
async function loadExternalPlugin(cfg: TuiConfig.PluginRecord, retry = false): Promise<PluginLoad | undefined> {
|
||||
const plan = PluginLoader.plan(cfg.item)
|
||||
if (plan.deprecated) return
|
||||
|
||||
const source = pluginSource(spec)
|
||||
if (source === "npm") {
|
||||
const ok = await checkPluginCompatibility(resolved, Installation.VERSION)
|
||||
.then(() => true)
|
||||
.catch((error) => {
|
||||
fail("tui plugin incompatible", { path: spec, retry, error })
|
||||
return false
|
||||
})
|
||||
if (!ok) return
|
||||
log.info("loading tui plugin", { path: plan.spec, retry })
|
||||
const resolved = await PluginLoader.resolve(plan, "tui")
|
||||
if (!resolved.ok) {
|
||||
if (resolved.stage === "install") {
|
||||
fail("failed to resolve tui plugin", { path: plan.spec, retry, error: resolved.error })
|
||||
return
|
||||
}
|
||||
if (resolved.stage === "compatibility") {
|
||||
fail("tui plugin incompatible", { path: plan.spec, retry, error: resolved.error })
|
||||
return
|
||||
}
|
||||
fail("failed to resolve tui plugin entry", { path: plan.spec, retry, error: resolved.error })
|
||||
return
|
||||
}
|
||||
|
||||
const target = resolved
|
||||
if (!meta) {
|
||||
fail("missing tui plugin metadata", {
|
||||
path: spec,
|
||||
const loaded = await PluginLoader.load(resolved.value)
|
||||
if (!loaded.ok) {
|
||||
fail("failed to load tui plugin", {
|
||||
path: plan.spec,
|
||||
target: resolved.value.entry,
|
||||
retry,
|
||||
error: loaded.error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
const root = resolveRoot(source === "file" ? spec : target)
|
||||
const entry = await resolvePluginEntrypoint(spec, target, "tui").catch((error) => {
|
||||
fail("failed to resolve tui plugin entry", { path: spec, target, retry, error })
|
||||
return
|
||||
})
|
||||
if (!entry) return
|
||||
|
||||
const mod = await import(entry)
|
||||
.then((raw) => {
|
||||
return readV1Plugin(raw as Record<string, unknown>, spec, "tui") as TuiPluginModule
|
||||
const mod = await Promise.resolve()
|
||||
.then(() => {
|
||||
return readV1Plugin(loaded.value.mod as Record<string, unknown>, plan.spec, "tui") as TuiPluginModule
|
||||
})
|
||||
.catch((error) => {
|
||||
fail("failed to load tui plugin", { path: spec, target: entry, retry, error })
|
||||
fail("failed to load tui plugin", {
|
||||
path: plan.spec,
|
||||
target: loaded.value.entry,
|
||||
retry,
|
||||
error,
|
||||
})
|
||||
return
|
||||
})
|
||||
if (!mod) return
|
||||
|
||||
const id = await resolvePluginId(source, spec, target, readPluginId(mod.id, spec)).catch((error) => {
|
||||
fail("failed to load tui plugin", { path: spec, target, retry, error })
|
||||
const id = await resolvePluginId(
|
||||
loaded.value.source,
|
||||
plan.spec,
|
||||
loaded.value.target,
|
||||
readPluginId(mod.id, plan.spec),
|
||||
loaded.value.pkg,
|
||||
).catch((error) => {
|
||||
fail("failed to load tui plugin", { path: plan.spec, target: loaded.value.target, retry, error })
|
||||
return
|
||||
})
|
||||
if (!id) return
|
||||
|
||||
return {
|
||||
item,
|
||||
spec,
|
||||
target,
|
||||
options: plan.options,
|
||||
spec: plan.spec,
|
||||
target: loaded.value.target,
|
||||
retry,
|
||||
source,
|
||||
source: loaded.value.source,
|
||||
id,
|
||||
module: mod,
|
||||
theme_meta: meta,
|
||||
theme_root: root,
|
||||
theme_meta: {
|
||||
scope: cfg.scope,
|
||||
source: cfg.source,
|
||||
},
|
||||
theme_root: loaded.value.pkg?.dir ?? resolveRoot(loaded.value.target),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -343,6 +329,7 @@ function loadInternalPlugin(item: InternalTuiPlugin): PluginLoad {
|
||||
const target = spec
|
||||
|
||||
return {
|
||||
options: undefined,
|
||||
spec,
|
||||
target,
|
||||
retry: false,
|
||||
@@ -488,7 +475,7 @@ async function activatePluginEntry(state: RuntimeState, plugin: PluginEntry, per
|
||||
const api = pluginApi(state, plugin, scope, plugin.id)
|
||||
const ok = await Promise.resolve()
|
||||
.then(async () => {
|
||||
await plugin.plugin(api, plugin.options, plugin.meta)
|
||||
await plugin.plugin(api, plugin.load.options, plugin.meta)
|
||||
return true
|
||||
})
|
||||
.catch((error) => {
|
||||
@@ -613,21 +600,6 @@ function pluginApi(runtime: RuntimeState, plugin: PluginEntry, scope: PluginScop
|
||||
}
|
||||
}
|
||||
|
||||
function collectPluginEntries(load: PluginLoad, meta: TuiPluginMeta, themes: Record<string, PluginMeta.Theme> = {}) {
|
||||
const options = load.item ? Config.pluginOptions(load.item) : undefined
|
||||
return [
|
||||
{
|
||||
id: load.id,
|
||||
load,
|
||||
meta,
|
||||
themes,
|
||||
plugin: load.module.tui,
|
||||
options,
|
||||
enabled: true,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
function addPluginEntry(state: RuntimeState, plugin: PluginEntry) {
|
||||
if (state.plugins_by_id.has(plugin.id)) {
|
||||
fail("duplicate tui plugin id", {
|
||||
@@ -651,12 +623,8 @@ function applyInitialPluginEnabledState(state: RuntimeState, config: TuiConfig.I
|
||||
}
|
||||
}
|
||||
|
||||
async function resolveExternalPlugins(
|
||||
list: Config.PluginSpec[],
|
||||
wait: () => Promise<void>,
|
||||
meta: (item: Config.PluginSpec) => TuiConfig.PluginMeta | undefined,
|
||||
) {
|
||||
const loaded = await Promise.all(list.map((item) => loadExternalPlugin(item, meta(item))))
|
||||
async function resolveExternalPlugins(list: TuiConfig.PluginRecord[], wait: () => Promise<void>) {
|
||||
const loaded = await Promise.all(list.map((item) => loadExternalPlugin(item)))
|
||||
const ready: PluginLoad[] = []
|
||||
let deps: Promise<void> | undefined
|
||||
|
||||
@@ -665,13 +633,12 @@ async function resolveExternalPlugins(
|
||||
if (!entry) {
|
||||
const item = list[i]
|
||||
if (!item) continue
|
||||
const spec = Config.pluginSpecifier(item)
|
||||
if (pluginSource(spec) !== "file") continue
|
||||
if (pluginSource(Config.pluginSpecifier(item.item)) !== "file") continue
|
||||
deps ??= wait().catch((error) => {
|
||||
log.warn("failed waiting for tui plugin dependencies", { error })
|
||||
})
|
||||
await deps
|
||||
entry = await loadExternalPlugin(item, meta(item), true)
|
||||
entry = await loadExternalPlugin(item, true)
|
||||
}
|
||||
if (!entry) continue
|
||||
ready.push(entry)
|
||||
@@ -713,20 +680,27 @@ async function addExternalPluginEntries(state: RuntimeState, ready: PluginLoad[]
|
||||
|
||||
const row = createMeta(entry.source, entry.spec, entry.target, hit, entry.id)
|
||||
const themes = hit?.entry.themes ? { ...hit.entry.themes } : {}
|
||||
for (const plugin of collectPluginEntries(entry, row, themes)) {
|
||||
if (!addPluginEntry(state, plugin)) {
|
||||
ok = false
|
||||
continue
|
||||
}
|
||||
plugins.push(plugin)
|
||||
const plugin: PluginEntry = {
|
||||
id: entry.id,
|
||||
load: entry,
|
||||
meta: row,
|
||||
themes,
|
||||
plugin: entry.module.tui,
|
||||
enabled: true,
|
||||
}
|
||||
if (!addPluginEntry(state, plugin)) {
|
||||
ok = false
|
||||
continue
|
||||
}
|
||||
plugins.push(plugin)
|
||||
}
|
||||
|
||||
return { plugins, ok }
|
||||
}
|
||||
|
||||
function defaultPluginMeta(state: RuntimeState): TuiConfig.PluginMeta {
|
||||
function defaultPluginRecord(state: RuntimeState, spec: string): TuiConfig.PluginRecord {
|
||||
return {
|
||||
item: spec,
|
||||
scope: "local",
|
||||
source: state.api.state.path.config || path.join(state.directory, ".opencode", "tui.json"),
|
||||
}
|
||||
@@ -764,36 +738,28 @@ async function addPluginBySpec(state: RuntimeState | undefined, raw: string) {
|
||||
const spec = raw.trim()
|
||||
if (!spec) return false
|
||||
|
||||
const pending = state.pending.get(spec)
|
||||
const item = pending?.item ?? spec
|
||||
const nextSpec = Config.pluginSpecifier(item)
|
||||
if (state.plugins.some((plugin) => plugin.load.spec === nextSpec)) {
|
||||
const cfg = state.pending.get(spec) ?? defaultPluginRecord(state, spec)
|
||||
const next = Config.pluginSpecifier(cfg.item)
|
||||
if (state.plugins.some((plugin) => plugin.load.spec === next)) {
|
||||
state.pending.delete(spec)
|
||||
return true
|
||||
}
|
||||
|
||||
const meta = pending?.meta ?? defaultPluginMeta(state)
|
||||
|
||||
const ready = await Instance.provide({
|
||||
directory: state.directory,
|
||||
fn: () =>
|
||||
resolveExternalPlugins(
|
||||
[item],
|
||||
() => TuiConfig.waitForDependencies(),
|
||||
() => meta,
|
||||
),
|
||||
fn: () => resolveExternalPlugins([cfg], () => TuiConfig.waitForDependencies()),
|
||||
}).catch((error) => {
|
||||
fail("failed to add tui plugin", { path: nextSpec, error })
|
||||
fail("failed to add tui plugin", { path: next, error })
|
||||
return [] as PluginLoad[]
|
||||
})
|
||||
if (!ready.length) {
|
||||
fail("failed to add tui plugin", { path: nextSpec })
|
||||
fail("failed to add tui plugin", { path: next })
|
||||
return false
|
||||
}
|
||||
|
||||
const first = ready[0]
|
||||
if (!first) {
|
||||
fail("failed to add tui plugin", { path: nextSpec })
|
||||
fail("failed to add tui plugin", { path: next })
|
||||
return false
|
||||
}
|
||||
if (state.plugins_by_id.has(first.id)) {
|
||||
@@ -810,7 +776,7 @@ async function addPluginBySpec(state: RuntimeState | undefined, raw: string) {
|
||||
|
||||
if (ok) state.pending.delete(spec)
|
||||
if (!ok) {
|
||||
fail("failed to add tui plugin", { path: nextSpec })
|
||||
fail("failed to add tui plugin", { path: next })
|
||||
}
|
||||
return ok
|
||||
}
|
||||
@@ -893,12 +859,11 @@ async function installPluginBySpec(
|
||||
const tui = manifest.targets.find((item) => item.kind === "tui")
|
||||
if (tui) {
|
||||
const file = patch.items.find((item) => item.kind === "tui")?.file
|
||||
const item = tui.opts ? ([spec, tui.opts] as Config.PluginSpec) : spec
|
||||
state.pending.set(spec, {
|
||||
item: tui.opts ? [spec, tui.opts] : spec,
|
||||
meta: {
|
||||
scope: global ? "global" : "local",
|
||||
source: (file ?? dir.config) || path.join(patch.dir, "tui.json"),
|
||||
},
|
||||
item,
|
||||
scope: global ? "global" : "local",
|
||||
source: (file ?? dir.config) || path.join(patch.dir, "tui.json"),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -981,25 +946,26 @@ export namespace TuiPluginRuntime {
|
||||
directory: cwd,
|
||||
fn: async () => {
|
||||
const config = await TuiConfig.get()
|
||||
const plugins = Flag.OPENCODE_PURE ? [] : (config.plugin ?? [])
|
||||
if (Flag.OPENCODE_PURE && config.plugin?.length) {
|
||||
log.info("skipping external tui plugins in pure mode", { count: config.plugin.length })
|
||||
const records = Flag.OPENCODE_PURE ? [] : (config.plugin_records ?? [])
|
||||
if (Flag.OPENCODE_PURE && config.plugin_records?.length) {
|
||||
log.info("skipping external tui plugins in pure mode", { count: config.plugin_records.length })
|
||||
}
|
||||
|
||||
for (const item of INTERNAL_TUI_PLUGINS) {
|
||||
log.info("loading internal tui plugin", { id: item.id })
|
||||
const entry = loadInternalPlugin(item)
|
||||
const meta = createMeta(entry.source, entry.spec, entry.target, undefined, entry.id)
|
||||
for (const plugin of collectPluginEntries(entry, meta)) {
|
||||
addPluginEntry(next, plugin)
|
||||
}
|
||||
addPluginEntry(next, {
|
||||
id: entry.id,
|
||||
load: entry,
|
||||
meta,
|
||||
themes: {},
|
||||
plugin: entry.module.tui,
|
||||
enabled: true,
|
||||
})
|
||||
}
|
||||
|
||||
const ready = await resolveExternalPlugins(
|
||||
plugins,
|
||||
() => TuiConfig.waitForDependencies(),
|
||||
(item) => config.plugin_meta?.[Config.pluginSpecifier(item)],
|
||||
)
|
||||
const ready = await resolveExternalPlugins(records, () => TuiConfig.waitForDependencies())
|
||||
await addExternalPluginEntries(next, ready)
|
||||
|
||||
applyInitialPluginEnabledState(next, config)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { Log } from "../util/log"
|
||||
import path from "path"
|
||||
import { pathToFileURL } from "url"
|
||||
import { createRequire } from "module"
|
||||
import os from "os"
|
||||
import z from "zod"
|
||||
import { ModelsDev } from "../provider/models"
|
||||
@@ -366,33 +365,18 @@ export namespace Config {
|
||||
export async function resolvePluginSpec(plugin: PluginSpec, configFilepath: string): Promise<PluginSpec> {
|
||||
const spec = pluginSpecifier(plugin)
|
||||
if (!isPathPluginSpec(spec)) return plugin
|
||||
if (spec.startsWith("file://")) {
|
||||
const resolved = await resolvePathPluginTarget(spec).catch(() => spec)
|
||||
if (Array.isArray(plugin)) return [resolved, plugin[1]]
|
||||
return resolved
|
||||
}
|
||||
if (path.isAbsolute(spec) || /^[A-Za-z]:[\\/]/.test(spec)) {
|
||||
const base = pathToFileURL(spec).href
|
||||
const resolved = await resolvePathPluginTarget(base).catch(() => base)
|
||||
if (Array.isArray(plugin)) return [resolved, plugin[1]]
|
||||
return resolved
|
||||
}
|
||||
try {
|
||||
const base = import.meta.resolve!(spec, configFilepath)
|
||||
const resolved = await resolvePathPluginTarget(base).catch(() => base)
|
||||
if (Array.isArray(plugin)) return [resolved, plugin[1]]
|
||||
return resolved
|
||||
} catch {
|
||||
try {
|
||||
const require = createRequire(configFilepath)
|
||||
const base = pathToFileURL(require.resolve(spec)).href
|
||||
const resolved = await resolvePathPluginTarget(base).catch(() => base)
|
||||
if (Array.isArray(plugin)) return [resolved, plugin[1]]
|
||||
return resolved
|
||||
} catch {
|
||||
return plugin
|
||||
}
|
||||
}
|
||||
|
||||
const base = path.dirname(configFilepath)
|
||||
const file = (() => {
|
||||
if (spec.startsWith("file://")) return spec
|
||||
if (path.isAbsolute(spec) || /^[A-Za-z]:[\\/]/.test(spec)) return pathToFileURL(spec).href
|
||||
return pathToFileURL(path.resolve(base, spec)).href
|
||||
})()
|
||||
|
||||
const resolved = await resolvePathPluginTarget(file).catch(() => file)
|
||||
|
||||
if (Array.isArray(plugin)) return [resolved, plugin[1]]
|
||||
return resolved
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1556,7 +1540,7 @@ export namespace Config {
|
||||
|
||||
export const defaultLayer = layer.pipe(
|
||||
Layer.provide(AppFileSystem.defaultLayer),
|
||||
Layer.provide(Auth.layer),
|
||||
Layer.provide(Auth.defaultLayer),
|
||||
Layer.provide(Account.defaultLayer),
|
||||
)
|
||||
|
||||
|
||||
@@ -22,6 +22,12 @@ export namespace TuiConfig {
|
||||
source: string
|
||||
}
|
||||
|
||||
export type PluginRecord = {
|
||||
item: Config.PluginSpec
|
||||
scope: PluginMeta["scope"]
|
||||
source: string
|
||||
}
|
||||
|
||||
type PluginEntry = {
|
||||
item: Config.PluginSpec
|
||||
meta: PluginMeta
|
||||
@@ -33,7 +39,8 @@ export namespace TuiConfig {
|
||||
}
|
||||
|
||||
export type Info = z.output<typeof Info> & {
|
||||
plugin_meta?: Record<string, PluginMeta>
|
||||
// Internal resolved plugin list used by runtime loading.
|
||||
plugin_records?: PluginRecord[]
|
||||
}
|
||||
|
||||
function pluginScope(file: string): PluginMeta["scope"] {
|
||||
@@ -149,10 +156,13 @@ export namespace TuiConfig {
|
||||
|
||||
const merged = dedupePlugins(acc.entries)
|
||||
acc.result.keybinds = Config.Keybinds.parse(acc.result.keybinds ?? {})
|
||||
acc.result.plugin = merged.map((item) => item.item)
|
||||
acc.result.plugin_meta = merged.length
|
||||
? Object.fromEntries(merged.map((item) => [Config.pluginSpecifier(item.item), item.meta]))
|
||||
: undefined
|
||||
const list = merged.map((item) => ({
|
||||
item: item.item,
|
||||
scope: item.meta.scope,
|
||||
source: item.meta.source,
|
||||
}))
|
||||
acc.result.plugin = list.map((item) => item.item)
|
||||
acc.result.plugin_records = list.length ? list : undefined
|
||||
|
||||
const deps: Promise<void>[] = []
|
||||
if (acc.result.plugin?.length) {
|
||||
|
||||
216
packages/opencode/src/effect/runner.ts
Normal file
216
packages/opencode/src/effect/runner.ts
Normal file
@@ -0,0 +1,216 @@
|
||||
import { Cause, Deferred, Effect, Exit, Fiber, Option, Schema, Scope, SynchronizedRef } from "effect"
|
||||
|
||||
export interface Runner<A, E = never> {
|
||||
readonly state: Runner.State<A, E>
|
||||
readonly busy: boolean
|
||||
readonly ensureRunning: (work: Effect.Effect<A, E>) => Effect.Effect<A, E>
|
||||
readonly startShell: (work: (signal: AbortSignal) => Effect.Effect<A, E>) => Effect.Effect<A, E>
|
||||
readonly cancel: Effect.Effect<void>
|
||||
}
|
||||
|
||||
export namespace Runner {
|
||||
export class Cancelled extends Schema.TaggedErrorClass<Cancelled>()("RunnerCancelled", {}) {}
|
||||
|
||||
interface RunHandle<A, E> {
|
||||
id: number
|
||||
done: Deferred.Deferred<A, E | Cancelled>
|
||||
fiber: Fiber.Fiber<A, E>
|
||||
}
|
||||
|
||||
interface ShellHandle<A, E> {
|
||||
id: number
|
||||
fiber: Fiber.Fiber<A, E>
|
||||
abort: AbortController
|
||||
}
|
||||
|
||||
interface PendingHandle<A, E> {
|
||||
id: number
|
||||
done: Deferred.Deferred<A, E | Cancelled>
|
||||
work: Effect.Effect<A, E>
|
||||
}
|
||||
|
||||
export type State<A, E> =
|
||||
| { readonly _tag: "Idle" }
|
||||
| { readonly _tag: "Running"; readonly run: RunHandle<A, E> }
|
||||
| { readonly _tag: "Shell"; readonly shell: ShellHandle<A, E> }
|
||||
| { readonly _tag: "ShellThenRun"; readonly shell: ShellHandle<A, E>; readonly run: PendingHandle<A, E> }
|
||||
|
||||
export const make = <A, E = never>(
|
||||
scope: Scope.Scope,
|
||||
opts?: {
|
||||
onIdle?: Effect.Effect<void>
|
||||
onBusy?: Effect.Effect<void>
|
||||
onInterrupt?: Effect.Effect<A, E>
|
||||
busy?: () => never
|
||||
},
|
||||
): Runner<A, E> => {
|
||||
const ref = SynchronizedRef.makeUnsafe<State<A, E>>({ _tag: "Idle" })
|
||||
const idle = opts?.onIdle ?? Effect.void
|
||||
const busy = opts?.onBusy ?? Effect.void
|
||||
const onInterrupt = opts?.onInterrupt
|
||||
let ids = 0
|
||||
|
||||
const state = () => SynchronizedRef.getUnsafe(ref)
|
||||
const next = () => {
|
||||
ids += 1
|
||||
return ids
|
||||
}
|
||||
|
||||
const complete = (done: Deferred.Deferred<A, E | Cancelled>, exit: Exit.Exit<A, E>) =>
|
||||
Exit.isFailure(exit) && Cause.hasInterruptsOnly(exit.cause)
|
||||
? Deferred.fail(done, new Cancelled()).pipe(Effect.asVoid)
|
||||
: Deferred.done(done, exit).pipe(Effect.asVoid)
|
||||
|
||||
const idleIfCurrent = () =>
|
||||
SynchronizedRef.modify(ref, (st) => [st._tag === "Idle" ? idle : Effect.void, st] as const).pipe(Effect.flatten)
|
||||
|
||||
const finishRun = (id: number, done: Deferred.Deferred<A, E | Cancelled>, exit: Exit.Exit<A, E>) =>
|
||||
SynchronizedRef.modify(
|
||||
ref,
|
||||
(st) =>
|
||||
[
|
||||
Effect.gen(function* () {
|
||||
if (st._tag === "Running" && st.run.id === id) yield* idle
|
||||
yield* complete(done, exit)
|
||||
}),
|
||||
st._tag === "Running" && st.run.id === id ? ({ _tag: "Idle" } as const) : st,
|
||||
] as const,
|
||||
).pipe(Effect.flatten)
|
||||
|
||||
const startRun = (work: Effect.Effect<A, E>, done: Deferred.Deferred<A, E | Cancelled>) =>
|
||||
Effect.gen(function* () {
|
||||
const id = next()
|
||||
const fiber = yield* work.pipe(
|
||||
Effect.onExit((exit) => finishRun(id, done, exit)),
|
||||
Effect.forkIn(scope),
|
||||
)
|
||||
return { id, done, fiber } satisfies RunHandle<A, E>
|
||||
})
|
||||
|
||||
const finishShell = (id: number) =>
|
||||
SynchronizedRef.modifyEffect(
|
||||
ref,
|
||||
Effect.fnUntraced(function* (st) {
|
||||
if (st._tag === "Shell" && st.shell.id === id) return [idle, { _tag: "Idle" }] as const
|
||||
if (st._tag === "ShellThenRun" && st.shell.id === id) {
|
||||
const run = yield* startRun(st.run.work, st.run.done)
|
||||
return [Effect.void, { _tag: "Running", run }] as const
|
||||
}
|
||||
return [Effect.void, st] as const
|
||||
}),
|
||||
).pipe(Effect.flatten)
|
||||
|
||||
const stopShell = (shell: ShellHandle<A, E>) =>
|
||||
Effect.gen(function* () {
|
||||
shell.abort.abort()
|
||||
const exit = yield* Fiber.await(shell.fiber).pipe(Effect.timeoutOption("100 millis"))
|
||||
if (Option.isNone(exit)) yield* Fiber.interrupt(shell.fiber)
|
||||
yield* Fiber.await(shell.fiber).pipe(Effect.exit, Effect.asVoid)
|
||||
})
|
||||
|
||||
const ensureRunning = (work: Effect.Effect<A, E>) =>
|
||||
SynchronizedRef.modifyEffect(
|
||||
ref,
|
||||
Effect.fnUntraced(function* (st) {
|
||||
switch (st._tag) {
|
||||
case "Running":
|
||||
case "ShellThenRun":
|
||||
return [Deferred.await(st.run.done), st] as const
|
||||
case "Shell": {
|
||||
const run = {
|
||||
id: next(),
|
||||
done: yield* Deferred.make<A, E | Cancelled>(),
|
||||
work,
|
||||
} satisfies PendingHandle<A, E>
|
||||
return [Deferred.await(run.done), { _tag: "ShellThenRun", shell: st.shell, run }] as const
|
||||
}
|
||||
case "Idle": {
|
||||
const done = yield* Deferred.make<A, E | Cancelled>()
|
||||
const run = yield* startRun(work, done)
|
||||
return [Deferred.await(done), { _tag: "Running", run }] as const
|
||||
}
|
||||
}
|
||||
}),
|
||||
).pipe(
|
||||
Effect.flatten,
|
||||
Effect.catch(
|
||||
(e): Effect.Effect<A, E> => (e instanceof Cancelled ? (onInterrupt ?? Effect.die(e)) : Effect.fail(e as E)),
|
||||
),
|
||||
)
|
||||
|
||||
const startShell = (work: (signal: AbortSignal) => Effect.Effect<A, E>) =>
|
||||
SynchronizedRef.modifyEffect(
|
||||
ref,
|
||||
Effect.fnUntraced(function* (st) {
|
||||
if (st._tag !== "Idle") {
|
||||
return [
|
||||
Effect.sync(() => {
|
||||
if (opts?.busy) opts.busy()
|
||||
throw new Error("Runner is busy")
|
||||
}),
|
||||
st,
|
||||
] as const
|
||||
}
|
||||
yield* busy
|
||||
const id = next()
|
||||
const abort = new AbortController()
|
||||
const fiber = yield* work(abort.signal).pipe(Effect.ensuring(finishShell(id)), Effect.forkChild)
|
||||
const shell = { id, fiber, abort } satisfies ShellHandle<A, E>
|
||||
return [
|
||||
Effect.gen(function* () {
|
||||
const exit = yield* Fiber.await(fiber)
|
||||
if (Exit.isSuccess(exit)) return exit.value
|
||||
if (Cause.hasInterruptsOnly(exit.cause) && onInterrupt) return yield* onInterrupt
|
||||
return yield* Effect.failCause(exit.cause)
|
||||
}),
|
||||
{ _tag: "Shell", shell },
|
||||
] as const
|
||||
}),
|
||||
).pipe(Effect.flatten)
|
||||
|
||||
const cancel = SynchronizedRef.modify(ref, (st) => {
|
||||
switch (st._tag) {
|
||||
case "Idle":
|
||||
return [Effect.void, st] as const
|
||||
case "Running":
|
||||
return [
|
||||
Effect.gen(function* () {
|
||||
yield* Fiber.interrupt(st.run.fiber)
|
||||
yield* Deferred.await(st.run.done).pipe(Effect.exit, Effect.asVoid)
|
||||
yield* idleIfCurrent()
|
||||
}),
|
||||
{ _tag: "Idle" } as const,
|
||||
] as const
|
||||
case "Shell":
|
||||
return [
|
||||
Effect.gen(function* () {
|
||||
yield* stopShell(st.shell)
|
||||
yield* idleIfCurrent()
|
||||
}),
|
||||
{ _tag: "Idle" } as const,
|
||||
] as const
|
||||
case "ShellThenRun":
|
||||
return [
|
||||
Effect.gen(function* () {
|
||||
yield* Deferred.fail(st.run.done, new Cancelled()).pipe(Effect.asVoid)
|
||||
yield* stopShell(st.shell)
|
||||
yield* idleIfCurrent()
|
||||
}),
|
||||
{ _tag: "Idle" } as const,
|
||||
] as const
|
||||
}
|
||||
}).pipe(Effect.flatten)
|
||||
|
||||
return {
|
||||
get state() {
|
||||
return state()
|
||||
},
|
||||
get busy() {
|
||||
return state()._tag !== "Idle"
|
||||
},
|
||||
ensureRunning,
|
||||
startShell,
|
||||
cancel,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -541,7 +541,7 @@ export namespace File {
|
||||
const exists = yield* appFs.existsSafe(full)
|
||||
if (!exists) return { type: "text" as const, content: "" }
|
||||
|
||||
const mimeType = Filesystem.mimeType(full)
|
||||
const mimeType = AppFileSystem.mimeType(full)
|
||||
const encode = knownText ? false : shouldEncode(mimeType)
|
||||
|
||||
if (encode && !isImage(mimeType)) return { type: "binary" as const, content: "", mimeType }
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import { DateTime, Effect, Layer, Semaphore, ServiceMap } from "effect"
|
||||
import { DateTime, Effect, Layer, Option, Semaphore, ServiceMap } from "effect"
|
||||
import { InstanceState } from "@/effect/instance-state"
|
||||
import { makeRuntime } from "@/effect/run-service"
|
||||
import { AppFileSystem } from "@/filesystem"
|
||||
import { Flag } from "@/flag/flag"
|
||||
import type { SessionID } from "@/session/schema"
|
||||
import { Filesystem } from "../util/filesystem"
|
||||
import { Log } from "../util/log"
|
||||
|
||||
export namespace FileTime {
|
||||
@@ -12,21 +12,9 @@ export namespace FileTime {
|
||||
export type Stamp = {
|
||||
readonly read: Date
|
||||
readonly mtime: number | undefined
|
||||
readonly ctime: number | undefined
|
||||
readonly size: number | undefined
|
||||
}
|
||||
|
||||
const stamp = Effect.fnUntraced(function* (file: string) {
|
||||
const stat = Filesystem.stat(file)
|
||||
const size = typeof stat?.size === "bigint" ? Number(stat.size) : stat?.size
|
||||
return {
|
||||
read: yield* DateTime.nowAsDate,
|
||||
mtime: stat?.mtime?.getTime(),
|
||||
ctime: stat?.ctime?.getTime(),
|
||||
size,
|
||||
}
|
||||
})
|
||||
|
||||
const session = (reads: Map<SessionID, Map<string, Stamp>>, sessionID: SessionID) => {
|
||||
const value = reads.get(sessionID)
|
||||
if (value) return value
|
||||
@@ -53,7 +41,17 @@ export namespace FileTime {
|
||||
export const layer = Layer.effect(
|
||||
Service,
|
||||
Effect.gen(function* () {
|
||||
const fsys = yield* AppFileSystem.Service
|
||||
const disableCheck = yield* Flag.OPENCODE_DISABLE_FILETIME_CHECK
|
||||
|
||||
const stamp = Effect.fnUntraced(function* (file: string) {
|
||||
const info = yield* fsys.stat(file).pipe(Effect.catch(() => Effect.succeed(undefined)))
|
||||
return {
|
||||
read: yield* DateTime.nowAsDate,
|
||||
mtime: info ? Option.getOrUndefined(info.mtime)?.getTime() : undefined,
|
||||
size: info ? Number(info.size) : undefined,
|
||||
}
|
||||
})
|
||||
const state = yield* InstanceState.make<State>(
|
||||
Effect.fn("FileTime.state")(() =>
|
||||
Effect.succeed({
|
||||
@@ -92,7 +90,7 @@ export namespace FileTime {
|
||||
if (!time) throw new Error(`You must read file ${filepath} before overwriting it. Use the Read tool first`)
|
||||
|
||||
const next = yield* stamp(filepath)
|
||||
const changed = next.mtime !== time.mtime || next.ctime !== time.ctime || next.size !== time.size
|
||||
const changed = next.mtime !== time.mtime || next.size !== time.size
|
||||
if (!changed) return
|
||||
|
||||
throw new Error(
|
||||
@@ -108,7 +106,9 @@ export namespace FileTime {
|
||||
}),
|
||||
).pipe(Layer.orDie)
|
||||
|
||||
const { runPromise } = makeRuntime(Service, layer)
|
||||
export const defaultLayer = layer.pipe(Layer.provide(AppFileSystem.defaultLayer))
|
||||
|
||||
const { runPromise } = makeRuntime(Service, defaultLayer)
|
||||
|
||||
export function read(sessionID: SessionID, file: string) {
|
||||
return runPromise((s) => s.read(sessionID, file))
|
||||
|
||||
@@ -14,19 +14,8 @@ import { Effect, Layer, ServiceMap, Stream } from "effect"
|
||||
import { InstanceState } from "@/effect/instance-state"
|
||||
import { makeRuntime } from "@/effect/run-service"
|
||||
import { errorMessage } from "@/util/error"
|
||||
import { Installation } from "@/installation"
|
||||
import {
|
||||
checkPluginCompatibility,
|
||||
isDeprecatedPlugin,
|
||||
parsePluginSpecifier,
|
||||
pluginSource,
|
||||
readPluginId,
|
||||
readV1Plugin,
|
||||
resolvePluginEntrypoint,
|
||||
resolvePluginId,
|
||||
resolvePluginTarget,
|
||||
type PluginSource,
|
||||
} from "./shared"
|
||||
import { PluginLoader } from "./loader"
|
||||
import { parsePluginSpecifier, readPluginId, readV1Plugin, resolvePluginId } from "./shared"
|
||||
|
||||
export namespace Plugin {
|
||||
const log = Log.create({ service: "plugin" })
|
||||
@@ -36,11 +25,7 @@ export namespace Plugin {
|
||||
}
|
||||
|
||||
type Loaded = {
|
||||
item: Config.PluginSpec
|
||||
spec: string
|
||||
target: string
|
||||
source: PluginSource
|
||||
mod: Record<string, unknown>
|
||||
row: PluginLoader.Loaded
|
||||
}
|
||||
|
||||
// Hook names that follow the (input, output) => Promise<void> trigger pattern
|
||||
@@ -93,91 +78,22 @@ export namespace Plugin {
|
||||
return result
|
||||
}
|
||||
|
||||
async function resolvePlugin(spec: string) {
|
||||
const parsed = parsePluginSpecifier(spec)
|
||||
const target = await resolvePluginTarget(spec, parsed).catch((err) => {
|
||||
const cause = err instanceof Error ? err.cause : err
|
||||
const detail = errorMessage(cause ?? err)
|
||||
log.error("failed to install plugin", { pkg: parsed.pkg, version: parsed.version, error: detail })
|
||||
Bus.publish(Session.Event.Error, {
|
||||
error: new NamedError.Unknown({
|
||||
message: `Failed to install plugin ${parsed.pkg}@${parsed.version}: ${detail}`,
|
||||
}).toObject(),
|
||||
})
|
||||
return ""
|
||||
})
|
||||
if (!target) return
|
||||
return target
|
||||
}
|
||||
|
||||
async function prepPlugin(item: Config.PluginSpec): Promise<Loaded | undefined> {
|
||||
const spec = Config.pluginSpecifier(item)
|
||||
if (isDeprecatedPlugin(spec)) return
|
||||
log.info("loading plugin", { path: spec })
|
||||
const resolved = await resolvePlugin(spec)
|
||||
if (!resolved) return
|
||||
|
||||
const source = pluginSource(spec)
|
||||
if (source === "npm") {
|
||||
const incompatible = await checkPluginCompatibility(resolved, Installation.VERSION)
|
||||
.then(() => false)
|
||||
.catch((err) => {
|
||||
const message = errorMessage(err)
|
||||
log.warn("plugin incompatible", { path: spec, error: message })
|
||||
Bus.publish(Session.Event.Error, {
|
||||
error: new NamedError.Unknown({
|
||||
message: `Plugin ${spec} skipped: ${message}`,
|
||||
}).toObject(),
|
||||
})
|
||||
return true
|
||||
})
|
||||
if (incompatible) return
|
||||
}
|
||||
|
||||
const target = resolved
|
||||
const entry = await resolvePluginEntrypoint(spec, target, "server").catch((err) => {
|
||||
const message = errorMessage(err)
|
||||
log.error("failed to resolve plugin server entry", { path: spec, target, error: message })
|
||||
Bus.publish(Session.Event.Error, {
|
||||
error: new NamedError.Unknown({
|
||||
message: `Failed to load plugin ${spec}: ${message}`,
|
||||
}).toObject(),
|
||||
})
|
||||
return
|
||||
})
|
||||
if (!entry) return
|
||||
|
||||
const mod = await import(entry).catch((err) => {
|
||||
const message = errorMessage(err)
|
||||
log.error("failed to load plugin", { path: spec, target: entry, error: message })
|
||||
Bus.publish(Session.Event.Error, {
|
||||
error: new NamedError.Unknown({
|
||||
message: `Failed to load plugin ${spec}: ${message}`,
|
||||
}).toObject(),
|
||||
})
|
||||
return
|
||||
})
|
||||
if (!mod) return
|
||||
|
||||
return {
|
||||
item,
|
||||
spec,
|
||||
target,
|
||||
source,
|
||||
mod,
|
||||
}
|
||||
}
|
||||
|
||||
async function applyPlugin(load: Loaded, input: PluginInput, hooks: Hooks[]) {
|
||||
const plugin = readV1Plugin(load.mod, load.spec, "server", "detect")
|
||||
const plugin = readV1Plugin(load.row.mod, load.row.spec, "server", "detect")
|
||||
if (plugin) {
|
||||
await resolvePluginId(load.source, load.spec, load.target, readPluginId(plugin.id, load.spec))
|
||||
hooks.push(await (plugin as PluginModule).server(input, Config.pluginOptions(load.item)))
|
||||
await resolvePluginId(
|
||||
load.row.source,
|
||||
load.row.spec,
|
||||
load.row.target,
|
||||
readPluginId(plugin.id, load.row.spec),
|
||||
load.row.pkg,
|
||||
)
|
||||
hooks.push(await (plugin as PluginModule).server(input, load.row.options))
|
||||
return
|
||||
}
|
||||
|
||||
for (const server of getLegacyPlugins(load.mod)) {
|
||||
hooks.push(await server(input, Config.pluginOptions(load.item)))
|
||||
for (const server of getLegacyPlugins(load.row.mod)) {
|
||||
hooks.push(await server(input, load.row.options))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,7 +148,74 @@ export namespace Plugin {
|
||||
}
|
||||
if (plugins.length) yield* config.waitForDependencies()
|
||||
|
||||
const loaded = yield* Effect.promise(() => Promise.all(plugins.map((item) => prepPlugin(item))))
|
||||
const loaded = yield* Effect.promise(() =>
|
||||
Promise.all(
|
||||
plugins.map(async (item) => {
|
||||
const plan = PluginLoader.plan(item)
|
||||
if (plan.deprecated) return
|
||||
log.info("loading plugin", { path: plan.spec })
|
||||
|
||||
const resolved = await PluginLoader.resolve(plan, "server")
|
||||
if (!resolved.ok) {
|
||||
const cause =
|
||||
resolved.error instanceof Error ? (resolved.error.cause ?? resolved.error) : resolved.error
|
||||
const message = errorMessage(cause)
|
||||
|
||||
if (resolved.stage === "install") {
|
||||
const parsed = parsePluginSpecifier(plan.spec)
|
||||
log.error("failed to install plugin", {
|
||||
pkg: parsed.pkg,
|
||||
version: parsed.version,
|
||||
error: message,
|
||||
})
|
||||
Bus.publish(Session.Event.Error, {
|
||||
error: new NamedError.Unknown({
|
||||
message: `Failed to install plugin ${parsed.pkg}@${parsed.version}: ${message}`,
|
||||
}).toObject(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if (resolved.stage === "compatibility") {
|
||||
log.warn("plugin incompatible", { path: plan.spec, error: message })
|
||||
Bus.publish(Session.Event.Error, {
|
||||
error: new NamedError.Unknown({
|
||||
message: `Plugin ${plan.spec} skipped: ${message}`,
|
||||
}).toObject(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
log.error("failed to resolve plugin server entry", {
|
||||
path: plan.spec,
|
||||
error: message,
|
||||
})
|
||||
Bus.publish(Session.Event.Error, {
|
||||
error: new NamedError.Unknown({
|
||||
message: `Failed to load plugin ${plan.spec}: ${message}`,
|
||||
}).toObject(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
const mod = await PluginLoader.load(resolved.value)
|
||||
if (!mod.ok) {
|
||||
const message = errorMessage(mod.error)
|
||||
log.error("failed to load plugin", { path: plan.spec, target: resolved.value.entry, error: message })
|
||||
Bus.publish(Session.Event.Error, {
|
||||
error: new NamedError.Unknown({
|
||||
message: `Failed to load plugin ${plan.spec}: ${message}`,
|
||||
}).toObject(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
return {
|
||||
row: mod.value,
|
||||
}
|
||||
}),
|
||||
),
|
||||
)
|
||||
for (const load of loaded) {
|
||||
if (!load) continue
|
||||
|
||||
@@ -242,14 +225,14 @@ export namespace Plugin {
|
||||
try: () => applyPlugin(load, input, hooks),
|
||||
catch: (err) => {
|
||||
const message = errorMessage(err)
|
||||
log.error("failed to load plugin", { path: load.spec, error: message })
|
||||
log.error("failed to load plugin", { path: load.row.spec, error: message })
|
||||
return message
|
||||
},
|
||||
}).pipe(
|
||||
Effect.catch((message) =>
|
||||
bus.publish(Session.Event.Error, {
|
||||
error: new NamedError.Unknown({
|
||||
message: `Failed to load plugin ${load.spec}: ${message}`,
|
||||
message: `Failed to load plugin ${load.row.spec}: ${message}`,
|
||||
}).toObject(),
|
||||
}),
|
||||
),
|
||||
|
||||
135
packages/opencode/src/plugin/loader.ts
Normal file
135
packages/opencode/src/plugin/loader.ts
Normal file
@@ -0,0 +1,135 @@
|
||||
import { Config } from "@/config/config"
|
||||
import { Installation } from "@/installation"
|
||||
import {
|
||||
checkPluginCompatibility,
|
||||
createPluginEntry,
|
||||
isDeprecatedPlugin,
|
||||
resolvePluginTarget,
|
||||
type PluginKind,
|
||||
type PluginPackage,
|
||||
type PluginSource,
|
||||
} from "./shared"
|
||||
|
||||
export namespace PluginLoader {
|
||||
export type Plan = {
|
||||
item: Config.PluginSpec
|
||||
spec: string
|
||||
options: Config.PluginOptions | undefined
|
||||
deprecated: boolean
|
||||
}
|
||||
|
||||
export type Resolved = Plan & {
|
||||
source: PluginSource
|
||||
target: string
|
||||
entry: string
|
||||
pkg?: PluginPackage
|
||||
}
|
||||
|
||||
export type Loaded = Resolved & {
|
||||
mod: Record<string, unknown>
|
||||
}
|
||||
|
||||
export function plan(item: Config.PluginSpec): Plan {
|
||||
const spec = Config.pluginSpecifier(item)
|
||||
return {
|
||||
item,
|
||||
spec,
|
||||
options: Config.pluginOptions(item),
|
||||
deprecated: isDeprecatedPlugin(spec),
|
||||
}
|
||||
}
|
||||
|
||||
export async function resolve(
|
||||
plan: Plan,
|
||||
kind: PluginKind,
|
||||
): Promise<
|
||||
{ ok: true; value: Resolved } | { ok: false; stage: "install" | "entry" | "compatibility"; error: unknown }
|
||||
> {
|
||||
let target = ""
|
||||
try {
|
||||
target = await resolvePluginTarget(plan.spec)
|
||||
} catch (error) {
|
||||
return {
|
||||
ok: false,
|
||||
stage: "install",
|
||||
error,
|
||||
}
|
||||
}
|
||||
if (!target) {
|
||||
return {
|
||||
ok: false,
|
||||
stage: "install",
|
||||
error: new Error(`Plugin ${plan.spec} target is empty`),
|
||||
}
|
||||
}
|
||||
|
||||
let base
|
||||
try {
|
||||
base = await createPluginEntry(plan.spec, target, kind)
|
||||
} catch (error) {
|
||||
return {
|
||||
ok: false,
|
||||
stage: "entry",
|
||||
error,
|
||||
}
|
||||
}
|
||||
|
||||
if (!base.entry) {
|
||||
return {
|
||||
ok: false,
|
||||
stage: "entry",
|
||||
error: new Error(`Plugin ${plan.spec} entry is empty`),
|
||||
}
|
||||
}
|
||||
|
||||
if (base.source === "npm") {
|
||||
try {
|
||||
await checkPluginCompatibility(base.target, Installation.VERSION, base.pkg)
|
||||
} catch (error) {
|
||||
return {
|
||||
ok: false,
|
||||
stage: "compatibility",
|
||||
error,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
ok: true,
|
||||
value: {
|
||||
...plan,
|
||||
source: base.source,
|
||||
target: base.target,
|
||||
entry: base.entry,
|
||||
pkg: base.pkg,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
export async function load(row: Resolved): Promise<{ ok: true; value: Loaded } | { ok: false; error: unknown }> {
|
||||
let mod
|
||||
try {
|
||||
mod = await import(row.entry)
|
||||
} catch (error) {
|
||||
return {
|
||||
ok: false,
|
||||
error,
|
||||
}
|
||||
}
|
||||
|
||||
if (!mod) {
|
||||
return {
|
||||
ok: false,
|
||||
error: new Error(`Plugin ${row.spec} module is empty`),
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
ok: true,
|
||||
value: {
|
||||
...row,
|
||||
mod,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -23,13 +23,25 @@ export type PluginSource = "file" | "npm"
|
||||
export type PluginKind = "server" | "tui"
|
||||
type PluginMode = "strict" | "detect"
|
||||
|
||||
export function pluginSource(spec: string): PluginSource {
|
||||
return spec.startsWith("file://") ? "file" : "npm"
|
||||
export type PluginPackage = {
|
||||
dir: string
|
||||
pkg: string
|
||||
json: Record<string, unknown>
|
||||
}
|
||||
|
||||
function hasEntrypoint(json: Record<string, unknown>, kind: PluginKind) {
|
||||
if (!isRecord(json.exports)) return false
|
||||
return `./${kind}` in json.exports
|
||||
export type PluginEntry = {
|
||||
spec: string
|
||||
source: PluginSource
|
||||
target: string
|
||||
pkg?: PluginPackage
|
||||
entry: string
|
||||
}
|
||||
|
||||
const INDEX_FILES = ["index.ts", "index.tsx", "index.js", "index.mjs", "index.cjs"]
|
||||
|
||||
export function pluginSource(spec: string): PluginSource {
|
||||
if (isPathPluginSpec(spec)) return "file"
|
||||
return "npm"
|
||||
}
|
||||
|
||||
function resolveExportPath(raw: string, dir: string) {
|
||||
@@ -48,26 +60,97 @@ function extractExportValue(value: unknown): string | undefined {
|
||||
return undefined
|
||||
}
|
||||
|
||||
export async function resolvePluginEntrypoint(spec: string, target: string, kind: PluginKind) {
|
||||
const pkg = await readPluginPackage(target).catch(() => undefined)
|
||||
if (!pkg) return target
|
||||
if (!hasEntrypoint(pkg.json, kind)) return target
|
||||
|
||||
const exports = pkg.json.exports
|
||||
if (!isRecord(exports)) return target
|
||||
const raw = extractExportValue(exports[`./${kind}`])
|
||||
if (!raw) return target
|
||||
function packageMain(pkg: PluginPackage) {
|
||||
const value = pkg.json.main
|
||||
if (typeof value !== "string") return
|
||||
const next = value.trim()
|
||||
if (!next) return
|
||||
return next
|
||||
}
|
||||
|
||||
function resolvePackagePath(spec: string, raw: string, kind: PluginKind, pkg: PluginPackage) {
|
||||
const resolved = resolveExportPath(raw, pkg.dir)
|
||||
const root = Filesystem.resolve(pkg.dir)
|
||||
const next = Filesystem.resolve(resolved)
|
||||
if (!Filesystem.contains(root, next)) {
|
||||
throw new Error(`Plugin ${spec} resolved ${kind} entry outside plugin directory`)
|
||||
}
|
||||
|
||||
return pathToFileURL(next).href
|
||||
}
|
||||
|
||||
function resolvePackageEntrypoint(spec: string, kind: PluginKind, pkg: PluginPackage) {
|
||||
const exports = pkg.json.exports
|
||||
if (isRecord(exports)) {
|
||||
const raw = extractExportValue(exports[`./${kind}`])
|
||||
if (raw) return resolvePackagePath(spec, raw, kind, pkg)
|
||||
}
|
||||
|
||||
if (kind !== "server") return
|
||||
const main = packageMain(pkg)
|
||||
if (!main) return
|
||||
return resolvePackagePath(spec, main, kind, pkg)
|
||||
}
|
||||
|
||||
function targetPath(target: string) {
|
||||
if (target.startsWith("file://")) return fileURLToPath(target)
|
||||
if (path.isAbsolute(target) || /^[A-Za-z]:[\\/]/.test(target)) return target
|
||||
}
|
||||
|
||||
async function resolveDirectoryIndex(dir: string) {
|
||||
for (const name of INDEX_FILES) {
|
||||
const file = path.join(dir, name)
|
||||
if (await Filesystem.exists(file)) return file
|
||||
}
|
||||
}
|
||||
|
||||
async function resolveTargetDirectory(target: string) {
|
||||
const file = targetPath(target)
|
||||
if (!file) return
|
||||
const stat = await Filesystem.stat(file)
|
||||
if (!stat?.isDirectory()) return
|
||||
return file
|
||||
}
|
||||
|
||||
async function resolvePluginEntrypoint(spec: string, target: string, kind: PluginKind, pkg?: PluginPackage) {
|
||||
const source = pluginSource(spec)
|
||||
const hit =
|
||||
pkg ?? (source === "npm" ? await readPluginPackage(target) : await readPluginPackage(target).catch(() => undefined))
|
||||
if (!hit) return target
|
||||
|
||||
const entry = resolvePackageEntrypoint(spec, kind, hit)
|
||||
if (entry) return entry
|
||||
|
||||
const dir = await resolveTargetDirectory(target)
|
||||
|
||||
if (kind === "tui") {
|
||||
if (source === "file" && dir) {
|
||||
const index = await resolveDirectoryIndex(dir)
|
||||
if (index) return pathToFileURL(index).href
|
||||
}
|
||||
|
||||
if (source === "npm") {
|
||||
throw new TypeError(`Plugin ${spec} must define package.json exports["./tui"]`)
|
||||
}
|
||||
|
||||
if (dir) {
|
||||
throw new TypeError(`Plugin ${spec} must define package.json exports["./tui"] or include index file`)
|
||||
}
|
||||
|
||||
return target
|
||||
}
|
||||
|
||||
if (dir && isRecord(hit.json.exports)) {
|
||||
if (source === "file") {
|
||||
const index = await resolveDirectoryIndex(dir)
|
||||
if (index) return pathToFileURL(index).href
|
||||
}
|
||||
|
||||
throw new TypeError(`Plugin ${spec} must define package.json exports["./server"] or package.json main`)
|
||||
}
|
||||
|
||||
return target
|
||||
}
|
||||
|
||||
export function isPathPluginSpec(spec: string) {
|
||||
return spec.startsWith("file://") || spec.startsWith(".") || path.isAbsolute(spec) || /^[A-Za-z]:[\\/]/.test(spec)
|
||||
}
|
||||
@@ -81,19 +164,21 @@ export async function resolvePathPluginTarget(spec: string) {
|
||||
return pathToFileURL(file).href
|
||||
}
|
||||
|
||||
const pkg = await Filesystem.readJson<Record<string, unknown>>(path.join(file, "package.json")).catch(() => undefined)
|
||||
if (!pkg) throw new Error(`Plugin directory ${file} is missing package.json`)
|
||||
if (typeof pkg.main !== "string" || !pkg.main.trim()) {
|
||||
throw new Error(`Plugin directory ${file} must define package.json main`)
|
||||
if (await Filesystem.exists(path.join(file, "package.json"))) {
|
||||
return pathToFileURL(file).href
|
||||
}
|
||||
return pathToFileURL(path.resolve(file, pkg.main)).href
|
||||
|
||||
const index = await resolveDirectoryIndex(file)
|
||||
if (index) return pathToFileURL(index).href
|
||||
|
||||
throw new Error(`Plugin directory ${file} is missing package.json or index file`)
|
||||
}
|
||||
|
||||
export async function checkPluginCompatibility(target: string, opencodeVersion: string) {
|
||||
export async function checkPluginCompatibility(target: string, opencodeVersion: string, pkg?: PluginPackage) {
|
||||
if (!semver.valid(opencodeVersion) || semver.major(opencodeVersion) === 0) return
|
||||
const pkg = await readPluginPackage(target).catch(() => undefined)
|
||||
if (!pkg) return
|
||||
const engines = pkg.json.engines
|
||||
const hit = pkg ?? (await readPluginPackage(target).catch(() => undefined))
|
||||
if (!hit) return
|
||||
const engines = hit.json.engines
|
||||
if (!isRecord(engines)) return
|
||||
const range = engines.opencode
|
||||
if (typeof range !== "string") return
|
||||
@@ -107,7 +192,7 @@ export async function resolvePluginTarget(spec: string, parsed = parsePluginSpec
|
||||
return BunProc.install(parsed.pkg, parsed.version)
|
||||
}
|
||||
|
||||
export async function readPluginPackage(target: string) {
|
||||
export async function readPluginPackage(target: string): Promise<PluginPackage> {
|
||||
const file = target.startsWith("file://") ? fileURLToPath(target) : target
|
||||
const stat = await Filesystem.stat(file)
|
||||
const dir = stat?.isDirectory() ? file : path.dirname(file)
|
||||
@@ -116,6 +201,20 @@ export async function readPluginPackage(target: string) {
|
||||
return { dir, pkg, json }
|
||||
}
|
||||
|
||||
export async function createPluginEntry(spec: string, target: string, kind: PluginKind): Promise<PluginEntry> {
|
||||
const source = pluginSource(spec)
|
||||
const pkg =
|
||||
source === "npm" ? await readPluginPackage(target) : await readPluginPackage(target).catch(() => undefined)
|
||||
const entry = await resolvePluginEntrypoint(spec, target, kind, pkg)
|
||||
return {
|
||||
spec,
|
||||
source,
|
||||
target,
|
||||
pkg,
|
||||
entry,
|
||||
}
|
||||
}
|
||||
|
||||
export function readPluginId(id: unknown, spec: string) {
|
||||
if (id === undefined) return
|
||||
if (typeof id !== "string") throw new TypeError(`Plugin ${spec} has invalid id type ${typeof id}`)
|
||||
@@ -158,15 +257,21 @@ export function readV1Plugin(
|
||||
return value
|
||||
}
|
||||
|
||||
export async function resolvePluginId(source: PluginSource, spec: string, target: string, id: string | undefined) {
|
||||
export async function resolvePluginId(
|
||||
source: PluginSource,
|
||||
spec: string,
|
||||
target: string,
|
||||
id: string | undefined,
|
||||
pkg?: PluginPackage,
|
||||
) {
|
||||
if (source === "file") {
|
||||
if (id) return id
|
||||
throw new TypeError(`Path plugin ${spec} must export id`)
|
||||
}
|
||||
if (id) return id
|
||||
const pkg = await readPluginPackage(target)
|
||||
if (typeof pkg.json.name !== "string" || !pkg.json.name.trim()) {
|
||||
throw new TypeError(`Plugin package ${pkg.pkg} is missing name`)
|
||||
const hit = pkg ?? (await readPluginPackage(target))
|
||||
if (typeof hit.json.name !== "string" || !hit.json.name.trim()) {
|
||||
throw new TypeError(`Plugin package ${hit.pkg} is missing name`)
|
||||
}
|
||||
return pkg.json.name.trim()
|
||||
return hit.json.name.trim()
|
||||
}
|
||||
|
||||
@@ -230,7 +230,7 @@ export namespace ProviderAuth {
|
||||
}),
|
||||
)
|
||||
|
||||
export const defaultLayer = layer.pipe(Layer.provide(Auth.layer))
|
||||
export const defaultLayer = layer.pipe(Layer.provide(Auth.defaultLayer))
|
||||
|
||||
const { runPromise } = makeRuntime(Service, defaultLayer)
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { Provider } from "../provider/provider"
|
||||
import { NamedError } from "@opencode-ai/util/error"
|
||||
import { NotFoundError } from "../storage/db"
|
||||
import { Session } from "../session"
|
||||
import type { ContentfulStatusCode } from "hono/utils/http-status"
|
||||
import type { ErrorHandler } from "hono"
|
||||
import { HTTPException } from "hono/http-exception"
|
||||
@@ -20,6 +21,9 @@ export function errorHandler(log: Log.Logger): ErrorHandler {
|
||||
else status = 500
|
||||
return c.json(err.toObject(), { status })
|
||||
}
|
||||
if (err instanceof Session.BusyError) {
|
||||
return c.json(new NamedError.Unknown({ message: err.message }).toObject(), { status: 400 })
|
||||
}
|
||||
if (err instanceof HTTPException) return err.getResponse()
|
||||
const message = err instanceof Error && err.stack ? err.stack : err.toString()
|
||||
return c.json(new NamedError.Unknown({ message }).toObject(), {
|
||||
|
||||
@@ -381,7 +381,7 @@ export const SessionRoutes = lazy(() =>
|
||||
}),
|
||||
),
|
||||
async (c) => {
|
||||
SessionPrompt.cancel(c.req.valid("param").sessionID)
|
||||
await SessionPrompt.cancel(c.req.valid("param").sessionID)
|
||||
return c.json(true)
|
||||
},
|
||||
)
|
||||
@@ -699,7 +699,7 @@ export const SessionRoutes = lazy(() =>
|
||||
),
|
||||
async (c) => {
|
||||
const params = c.req.valid("param")
|
||||
SessionPrompt.assertNotBusy(params.sessionID)
|
||||
await SessionPrompt.assertNotBusy(params.sessionID)
|
||||
await Session.removeMessage({
|
||||
sessionID: params.sessionID,
|
||||
messageID: params.messageID,
|
||||
|
||||
@@ -15,7 +15,7 @@ import { Plugin } from "@/plugin"
|
||||
import { Config } from "@/config/config"
|
||||
import { NotFoundError } from "@/storage/db"
|
||||
import { ModelID, ProviderID } from "@/provider/schema"
|
||||
import { Cause, Effect, Exit, Layer, ServiceMap } from "effect"
|
||||
import { Effect, Layer, ServiceMap } from "effect"
|
||||
import { makeRuntime } from "@/effect/run-service"
|
||||
import { isOverflow as overflow } from "./overflow"
|
||||
|
||||
@@ -45,7 +45,6 @@ export namespace SessionCompaction {
|
||||
parentID: MessageID
|
||||
messages: MessageV2.WithParts[]
|
||||
sessionID: SessionID
|
||||
abort: AbortSignal
|
||||
auto: boolean
|
||||
overflow?: boolean
|
||||
}) => Effect.Effect<"continue" | "stop">
|
||||
@@ -135,20 +134,28 @@ export namespace SessionCompaction {
|
||||
parentID: MessageID
|
||||
messages: MessageV2.WithParts[]
|
||||
sessionID: SessionID
|
||||
abort: AbortSignal
|
||||
auto: boolean
|
||||
overflow?: boolean
|
||||
}) {
|
||||
const userMessage = input.messages.findLast((m) => m.info.id === input.parentID)!.info as MessageV2.User
|
||||
const parent = input.messages.findLast((m) => m.info.id === input.parentID)
|
||||
if (!parent || parent.info.role !== "user") {
|
||||
throw new Error(`Compaction parent must be a user message: ${input.parentID}`)
|
||||
}
|
||||
const userMessage = parent.info
|
||||
|
||||
let messages = input.messages
|
||||
let replay: MessageV2.WithParts | undefined
|
||||
let replay:
|
||||
| {
|
||||
info: MessageV2.User
|
||||
parts: MessageV2.Part[]
|
||||
}
|
||||
| undefined
|
||||
if (input.overflow) {
|
||||
const idx = input.messages.findIndex((m) => m.info.id === input.parentID)
|
||||
for (let i = idx - 1; i >= 0; i--) {
|
||||
const msg = input.messages[i]
|
||||
if (msg.info.role === "user" && !msg.parts.some((p) => p.type === "compaction")) {
|
||||
replay = msg
|
||||
replay = { info: msg.info, parts: msg.parts }
|
||||
messages = input.messages.slice(0, i)
|
||||
break
|
||||
}
|
||||
@@ -206,7 +213,7 @@ When constructing the summary, try to stick to this template:
|
||||
const msgs = structuredClone(messages)
|
||||
yield* plugin.trigger("experimental.chat.messages.transform", {}, { messages: msgs })
|
||||
const modelMessages = yield* Effect.promise(() => MessageV2.toModelMessages(msgs, model, { stripMedia: true }))
|
||||
const msg = (yield* session.updateMessage({
|
||||
const msg: MessageV2.Assistant = {
|
||||
id: MessageID.ascending(),
|
||||
role: "assistant",
|
||||
parentID: input.parentID,
|
||||
@@ -231,25 +238,17 @@ When constructing the summary, try to stick to this template:
|
||||
time: {
|
||||
created: Date.now(),
|
||||
},
|
||||
})) as MessageV2.Assistant
|
||||
}
|
||||
yield* session.updateMessage(msg)
|
||||
const processor = yield* processors.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: input.sessionID,
|
||||
model,
|
||||
abort: input.abort,
|
||||
})
|
||||
const cancel = Effect.fn("SessionCompaction.cancel")(function* () {
|
||||
if (!input.abort.aborted || msg.time.completed) return
|
||||
msg.error = msg.error ?? new MessageV2.AbortedError({ message: "Aborted" }).toObject()
|
||||
msg.finish = msg.finish ?? "error"
|
||||
msg.time.completed = Date.now()
|
||||
yield* session.updateMessage(msg)
|
||||
})
|
||||
const result = yield* processor
|
||||
.process({
|
||||
user: userMessage,
|
||||
agent,
|
||||
abort: input.abort,
|
||||
sessionID: input.sessionID,
|
||||
tools: {},
|
||||
system: [],
|
||||
@@ -262,7 +261,7 @@ When constructing the summary, try to stick to this template:
|
||||
],
|
||||
model,
|
||||
})
|
||||
.pipe(Effect.ensuring(cancel()))
|
||||
.pipe(Effect.onInterrupt(() => processor.abort()))
|
||||
|
||||
if (result === "compact") {
|
||||
processor.message.error = new MessageV2.ContextOverflowError({
|
||||
@@ -277,7 +276,7 @@ When constructing the summary, try to stick to this template:
|
||||
|
||||
if (result === "continue" && input.auto) {
|
||||
if (replay) {
|
||||
const original = replay.info as MessageV2.User
|
||||
const original = replay.info
|
||||
const replayMsg = yield* session.updateMessage({
|
||||
id: MessageID.ascending(),
|
||||
role: "user",
|
||||
@@ -386,7 +385,7 @@ When constructing the summary, try to stick to this template:
|
||||
),
|
||||
)
|
||||
|
||||
const { runPromise, runPromiseExit } = makeRuntime(Service, defaultLayer)
|
||||
const { runPromise } = makeRuntime(Service, defaultLayer)
|
||||
|
||||
export async function isOverflow(input: { tokens: MessageV2.Assistant["tokens"]; model: Provider.Model }) {
|
||||
return runPromise((svc) => svc.isOverflow(input))
|
||||
@@ -396,21 +395,16 @@ When constructing the summary, try to stick to this template:
|
||||
return runPromise((svc) => svc.prune(input))
|
||||
}
|
||||
|
||||
export async function process(input: {
|
||||
parentID: MessageID
|
||||
messages: MessageV2.WithParts[]
|
||||
sessionID: SessionID
|
||||
abort: AbortSignal
|
||||
auto: boolean
|
||||
overflow?: boolean
|
||||
}) {
|
||||
const exit = await runPromiseExit((svc) => svc.process(input), { signal: input.abort })
|
||||
if (Exit.isFailure(exit)) {
|
||||
if (Cause.hasInterrupts(exit.cause) && input.abort.aborted) return "stop"
|
||||
throw Cause.squash(exit.cause)
|
||||
}
|
||||
return exit.value
|
||||
}
|
||||
export const process = fn(
|
||||
z.object({
|
||||
parentID: MessageID.zod,
|
||||
messages: z.custom<MessageV2.WithParts[]>(),
|
||||
sessionID: SessionID.zod,
|
||||
auto: z.boolean(),
|
||||
overflow: z.boolean().optional(),
|
||||
}),
|
||||
(input) => runPromise((svc) => svc.process(input)),
|
||||
)
|
||||
|
||||
export const create = fn(
|
||||
z.object({
|
||||
|
||||
@@ -334,14 +334,14 @@ export namespace Session {
|
||||
readonly messages: (input: { sessionID: SessionID; limit?: number }) => Effect.Effect<MessageV2.WithParts[]>
|
||||
readonly children: (parentID: SessionID) => Effect.Effect<Info[]>
|
||||
readonly remove: (sessionID: SessionID) => Effect.Effect<void>
|
||||
readonly updateMessage: (msg: MessageV2.Info) => Effect.Effect<MessageV2.Info>
|
||||
readonly updateMessage: <T extends MessageV2.Info>(msg: T) => Effect.Effect<T>
|
||||
readonly removeMessage: (input: { sessionID: SessionID; messageID: MessageID }) => Effect.Effect<MessageID>
|
||||
readonly removePart: (input: {
|
||||
sessionID: SessionID
|
||||
messageID: MessageID
|
||||
partID: PartID
|
||||
}) => Effect.Effect<PartID>
|
||||
readonly updatePart: (part: MessageV2.Part) => Effect.Effect<MessageV2.Part>
|
||||
readonly updatePart: <T extends MessageV2.Part>(part: T) => Effect.Effect<T>
|
||||
readonly updatePartDelta: (input: {
|
||||
sessionID: SessionID
|
||||
messageID: MessageID
|
||||
@@ -469,26 +469,23 @@ export namespace Session {
|
||||
}
|
||||
})
|
||||
|
||||
const updateMessage = Effect.fn("Session.updateMessage")(function* (msg: MessageV2.Info) {
|
||||
yield* Effect.sync(() =>
|
||||
SyncEvent.run(MessageV2.Event.Updated, {
|
||||
sessionID: msg.sessionID,
|
||||
info: msg,
|
||||
}),
|
||||
)
|
||||
return msg
|
||||
})
|
||||
const updateMessage = <T extends MessageV2.Info>(msg: T): Effect.Effect<T> =>
|
||||
Effect.gen(function* () {
|
||||
yield* Effect.sync(() => SyncEvent.run(MessageV2.Event.Updated, { sessionID: msg.sessionID, info: msg }))
|
||||
return msg
|
||||
}).pipe(Effect.withSpan("Session.updateMessage"))
|
||||
|
||||
const updatePart = Effect.fn("Session.updatePart")(function* (part: MessageV2.Part) {
|
||||
yield* Effect.sync(() =>
|
||||
SyncEvent.run(MessageV2.Event.PartUpdated, {
|
||||
sessionID: part.sessionID,
|
||||
part: structuredClone(part),
|
||||
time: Date.now(),
|
||||
}),
|
||||
)
|
||||
return part
|
||||
})
|
||||
const updatePart = <T extends MessageV2.Part>(part: T): Effect.Effect<T> =>
|
||||
Effect.gen(function* () {
|
||||
yield* Effect.sync(() =>
|
||||
SyncEvent.run(MessageV2.Event.PartUpdated, {
|
||||
sessionID: part.sessionID,
|
||||
part: structuredClone(part),
|
||||
time: Date.now(),
|
||||
}),
|
||||
)
|
||||
return part
|
||||
}).pipe(Effect.withSpan("Session.updatePart"))
|
||||
|
||||
const create = Effect.fn("Session.create")(function* (input?: {
|
||||
parentID?: SessionID
|
||||
@@ -851,7 +848,10 @@ export namespace Session {
|
||||
|
||||
export const children = fn(SessionID.zod, (id) => runPromise((svc) => svc.children(id)))
|
||||
export const remove = fn(SessionID.zod, (id) => runPromise((svc) => svc.remove(id)))
|
||||
export const updateMessage = fn(MessageV2.Info, (msg) => runPromise((svc) => svc.updateMessage(msg)))
|
||||
export async function updateMessage<T extends MessageV2.Info>(msg: T): Promise<T> {
|
||||
MessageV2.Info.parse(msg)
|
||||
return runPromise((svc) => svc.updateMessage(msg))
|
||||
}
|
||||
|
||||
export const removeMessage = fn(z.object({ sessionID: SessionID.zod, messageID: MessageID.zod }), (input) =>
|
||||
runPromise((svc) => svc.removeMessage(input)),
|
||||
@@ -862,7 +862,10 @@ export namespace Session {
|
||||
(input) => runPromise((svc) => svc.removePart(input)),
|
||||
)
|
||||
|
||||
export const updatePart = fn(MessageV2.Part, (part) => runPromise((svc) => svc.updatePart(part)))
|
||||
export async function updatePart<T extends MessageV2.Part>(part: T): Promise<T> {
|
||||
MessageV2.Part.parse(part)
|
||||
return runPromise((svc) => svc.updatePart(part))
|
||||
}
|
||||
|
||||
export const updatePartDelta = fn(
|
||||
z.object({
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { Provider } from "@/provider/provider"
|
||||
import { Log } from "@/util/log"
|
||||
import { Effect, Layer, ServiceMap } from "effect"
|
||||
import { Cause, Effect, Layer, Record, ServiceMap } from "effect"
|
||||
import * as Queue from "effect/Queue"
|
||||
import * as Stream from "effect/Stream"
|
||||
import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool, jsonSchema } from "ai"
|
||||
import { mergeDeep, pipe } from "remeda"
|
||||
@@ -28,7 +29,6 @@ export namespace LLM {
|
||||
agent: Agent.Info
|
||||
permission?: Permission.Ruleset
|
||||
system: string[]
|
||||
abort: AbortSignal
|
||||
messages: ModelMessage[]
|
||||
small?: boolean
|
||||
tools: Record<string, Tool>
|
||||
@@ -36,6 +36,10 @@ export namespace LLM {
|
||||
toolChoice?: "auto" | "required" | "none"
|
||||
}
|
||||
|
||||
export type StreamRequest = StreamInput & {
|
||||
abort: AbortSignal
|
||||
}
|
||||
|
||||
export type Event = Awaited<ReturnType<typeof stream>>["fullStream"] extends AsyncIterable<infer T> ? T : never
|
||||
|
||||
export interface Interface {
|
||||
@@ -49,15 +53,32 @@ export namespace LLM {
|
||||
Effect.gen(function* () {
|
||||
return Service.of({
|
||||
stream(input) {
|
||||
return Stream.unwrap(
|
||||
Effect.promise(() => LLM.stream(input)).pipe(
|
||||
Effect.map((result) =>
|
||||
Stream.fromAsyncIterable(result.fullStream, (err) => err).pipe(
|
||||
Stream.mapEffect((event) => Effect.succeed(event)),
|
||||
),
|
||||
),
|
||||
const stream: Stream.Stream<Event, unknown> = Stream.scoped(
|
||||
Stream.unwrap(
|
||||
Effect.gen(function* () {
|
||||
const ctrl = yield* Effect.acquireRelease(
|
||||
Effect.sync(() => new AbortController()),
|
||||
(ctrl) => Effect.sync(() => ctrl.abort()),
|
||||
)
|
||||
const queue = yield* Queue.unbounded<Event, unknown | Cause.Done>()
|
||||
|
||||
yield* Effect.promise(async () => {
|
||||
const result = await LLM.stream({ ...input, abort: ctrl.signal })
|
||||
for await (const event of result.fullStream) {
|
||||
if (!Queue.offerUnsafe(queue, event)) break
|
||||
}
|
||||
Queue.endUnsafe(queue)
|
||||
}).pipe(
|
||||
Effect.catchCause((cause) => Effect.sync(() => void Queue.failCauseUnsafe(queue, cause))),
|
||||
Effect.onInterrupt(() => Effect.sync(() => ctrl.abort())),
|
||||
Effect.forkScoped,
|
||||
)
|
||||
|
||||
return Stream.fromQueue(queue)
|
||||
}),
|
||||
),
|
||||
)
|
||||
return stream
|
||||
},
|
||||
})
|
||||
}),
|
||||
@@ -65,7 +86,7 @@ export namespace LLM {
|
||||
|
||||
export const defaultLayer = layer
|
||||
|
||||
export async function stream(input: StreamInput) {
|
||||
export async function stream(input: StreamRequest) {
|
||||
const l = log
|
||||
.clone()
|
||||
.tag("providerID", input.model.providerID)
|
||||
@@ -152,7 +173,7 @@ export namespace LLM {
|
||||
"chat.params",
|
||||
{
|
||||
sessionID: input.sessionID,
|
||||
agent: input.agent,
|
||||
agent: input.agent.name,
|
||||
model: input.model,
|
||||
provider,
|
||||
message: input.user,
|
||||
@@ -171,7 +192,7 @@ export namespace LLM {
|
||||
"chat.headers",
|
||||
{
|
||||
sessionID: input.sessionID,
|
||||
agent: input.agent,
|
||||
agent: input.agent.name,
|
||||
model: input.model,
|
||||
provider,
|
||||
message: input.user,
|
||||
@@ -322,17 +343,12 @@ export namespace LLM {
|
||||
})
|
||||
}
|
||||
|
||||
async function resolveTools(input: Pick<StreamInput, "tools" | "agent" | "permission" | "user">) {
|
||||
function resolveTools(input: Pick<StreamInput, "tools" | "agent" | "permission" | "user">) {
|
||||
const disabled = Permission.disabled(
|
||||
Object.keys(input.tools),
|
||||
Permission.merge(input.agent.permission, input.permission ?? []),
|
||||
)
|
||||
for (const tool of Object.keys(input.tools)) {
|
||||
if (input.user.tools?.[tool] === false || disabled.has(tool)) {
|
||||
delete input.tools[tool]
|
||||
}
|
||||
}
|
||||
return input.tools
|
||||
return Record.filter(input.tools, (_, k) => input.user.tools?.[k] !== false && !disabled.has(k))
|
||||
}
|
||||
|
||||
// Check if messages contain any tool-call content
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import { Cause, Effect, Exit, Layer, ServiceMap } from "effect"
|
||||
import { Cause, Effect, Layer, ServiceMap } from "effect"
|
||||
import * as Stream from "effect/Stream"
|
||||
import { Agent } from "@/agent/agent"
|
||||
import { Bus } from "@/bus"
|
||||
import { makeRuntime } from "@/effect/run-service"
|
||||
import { Config } from "@/config/config"
|
||||
import { Permission } from "@/permission"
|
||||
import { Plugin } from "@/plugin"
|
||||
@@ -35,17 +34,10 @@ export namespace SessionProcessor {
|
||||
readonly process: (streamInput: LLM.StreamInput) => Effect.Effect<Result>
|
||||
}
|
||||
|
||||
export interface Info {
|
||||
readonly message: MessageV2.Assistant
|
||||
readonly partFromToolCall: (toolCallID: string) => MessageV2.ToolPart | undefined
|
||||
readonly process: (streamInput: LLM.StreamInput) => Promise<Result>
|
||||
}
|
||||
|
||||
type Input = {
|
||||
assistantMessage: MessageV2.Assistant
|
||||
sessionID: SessionID
|
||||
model: Provider.Model
|
||||
abort: AbortSignal
|
||||
}
|
||||
|
||||
export interface Interface {
|
||||
@@ -96,7 +88,6 @@ export namespace SessionProcessor {
|
||||
assistantMessage: input.assistantMessage,
|
||||
sessionID: input.sessionID,
|
||||
model: input.model,
|
||||
abort: input.abort,
|
||||
toolcalls: {},
|
||||
shouldBreak: false,
|
||||
snapshot: undefined,
|
||||
@@ -105,11 +96,12 @@ export namespace SessionProcessor {
|
||||
currentText: undefined,
|
||||
reasoningMap: {},
|
||||
}
|
||||
let aborted = false
|
||||
|
||||
const parse = (e: unknown) =>
|
||||
MessageV2.fromError(e, {
|
||||
providerID: input.model.providerID,
|
||||
aborted: input.abort.aborted,
|
||||
aborted,
|
||||
})
|
||||
|
||||
const handleEvent = Effect.fn("SessionProcessor.handleEvent")(function* (value: StreamEvent) {
|
||||
@@ -155,7 +147,10 @@ export namespace SessionProcessor {
|
||||
return
|
||||
|
||||
case "tool-input-start":
|
||||
ctx.toolcalls[value.id] = (yield* session.updatePart({
|
||||
if (ctx.assistantMessage.summary) {
|
||||
throw new Error(`Tool call not allowed while generating summary: ${value.toolName}`)
|
||||
}
|
||||
ctx.toolcalls[value.id] = yield* session.updatePart({
|
||||
id: ctx.toolcalls[value.id]?.id ?? PartID.ascending(),
|
||||
messageID: ctx.assistantMessage.id,
|
||||
sessionID: ctx.assistantMessage.sessionID,
|
||||
@@ -163,7 +158,7 @@ export namespace SessionProcessor {
|
||||
tool: value.toolName,
|
||||
callID: value.id,
|
||||
state: { status: "pending", input: {}, raw: "" },
|
||||
})) as MessageV2.ToolPart
|
||||
} satisfies MessageV2.ToolPart)
|
||||
return
|
||||
|
||||
case "tool-input-delta":
|
||||
@@ -173,14 +168,17 @@ export namespace SessionProcessor {
|
||||
return
|
||||
|
||||
case "tool-call": {
|
||||
if (ctx.assistantMessage.summary) {
|
||||
throw new Error(`Tool call not allowed while generating summary: ${value.toolName}`)
|
||||
}
|
||||
const match = ctx.toolcalls[value.toolCallId]
|
||||
if (!match) return
|
||||
ctx.toolcalls[value.toolCallId] = (yield* session.updatePart({
|
||||
ctx.toolcalls[value.toolCallId] = yield* session.updatePart({
|
||||
...match,
|
||||
tool: value.toolName,
|
||||
state: { status: "running", input: value.input, time: { start: Date.now() } },
|
||||
metadata: value.providerMetadata,
|
||||
})) as MessageV2.ToolPart
|
||||
} satisfies MessageV2.ToolPart)
|
||||
|
||||
const parts = yield* Effect.promise(() => MessageV2.parts(ctx.assistantMessage.id))
|
||||
const recentParts = parts.slice(-DOOM_LOOP_THRESHOLD)
|
||||
@@ -414,7 +412,7 @@ export namespace SessionProcessor {
|
||||
})
|
||||
|
||||
const halt = Effect.fn("SessionProcessor.halt")(function* (e: unknown) {
|
||||
log.error("process", { error: e, stack: JSON.stringify((e as any)?.stack) })
|
||||
log.error("process", { error: e, stack: e instanceof Error ? e.stack : undefined })
|
||||
const error = parse(e)
|
||||
if (MessageV2.ContextOverflowError.isInstance(error)) {
|
||||
ctx.needsCompaction = true
|
||||
@@ -429,59 +427,6 @@ export namespace SessionProcessor {
|
||||
yield* status.set(ctx.sessionID, { type: "idle" })
|
||||
})
|
||||
|
||||
const process = Effect.fn("SessionProcessor.process")(function* (streamInput: LLM.StreamInput) {
|
||||
log.info("process")
|
||||
ctx.needsCompaction = false
|
||||
ctx.shouldBreak = (yield* config.get()).experimental?.continue_loop_on_deny !== true
|
||||
|
||||
yield* Effect.gen(function* () {
|
||||
ctx.currentText = undefined
|
||||
ctx.reasoningMap = {}
|
||||
const stream = llm.stream(streamInput)
|
||||
|
||||
yield* stream.pipe(
|
||||
Stream.tap((event) =>
|
||||
Effect.gen(function* () {
|
||||
input.abort.throwIfAborted()
|
||||
yield* handleEvent(event)
|
||||
}),
|
||||
),
|
||||
Stream.takeUntil(() => ctx.needsCompaction),
|
||||
Stream.runDrain,
|
||||
)
|
||||
}).pipe(
|
||||
Effect.catchCauseIf(
|
||||
(cause) => !Cause.hasInterruptsOnly(cause),
|
||||
(cause) => Effect.fail(Cause.squash(cause)),
|
||||
),
|
||||
Effect.retry(
|
||||
SessionRetry.policy({
|
||||
parse,
|
||||
set: (info) =>
|
||||
status.set(ctx.sessionID, {
|
||||
type: "retry",
|
||||
attempt: info.attempt,
|
||||
message: info.message,
|
||||
next: info.next,
|
||||
}),
|
||||
}),
|
||||
),
|
||||
Effect.catchCause((cause) =>
|
||||
Cause.hasInterruptsOnly(cause)
|
||||
? halt(new DOMException("Aborted", "AbortError"))
|
||||
: halt(Cause.squash(cause)),
|
||||
),
|
||||
Effect.ensuring(cleanup()),
|
||||
)
|
||||
|
||||
if (input.abort.aborted && !ctx.assistantMessage.error) {
|
||||
yield* abort()
|
||||
}
|
||||
if (ctx.needsCompaction) return "compact"
|
||||
if (ctx.blocked || ctx.assistantMessage.error || input.abort.aborted) return "stop"
|
||||
return "continue"
|
||||
})
|
||||
|
||||
const abort = Effect.fn("SessionProcessor.abort")(() =>
|
||||
Effect.gen(function* () {
|
||||
if (!ctx.assistantMessage.error) {
|
||||
@@ -495,6 +440,53 @@ export namespace SessionProcessor {
|
||||
}),
|
||||
)
|
||||
|
||||
const process = Effect.fn("SessionProcessor.process")(function* (streamInput: LLM.StreamInput) {
|
||||
log.info("process")
|
||||
ctx.needsCompaction = false
|
||||
ctx.shouldBreak = (yield* config.get()).experimental?.continue_loop_on_deny !== true
|
||||
|
||||
return yield* Effect.gen(function* () {
|
||||
yield* Effect.gen(function* () {
|
||||
ctx.currentText = undefined
|
||||
ctx.reasoningMap = {}
|
||||
const stream = llm.stream(streamInput)
|
||||
|
||||
yield* stream.pipe(
|
||||
Stream.tap((event) => handleEvent(event)),
|
||||
Stream.takeUntil(() => ctx.needsCompaction),
|
||||
Stream.runDrain,
|
||||
)
|
||||
}).pipe(
|
||||
Effect.onInterrupt(() => Effect.sync(() => void (aborted = true))),
|
||||
Effect.catchCauseIf(
|
||||
(cause) => !Cause.hasInterruptsOnly(cause),
|
||||
(cause) => Effect.fail(Cause.squash(cause)),
|
||||
),
|
||||
Effect.retry(
|
||||
SessionRetry.policy({
|
||||
parse,
|
||||
set: (info) =>
|
||||
status.set(ctx.sessionID, {
|
||||
type: "retry",
|
||||
attempt: info.attempt,
|
||||
message: info.message,
|
||||
next: info.next,
|
||||
}),
|
||||
}),
|
||||
),
|
||||
Effect.catch(halt),
|
||||
Effect.ensuring(cleanup()),
|
||||
)
|
||||
|
||||
if (aborted && !ctx.assistantMessage.error) {
|
||||
yield* abort()
|
||||
}
|
||||
if (ctx.needsCompaction) return "compact"
|
||||
if (ctx.blocked || ctx.assistantMessage.error || aborted) return "stop"
|
||||
return "continue"
|
||||
}).pipe(Effect.onInterrupt(() => abort().pipe(Effect.asVoid)))
|
||||
})
|
||||
|
||||
return {
|
||||
get message() {
|
||||
return ctx.assistantMessage
|
||||
@@ -526,29 +518,4 @@ export namespace SessionProcessor {
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
const { runPromise } = makeRuntime(Service, defaultLayer)
|
||||
|
||||
export async function create(input: Input): Promise<Info> {
|
||||
const hit = await runPromise((svc) => svc.create(input))
|
||||
return {
|
||||
get message() {
|
||||
return hit.message
|
||||
},
|
||||
partFromToolCall(toolCallID: string) {
|
||||
return hit.partFromToolCall(toolCallID)
|
||||
},
|
||||
async process(streamInput: LLM.StreamInput) {
|
||||
const exit = await Effect.runPromiseExit(hit.process(streamInput), { signal: input.abort })
|
||||
if (Exit.isFailure(exit)) {
|
||||
if (Cause.hasInterrupts(exit.cause) && input.abort.aborted) {
|
||||
await Effect.runPromise(hit.abort())
|
||||
return "stop"
|
||||
}
|
||||
throw Cause.squash(exit.cause)
|
||||
}
|
||||
return exit.value
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -21,7 +21,7 @@ export namespace SessionRevert {
|
||||
export type RevertInput = z.infer<typeof RevertInput>
|
||||
|
||||
export async function revert(input: RevertInput) {
|
||||
SessionPrompt.assertNotBusy(input.sessionID)
|
||||
await SessionPrompt.assertNotBusy(input.sessionID)
|
||||
const all = await Session.messages({ sessionID: input.sessionID })
|
||||
let lastUser: MessageV2.User | undefined
|
||||
const session = await Session.get(input.sessionID)
|
||||
@@ -80,7 +80,7 @@ export namespace SessionRevert {
|
||||
|
||||
export async function unrevert(input: { sessionID: SessionID }) {
|
||||
log.info("unreverting", input)
|
||||
SessionPrompt.assertNotBusy(input.sessionID)
|
||||
await SessionPrompt.assertNotBusy(input.sessionID)
|
||||
const session = await Session.get(input.sessionID)
|
||||
if (!session.revert) return session
|
||||
if (session.revert.snapshot) await Snapshot.restore(session.revert.snapshot)
|
||||
@@ -92,12 +92,10 @@ export namespace SessionRevert {
|
||||
const sessionID = session.id
|
||||
const msgs = await Session.messages({ sessionID })
|
||||
const messageID = session.revert.messageID
|
||||
const preserve = [] as MessageV2.WithParts[]
|
||||
const remove = [] as MessageV2.WithParts[]
|
||||
let target: MessageV2.WithParts | undefined
|
||||
for (const msg of msgs) {
|
||||
if (msg.info.id < messageID) {
|
||||
preserve.push(msg)
|
||||
continue
|
||||
}
|
||||
if (msg.info.id > messageID) {
|
||||
@@ -105,7 +103,6 @@ export namespace SessionRevert {
|
||||
continue
|
||||
}
|
||||
if (session.revert.partID) {
|
||||
preserve.push(msg)
|
||||
target = msg
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ import z from "zod"
|
||||
import { Session } from "."
|
||||
|
||||
import { MessageV2 } from "./message-v2"
|
||||
import { Identifier } from "@/id/id"
|
||||
import { SessionID, MessageID } from "./schema"
|
||||
import { Snapshot } from "@/snapshot"
|
||||
|
||||
@@ -110,8 +109,8 @@ export namespace SessionSummary {
|
||||
(m) => m.info.id === input.messageID || (m.info.role === "assistant" && m.info.parentID === input.messageID),
|
||||
)
|
||||
const msgWithParts = messages.find((m) => m.info.id === input.messageID)
|
||||
if (!msgWithParts) return
|
||||
const userMsg = msgWithParts.info as MessageV2.User
|
||||
if (!msgWithParts || msgWithParts.info.role !== "user") return
|
||||
const userMsg = msgWithParts.info
|
||||
const diffs = await computeDiff({ messages })
|
||||
userMsg.summary = {
|
||||
...userMsg.summary,
|
||||
|
||||
@@ -11,7 +11,7 @@ import { makeRuntime } from "@/effect/run-service"
|
||||
import { Flag } from "@/flag/flag"
|
||||
import { Global } from "@/global"
|
||||
import { Permission } from "@/permission"
|
||||
import { Filesystem } from "@/util/filesystem"
|
||||
import { AppFileSystem } from "@/filesystem"
|
||||
import { Config } from "../config/config"
|
||||
import { ConfigMarkdown } from "../config/markdown"
|
||||
import { Glob } from "../util/glob"
|
||||
@@ -139,28 +139,20 @@ export namespace Skill {
|
||||
config: Config.Interface,
|
||||
discovery: Discovery.Interface,
|
||||
bus: Bus.Interface,
|
||||
fsys: AppFileSystem.Interface,
|
||||
directory: string,
|
||||
worktree: string,
|
||||
) {
|
||||
if (!Flag.OPENCODE_DISABLE_EXTERNAL_SKILLS) {
|
||||
for (const dir of EXTERNAL_DIRS) {
|
||||
const root = path.join(Global.Path.home, dir)
|
||||
const isDir = yield* Effect.promise(() => Filesystem.isDir(root))
|
||||
if (!isDir) continue
|
||||
if (!(yield* fsys.isDir(root))) continue
|
||||
yield* scan(state, bus, root, EXTERNAL_SKILL_PATTERN, { dot: true, scope: "global" })
|
||||
}
|
||||
|
||||
const upDirs = yield* Effect.promise(async () => {
|
||||
const dirs: string[] = []
|
||||
for await (const root of Filesystem.up({
|
||||
targets: EXTERNAL_DIRS,
|
||||
start: directory,
|
||||
stop: worktree,
|
||||
})) {
|
||||
dirs.push(root)
|
||||
}
|
||||
return dirs
|
||||
})
|
||||
const upDirs = yield* fsys
|
||||
.up({ targets: EXTERNAL_DIRS, start: directory, stop: worktree })
|
||||
.pipe(Effect.catch(() => Effect.succeed([] as string[])))
|
||||
|
||||
for (const root of upDirs) {
|
||||
yield* scan(state, bus, root, EXTERNAL_SKILL_PATTERN, { dot: true, scope: "project" })
|
||||
@@ -176,8 +168,7 @@ export namespace Skill {
|
||||
for (const item of cfg.skills?.paths ?? []) {
|
||||
const expanded = item.startsWith("~/") ? path.join(os.homedir(), item.slice(2)) : item
|
||||
const dir = path.isAbsolute(expanded) ? expanded : path.join(directory, expanded)
|
||||
const isDir = yield* Effect.promise(() => Filesystem.isDir(dir))
|
||||
if (!isDir) {
|
||||
if (!(yield* fsys.isDir(dir))) {
|
||||
log.warn("skill path not found", { path: dir })
|
||||
continue
|
||||
}
|
||||
@@ -198,16 +189,17 @@ export namespace Skill {
|
||||
|
||||
export class Service extends ServiceMap.Service<Service, Interface>()("@opencode/Skill") {}
|
||||
|
||||
export const layer: Layer.Layer<Service, never, Discovery.Service | Config.Service | Bus.Service> = Layer.effect(
|
||||
export const layer = Layer.effect(
|
||||
Service,
|
||||
Effect.gen(function* () {
|
||||
const discovery = yield* Discovery.Service
|
||||
const config = yield* Config.Service
|
||||
const bus = yield* Bus.Service
|
||||
const fsys = yield* AppFileSystem.Service
|
||||
const state = yield* InstanceState.make(
|
||||
Effect.fn("Skill.state")(function* (ctx) {
|
||||
const s: State = { skills: {}, dirs: new Set() }
|
||||
yield* loadSkills(s, config, discovery, bus, ctx.directory, ctx.worktree)
|
||||
yield* loadSkills(s, config, discovery, bus, fsys, ctx.directory, ctx.worktree)
|
||||
return s
|
||||
}),
|
||||
)
|
||||
@@ -238,10 +230,11 @@ export namespace Skill {
|
||||
}),
|
||||
)
|
||||
|
||||
export const defaultLayer: Layer.Layer<Service> = layer.pipe(
|
||||
export const defaultLayer = layer.pipe(
|
||||
Layer.provide(Discovery.defaultLayer),
|
||||
Layer.provide(Config.defaultLayer),
|
||||
Layer.provide(Bus.layer),
|
||||
Layer.provide(AppFileSystem.defaultLayer),
|
||||
)
|
||||
|
||||
export function fmt(list: Info[], opts: { verbose: boolean }) {
|
||||
|
||||
@@ -46,12 +46,12 @@ export namespace ToolRegistry {
|
||||
readonly tools: (
|
||||
model: { providerID: ProviderID; modelID: ModelID },
|
||||
agent?: Agent.Info,
|
||||
) => Effect.Effect<(Awaited<ReturnType<Tool.Info["init"]>> & { id: string })[]>
|
||||
) => Effect.Effect<(Tool.Def & { id: string })[]>
|
||||
}
|
||||
|
||||
export class Service extends ServiceMap.Service<Service, Interface>()("@opencode/ToolRegistry") {}
|
||||
|
||||
export const layer = Layer.effect(
|
||||
export const layer: Layer.Layer<Service, never, Config.Service | Plugin.Service> = Layer.effect(
|
||||
Service,
|
||||
Effect.gen(function* () {
|
||||
const config = yield* Config.Service
|
||||
@@ -174,7 +174,7 @@ export namespace ToolRegistry {
|
||||
})
|
||||
return yield* Effect.forEach(
|
||||
filtered,
|
||||
Effect.fnUntraced(function* (tool) {
|
||||
Effect.fnUntraced(function* (tool: Tool.Info) {
|
||||
using _ = log.time(tool.id)
|
||||
const next = yield* Effect.promise(() => tool.init({ agent }))
|
||||
const output = {
|
||||
@@ -184,10 +184,11 @@ export namespace ToolRegistry {
|
||||
yield* plugin.trigger("tool.definition", { toolID: tool.id }, output)
|
||||
return {
|
||||
id: tool.id,
|
||||
...next,
|
||||
description: output.description,
|
||||
parameters: output.parameters,
|
||||
} as Awaited<ReturnType<Tool.Info["init"]>> & { id: string }
|
||||
execute: next.execute,
|
||||
formatValidationError: next.formatValidationError,
|
||||
}
|
||||
}),
|
||||
{ concurrency: "unbounded" },
|
||||
)
|
||||
@@ -217,7 +218,7 @@ export namespace ToolRegistry {
|
||||
modelID: ModelID
|
||||
},
|
||||
agent?: Agent.Info,
|
||||
): Promise<(Awaited<ReturnType<Tool.Info["init"]>> & { id: string })[]> {
|
||||
): Promise<(Tool.Def & { id: string })[]> {
|
||||
return runPromise((svc) => svc.tools(model, agent))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,22 +25,24 @@ export namespace Tool {
|
||||
metadata(input: { title?: string; metadata?: M }): void
|
||||
ask(input: Omit<Permission.Request, "id" | "sessionID" | "tool">): Promise<void>
|
||||
}
|
||||
export interface Def<Parameters extends z.ZodType = z.ZodType, M extends Metadata = Metadata> {
|
||||
description: string
|
||||
parameters: Parameters
|
||||
execute(
|
||||
args: z.infer<Parameters>,
|
||||
ctx: Context,
|
||||
): Promise<{
|
||||
title: string
|
||||
metadata: M
|
||||
output: string
|
||||
attachments?: Omit<MessageV2.FilePart, "id" | "sessionID" | "messageID">[]
|
||||
}>
|
||||
formatValidationError?(error: z.ZodError): string
|
||||
}
|
||||
|
||||
export interface Info<Parameters extends z.ZodType = z.ZodType, M extends Metadata = Metadata> {
|
||||
id: string
|
||||
init: (ctx?: InitContext) => Promise<{
|
||||
description: string
|
||||
parameters: Parameters
|
||||
execute(
|
||||
args: z.infer<Parameters>,
|
||||
ctx: Context,
|
||||
): Promise<{
|
||||
title: string
|
||||
metadata: M
|
||||
output: string
|
||||
attachments?: Omit<MessageV2.FilePart, "id" | "sessionID" | "messageID">[]
|
||||
}>
|
||||
formatValidationError?(error: z.ZodError): string
|
||||
}>
|
||||
init: (ctx?: InitContext) => Promise<Def<Parameters, M>>
|
||||
}
|
||||
|
||||
export type InferParameters<T extends Info> = T extends Info<infer P> ? z.infer<P> : never
|
||||
@@ -48,7 +50,7 @@ export namespace Tool {
|
||||
|
||||
export function define<Parameters extends z.ZodType, Result extends Metadata>(
|
||||
id: string,
|
||||
init: Info<Parameters, Result>["init"] | Awaited<ReturnType<Info<Parameters, Result>["init"]>>,
|
||||
init: Info<Parameters, Result>["init"] | Def<Parameters, Result>,
|
||||
): Info<Parameters, Result> {
|
||||
return {
|
||||
id,
|
||||
|
||||
@@ -33,7 +33,7 @@ test("adds tui plugin at runtime from spec", async () => {
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [],
|
||||
plugin_meta: undefined,
|
||||
plugin_records: undefined,
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
|
||||
@@ -48,7 +48,7 @@ test("installs plugin without loading it", async () => {
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
let cfg: Awaited<ReturnType<typeof TuiConfig.get>> = {
|
||||
plugin: [],
|
||||
plugin_meta: undefined,
|
||||
plugin_records: undefined,
|
||||
}
|
||||
const get = spyOn(TuiConfig, "get").mockImplementation(async () => cfg)
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
@@ -68,12 +68,13 @@ test("installs plugin without loading it", async () => {
|
||||
await TuiPluginRuntime.init(api)
|
||||
cfg = {
|
||||
plugin: [[tmp.extra.spec, { marker: tmp.extra.marker }]],
|
||||
plugin_meta: {
|
||||
[tmp.extra.spec]: {
|
||||
plugin_records: [
|
||||
{
|
||||
item: [tmp.extra.spec, { marker: tmp.extra.marker }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
const out = await TuiPluginRuntime.installPlugin(tmp.extra.spec)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { expect, spyOn, test } from "bun:test"
|
||||
import fs from "fs/promises"
|
||||
import path from "path"
|
||||
import { pathToFileURL } from "url"
|
||||
import { tmpdir } from "../../fixture/fixture"
|
||||
import { createTuiPluginApi } from "../../fixture/tui-plugin"
|
||||
import { TuiConfig } from "../../../src/config/tui"
|
||||
@@ -45,9 +46,13 @@ test("loads npm tui plugin from package ./tui export", async () => {
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [[tmp.extra.spec, { marker: tmp.extra.marker }]],
|
||||
plugin_meta: {
|
||||
[tmp.extra.spec]: { scope: "local", source: path.join(tmp.path, "tui.json") },
|
||||
},
|
||||
plugin_records: [
|
||||
{
|
||||
item: [tmp.extra.spec, { marker: tmp.extra.marker }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
@@ -70,6 +75,65 @@ test("loads npm tui plugin from package ./tui export", async () => {
|
||||
}
|
||||
})
|
||||
|
||||
test("does not use npm package exports dot for tui entry", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const mod = path.join(dir, "mods", "acme-plugin")
|
||||
const marker = path.join(dir, "dot-called.txt")
|
||||
await fs.mkdir(mod, { recursive: true })
|
||||
|
||||
await Bun.write(
|
||||
path.join(mod, "package.json"),
|
||||
JSON.stringify({
|
||||
name: "acme-plugin",
|
||||
type: "module",
|
||||
exports: { ".": "./index.js" },
|
||||
}),
|
||||
)
|
||||
await Bun.write(
|
||||
path.join(mod, "index.js"),
|
||||
`export default {
|
||||
id: "demo.dot",
|
||||
tui: async () => {
|
||||
await Bun.write(${JSON.stringify(marker)}, "called")
|
||||
},
|
||||
}
|
||||
`,
|
||||
)
|
||||
|
||||
return { mod, marker, spec: "acme-plugin@1.0.0" }
|
||||
},
|
||||
})
|
||||
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [tmp.extra.spec],
|
||||
plugin_records: [
|
||||
{
|
||||
item: tmp.extra.spec,
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
const install = spyOn(BunProc, "install").mockResolvedValue(tmp.extra.mod)
|
||||
|
||||
try {
|
||||
await TuiPluginRuntime.init(createTuiPluginApi())
|
||||
await expect(fs.readFile(tmp.extra.marker, "utf8")).rejects.toThrow()
|
||||
expect(TuiPluginRuntime.list().some((item) => item.spec === tmp.extra.spec)).toBe(false)
|
||||
} finally {
|
||||
await TuiPluginRuntime.dispose()
|
||||
install.mockRestore()
|
||||
cwd.mockRestore()
|
||||
get.mockRestore()
|
||||
wait.mockRestore()
|
||||
delete process.env.OPENCODE_PLUGIN_META_FILE
|
||||
}
|
||||
})
|
||||
|
||||
test("rejects npm tui export that resolves outside plugin directory", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
@@ -107,9 +171,13 @@ test("rejects npm tui export that resolves outside plugin directory", async () =
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [tmp.extra.spec],
|
||||
plugin_meta: {
|
||||
[tmp.extra.spec]: { scope: "local", source: path.join(tmp.path, "tui.json") },
|
||||
},
|
||||
plugin_records: [
|
||||
{
|
||||
item: tmp.extra.spec,
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
@@ -166,9 +234,13 @@ test("rejects npm tui plugin that exports server and tui together", async () =>
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [tmp.extra.spec],
|
||||
plugin_meta: {
|
||||
[tmp.extra.spec]: { scope: "local", source: path.join(tmp.path, "tui.json") },
|
||||
},
|
||||
plugin_records: [
|
||||
{
|
||||
item: tmp.extra.spec,
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
@@ -187,3 +259,228 @@ test("rejects npm tui plugin that exports server and tui together", async () =>
|
||||
delete process.env.OPENCODE_PLUGIN_META_FILE
|
||||
}
|
||||
})
|
||||
|
||||
test("does not use npm package main for tui entry", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const mod = path.join(dir, "mods", "acme-plugin")
|
||||
const marker = path.join(dir, "main-called.txt")
|
||||
await fs.mkdir(mod, { recursive: true })
|
||||
|
||||
await Bun.write(
|
||||
path.join(mod, "package.json"),
|
||||
JSON.stringify({
|
||||
name: "acme-plugin",
|
||||
type: "module",
|
||||
main: "./index.js",
|
||||
}),
|
||||
)
|
||||
await Bun.write(
|
||||
path.join(mod, "index.js"),
|
||||
`export default {
|
||||
id: "demo.main",
|
||||
tui: async () => {
|
||||
await Bun.write(${JSON.stringify(marker)}, "called")
|
||||
},
|
||||
}
|
||||
`,
|
||||
)
|
||||
|
||||
return { mod, marker, spec: "acme-plugin@1.0.0" }
|
||||
},
|
||||
})
|
||||
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [tmp.extra.spec],
|
||||
plugin_records: [
|
||||
{
|
||||
item: tmp.extra.spec,
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
const install = spyOn(BunProc, "install").mockResolvedValue(tmp.extra.mod)
|
||||
|
||||
try {
|
||||
await TuiPluginRuntime.init(createTuiPluginApi())
|
||||
await expect(fs.readFile(tmp.extra.marker, "utf8")).rejects.toThrow()
|
||||
expect(TuiPluginRuntime.list().some((item) => item.spec === tmp.extra.spec)).toBe(false)
|
||||
} finally {
|
||||
await TuiPluginRuntime.dispose()
|
||||
install.mockRestore()
|
||||
cwd.mockRestore()
|
||||
get.mockRestore()
|
||||
wait.mockRestore()
|
||||
delete process.env.OPENCODE_PLUGIN_META_FILE
|
||||
}
|
||||
})
|
||||
|
||||
test("does not use directory package main for tui entry", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const mod = path.join(dir, "mods", "dir-plugin")
|
||||
const spec = pathToFileURL(mod).href
|
||||
const marker = path.join(dir, "dir-main-called.txt")
|
||||
await fs.mkdir(mod, { recursive: true })
|
||||
|
||||
await Bun.write(
|
||||
path.join(mod, "package.json"),
|
||||
JSON.stringify({
|
||||
name: "dir-plugin",
|
||||
type: "module",
|
||||
main: "./main.js",
|
||||
}),
|
||||
)
|
||||
await Bun.write(
|
||||
path.join(mod, "main.js"),
|
||||
`export default {
|
||||
id: "demo.dir.main",
|
||||
tui: async () => {
|
||||
await Bun.write(${JSON.stringify(marker)}, "called")
|
||||
},
|
||||
}
|
||||
`,
|
||||
)
|
||||
|
||||
return { marker, spec }
|
||||
},
|
||||
})
|
||||
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [tmp.extra.spec],
|
||||
plugin_records: [
|
||||
{
|
||||
item: tmp.extra.spec,
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
|
||||
try {
|
||||
await TuiPluginRuntime.init(createTuiPluginApi())
|
||||
await expect(fs.readFile(tmp.extra.marker, "utf8")).rejects.toThrow()
|
||||
expect(TuiPluginRuntime.list().some((item) => item.spec === tmp.extra.spec)).toBe(false)
|
||||
} finally {
|
||||
await TuiPluginRuntime.dispose()
|
||||
cwd.mockRestore()
|
||||
get.mockRestore()
|
||||
wait.mockRestore()
|
||||
delete process.env.OPENCODE_PLUGIN_META_FILE
|
||||
}
|
||||
})
|
||||
|
||||
test("uses directory index fallback for tui when package.json is missing", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const mod = path.join(dir, "mods", "dir-index")
|
||||
const spec = pathToFileURL(mod).href
|
||||
const marker = path.join(dir, "dir-index-called.txt")
|
||||
await fs.mkdir(mod, { recursive: true })
|
||||
await Bun.write(
|
||||
path.join(mod, "index.ts"),
|
||||
`export default {
|
||||
id: "demo.dir.index",
|
||||
tui: async () => {
|
||||
await Bun.write(${JSON.stringify(marker)}, "called")
|
||||
},
|
||||
}
|
||||
`,
|
||||
)
|
||||
return { marker, spec }
|
||||
},
|
||||
})
|
||||
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [tmp.extra.spec],
|
||||
plugin_records: [
|
||||
{
|
||||
item: tmp.extra.spec,
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
|
||||
try {
|
||||
await TuiPluginRuntime.init(createTuiPluginApi())
|
||||
await expect(fs.readFile(tmp.extra.marker, "utf8")).resolves.toBe("called")
|
||||
expect(TuiPluginRuntime.list().find((item) => item.id === "demo.dir.index")?.active).toBe(true)
|
||||
} finally {
|
||||
await TuiPluginRuntime.dispose()
|
||||
cwd.mockRestore()
|
||||
get.mockRestore()
|
||||
wait.mockRestore()
|
||||
delete process.env.OPENCODE_PLUGIN_META_FILE
|
||||
}
|
||||
})
|
||||
|
||||
test("uses npm package name when tui plugin id is omitted", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const mod = path.join(dir, "mods", "acme-plugin")
|
||||
const marker = path.join(dir, "name-id-called.txt")
|
||||
await fs.mkdir(mod, { recursive: true })
|
||||
|
||||
await Bun.write(
|
||||
path.join(mod, "package.json"),
|
||||
JSON.stringify({
|
||||
name: "acme-plugin",
|
||||
type: "module",
|
||||
exports: { ".": "./index.js", "./tui": "./tui.js" },
|
||||
}),
|
||||
)
|
||||
await Bun.write(path.join(mod, "index.js"), "export default {}\n")
|
||||
await Bun.write(
|
||||
path.join(mod, "tui.js"),
|
||||
`export default {
|
||||
tui: async (_api, options) => {
|
||||
if (!options?.marker) return
|
||||
await Bun.write(options.marker, "called")
|
||||
},
|
||||
}
|
||||
`,
|
||||
)
|
||||
|
||||
return { mod, marker, spec: "acme-plugin@1.0.0" }
|
||||
},
|
||||
})
|
||||
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [[tmp.extra.spec, { marker: tmp.extra.marker }]],
|
||||
plugin_records: [
|
||||
{
|
||||
item: [tmp.extra.spec, { marker: tmp.extra.marker }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
const install = spyOn(BunProc, "install").mockResolvedValue(tmp.extra.mod)
|
||||
|
||||
try {
|
||||
await TuiPluginRuntime.init(createTuiPluginApi())
|
||||
await expect(fs.readFile(tmp.extra.marker, "utf8")).resolves.toBe("called")
|
||||
expect(TuiPluginRuntime.list().find((item) => item.spec === tmp.extra.spec)?.id).toBe("acme-plugin")
|
||||
} finally {
|
||||
await TuiPluginRuntime.dispose()
|
||||
install.mockRestore()
|
||||
cwd.mockRestore()
|
||||
get.mockRestore()
|
||||
wait.mockRestore()
|
||||
delete process.env.OPENCODE_PLUGIN_META_FILE
|
||||
}
|
||||
})
|
||||
|
||||
@@ -39,12 +39,13 @@ test("skips external tui plugins in pure mode", async () => {
|
||||
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin: [[tmp.extra.spec, { marker: tmp.extra.marker }]],
|
||||
plugin_meta: {
|
||||
[tmp.extra.spec]: {
|
||||
plugin_records: [
|
||||
{
|
||||
item: [tmp.extra.spec, { marker: tmp.extra.marker }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
|
||||
@@ -468,10 +468,18 @@ test("continues loading when a plugin is missing config metadata", async () => {
|
||||
[tmp.extra.goodSpec, { marker: tmp.extra.goodMarker }],
|
||||
tmp.extra.bareSpec,
|
||||
],
|
||||
plugin_meta: {
|
||||
[tmp.extra.goodSpec]: { scope: "local", source: path.join(tmp.path, "tui.json") },
|
||||
[tmp.extra.bareSpec]: { scope: "local", source: path.join(tmp.path, "tui.json") },
|
||||
},
|
||||
plugin_records: [
|
||||
{
|
||||
item: [tmp.extra.goodSpec, { marker: tmp.extra.goodMarker }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
{
|
||||
item: tmp.extra.bareSpec,
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
@@ -493,6 +501,84 @@ test("continues loading when a plugin is missing config metadata", async () => {
|
||||
}
|
||||
})
|
||||
|
||||
test("initializes external tui plugins in config order", async () => {
|
||||
const globalJson = path.join(Global.Path.config, "tui.json")
|
||||
const globalJsonc = path.join(Global.Path.config, "tui.jsonc")
|
||||
const backupJson = await Bun.file(globalJson)
|
||||
.text()
|
||||
.catch(() => undefined)
|
||||
const backupJsonc = await Bun.file(globalJsonc)
|
||||
.text()
|
||||
.catch(() => undefined)
|
||||
|
||||
await fs.rm(globalJson, { force: true }).catch(() => {})
|
||||
await fs.rm(globalJsonc, { force: true }).catch(() => {})
|
||||
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const a = path.join(dir, "order-a.ts")
|
||||
const b = path.join(dir, "order-b.ts")
|
||||
const aSpec = pathToFileURL(a).href
|
||||
const bSpec = pathToFileURL(b).href
|
||||
const marker = path.join(dir, "tui-order.txt")
|
||||
|
||||
await Bun.write(
|
||||
a,
|
||||
`import fs from "fs/promises"
|
||||
|
||||
export default {
|
||||
id: "demo.tui.order.a",
|
||||
tui: async () => {
|
||||
await fs.appendFile(${JSON.stringify(marker)}, "a-start\\n")
|
||||
await Bun.sleep(25)
|
||||
await fs.appendFile(${JSON.stringify(marker)}, "a-end\\n")
|
||||
},
|
||||
}
|
||||
`,
|
||||
)
|
||||
await Bun.write(
|
||||
b,
|
||||
`import fs from "fs/promises"
|
||||
|
||||
export default {
|
||||
id: "demo.tui.order.b",
|
||||
tui: async () => {
|
||||
await fs.appendFile(${JSON.stringify(marker)}, "b\\n")
|
||||
},
|
||||
}
|
||||
`,
|
||||
)
|
||||
await Bun.write(path.join(dir, "tui.json"), JSON.stringify({ plugin: [aSpec, bSpec] }, null, 2))
|
||||
|
||||
return { marker }
|
||||
},
|
||||
})
|
||||
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
|
||||
try {
|
||||
await TuiPluginRuntime.init(createTuiPluginApi())
|
||||
const lines = (await fs.readFile(tmp.extra.marker, "utf8")).trim().split("\n")
|
||||
expect(lines).toEqual(["a-start", "a-end", "b"])
|
||||
} finally {
|
||||
await TuiPluginRuntime.dispose()
|
||||
cwd.mockRestore()
|
||||
delete process.env.OPENCODE_PLUGIN_META_FILE
|
||||
|
||||
if (backupJson === undefined) {
|
||||
await fs.rm(globalJson, { force: true }).catch(() => {})
|
||||
} else {
|
||||
await Bun.write(globalJson, backupJson)
|
||||
}
|
||||
if (backupJsonc === undefined) {
|
||||
await fs.rm(globalJsonc, { force: true }).catch(() => {})
|
||||
} else {
|
||||
await Bun.write(globalJsonc, backupJsonc)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
describe("tui.plugin.loader", () => {
|
||||
let data: Data
|
||||
|
||||
|
||||
@@ -44,12 +44,13 @@ test("toggles plugin runtime state by exported id", async () => {
|
||||
plugin_enabled: {
|
||||
"demo.toggle": false,
|
||||
},
|
||||
plugin_meta: {
|
||||
[tmp.extra.spec]: {
|
||||
plugin_records: [
|
||||
{
|
||||
item: [tmp.extra.spec, { marker: tmp.extra.marker }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
@@ -121,12 +122,13 @@ test("kv plugin_enabled overrides tui config on startup", async () => {
|
||||
plugin_enabled: {
|
||||
"demo.startup": false,
|
||||
},
|
||||
plugin_meta: {
|
||||
[tmp.extra.spec]: {
|
||||
plugin_records: [
|
||||
{
|
||||
item: [tmp.extra.spec, { marker: tmp.extra.marker }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
},
|
||||
],
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
|
||||
|
||||
@@ -1822,6 +1822,22 @@ describe("resolvePluginSpec", () => {
|
||||
expect(await Config.resolvePluginSpec("@scope/pkg", file)).toBe("@scope/pkg")
|
||||
})
|
||||
|
||||
test("resolves windows-style relative plugin directory specs", async () => {
|
||||
if (process.platform !== "win32") return
|
||||
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const plugin = path.join(dir, "plugin")
|
||||
await fs.mkdir(plugin, { recursive: true })
|
||||
await Filesystem.write(path.join(plugin, "index.ts"), "export default {}")
|
||||
},
|
||||
})
|
||||
|
||||
const file = path.join(tmp.path, "opencode.json")
|
||||
const hit = await Config.resolvePluginSpec(".\\plugin", file)
|
||||
expect(Config.pluginSpecifier(hit)).toBe(pathToFileURL(path.join(tmp.path, "plugin", "index.ts")).href)
|
||||
})
|
||||
|
||||
test("resolves relative file plugin paths to file urls", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
@@ -1834,7 +1850,7 @@ describe("resolvePluginSpec", () => {
|
||||
expect(Config.pluginSpecifier(hit)).toBe(pathToFileURL(path.join(tmp.path, "plugin.ts")).href)
|
||||
})
|
||||
|
||||
test("resolves plugin directory paths to package main files", async () => {
|
||||
test("resolves plugin directory paths to directory urls", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const plugin = path.join(dir, "plugin")
|
||||
@@ -1848,6 +1864,20 @@ describe("resolvePluginSpec", () => {
|
||||
},
|
||||
})
|
||||
|
||||
const file = path.join(tmp.path, "opencode.json")
|
||||
const hit = await Config.resolvePluginSpec("./plugin", file)
|
||||
expect(Config.pluginSpecifier(hit)).toBe(pathToFileURL(path.join(tmp.path, "plugin")).href)
|
||||
})
|
||||
|
||||
test("resolves plugin directories without package.json to index.ts", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const plugin = path.join(dir, "plugin")
|
||||
await fs.mkdir(plugin, { recursive: true })
|
||||
await Filesystem.write(path.join(plugin, "index.ts"), "export default {}")
|
||||
},
|
||||
})
|
||||
|
||||
const file = path.join(tmp.path, "opencode.json")
|
||||
const hit = await Config.resolvePluginSpec("./plugin", file)
|
||||
expect(Config.pluginSpecifier(hit)).toBe(pathToFileURL(path.join(tmp.path, "plugin", "index.ts")).href)
|
||||
|
||||
@@ -476,12 +476,13 @@ test("loads managed tui config and gives it highest precedence", async () => {
|
||||
const config = await TuiConfig.get()
|
||||
expect(config.theme).toBe("managed-theme")
|
||||
expect(config.plugin).toEqual(["shared-plugin@2.0.0"])
|
||||
expect(config.plugin_meta).toEqual({
|
||||
"shared-plugin@2.0.0": {
|
||||
expect(config.plugin_records).toEqual([
|
||||
{
|
||||
item: "shared-plugin@2.0.0",
|
||||
scope: "global",
|
||||
source: path.join(managedConfigDir, "tui.json"),
|
||||
},
|
||||
})
|
||||
])
|
||||
},
|
||||
})
|
||||
})
|
||||
@@ -539,12 +540,13 @@ test("supports tuple plugin specs with options in tui.json", async () => {
|
||||
fn: async () => {
|
||||
const config = await TuiConfig.get()
|
||||
expect(config.plugin).toEqual([["acme-plugin@1.2.3", { enabled: true, label: "demo" }]])
|
||||
expect(config.plugin_meta).toEqual({
|
||||
"acme-plugin@1.2.3": {
|
||||
expect(config.plugin_records).toEqual([
|
||||
{
|
||||
item: ["acme-plugin@1.2.3", { enabled: true, label: "demo" }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
})
|
||||
])
|
||||
},
|
||||
})
|
||||
})
|
||||
@@ -578,16 +580,18 @@ test("deduplicates tuple plugin specs by name with higher precedence winning", a
|
||||
["acme-plugin@2.0.0", { source: "project" }],
|
||||
["second-plugin@3.0.0", { source: "project" }],
|
||||
])
|
||||
expect(config.plugin_meta).toEqual({
|
||||
"acme-plugin@2.0.0": {
|
||||
expect(config.plugin_records).toEqual([
|
||||
{
|
||||
item: ["acme-plugin@2.0.0", { source: "project" }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
"second-plugin@3.0.0": {
|
||||
{
|
||||
item: ["second-plugin@3.0.0", { source: "project" }],
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
})
|
||||
])
|
||||
},
|
||||
})
|
||||
})
|
||||
@@ -615,16 +619,18 @@ test("tracks global and local plugin metadata in merged tui config", async () =>
|
||||
fn: async () => {
|
||||
const config = await TuiConfig.get()
|
||||
expect(config.plugin).toEqual(["global-plugin@1.0.0", "local-plugin@2.0.0"])
|
||||
expect(config.plugin_meta).toEqual({
|
||||
"global-plugin@1.0.0": {
|
||||
expect(config.plugin_records).toEqual([
|
||||
{
|
||||
item: "global-plugin@1.0.0",
|
||||
scope: "global",
|
||||
source: path.join(Global.Path.config, "tui.json"),
|
||||
},
|
||||
"local-plugin@2.0.0": {
|
||||
{
|
||||
item: "local-plugin@2.0.0",
|
||||
scope: "local",
|
||||
source: path.join(tmp.path, "tui.json"),
|
||||
},
|
||||
})
|
||||
])
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
523
packages/opencode/test/effect/runner.test.ts
Normal file
523
packages/opencode/test/effect/runner.test.ts
Normal file
@@ -0,0 +1,523 @@
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import { Deferred, Effect, Exit, Fiber, Ref, Scope } from "effect"
|
||||
import { Runner } from "../../src/effect/runner"
|
||||
import { it } from "../lib/effect"
|
||||
|
||||
describe("Runner", () => {
|
||||
// --- ensureRunning semantics ---
|
||||
|
||||
it.effect(
|
||||
"ensureRunning starts work and returns result",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const result = yield* runner.ensureRunning(Effect.succeed("hello"))
|
||||
expect(result).toBe("hello")
|
||||
expect(runner.state._tag).toBe("Idle")
|
||||
expect(runner.busy).toBe(false)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"ensureRunning propagates work failures",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string, string>(s)
|
||||
const exit = yield* runner.ensureRunning(Effect.fail("boom")).pipe(Effect.exit)
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
expect(runner.state._tag).toBe("Idle")
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"concurrent callers share the same run",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const calls = yield* Ref.make(0)
|
||||
const work = Effect.gen(function* () {
|
||||
yield* Ref.update(calls, (n) => n + 1)
|
||||
yield* Effect.sleep("10 millis")
|
||||
return "shared"
|
||||
})
|
||||
|
||||
const [a, b] = yield* Effect.all([runner.ensureRunning(work), runner.ensureRunning(work)], {
|
||||
concurrency: "unbounded",
|
||||
})
|
||||
|
||||
expect(a).toBe("shared")
|
||||
expect(b).toBe("shared")
|
||||
expect(yield* Ref.get(calls)).toBe(1)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"concurrent callers all receive same error",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string, string>(s)
|
||||
const work = Effect.gen(function* () {
|
||||
yield* Effect.sleep("10 millis")
|
||||
return yield* Effect.fail("boom")
|
||||
})
|
||||
|
||||
const [a, b] = yield* Effect.all(
|
||||
[runner.ensureRunning(work).pipe(Effect.exit), runner.ensureRunning(work).pipe(Effect.exit)],
|
||||
{ concurrency: "unbounded" },
|
||||
)
|
||||
|
||||
expect(Exit.isFailure(a)).toBe(true)
|
||||
expect(Exit.isFailure(b)).toBe(true)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"ensureRunning can be called again after previous run completes",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
expect(yield* runner.ensureRunning(Effect.succeed("first"))).toBe("first")
|
||||
expect(yield* runner.ensureRunning(Effect.succeed("second"))).toBe("second")
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"second ensureRunning ignores new work if already running",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const ran = yield* Ref.make<string[]>([])
|
||||
|
||||
const first = Effect.gen(function* () {
|
||||
yield* Ref.update(ran, (a) => [...a, "first"])
|
||||
yield* Effect.sleep("50 millis")
|
||||
return "first-result"
|
||||
})
|
||||
const second = Effect.gen(function* () {
|
||||
yield* Ref.update(ran, (a) => [...a, "second"])
|
||||
return "second-result"
|
||||
})
|
||||
|
||||
const [a, b] = yield* Effect.all([runner.ensureRunning(first), runner.ensureRunning(second)], {
|
||||
concurrency: "unbounded",
|
||||
})
|
||||
|
||||
expect(a).toBe("first-result")
|
||||
expect(b).toBe("first-result")
|
||||
expect(yield* Ref.get(ran)).toEqual(["first"])
|
||||
}),
|
||||
)
|
||||
|
||||
// --- cancel semantics ---
|
||||
|
||||
it.effect(
|
||||
"cancel interrupts running work",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const fiber = yield* runner.ensureRunning(Effect.never.pipe(Effect.as("never"))).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
expect(runner.busy).toBe(true)
|
||||
expect(runner.state._tag).toBe("Running")
|
||||
|
||||
yield* runner.cancel
|
||||
expect(runner.busy).toBe(false)
|
||||
|
||||
const exit = yield* Fiber.await(fiber)
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"cancel on idle is a no-op",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
yield* runner.cancel
|
||||
expect(runner.busy).toBe(false)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"cancel with onInterrupt resolves callers gracefully",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s, { onInterrupt: Effect.succeed("fallback") })
|
||||
const fiber = yield* runner.ensureRunning(Effect.never.pipe(Effect.as("never"))).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
|
||||
yield* runner.cancel
|
||||
|
||||
const exit = yield* Fiber.await(fiber)
|
||||
expect(Exit.isSuccess(exit)).toBe(true)
|
||||
if (Exit.isSuccess(exit)) expect(exit.value).toBe("fallback")
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"cancel with queued callers resolves all",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s, { onInterrupt: Effect.succeed("fallback") })
|
||||
|
||||
const a = yield* runner.ensureRunning(Effect.never.pipe(Effect.as("x"))).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
const b = yield* runner.ensureRunning(Effect.succeed("y")).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
|
||||
yield* runner.cancel
|
||||
|
||||
const [exitA, exitB] = yield* Effect.all([Fiber.await(a), Fiber.await(b)])
|
||||
expect(Exit.isSuccess(exitA)).toBe(true)
|
||||
expect(Exit.isSuccess(exitB)).toBe(true)
|
||||
if (Exit.isSuccess(exitA)) expect(exitA.value).toBe("fallback")
|
||||
if (Exit.isSuccess(exitB)) expect(exitB.value).toBe("fallback")
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"work can be started after cancel",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const fiber = yield* runner.ensureRunning(Effect.never.pipe(Effect.as("x"))).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
yield* runner.cancel
|
||||
yield* Fiber.await(fiber)
|
||||
|
||||
const result = yield* runner.ensureRunning(Effect.succeed("after-cancel"))
|
||||
expect(result).toBe("after-cancel")
|
||||
}),
|
||||
)
|
||||
|
||||
test("cancel does not deadlock when replacement work starts before interrupted run exits", async () => {
|
||||
function defer() {
|
||||
let resolve!: () => void
|
||||
const promise = new Promise<void>((done) => {
|
||||
resolve = done
|
||||
})
|
||||
return { promise, resolve }
|
||||
}
|
||||
|
||||
function fail(ms: number, msg: string) {
|
||||
return new Promise<never>((_, reject) => {
|
||||
setTimeout(() => reject(new Error(msg)), ms)
|
||||
})
|
||||
}
|
||||
|
||||
const s = await Effect.runPromise(Scope.make())
|
||||
const hit = defer()
|
||||
const hold = defer()
|
||||
const done = defer()
|
||||
try {
|
||||
const runner = Runner.make<string>(s)
|
||||
const first = Effect.never.pipe(
|
||||
Effect.onInterrupt(() => Effect.sync(() => hit.resolve())),
|
||||
Effect.ensuring(Effect.promise(() => hold.promise)),
|
||||
Effect.as("first"),
|
||||
)
|
||||
|
||||
const a = Effect.runPromiseExit(runner.ensureRunning(first))
|
||||
await Bun.sleep(10)
|
||||
|
||||
const stop = Effect.runPromise(runner.cancel)
|
||||
await Promise.race([hit.promise, fail(250, "cancel did not interrupt running work")])
|
||||
|
||||
const b = Effect.runPromise(runner.ensureRunning(Effect.promise(() => done.promise).pipe(Effect.as("second"))))
|
||||
expect(runner.busy).toBe(true)
|
||||
|
||||
hold.resolve()
|
||||
await Promise.race([stop, fail(250, "cancel deadlocked while replacement run was active")])
|
||||
|
||||
expect(runner.busy).toBe(true)
|
||||
done.resolve()
|
||||
expect(await b).toBe("second")
|
||||
expect(runner.busy).toBe(false)
|
||||
|
||||
const exit = await a
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
} finally {
|
||||
hold.resolve()
|
||||
done.resolve()
|
||||
await Promise.race([Effect.runPromise(Scope.close(s, Exit.void)), fail(1000, "runner scope did not close")])
|
||||
}
|
||||
})
|
||||
|
||||
// --- shell semantics ---
|
||||
|
||||
it.effect(
|
||||
"shell runs exclusively",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const result = yield* runner.startShell((_signal) => Effect.succeed("shell-done"))
|
||||
expect(result).toBe("shell-done")
|
||||
expect(runner.busy).toBe(false)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"shell rejects when run is active",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const fiber = yield* runner.ensureRunning(Effect.never.pipe(Effect.as("x"))).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
|
||||
const exit = yield* runner.startShell((_s) => Effect.succeed("nope")).pipe(Effect.exit)
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
|
||||
yield* runner.cancel
|
||||
yield* Fiber.await(fiber)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"shell rejects when another shell is running",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const gate = yield* Deferred.make<void>()
|
||||
|
||||
const sh = yield* runner
|
||||
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("first")))
|
||||
.pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
|
||||
const exit = yield* runner.startShell((_s) => Effect.succeed("second")).pipe(Effect.exit)
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
|
||||
yield* Deferred.succeed(gate, undefined)
|
||||
yield* Fiber.await(sh)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"shell rejects via busy callback and cancel still stops the first shell",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s, {
|
||||
busy: () => {
|
||||
throw new Error("busy")
|
||||
},
|
||||
})
|
||||
|
||||
const sh = yield* runner
|
||||
.startShell((signal) =>
|
||||
Effect.promise(
|
||||
() =>
|
||||
new Promise<string>((resolve) => {
|
||||
signal.addEventListener("abort", () => resolve("aborted"), { once: true })
|
||||
}),
|
||||
),
|
||||
)
|
||||
.pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
|
||||
const exit = yield* runner.startShell((_s) => Effect.succeed("second")).pipe(Effect.exit)
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
|
||||
yield* runner.cancel
|
||||
const done = yield* Fiber.await(sh)
|
||||
expect(Exit.isSuccess(done)).toBe(true)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"cancel interrupts shell that ignores abort signal",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const gate = yield* Deferred.make<void>()
|
||||
|
||||
const sh = yield* runner
|
||||
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("ignored")))
|
||||
.pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
|
||||
const stop = yield* runner.cancel.pipe(Effect.forkChild)
|
||||
const stopExit = yield* Fiber.await(stop).pipe(Effect.timeout("250 millis"))
|
||||
expect(Exit.isSuccess(stopExit)).toBe(true)
|
||||
expect(runner.busy).toBe(false)
|
||||
|
||||
const shellExit = yield* Fiber.await(sh)
|
||||
expect(Exit.isFailure(shellExit)).toBe(true)
|
||||
|
||||
yield* Deferred.succeed(gate, undefined).pipe(Effect.ignore)
|
||||
}),
|
||||
)
|
||||
|
||||
// --- shell→run handoff ---
|
||||
|
||||
it.effect(
|
||||
"ensureRunning queues behind shell then runs after",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const gate = yield* Deferred.make<void>()
|
||||
|
||||
const sh = yield* runner
|
||||
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("shell-result")))
|
||||
.pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
expect(runner.state._tag).toBe("Shell")
|
||||
|
||||
const run = yield* runner.ensureRunning(Effect.succeed("run-result")).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
expect(runner.state._tag).toBe("ShellThenRun")
|
||||
|
||||
yield* Deferred.succeed(gate, undefined)
|
||||
yield* Fiber.await(sh)
|
||||
|
||||
const exit = yield* Fiber.await(run)
|
||||
expect(Exit.isSuccess(exit)).toBe(true)
|
||||
if (Exit.isSuccess(exit)) expect(exit.value).toBe("run-result")
|
||||
expect(runner.state._tag).toBe("Idle")
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"multiple ensureRunning callers share the queued run behind shell",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const calls = yield* Ref.make(0)
|
||||
const gate = yield* Deferred.make<void>()
|
||||
|
||||
const sh = yield* runner
|
||||
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("shell")))
|
||||
.pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
|
||||
const work = Effect.gen(function* () {
|
||||
yield* Ref.update(calls, (n) => n + 1)
|
||||
return "run"
|
||||
})
|
||||
const a = yield* runner.ensureRunning(work).pipe(Effect.forkChild)
|
||||
const b = yield* runner.ensureRunning(work).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
|
||||
yield* Deferred.succeed(gate, undefined)
|
||||
yield* Fiber.await(sh)
|
||||
|
||||
const [exitA, exitB] = yield* Effect.all([Fiber.await(a), Fiber.await(b)])
|
||||
expect(Exit.isSuccess(exitA)).toBe(true)
|
||||
expect(Exit.isSuccess(exitB)).toBe(true)
|
||||
expect(yield* Ref.get(calls)).toBe(1)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"cancel during shell_then_run cancels both",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const gate = yield* Deferred.make<void>()
|
||||
|
||||
const sh = yield* runner
|
||||
.startShell((signal) =>
|
||||
Effect.promise(
|
||||
() =>
|
||||
new Promise<string>((resolve) => {
|
||||
signal.addEventListener("abort", () => resolve("aborted"), { once: true })
|
||||
}),
|
||||
),
|
||||
)
|
||||
.pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
|
||||
const run = yield* runner.ensureRunning(Effect.succeed("y")).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
expect(runner.state._tag).toBe("ShellThenRun")
|
||||
|
||||
yield* runner.cancel
|
||||
expect(runner.busy).toBe(false)
|
||||
|
||||
yield* Fiber.await(sh)
|
||||
const exit = yield* Fiber.await(run)
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
}),
|
||||
)
|
||||
|
||||
// --- lifecycle callbacks ---
|
||||
|
||||
it.effect(
|
||||
"onIdle fires when returning to idle from running",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const count = yield* Ref.make(0)
|
||||
const runner = Runner.make<string>(s, {
|
||||
onIdle: Ref.update(count, (n) => n + 1),
|
||||
})
|
||||
yield* runner.ensureRunning(Effect.succeed("ok"))
|
||||
expect(yield* Ref.get(count)).toBe(1)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"onIdle fires on cancel",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const count = yield* Ref.make(0)
|
||||
const runner = Runner.make<string>(s, {
|
||||
onIdle: Ref.update(count, (n) => n + 1),
|
||||
})
|
||||
const fiber = yield* runner.ensureRunning(Effect.never.pipe(Effect.as("x"))).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
yield* runner.cancel
|
||||
yield* Fiber.await(fiber)
|
||||
expect(yield* Ref.get(count)).toBeGreaterThanOrEqual(1)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"onBusy fires when shell starts",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const count = yield* Ref.make(0)
|
||||
const runner = Runner.make<string>(s, {
|
||||
onBusy: Ref.update(count, (n) => n + 1),
|
||||
})
|
||||
yield* runner.startShell((_signal) => Effect.succeed("done"))
|
||||
expect(yield* Ref.get(count)).toBe(1)
|
||||
}),
|
||||
)
|
||||
|
||||
// --- busy flag ---
|
||||
|
||||
it.effect(
|
||||
"busy is true during run",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const gate = yield* Deferred.make<void>()
|
||||
|
||||
const fiber = yield* runner.ensureRunning(Deferred.await(gate).pipe(Effect.as("ok"))).pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
expect(runner.busy).toBe(true)
|
||||
|
||||
yield* Deferred.succeed(gate, undefined)
|
||||
yield* Fiber.await(fiber)
|
||||
expect(runner.busy).toBe(false)
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect(
|
||||
"busy is true during shell",
|
||||
Effect.gen(function* () {
|
||||
const s = yield* Scope.Scope
|
||||
const runner = Runner.make<string>(s)
|
||||
const gate = yield* Deferred.make<void>()
|
||||
|
||||
const fiber = yield* runner
|
||||
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("ok")))
|
||||
.pipe(Effect.forkChild)
|
||||
yield* Effect.sleep("10 millis")
|
||||
expect(runner.busy).toBe(true)
|
||||
|
||||
yield* Deferred.succeed(gate, undefined)
|
||||
yield* Fiber.await(fiber)
|
||||
expect(runner.busy).toBe(false)
|
||||
}),
|
||||
)
|
||||
})
|
||||
@@ -6,21 +6,14 @@ type PluginSpec = string | [string, Record<string, unknown>]
|
||||
|
||||
export function mockTuiRuntime(dir: string, plugin: PluginSpec[]) {
|
||||
process.env.OPENCODE_PLUGIN_META_FILE = path.join(dir, "plugin-meta.json")
|
||||
const meta = Object.fromEntries(
|
||||
plugin.map((item) => {
|
||||
const spec = Array.isArray(item) ? item[0] : item
|
||||
return [
|
||||
spec,
|
||||
{
|
||||
scope: "local" as const,
|
||||
source: path.join(dir, "tui.json"),
|
||||
},
|
||||
]
|
||||
}),
|
||||
)
|
||||
const plugin_records = plugin.map((item) => ({
|
||||
item,
|
||||
scope: "local" as const,
|
||||
source: path.join(dir, "tui.json"),
|
||||
}))
|
||||
const get = spyOn(TuiConfig, "get").mockResolvedValue({
|
||||
plugin,
|
||||
plugin_meta: meta,
|
||||
plugin_records,
|
||||
})
|
||||
const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
|
||||
const cwd = spyOn(process, "cwd").mockImplementation(() => dir)
|
||||
|
||||
@@ -331,6 +331,57 @@ describe("plugin.loader.shared", () => {
|
||||
}
|
||||
})
|
||||
|
||||
test("does not use npm package exports dot for server entry", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const mod = path.join(dir, "mods", "acme-plugin")
|
||||
const mark = path.join(dir, "dot-server.txt")
|
||||
await fs.mkdir(mod, { recursive: true })
|
||||
|
||||
await Bun.write(
|
||||
path.join(mod, "package.json"),
|
||||
JSON.stringify({
|
||||
name: "acme-plugin",
|
||||
type: "module",
|
||||
exports: { ".": "./index.js" },
|
||||
}),
|
||||
)
|
||||
await Bun.write(
|
||||
path.join(mod, "index.js"),
|
||||
[
|
||||
"export default {",
|
||||
' id: "demo.dot.server",',
|
||||
" server: async () => {",
|
||||
` await Bun.write(${JSON.stringify(mark)}, "called")`,
|
||||
" return {}",
|
||||
" },",
|
||||
"}",
|
||||
"",
|
||||
].join("\n"),
|
||||
)
|
||||
|
||||
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ plugin: ["acme-plugin@1.0.0"] }, null, 2))
|
||||
|
||||
return { mod, mark }
|
||||
},
|
||||
})
|
||||
|
||||
const install = spyOn(BunProc, "install").mockResolvedValue(tmp.extra.mod)
|
||||
|
||||
try {
|
||||
const errors = await errs(tmp.path)
|
||||
const called = await Bun.file(tmp.extra.mark)
|
||||
.text()
|
||||
.then(() => true)
|
||||
.catch(() => false)
|
||||
|
||||
expect(called).toBe(false)
|
||||
expect(errors.some((x) => x.includes('exports["./server"]') && x.includes("package.json main"))).toBe(true)
|
||||
} finally {
|
||||
install.mockRestore()
|
||||
}
|
||||
})
|
||||
|
||||
test("rejects npm server export that resolves outside plugin directory", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
@@ -576,6 +627,55 @@ describe("plugin.loader.shared", () => {
|
||||
})
|
||||
})
|
||||
|
||||
test("initializes server plugins in config order", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
const a = path.join(dir, "a-plugin.ts")
|
||||
const b = path.join(dir, "b-plugin.ts")
|
||||
const marker = path.join(dir, "server-order.txt")
|
||||
const aSpec = pathToFileURL(a).href
|
||||
const bSpec = pathToFileURL(b).href
|
||||
|
||||
await Bun.write(
|
||||
a,
|
||||
`import fs from "fs/promises"
|
||||
|
||||
export default {
|
||||
id: "demo.order.a",
|
||||
server: async () => {
|
||||
await fs.appendFile(${JSON.stringify(marker)}, "a-start\\n")
|
||||
await Bun.sleep(25)
|
||||
await fs.appendFile(${JSON.stringify(marker)}, "a-end\\n")
|
||||
return {}
|
||||
},
|
||||
}
|
||||
`,
|
||||
)
|
||||
await Bun.write(
|
||||
b,
|
||||
`import fs from "fs/promises"
|
||||
|
||||
export default {
|
||||
id: "demo.order.b",
|
||||
server: async () => {
|
||||
await fs.appendFile(${JSON.stringify(marker)}, "b\\n")
|
||||
return {}
|
||||
},
|
||||
}
|
||||
`,
|
||||
)
|
||||
|
||||
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ plugin: [aSpec, bSpec] }, null, 2))
|
||||
|
||||
return { marker }
|
||||
},
|
||||
})
|
||||
|
||||
await load(tmp.path)
|
||||
const lines = (await fs.readFile(tmp.extra.marker, "utf8")).trim().split("\n")
|
||||
expect(lines).toEqual(["a-start", "a-end", "b"])
|
||||
})
|
||||
|
||||
test("skips external plugins in pure mode", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
|
||||
83
packages/opencode/test/server/session-actions.test.ts
Normal file
83
packages/opencode/test/server/session-actions.test.ts
Normal file
@@ -0,0 +1,83 @@
|
||||
import { afterEach, describe, expect, mock, spyOn, test } from "bun:test"
|
||||
import { Instance } from "../../src/project/instance"
|
||||
import { Server } from "../../src/server/server"
|
||||
import { Session } from "../../src/session"
|
||||
import { ModelID, ProviderID } from "../../src/provider/schema"
|
||||
import { MessageID, PartID, type SessionID } from "../../src/session/schema"
|
||||
import { SessionPrompt } from "../../src/session/prompt"
|
||||
import { Log } from "../../src/util/log"
|
||||
import { tmpdir } from "../fixture/fixture"
|
||||
|
||||
Log.init({ print: false })
|
||||
|
||||
afterEach(async () => {
|
||||
mock.restore()
|
||||
await Instance.disposeAll()
|
||||
})
|
||||
|
||||
async function user(sessionID: SessionID, text: string) {
|
||||
const msg = await Session.updateMessage({
|
||||
id: MessageID.ascending(),
|
||||
role: "user",
|
||||
sessionID,
|
||||
agent: "build",
|
||||
model: { providerID: ProviderID.make("test"), modelID: ModelID.make("test") },
|
||||
time: { created: Date.now() },
|
||||
})
|
||||
await Session.updatePart({
|
||||
id: PartID.ascending(),
|
||||
sessionID,
|
||||
messageID: msg.id,
|
||||
type: "text",
|
||||
text,
|
||||
})
|
||||
return msg
|
||||
}
|
||||
|
||||
describe("session action routes", () => {
|
||||
test("abort route calls SessionPrompt.cancel", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
const cancel = spyOn(SessionPrompt, "cancel").mockResolvedValue()
|
||||
const app = Server.Default()
|
||||
|
||||
const res = await app.request(`/session/${session.id}/abort`, {
|
||||
method: "POST",
|
||||
})
|
||||
|
||||
expect(res.status).toBe(200)
|
||||
expect(await res.json()).toBe(true)
|
||||
expect(cancel).toHaveBeenCalledWith(session.id)
|
||||
|
||||
await Session.remove(session.id)
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("delete message route returns 400 when session is busy", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
const msg = await user(session.id, "hello")
|
||||
const busy = spyOn(SessionPrompt, "assertNotBusy").mockRejectedValue(new Session.BusyError(session.id))
|
||||
const remove = spyOn(Session, "removeMessage").mockResolvedValue(msg.id)
|
||||
const app = Server.Default()
|
||||
|
||||
const res = await app.request(`/session/${session.id}/message/${msg.id}`, {
|
||||
method: "DELETE",
|
||||
})
|
||||
|
||||
expect(res.status).toBe(400)
|
||||
expect(busy).toHaveBeenCalledWith(session.id)
|
||||
expect(remove).not.toHaveBeenCalled()
|
||||
|
||||
await Session.remove(session.id)
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,26 +1,30 @@
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import path from "path"
|
||||
import { afterEach, describe, expect, test } from "bun:test"
|
||||
import { Instance } from "../../src/project/instance"
|
||||
import { Session } from "../../src/session"
|
||||
import { Log } from "../../src/util/log"
|
||||
import { tmpdir } from "../fixture/fixture"
|
||||
|
||||
const projectRoot = path.join(__dirname, "../..")
|
||||
Log.init({ print: false })
|
||||
|
||||
afterEach(async () => {
|
||||
await Instance.disposeAll()
|
||||
})
|
||||
|
||||
describe("Session.list", () => {
|
||||
test("filters by directory", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: projectRoot,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const first = await Session.create({})
|
||||
|
||||
const otherDir = path.join(projectRoot, "..", "__session_list_other")
|
||||
await using other = await tmpdir({ git: true })
|
||||
const second = await Instance.provide({
|
||||
directory: otherDir,
|
||||
directory: other.path,
|
||||
fn: async () => Session.create({}),
|
||||
})
|
||||
|
||||
const sessions = [...Session.list({ directory: projectRoot })]
|
||||
const sessions = [...Session.list({ directory: tmp.path })]
|
||||
const ids = sessions.map((s) => s.id)
|
||||
|
||||
expect(ids).toContain(first.id)
|
||||
@@ -30,8 +34,9 @@ describe("Session.list", () => {
|
||||
})
|
||||
|
||||
test("filters root sessions", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: projectRoot,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const root = await Session.create({ title: "root-session" })
|
||||
const child = await Session.create({ title: "child-session", parentID: root.id })
|
||||
@@ -46,8 +51,9 @@ describe("Session.list", () => {
|
||||
})
|
||||
|
||||
test("filters by start time", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: projectRoot,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({ title: "new-session" })
|
||||
const futureStart = Date.now() + 86400000
|
||||
@@ -59,8 +65,9 @@ describe("Session.list", () => {
|
||||
})
|
||||
|
||||
test("filters by search term", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: projectRoot,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
await Session.create({ title: "unique-search-term-abc" })
|
||||
await Session.create({ title: "other-session-xyz" })
|
||||
@@ -75,8 +82,9 @@ describe("Session.list", () => {
|
||||
})
|
||||
|
||||
test("respects limit parameter", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: projectRoot,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
await Session.create({ title: "session-1" })
|
||||
await Session.create({ title: "session-2" })
|
||||
|
||||
@@ -1,15 +1,18 @@
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import path from "path"
|
||||
import { afterEach, describe, expect, test } from "bun:test"
|
||||
import { Instance } from "../../src/project/instance"
|
||||
import { Server } from "../../src/server/server"
|
||||
import { Session } from "../../src/session"
|
||||
import { MessageV2 } from "../../src/session/message-v2"
|
||||
import { MessageID, PartID, type SessionID } from "../../src/session/schema"
|
||||
import { Log } from "../../src/util/log"
|
||||
import { tmpdir } from "../fixture/fixture"
|
||||
|
||||
const root = path.join(__dirname, "../..")
|
||||
Log.init({ print: false })
|
||||
|
||||
afterEach(async () => {
|
||||
await Instance.disposeAll()
|
||||
})
|
||||
|
||||
async function fill(sessionID: SessionID, count: number, time = (i: number) => Date.now() + i) {
|
||||
const ids = [] as MessageID[]
|
||||
for (let i = 0; i < count; i++) {
|
||||
@@ -38,8 +41,9 @@ async function fill(sessionID: SessionID, count: number, time = (i: number) => D
|
||||
|
||||
describe("session messages endpoint", () => {
|
||||
test("returns cursor headers for older pages", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: root,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
const ids = await fill(session.id, 5)
|
||||
@@ -64,8 +68,9 @@ describe("session messages endpoint", () => {
|
||||
})
|
||||
|
||||
test("keeps full-history responses when limit is omitted", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: root,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
const ids = await fill(session.id, 3)
|
||||
@@ -82,8 +87,9 @@ describe("session messages endpoint", () => {
|
||||
})
|
||||
|
||||
test("rejects invalid cursors and missing sessions", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: root,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
const app = Server.Default()
|
||||
@@ -100,8 +106,9 @@ describe("session messages endpoint", () => {
|
||||
})
|
||||
|
||||
test("does not truncate large legacy limit requests", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: root,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
await fill(session.id, 520)
|
||||
@@ -120,7 +127,7 @@ describe("session messages endpoint", () => {
|
||||
|
||||
describe("session.prompt_async error handling", () => {
|
||||
test("prompt_async route has error handler for detached prompt call", async () => {
|
||||
const src = await Bun.file(path.join(import.meta.dir, "../../src/server/routes/session.ts")).text()
|
||||
const src = await Bun.file(new URL("../../src/server/routes/session.ts", import.meta.url)).text()
|
||||
const start = src.indexOf('"/:sessionID/prompt_async"')
|
||||
const end = src.indexOf('"/:sessionID/command"', start)
|
||||
expect(start).toBeGreaterThan(-1)
|
||||
|
||||
@@ -1,17 +1,21 @@
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import path from "path"
|
||||
import { afterEach, describe, expect, test } from "bun:test"
|
||||
import { Session } from "../../src/session"
|
||||
import { Log } from "../../src/util/log"
|
||||
import { Instance } from "../../src/project/instance"
|
||||
import { Server } from "../../src/server/server"
|
||||
import { tmpdir } from "../fixture/fixture"
|
||||
|
||||
const projectRoot = path.join(__dirname, "../..")
|
||||
Log.init({ print: false })
|
||||
|
||||
afterEach(async () => {
|
||||
await Instance.disposeAll()
|
||||
})
|
||||
|
||||
describe("tui.selectSession endpoint", () => {
|
||||
test("should return 200 when called with valid session", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: projectRoot,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
// #given
|
||||
const session = await Session.create({})
|
||||
@@ -35,8 +39,9 @@ describe("tui.selectSession endpoint", () => {
|
||||
})
|
||||
|
||||
test("should return 404 when session does not exist", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: projectRoot,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
// #given
|
||||
const nonExistentSessionID = "ses_nonexistent123"
|
||||
@@ -56,8 +61,9 @@ describe("tui.selectSession endpoint", () => {
|
||||
})
|
||||
|
||||
test("should return 400 when session ID format is invalid", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: projectRoot,
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
// #given
|
||||
const invalidSessionID = "invalid_session_id"
|
||||
|
||||
@@ -129,7 +129,7 @@ async function tool(sessionID: SessionID, messageID: MessageID, tool: string, ou
|
||||
}
|
||||
|
||||
function fake(
|
||||
input: Parameters<(typeof SessionProcessorModule.SessionProcessor)["create"]>[0],
|
||||
input: Parameters<SessionProcessorModule.SessionProcessor.Interface["create"]>[0],
|
||||
result: "continue" | "compact",
|
||||
) {
|
||||
const msg = input.assistantMessage
|
||||
@@ -509,6 +509,36 @@ describe("session.compaction.prune", () => {
|
||||
})
|
||||
|
||||
describe("session.compaction.process", () => {
|
||||
test("throws when parent is not a user message", async () => {
|
||||
await using tmp = await tmpdir()
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
const msg = await user(session.id, "hello")
|
||||
const reply = await assistant(session.id, msg.id, tmp.path)
|
||||
const rt = runtime("continue")
|
||||
try {
|
||||
const msgs = await Session.messages({ sessionID: session.id })
|
||||
await expect(
|
||||
rt.runPromise(
|
||||
SessionCompaction.Service.use((svc) =>
|
||||
svc.process({
|
||||
parentID: reply.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
auto: false,
|
||||
}),
|
||||
),
|
||||
),
|
||||
).rejects.toThrow(`Compaction parent must be a user message: ${reply.id}`)
|
||||
} finally {
|
||||
await rt.dispose()
|
||||
}
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("publishes compacted event on continue", async () => {
|
||||
await using tmp = await tmpdir()
|
||||
await Instance.provide({
|
||||
@@ -540,7 +570,6 @@ describe("session.compaction.process", () => {
|
||||
parentID: msg.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
abort: new AbortController().signal,
|
||||
auto: false,
|
||||
}),
|
||||
),
|
||||
@@ -580,7 +609,6 @@ describe("session.compaction.process", () => {
|
||||
parentID: msg.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
abort: new AbortController().signal,
|
||||
auto: false,
|
||||
}),
|
||||
),
|
||||
@@ -621,7 +649,6 @@ describe("session.compaction.process", () => {
|
||||
parentID: msg.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
abort: new AbortController().signal,
|
||||
auto: true,
|
||||
}),
|
||||
),
|
||||
@@ -675,7 +702,6 @@ describe("session.compaction.process", () => {
|
||||
parentID: msg.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
abort: new AbortController().signal,
|
||||
auto: true,
|
||||
overflow: true,
|
||||
}),
|
||||
@@ -717,7 +743,6 @@ describe("session.compaction.process", () => {
|
||||
parentID: msg.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
abort: new AbortController().signal,
|
||||
auto: true,
|
||||
overflow: true,
|
||||
}),
|
||||
@@ -792,7 +817,6 @@ describe("session.compaction.process", () => {
|
||||
parentID: msg.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
abort: abort.signal,
|
||||
auto: false,
|
||||
}),
|
||||
),
|
||||
@@ -858,7 +882,6 @@ describe("session.compaction.process", () => {
|
||||
parentID: msg.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
abort: abort.signal,
|
||||
auto: false,
|
||||
}),
|
||||
),
|
||||
@@ -892,6 +915,91 @@ describe("session.compaction.process", () => {
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("does not allow tool calls while generating the summary", async () => {
|
||||
const stub = llm()
|
||||
stub.push(
|
||||
Stream.make(
|
||||
{ type: "start" } satisfies LLM.Event,
|
||||
{ type: "tool-input-start", id: "call-1", toolName: "_noop" } satisfies LLM.Event,
|
||||
{ type: "tool-call", toolCallId: "call-1", toolName: "_noop", input: {} } satisfies LLM.Event,
|
||||
{
|
||||
type: "finish-step",
|
||||
finishReason: "tool-calls",
|
||||
rawFinishReason: "tool_calls",
|
||||
response: { id: "res", modelId: "test-model", timestamp: new Date() },
|
||||
providerMetadata: undefined,
|
||||
usage: {
|
||||
inputTokens: 1,
|
||||
outputTokens: 1,
|
||||
totalTokens: 2,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
} satisfies LLM.Event,
|
||||
{
|
||||
type: "finish",
|
||||
finishReason: "tool-calls",
|
||||
rawFinishReason: "tool_calls",
|
||||
totalUsage: {
|
||||
inputTokens: 1,
|
||||
outputTokens: 1,
|
||||
totalTokens: 2,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
} satisfies LLM.Event,
|
||||
),
|
||||
)
|
||||
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
spyOn(ProviderModule.Provider, "getModel").mockResolvedValue(createModel({ context: 100_000, output: 32_000 }))
|
||||
|
||||
const session = await Session.create({})
|
||||
const msg = await user(session.id, "hello")
|
||||
const rt = liveRuntime(stub.layer)
|
||||
try {
|
||||
const msgs = await Session.messages({ sessionID: session.id })
|
||||
await rt.runPromise(
|
||||
SessionCompaction.Service.use((svc) =>
|
||||
svc.process({
|
||||
parentID: msg.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
auto: false,
|
||||
}),
|
||||
),
|
||||
)
|
||||
|
||||
const summary = (await Session.messages({ sessionID: session.id })).find(
|
||||
(item) => item.info.role === "assistant" && item.info.summary,
|
||||
)
|
||||
|
||||
expect(summary?.info.role).toBe("assistant")
|
||||
expect(summary?.parts.some((part) => part.type === "tool")).toBe(false)
|
||||
} finally {
|
||||
await rt.dispose()
|
||||
}
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("util.token.estimate", () => {
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import { afterAll, beforeAll, beforeEach, describe, expect, test } from "bun:test"
|
||||
import path from "path"
|
||||
import { tool, type ModelMessage } from "ai"
|
||||
import { Cause, Exit, Stream } from "effect"
|
||||
import z from "zod"
|
||||
import { makeRuntime } from "../../src/effect/run-service"
|
||||
import { LLM } from "../../src/session/llm"
|
||||
import { Instance } from "../../src/project/instance"
|
||||
import { Provider } from "../../src/provider/provider"
|
||||
@@ -109,7 +111,11 @@ type Capture = {
|
||||
|
||||
const state = {
|
||||
server: null as ReturnType<typeof Bun.serve> | null,
|
||||
queue: [] as Array<{ path: string; response: Response; resolve: (value: Capture) => void }>,
|
||||
queue: [] as Array<{
|
||||
path: string
|
||||
response: Response | ((req: Request, capture: Capture) => Response)
|
||||
resolve: (value: Capture) => void
|
||||
}>,
|
||||
}
|
||||
|
||||
function deferred<T>() {
|
||||
@@ -126,6 +132,58 @@ function waitRequest(pathname: string, response: Response) {
|
||||
return pending.promise
|
||||
}
|
||||
|
||||
function timeout(ms: number) {
|
||||
return new Promise<never>((_, reject) => {
|
||||
setTimeout(() => reject(new Error(`timed out after ${ms}ms`)), ms)
|
||||
})
|
||||
}
|
||||
|
||||
function waitStreamingRequest(pathname: string) {
|
||||
const request = deferred<Capture>()
|
||||
const requestAborted = deferred<void>()
|
||||
const responseCanceled = deferred<void>()
|
||||
const encoder = new TextEncoder()
|
||||
|
||||
state.queue.push({
|
||||
path: pathname,
|
||||
resolve: request.resolve,
|
||||
response(req: Request) {
|
||||
req.signal.addEventListener("abort", () => requestAborted.resolve(), { once: true })
|
||||
|
||||
return new Response(
|
||||
new ReadableStream<Uint8Array>({
|
||||
start(controller) {
|
||||
controller.enqueue(
|
||||
encoder.encode(
|
||||
[
|
||||
`data: ${JSON.stringify({
|
||||
id: "chatcmpl-abort",
|
||||
object: "chat.completion.chunk",
|
||||
choices: [{ delta: { role: "assistant" } }],
|
||||
})}`,
|
||||
].join("\n\n") + "\n\n",
|
||||
),
|
||||
)
|
||||
},
|
||||
cancel() {
|
||||
responseCanceled.resolve()
|
||||
},
|
||||
}),
|
||||
{
|
||||
status: 200,
|
||||
headers: { "Content-Type": "text/event-stream" },
|
||||
},
|
||||
)
|
||||
},
|
||||
})
|
||||
|
||||
return {
|
||||
request: request.promise,
|
||||
requestAborted: requestAborted.promise,
|
||||
responseCanceled: responseCanceled.promise,
|
||||
}
|
||||
}
|
||||
|
||||
beforeAll(() => {
|
||||
state.server = Bun.serve({
|
||||
port: 0,
|
||||
@@ -143,7 +201,9 @@ beforeAll(() => {
|
||||
return new Response("not found", { status: 404 })
|
||||
}
|
||||
|
||||
return next.response
|
||||
return typeof next.response === "function"
|
||||
? next.response(req, { url, headers: req.headers, body })
|
||||
: next.response
|
||||
},
|
||||
})
|
||||
})
|
||||
@@ -325,6 +385,162 @@ describe("session.llm.stream", () => {
|
||||
})
|
||||
})
|
||||
|
||||
test("raw stream abort signal cancels provider response body promptly", async () => {
|
||||
const server = state.server
|
||||
if (!server) throw new Error("Server not initialized")
|
||||
|
||||
const providerID = "alibaba"
|
||||
const modelID = "qwen-plus"
|
||||
const fixture = await loadFixture(providerID, modelID)
|
||||
const model = fixture.model
|
||||
const pending = waitStreamingRequest("/chat/completions")
|
||||
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
enabled_providers: [providerID],
|
||||
provider: {
|
||||
[providerID]: {
|
||||
options: {
|
||||
apiKey: "test-key",
|
||||
baseURL: `${server.url.origin}/v1`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
|
||||
const sessionID = SessionID.make("session-test-raw-abort")
|
||||
const agent = {
|
||||
name: "test",
|
||||
mode: "primary",
|
||||
options: {},
|
||||
permission: [{ permission: "*", pattern: "*", action: "allow" }],
|
||||
} satisfies Agent.Info
|
||||
const user = {
|
||||
id: MessageID.make("user-raw-abort"),
|
||||
sessionID,
|
||||
role: "user",
|
||||
time: { created: Date.now() },
|
||||
agent: agent.name,
|
||||
model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
|
||||
} satisfies MessageV2.User
|
||||
|
||||
const ctrl = new AbortController()
|
||||
const result = await LLM.stream({
|
||||
user,
|
||||
sessionID,
|
||||
model: resolved,
|
||||
agent,
|
||||
system: ["You are a helpful assistant."],
|
||||
abort: ctrl.signal,
|
||||
messages: [{ role: "user", content: "Hello" }],
|
||||
tools: {},
|
||||
})
|
||||
|
||||
const iter = result.fullStream[Symbol.asyncIterator]()
|
||||
await pending.request
|
||||
await iter.next()
|
||||
ctrl.abort()
|
||||
|
||||
await Promise.race([pending.responseCanceled, timeout(500)])
|
||||
await Promise.race([pending.requestAborted, timeout(500)]).catch(() => undefined)
|
||||
await iter.return?.()
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("service stream cancellation cancels provider response body promptly", async () => {
|
||||
const server = state.server
|
||||
if (!server) throw new Error("Server not initialized")
|
||||
|
||||
const providerID = "alibaba"
|
||||
const modelID = "qwen-plus"
|
||||
const fixture = await loadFixture(providerID, modelID)
|
||||
const model = fixture.model
|
||||
const pending = waitStreamingRequest("/chat/completions")
|
||||
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
enabled_providers: [providerID],
|
||||
provider: {
|
||||
[providerID]: {
|
||||
options: {
|
||||
apiKey: "test-key",
|
||||
baseURL: `${server.url.origin}/v1`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
|
||||
const sessionID = SessionID.make("session-test-service-abort")
|
||||
const agent = {
|
||||
name: "test",
|
||||
mode: "primary",
|
||||
options: {},
|
||||
permission: [{ permission: "*", pattern: "*", action: "allow" }],
|
||||
} satisfies Agent.Info
|
||||
const user = {
|
||||
id: MessageID.make("user-service-abort"),
|
||||
sessionID,
|
||||
role: "user",
|
||||
time: { created: Date.now() },
|
||||
agent: agent.name,
|
||||
model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
|
||||
} satisfies MessageV2.User
|
||||
|
||||
const ctrl = new AbortController()
|
||||
const { runPromiseExit } = makeRuntime(LLM.Service, LLM.defaultLayer)
|
||||
const run = runPromiseExit(
|
||||
(svc) =>
|
||||
svc
|
||||
.stream({
|
||||
user,
|
||||
sessionID,
|
||||
model: resolved,
|
||||
agent,
|
||||
system: ["You are a helpful assistant."],
|
||||
messages: [{ role: "user", content: "Hello" }],
|
||||
tools: {},
|
||||
})
|
||||
.pipe(Stream.runDrain),
|
||||
{ signal: ctrl.signal },
|
||||
)
|
||||
|
||||
await pending.request
|
||||
ctrl.abort()
|
||||
|
||||
await Promise.race([pending.responseCanceled, timeout(500)])
|
||||
const exit = await run
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
if (Exit.isFailure(exit)) {
|
||||
expect(Cause.hasInterrupts(exit.cause)).toBe(true)
|
||||
}
|
||||
await Promise.race([pending.requestAborted, timeout(500)]).catch(() => undefined)
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("keeps tools enabled by prompt permissions", async () => {
|
||||
const server = state.server
|
||||
if (!server) {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { NodeFileSystem } from "@effect/platform-node"
|
||||
import { expect } from "bun:test"
|
||||
import { APICallError } from "ai"
|
||||
import { Effect, Layer, ServiceMap } from "effect"
|
||||
import { Cause, Effect, Exit, Fiber, Layer, ServiceMap } from "effect"
|
||||
import * as Stream from "effect/Stream"
|
||||
import path from "path"
|
||||
import type { Agent } from "../../src/agent/agent"
|
||||
@@ -10,7 +10,6 @@ import { Bus } from "../../src/bus"
|
||||
import { Config } from "../../src/config/config"
|
||||
import { Permission } from "../../src/permission"
|
||||
import { Plugin } from "../../src/plugin"
|
||||
import { Instance } from "../../src/project/instance"
|
||||
import type { Provider } from "../../src/provider/provider"
|
||||
import { ModelID, ProviderID } from "../../src/provider/schema"
|
||||
import { Session } from "../../src/session"
|
||||
@@ -120,21 +119,8 @@ function fail<E>(err: E, ...items: LLM.Event[]) {
|
||||
return stream(...items).pipe(Stream.concat(Stream.fail(err)))
|
||||
}
|
||||
|
||||
function wait(abort: AbortSignal) {
|
||||
return Effect.promise(
|
||||
() =>
|
||||
new Promise<void>((done) => {
|
||||
abort.addEventListener("abort", () => done(), { once: true })
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
function hang(input: LLM.StreamInput, ...items: LLM.Event[]) {
|
||||
return stream(...items).pipe(
|
||||
Stream.concat(
|
||||
Stream.unwrap(wait(input.abort).pipe(Effect.as(Stream.fail(new DOMException("Aborted", "AbortError"))))),
|
||||
),
|
||||
)
|
||||
function hang(_input: LLM.StreamInput, ...items: LLM.Event[]) {
|
||||
return stream(...items).pipe(Stream.concat(Stream.fromEffect(Effect.never)))
|
||||
}
|
||||
|
||||
function model(context: number): Provider.Model {
|
||||
@@ -291,13 +277,11 @@ it.effect("session.processor effect tests capture llm input cleanly", () => {
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "hi")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const abort = new AbortController()
|
||||
const mdl = model(100)
|
||||
const handle = yield* processors.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
abort: abort.signal,
|
||||
})
|
||||
|
||||
const input = {
|
||||
@@ -313,7 +297,6 @@ it.effect("session.processor effect tests capture llm input cleanly", () => {
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
abort: abort.signal,
|
||||
messages: [{ role: "user", content: "hi" }],
|
||||
tools: {},
|
||||
} satisfies LLM.StreamInput
|
||||
@@ -359,13 +342,11 @@ it.effect("session.processor effect tests stop after token overflow requests com
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "compact")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const abort = new AbortController()
|
||||
const mdl = model(20)
|
||||
const handle = yield* processors.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
abort: abort.signal,
|
||||
})
|
||||
|
||||
const value = yield* handle.process({
|
||||
@@ -381,7 +362,6 @@ it.effect("session.processor effect tests stop after token overflow requests com
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
abort: abort.signal,
|
||||
messages: [{ role: "user", content: "compact" }],
|
||||
tools: {},
|
||||
})
|
||||
@@ -433,13 +413,11 @@ it.effect("session.processor effect tests reset reasoning state across retries",
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "reason")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const abort = new AbortController()
|
||||
const mdl = model(100)
|
||||
const handle = yield* processors.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
abort: abort.signal,
|
||||
})
|
||||
|
||||
const value = yield* handle.process({
|
||||
@@ -455,7 +433,6 @@ it.effect("session.processor effect tests reset reasoning state across retries",
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
abort: abort.signal,
|
||||
messages: [{ role: "user", content: "reason" }],
|
||||
tools: {},
|
||||
})
|
||||
@@ -485,13 +462,11 @@ it.effect("session.processor effect tests do not retry unknown json errors", ()
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "json")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const abort = new AbortController()
|
||||
const mdl = model(100)
|
||||
const handle = yield* processors.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
abort: abort.signal,
|
||||
})
|
||||
|
||||
const value = yield* handle.process({
|
||||
@@ -507,7 +482,6 @@ it.effect("session.processor effect tests do not retry unknown json errors", ()
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
abort: abort.signal,
|
||||
messages: [{ role: "user", content: "json" }],
|
||||
tools: {},
|
||||
})
|
||||
@@ -535,13 +509,11 @@ it.effect("session.processor effect tests retry recognized structured json error
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "retry json")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const abort = new AbortController()
|
||||
const mdl = model(100)
|
||||
const handle = yield* processors.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
abort: abort.signal,
|
||||
})
|
||||
|
||||
const value = yield* handle.process({
|
||||
@@ -557,7 +529,6 @@ it.effect("session.processor effect tests retry recognized structured json error
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
abort: abort.signal,
|
||||
messages: [{ role: "user", content: "retry json" }],
|
||||
tools: {},
|
||||
})
|
||||
@@ -601,7 +572,6 @@ it.effect("session.processor effect tests publish retry status updates", () => {
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "retry")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const abort = new AbortController()
|
||||
const mdl = model(100)
|
||||
const states: number[] = []
|
||||
const off = yield* bus.subscribeCallback(SessionStatus.Event.Status, (evt) => {
|
||||
@@ -612,7 +582,6 @@ it.effect("session.processor effect tests publish retry status updates", () => {
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
abort: abort.signal,
|
||||
})
|
||||
|
||||
const value = yield* handle.process({
|
||||
@@ -628,7 +597,6 @@ it.effect("session.processor effect tests publish retry status updates", () => {
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
abort: abort.signal,
|
||||
messages: [{ role: "user", content: "retry" }],
|
||||
tools: {},
|
||||
})
|
||||
@@ -656,13 +624,11 @@ it.effect("session.processor effect tests compact on structured context overflow
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "compact json")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const abort = new AbortController()
|
||||
const mdl = model(100)
|
||||
const handle = yield* processors.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
abort: abort.signal,
|
||||
})
|
||||
|
||||
const value = yield* handle.process({
|
||||
@@ -678,7 +644,6 @@ it.effect("session.processor effect tests compact on structured context overflow
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
abort: abort.signal,
|
||||
messages: [{ role: "user", content: "compact json" }],
|
||||
tools: {},
|
||||
})
|
||||
@@ -696,7 +661,6 @@ it.effect("session.processor effect tests mark pending tools as aborted on clean
|
||||
(dir) =>
|
||||
Effect.gen(function* () {
|
||||
const ready = defer<void>()
|
||||
const seen = defer<void>()
|
||||
const test = yield* TestLLM
|
||||
const processors = yield* SessionProcessor.Service
|
||||
const session = yield* Session.Service
|
||||
@@ -710,17 +674,15 @@ it.effect("session.processor effect tests mark pending tools as aborted on clean
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "tool abort")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const abort = new AbortController()
|
||||
const mdl = model(100)
|
||||
const handle = yield* processors.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
abort: abort.signal,
|
||||
})
|
||||
|
||||
const run = Effect.runPromise(
|
||||
handle.process({
|
||||
const run = yield* handle
|
||||
.process({
|
||||
user: {
|
||||
id: parent.id,
|
||||
sessionID: chat.id,
|
||||
@@ -733,20 +695,25 @@ it.effect("session.processor effect tests mark pending tools as aborted on clean
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
abort: abort.signal,
|
||||
messages: [{ role: "user", content: "tool abort" }],
|
||||
tools: {},
|
||||
}),
|
||||
)
|
||||
})
|
||||
.pipe(Effect.forkChild)
|
||||
|
||||
yield* Effect.promise(() => ready.promise)
|
||||
abort.abort()
|
||||
yield* Fiber.interrupt(run)
|
||||
|
||||
const value = yield* Effect.promise(() => run)
|
||||
const exit = yield* Fiber.await(run)
|
||||
if (Exit.isFailure(exit) && Cause.hasInterruptsOnly(exit.cause)) {
|
||||
yield* handle.abort()
|
||||
}
|
||||
const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
|
||||
const tool = parts.find((part): part is MessageV2.ToolPart => part.type === "tool")
|
||||
|
||||
expect(value).toBe("stop")
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
if (Exit.isFailure(exit)) {
|
||||
expect(Cause.hasInterruptsOnly(exit.cause)).toBe(true)
|
||||
}
|
||||
expect(yield* test.calls).toBe(1)
|
||||
expect(tool?.state.status).toBe("error")
|
||||
if (tool?.state.status === "error") {
|
||||
@@ -779,7 +746,6 @@ it.effect("session.processor effect tests record aborted errors and idle state",
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "abort")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const abort = new AbortController()
|
||||
const mdl = model(100)
|
||||
const errs: string[] = []
|
||||
const off = yield* bus.subscribeCallback(Session.Event.Error, (evt) => {
|
||||
@@ -792,11 +758,10 @@ it.effect("session.processor effect tests record aborted errors and idle state",
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
abort: abort.signal,
|
||||
})
|
||||
|
||||
const run = Effect.runPromise(
|
||||
handle.process({
|
||||
const run = yield* handle
|
||||
.process({
|
||||
user: {
|
||||
id: parent.id,
|
||||
sessionID: chat.id,
|
||||
@@ -809,22 +774,27 @@ it.effect("session.processor effect tests record aborted errors and idle state",
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
abort: abort.signal,
|
||||
messages: [{ role: "user", content: "abort" }],
|
||||
tools: {},
|
||||
}),
|
||||
)
|
||||
})
|
||||
.pipe(Effect.forkChild)
|
||||
|
||||
yield* Effect.promise(() => ready.promise)
|
||||
abort.abort()
|
||||
yield* Fiber.interrupt(run)
|
||||
|
||||
const value = yield* Effect.promise(() => run)
|
||||
const exit = yield* Fiber.await(run)
|
||||
if (Exit.isFailure(exit) && Cause.hasInterruptsOnly(exit.cause)) {
|
||||
yield* handle.abort()
|
||||
}
|
||||
yield* Effect.promise(() => seen.promise)
|
||||
const stored = yield* Effect.promise(() => MessageV2.get({ sessionID: chat.id, messageID: msg.id }))
|
||||
const state = yield* status.get(chat.id)
|
||||
off()
|
||||
|
||||
expect(value).toBe("stop")
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
if (Exit.isFailure(exit)) {
|
||||
expect(Cause.hasInterruptsOnly(exit.cause)).toBe(true)
|
||||
}
|
||||
expect(handle.message.error?.name).toBe("MessageAbortedError")
|
||||
expect(stored.info.role).toBe("assistant")
|
||||
if (stored.info.role === "assistant") {
|
||||
@@ -836,3 +806,67 @@ it.effect("session.processor effect tests record aborted errors and idle state",
|
||||
{ git: true },
|
||||
)
|
||||
})
|
||||
|
||||
it.effect("session.processor effect tests mark interruptions aborted without manual abort", () => {
|
||||
return provideTmpdirInstance(
|
||||
(dir) =>
|
||||
Effect.gen(function* () {
|
||||
const ready = defer<void>()
|
||||
const processors = yield* SessionProcessor.Service
|
||||
const session = yield* Session.Service
|
||||
const status = yield* SessionStatus.Service
|
||||
const test = yield* TestLLM
|
||||
|
||||
yield* test.push((input) =>
|
||||
hang(input, start()).pipe(
|
||||
Stream.tap((event) => (event.type === "start" ? Effect.sync(() => ready.resolve()) : Effect.void)),
|
||||
),
|
||||
)
|
||||
|
||||
const chat = yield* session.create({})
|
||||
const parent = yield* user(chat.id, "interrupt")
|
||||
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
|
||||
const mdl = model(100)
|
||||
const handle = yield* processors.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
})
|
||||
|
||||
const run = yield* handle
|
||||
.process({
|
||||
user: {
|
||||
id: parent.id,
|
||||
sessionID: chat.id,
|
||||
role: "user",
|
||||
time: parent.time,
|
||||
agent: parent.agent,
|
||||
model: { providerID: ref.providerID, modelID: ref.modelID },
|
||||
} satisfies MessageV2.User,
|
||||
sessionID: chat.id,
|
||||
model: mdl,
|
||||
agent: agent(),
|
||||
system: [],
|
||||
messages: [{ role: "user", content: "interrupt" }],
|
||||
tools: {},
|
||||
})
|
||||
.pipe(Effect.forkChild)
|
||||
|
||||
yield* Effect.promise(() => ready.promise)
|
||||
yield* Fiber.interrupt(run)
|
||||
|
||||
const exit = yield* Fiber.await(run)
|
||||
const stored = yield* Effect.promise(() => MessageV2.get({ sessionID: chat.id, messageID: msg.id }))
|
||||
const state = yield* status.get(chat.id)
|
||||
|
||||
expect(Exit.isFailure(exit)).toBe(true)
|
||||
expect(handle.message.error?.name).toBe("MessageAbortedError")
|
||||
expect(stored.info.role).toBe("assistant")
|
||||
if (stored.info.role === "assistant") {
|
||||
expect(stored.info.error?.name).toBe("MessageAbortedError")
|
||||
}
|
||||
expect(state).toMatchObject({ type: "idle" })
|
||||
}),
|
||||
{ git: true },
|
||||
)
|
||||
})
|
||||
|
||||
247
packages/opencode/test/session/prompt-concurrency.test.ts
Normal file
247
packages/opencode/test/session/prompt-concurrency.test.ts
Normal file
@@ -0,0 +1,247 @@
|
||||
import { describe, expect, spyOn, test } from "bun:test"
|
||||
import { Instance } from "../../src/project/instance"
|
||||
import { Provider } from "../../src/provider/provider"
|
||||
import { Session } from "../../src/session"
|
||||
import { MessageV2 } from "../../src/session/message-v2"
|
||||
import { SessionPrompt } from "../../src/session/prompt"
|
||||
import { SessionStatus } from "../../src/session/status"
|
||||
import { MessageID, PartID, SessionID } from "../../src/session/schema"
|
||||
import { Log } from "../../src/util/log"
|
||||
import { tmpdir } from "../fixture/fixture"
|
||||
|
||||
Log.init({ print: false })
|
||||
|
||||
function deferred() {
|
||||
let resolve!: () => void
|
||||
const promise = new Promise<void>((done) => {
|
||||
resolve = done
|
||||
})
|
||||
return { promise, resolve }
|
||||
}
|
||||
|
||||
// Helper: seed a session with a user message + finished assistant message
|
||||
// so loop() exits immediately without calling any LLM
|
||||
async function seed(sessionID: SessionID) {
|
||||
const userMsg: MessageV2.Info = {
|
||||
id: MessageID.ascending(),
|
||||
role: "user",
|
||||
sessionID,
|
||||
time: { created: Date.now() },
|
||||
agent: "build",
|
||||
model: { providerID: "openai" as any, modelID: "gpt-5.2" as any },
|
||||
}
|
||||
await Session.updateMessage(userMsg)
|
||||
await Session.updatePart({
|
||||
id: PartID.ascending(),
|
||||
messageID: userMsg.id,
|
||||
sessionID,
|
||||
type: "text",
|
||||
text: "hello",
|
||||
})
|
||||
|
||||
const assistantMsg: MessageV2.Info = {
|
||||
id: MessageID.ascending(),
|
||||
role: "assistant",
|
||||
parentID: userMsg.id,
|
||||
sessionID,
|
||||
mode: "build",
|
||||
agent: "build",
|
||||
cost: 0,
|
||||
path: { cwd: "/tmp", root: "/tmp" },
|
||||
tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
|
||||
modelID: "gpt-5.2" as any,
|
||||
providerID: "openai" as any,
|
||||
time: { created: Date.now(), completed: Date.now() },
|
||||
finish: "stop",
|
||||
}
|
||||
await Session.updateMessage(assistantMsg)
|
||||
await Session.updatePart({
|
||||
id: PartID.ascending(),
|
||||
messageID: assistantMsg.id,
|
||||
sessionID,
|
||||
type: "text",
|
||||
text: "hi there",
|
||||
})
|
||||
|
||||
return { userMsg, assistantMsg }
|
||||
}
|
||||
|
||||
describe("session.prompt concurrency", () => {
|
||||
test("loop returns assistant message and sets status to idle", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
await seed(session.id)
|
||||
|
||||
const result = await SessionPrompt.loop({ sessionID: session.id })
|
||||
expect(result.info.role).toBe("assistant")
|
||||
if (result.info.role === "assistant") expect(result.info.finish).toBe("stop")
|
||||
|
||||
const status = await SessionStatus.get(session.id)
|
||||
expect(status.type).toBe("idle")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("concurrent loop callers get the same result", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
await seed(session.id)
|
||||
|
||||
const [a, b] = await Promise.all([
|
||||
SessionPrompt.loop({ sessionID: session.id }),
|
||||
SessionPrompt.loop({ sessionID: session.id }),
|
||||
])
|
||||
|
||||
expect(a.info.id).toBe(b.info.id)
|
||||
expect(a.info.role).toBe("assistant")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("assertNotBusy throws when loop is running", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
const userMsg: MessageV2.Info = {
|
||||
id: MessageID.ascending(),
|
||||
role: "user",
|
||||
sessionID: session.id,
|
||||
time: { created: Date.now() },
|
||||
agent: "build",
|
||||
model: { providerID: "openai" as any, modelID: "gpt-5.2" as any },
|
||||
}
|
||||
await Session.updateMessage(userMsg)
|
||||
await Session.updatePart({
|
||||
id: PartID.ascending(),
|
||||
messageID: userMsg.id,
|
||||
sessionID: session.id,
|
||||
type: "text",
|
||||
text: "hello",
|
||||
})
|
||||
|
||||
const ready = deferred()
|
||||
const gate = deferred()
|
||||
const getModel = spyOn(Provider, "getModel").mockImplementation(async () => {
|
||||
ready.resolve()
|
||||
await gate.promise
|
||||
throw new Error("test stop")
|
||||
})
|
||||
|
||||
try {
|
||||
const loopPromise = SessionPrompt.loop({ sessionID: session.id }).catch(() => undefined)
|
||||
await ready.promise
|
||||
|
||||
await expect(SessionPrompt.assertNotBusy(session.id)).rejects.toBeInstanceOf(Session.BusyError)
|
||||
|
||||
gate.resolve()
|
||||
await loopPromise
|
||||
} finally {
|
||||
gate.resolve()
|
||||
getModel.mockRestore()
|
||||
}
|
||||
|
||||
// After loop completes, assertNotBusy should succeed
|
||||
await SessionPrompt.assertNotBusy(session.id)
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("cancel sets status to idle", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
// Seed only a user message — loop must call getModel to proceed
|
||||
const userMsg: MessageV2.Info = {
|
||||
id: MessageID.ascending(),
|
||||
role: "user",
|
||||
sessionID: session.id,
|
||||
time: { created: Date.now() },
|
||||
agent: "build",
|
||||
model: { providerID: "openai" as any, modelID: "gpt-5.2" as any },
|
||||
}
|
||||
await Session.updateMessage(userMsg)
|
||||
await Session.updatePart({
|
||||
id: PartID.ascending(),
|
||||
messageID: userMsg.id,
|
||||
sessionID: session.id,
|
||||
type: "text",
|
||||
text: "hello",
|
||||
})
|
||||
// Also seed an assistant message so lastAssistant() fallback can find it
|
||||
const assistantMsg: MessageV2.Info = {
|
||||
id: MessageID.ascending(),
|
||||
role: "assistant",
|
||||
parentID: userMsg.id,
|
||||
sessionID: session.id,
|
||||
mode: "build",
|
||||
agent: "build",
|
||||
cost: 0,
|
||||
path: { cwd: "/tmp", root: "/tmp" },
|
||||
tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
|
||||
modelID: "gpt-5.2" as any,
|
||||
providerID: "openai" as any,
|
||||
time: { created: Date.now() },
|
||||
}
|
||||
await Session.updateMessage(assistantMsg)
|
||||
await Session.updatePart({
|
||||
id: PartID.ascending(),
|
||||
messageID: assistantMsg.id,
|
||||
sessionID: session.id,
|
||||
type: "text",
|
||||
text: "hi there",
|
||||
})
|
||||
|
||||
const ready = deferred()
|
||||
const gate = deferred()
|
||||
const getModel = spyOn(Provider, "getModel").mockImplementation(async () => {
|
||||
ready.resolve()
|
||||
await gate.promise
|
||||
throw new Error("test stop")
|
||||
})
|
||||
|
||||
try {
|
||||
// Start loop — it will block in getModel (assistant has no finish, so loop continues)
|
||||
const loopPromise = SessionPrompt.loop({ sessionID: session.id })
|
||||
|
||||
await ready.promise
|
||||
|
||||
await SessionPrompt.cancel(session.id)
|
||||
|
||||
const status = await SessionStatus.get(session.id)
|
||||
expect(status.type).toBe("idle")
|
||||
|
||||
// loop should resolve cleanly, not throw "All fibers interrupted"
|
||||
const result = await loopPromise
|
||||
expect(result.info.role).toBe("assistant")
|
||||
expect(result.info.id).toBe(assistantMsg.id)
|
||||
} finally {
|
||||
gate.resolve()
|
||||
getModel.mockRestore()
|
||||
}
|
||||
},
|
||||
})
|
||||
}, 10000)
|
||||
|
||||
test("cancel on idle session just sets idle", async () => {
|
||||
await using tmp = await tmpdir({ git: true })
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
fn: async () => {
|
||||
const session = await Session.create({})
|
||||
await SessionPrompt.cancel(session.id)
|
||||
const status = await SessionStatus.get(session.id)
|
||||
expect(status.type).toBe("idle")
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
1205
packages/opencode/test/session/prompt-effect.test.ts
Normal file
1205
packages/opencode/test/session/prompt-effect.test.ts
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"$schema": "https://json.schemastore.org/package.json",
|
||||
"name": "@opencode-ai/plugin",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"scripts": {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"$schema": "https://json.schemastore.org/package.json",
|
||||
"name": "@opencode-ai/sdk",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"scripts": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@opencode-ai/slack",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"scripts": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@opencode-ai/ui",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"exports": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@opencode-ai/util",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"name": "@opencode-ai/web",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"scripts": {
|
||||
"dev": "astro dev",
|
||||
"dev:remote": "VITE_API_URL=https://api.opencode.ai astro dev",
|
||||
|
||||
@@ -20,24 +20,24 @@ OpenCode Zen متاح حاليا بنسخة تجريبية.
|
||||
|
||||
## الخلفية
|
||||
|
||||
توجد نماذج كثيرة جدا، لكن عددا قليلا فقط منها يعمل بشكل جيد كعوامل للبرمجة.
|
||||
بالإضافة إلى ذلك، فإن معظم المزوّدين يختلفون كثيرا في طريقة الإعداد؛ لذلك قد
|
||||
تحصل على أداء وجودة مختلفين بشكل كبير.
|
||||
يوجد عدد كبير من النماذج، لكن عددا قليلا فقط منها يعمل جيدا كعوامل للبرمجة.
|
||||
إضافة إلى ذلك، يتم إعداد معظم المزوّدين بطرق مختلفة جدا؛ لذلك ستحصل على أداء
|
||||
وجودة مختلفين بشكل كبير.
|
||||
|
||||
:::tip
|
||||
اختبرنا مجموعة منتقاة من النماذج والمزوّدين الذين يعملون جيدا مع OpenCode.
|
||||
:::
|
||||
|
||||
لذلك إذا كنت تستخدم نموذجا عبر خدمة مثل OpenRouter، فلن تكون واثقا أبدا من أنك
|
||||
تحصل على أفضل نسخة من النموذج الذي تريده.
|
||||
لذلك إذا كنت تستخدم نموذجا عبر خدمة مثل OpenRouter، فلا يمكنك أبدا التأكد مما
|
||||
إذا كنت تحصل على أفضل نسخة من النموذج الذي تريده.
|
||||
|
||||
لمعالجة ذلك، قمنا بعدة أمور:
|
||||
ولحل ذلك، قمنا بعدة أمور:
|
||||
|
||||
1. اختبرنا مجموعة منتقاة من النماذج وتحدثنا مع فرقها حول افضل طريقة لتشغيلها.
|
||||
2. ثم عملنا مع عدد من المزوّدين للتأكد من تقديمها بشكل صحيح.
|
||||
3. أخيرا، قمنا بقياس أداء توليفة النموذج/المزوّد وخرجنا بقائمة نوصي بها بثقة.
|
||||
1. اختبرنا مجموعة منتقاة من النماذج وتحدثنا مع فرقها حول أفضل طريقة لتشغيلها.
|
||||
2. ثم عملنا مع عدد من المزوّدين للتأكد من تقديم هذه النماذج بالشكل الصحيح.
|
||||
3. وأخيرا، أجرينا قياسا معياريا لتوليفة النموذج/المزوّد ووضعنا قائمة نشعر بثقة في التوصية بها.
|
||||
|
||||
OpenCode Zen هو بوابة للذكاء الاصطناعي تتيح لك الوصول إلى هذه النماذج.
|
||||
OpenCode Zen هي بوابة AI تتيح لك الوصول إلى هذه النماذج.
|
||||
|
||||
---
|
||||
|
||||
@@ -47,9 +47,9 @@ OpenCode Zen هو بوابة للذكاء الاصطناعي تتيح لك ال
|
||||
|
||||
1. تسجّل الدخول إلى **<a href={console}>OpenCode Zen</a>**، وتضيف تفاصيل الفوترة، ثم تنسخ مفتاح API.
|
||||
2. تشغّل الأمر `/connect` في واجهة TUI، وتختار OpenCode Zen، ثم تلصق مفتاح API.
|
||||
3. شغّل `/models` في واجهة TUI لعرض قائمة النماذج التي نوصي بها.
|
||||
3. تشغّل `/models` في واجهة TUI لرؤية قائمة النماذج التي نوصي بها.
|
||||
|
||||
يتم احتساب الرسوم لكل طلب، ويمكنك إضافة رصيد إلى حسابك.
|
||||
تتم محاسبتك على كل طلب ويمكنك إضافة رصيد إلى حسابك.
|
||||
|
||||
---
|
||||
|
||||
@@ -57,44 +57,44 @@ OpenCode Zen هو بوابة للذكاء الاصطناعي تتيح لك ال
|
||||
|
||||
يمكنك أيضا الوصول إلى نماذجنا عبر نقاط نهاية API التالية.
|
||||
|
||||
| النموذج | معرّف النموذج | نقطة النهاية | حزمة AI SDK |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| النموذج | معرّف النموذج | نقطة النهاية | حزمة AI SDK |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
يستخدم [معرّف النموذج](/docs/config/#models) في إعدادات OpenCode الصيغة `opencode/<model-id>`.
|
||||
على سبيل المثال، بالنسبة إلى GPT 5.2 Codex ستستخدم `opencode/gpt-5.2-codex` في إعداداتك.
|
||||
يستخدم [معرّف النموذج](/docs/config/#models) في إعدادات OpenCode الصيغة `opencode/<model-id>`. على سبيل المثال، بالنسبة إلى GPT 5.3 Codex، ستستخدم `opencode/gpt-5.3-codex` في إعداداتك.
|
||||
|
||||
---
|
||||
|
||||
@@ -112,57 +112,60 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
ندعم نموذج الدفع حسب الاستخدام. فيما يلي الأسعار **لكل 1M tokens**.
|
||||
|
||||
| النموذج | الإدخال | الإخراج | قراءة مخزنة | كتابة مخزنة |
|
||||
| --------------------------------- | ------- | ------- | ----------- | ----------- |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
| النموذج | الإدخال | الإخراج | القراءة المخزنة | الكتابة المخزنة |
|
||||
| --------------------------------- | ------- | ------- | --------------- | --------------- |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
قد تلاحظ _Claude Haiku 3.5_ في سجل الاستخدام. هذا [نموذج منخفض التكلفة](/docs/config/#models) يُستخدم لتوليد عناوين جلساتك.
|
||||
|
||||
:::note
|
||||
يتم تمرير رسوم بطاقات الائتمان بالتكلفة الفعلية (4.4% + $0.30 لكل معاملة)؛ ولا نفرض أي رسوم إضافية غير ذلك.
|
||||
يتم تمرير رسوم بطاقات الائتمان بالتكلفة الفعلية (4.4% + $0.30 لكل معاملة)؛ ولا نفرض أي رسوم إضافية فوق ذلك.
|
||||
:::
|
||||
|
||||
النماذج المجانية:
|
||||
|
||||
- MiniMax M2.5 Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج.
|
||||
- MiMo V2 Pro Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج.
|
||||
- MiMo V2 Omni Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج.
|
||||
- Qwen3.6 Plus Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج.
|
||||
- Nemotron 3 Super Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج.
|
||||
- Big Pickle نموذج خفي ومتاح مجانا على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج.
|
||||
|
||||
<a href={email}>تواصل معنا</a> إذا كانت لديك أي أسئلة.
|
||||
@@ -181,32 +184,36 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
يمكنك أيضا تعيين حد شهري للاستخدام لمساحة العمل بالكامل ولكل عضو في فريقك.
|
||||
|
||||
على سبيل المثال، لنفترض أنك ضبطت حد الاستخدام الشهري على $20، فلن يتجاوز Zen مبلغ $20 خلال شهر.
|
||||
لكن إذا كانت إعادة الشحن التلقائي مفعّلة، فقد ينتهي الأمر بخصم أكثر من $20 إذا انخفض رصيدك عن $5.
|
||||
على سبيل المثال، لنفترض أنك ضبطت حد الاستخدام الشهري على $20، فلن يستخدم Zen أكثر من $20 خلال شهر. لكن إذا كانت إعادة الشحن التلقائي مفعّلة، فقد ينتهي الأمر بخصم أكثر من $20 إذا انخفض رصيدك عن $5.
|
||||
|
||||
---
|
||||
|
||||
### نماذج مهملة
|
||||
### النماذج المهملة
|
||||
|
||||
| النموذج | تاريخ الإيقاف |
|
||||
| ---------------- | ------------- |
|
||||
| Qwen3 Coder 480B | 6 فبراير 2026 |
|
||||
| Kimi K2 Thinking | 6 مارس 2026 |
|
||||
| Kimi K2 | 6 مارس 2026 |
|
||||
| MiniMax M2.1 | 15 مارس 2026 |
|
||||
| GLM 4.7 | 15 مارس 2026 |
|
||||
| GLM 4.6 | 15 مارس 2026 |
|
||||
| Gemini 3 Pro | 9 مارس 2026 |
|
||||
| Kimi K2 Thinking | 6 مارس 2026 |
|
||||
| Kimi K2 | 6 مارس 2026 |
|
||||
| Qwen3 Coder 480B | 6 فبراير 2026 |
|
||||
|
||||
---
|
||||
|
||||
## الخصوصية
|
||||
|
||||
تتم استضافة جميع نماذجنا في الولايات المتحدة. يلتزم مزوّدونا بسياسة عدم الاحتفاظ بالبيانات (zero-retention) ولا يستخدمون بياناتك لتدريب النماذج، مع الاستثناءات التالية:
|
||||
تتم استضافة جميع نماذجنا في الولايات المتحدة. يلتزم مزوّدونا بسياسة عدم الاحتفاظ بالبيانات ولا يستخدمون بياناتك لتدريب النماذج، مع الاستثناءات التالية:
|
||||
|
||||
- Big Pickle: خلال فترة إتاحته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
|
||||
- MiniMax M2.5 Free: خلال فترة إتاحته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
|
||||
- OpenAI APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [سياسات بيانات OpenAI](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [سياسات بيانات Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- Big Pickle: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
|
||||
- MiniMax M2.5 Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
|
||||
- MiMo V2 Pro Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
|
||||
- MiMo V2 Omni Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
|
||||
- Qwen3.6 Plus Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
|
||||
- Nemotron 3 Super Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
|
||||
- OpenAI APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
@@ -230,21 +237,21 @@ https://opencode.ai/zen/v1/models
|
||||
- **Admin**: إدارة النماذج والأعضاء ومفاتيح API والفوترة
|
||||
- **Member**: إدارة مفاتيح API الخاصة به فقط
|
||||
|
||||
يمكن للمسؤولين أيضا تعيين حدود إنفاق شهرية لكل عضو للسيطرة على التكاليف.
|
||||
يمكن للمسؤولين أيضا تعيين حدود إنفاق شهرية لكل عضو لإبقاء التكاليف تحت السيطرة.
|
||||
|
||||
---
|
||||
|
||||
### صلاحيات الوصول للنماذج
|
||||
### الوصول إلى النماذج
|
||||
|
||||
يمكن للمسؤولين تفعيل نماذج محددة لمساحة العمل أو تعطيلها. ستعيد الطلبات المرسلة إلى نموذج معطّل خطأ.
|
||||
|
||||
يفيد ذلك في الحالات التي تريد فيها تعطيل استخدام نموذج يقوم بجمع البيانات.
|
||||
يفيد ذلك في الحالات التي تريد فيها تعطيل استخدام نموذج يجمع البيانات.
|
||||
|
||||
---
|
||||
|
||||
### استخدم مفتاحك الخاص
|
||||
|
||||
يمكنك استخدام مفاتيح API الخاصة بك لدى OpenAI أو Anthropic مع الاستمرار في الوصول إلى نماذج أخرى ضمن Zen.
|
||||
يمكنك استخدام مفاتيح API الخاصة بك لدى OpenAI أو Anthropic مع الاستمرار في الوصول إلى نماذج أخرى في Zen.
|
||||
|
||||
عند استخدام مفاتيحك الخاصة، تتم فوترة tokens مباشرة من المزوّد وليس من Zen.
|
||||
|
||||
@@ -257,6 +264,6 @@ https://opencode.ai/zen/v1/models
|
||||
أنشأنا OpenCode Zen من أجل:
|
||||
|
||||
1. **قياس الأداء** لأفضل النماذج/المزوّدين لعوامل البرمجة.
|
||||
2. إتاحة خيارات **عالية الجودة** دون خفض الأداء أو توجيه الطلبات إلى مزوّدين أرخص.
|
||||
3. تمرير أي **انخفاض في الأسعار** عبر البيع بالتكلفة؛ بحيث تكون الزيادة الوحيدة لتغطية رسوم المعالجة.
|
||||
4. عدم فرض **أي ارتباط حصري (lock-in)** عبر تمكينك من استخدامه مع أي عامل برمجة آخر، مع إتاحة استخدام أي مزوّد آخر مع OpenCode أيضا.
|
||||
2. إتاحة الخيارات **الأعلى جودة** دون خفض الأداء أو توجيه الطلبات إلى مزوّدين أرخص.
|
||||
3. تمرير أي **انخفاضات في الأسعار** عبر البيع بالتكلفة؛ بحيث تكون الزيادة الوحيدة لتغطية رسوم المعالجة.
|
||||
4. **عدم فرض أي ارتباط حصري** عبر تمكينك من استخدامه مع أي عامل برمجة آخر. ومع إبقاء إمكانية استخدام أي مزوّد آخر مع OpenCode أيضا دائما.
|
||||
|
||||
@@ -7,31 +7,39 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen je lista testiranih i provjerenih modela koje obezbjeduje OpenCode tim.
|
||||
OpenCode Zen je lista testiranih i provjerenih modela koje pruža OpenCode tim.
|
||||
|
||||
:::note
|
||||
OpenCode Zen je trenutno u beta fazi.
|
||||
:::
|
||||
|
||||
Zen radi kao i svaki drugi provajder u OpenCode. Prijavite se u OpenCode Zen i uzmete API kljuc. Ovo je **potpuno opcionalno** i ne morate ga koristiti da biste koristili OpenCode.
|
||||
Zen radi kao i svaki drugi provajder u OpenCode. Prijavite se na OpenCode Zen i
|
||||
preuzmete svoj API ključ. To je **potpuno opcionalno** i ne morate ga koristiti
|
||||
da biste koristili OpenCode.
|
||||
|
||||
---
|
||||
|
||||
## Pozadina
|
||||
|
||||
Postoji veliki broj modela, ali samo mali dio radi dobro kao coding agent. Dodatno, vecina provajdera je drugacije konfigurisana, pa su performanse i kvalitet cesto neujednaceni.
|
||||
Postoji veliki broj modela, ali samo mali broj tih modela dobro funkcioniše kao
|
||||
coding agent. Osim toga, većina provajdera je konfigurisana veoma različito, pa
|
||||
zbog toga dobijate veoma različite performanse i kvalitet.
|
||||
|
||||
:::tip
|
||||
Testirali smo odabranu grupu modela i provajdera koji dobro rade s OpenCode.
|
||||
:::
|
||||
|
||||
Ako model koristite preko servisa poput OpenRouter-a, cesto ne mozete biti sigurni da dobijate najbolju verziju zeljenog modela.
|
||||
Zato, ako model koristite preko nečega poput OpenRouter, nikada ne možete biti
|
||||
sigurni da dobijate najbolju verziju modela koji želite.
|
||||
|
||||
Da to rijesimo, uradili smo nekoliko stvari:
|
||||
Da bismo to riješili, uradili smo nekoliko stvari:
|
||||
|
||||
1. Testirali smo odabrane modele i razgovarali sa njihovim timovima kako ih najbolje pokretati.
|
||||
2. Zatim smo saradjivali s nekoliko provajdera da potvrdimo da se modeli isporucuju ispravno.
|
||||
3. Na kraju smo benchmarkirali kombinacije model/provajder i sastavili listu koju preporucujemo.
|
||||
1. Testirali smo odabranu grupu modela i razgovarali s njihovim timovima o tome
|
||||
kako ih najbolje pokretati.
|
||||
2. Zatim smo radili s nekoliko provajdera kako bismo bili sigurni da se ti
|
||||
modeli isporučuju ispravno.
|
||||
3. Na kraju smo benchmarkirali kombinacije model/provajder i sastavili listu
|
||||
koju s punim povjerenjem preporučujemo.
|
||||
|
||||
OpenCode Zen je AI gateway koji vam daje pristup tim modelima.
|
||||
|
||||
@@ -41,61 +49,65 @@ OpenCode Zen je AI gateway koji vam daje pristup tim modelima.
|
||||
|
||||
OpenCode Zen radi kao i svaki drugi provajder u OpenCode.
|
||||
|
||||
1. Prijavite se na **<a href={console}>OpenCode Zen</a>**, dodajte billing podatke i kopirajte API kljuc.
|
||||
2. U TUI-ju pokrenite `/connect`, izaberite OpenCode Zen i zalijepite API kljuc.
|
||||
3. Pokrenite `/models` u TUI-ju da vidite listu preporucenih modela.
|
||||
1. Prijavite se na **<a href={console}>OpenCode Zen</a>**, dodajte podatke za
|
||||
naplatu i kopirajte svoj API ključ.
|
||||
2. Pokrenite komandu `/connect` u TUI, izaberite OpenCode Zen i zalijepite svoj API ključ.
|
||||
3. Pokrenite `/models` u TUI da vidite listu modela koje preporučujemo.
|
||||
|
||||
Naplata je po zahtjevu i mozete dodavati kredit na racun.
|
||||
Naplata se vrši po zahtjevu i možete dodavati kredit na svoj račun.
|
||||
|
||||
---
|
||||
|
||||
## Endpoints
|
||||
|
||||
Nasim modelima mozete pristupiti i preko sljedecih API endpointa.
|
||||
Našim modelima možete pristupiti i preko sljedećih API endpointa.
|
||||
|
||||
| Model | Model ID | Endpoint | AI SDK Package |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Model | Model ID | Endpoint | AI SDK Package |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
[model id](/docs/config/#models) u OpenCode konfiguraciji koristi format `opencode/<model-id>`. Na primjer, za GPT 5.2 Codex u konfiguraciji koristite `opencode/gpt-5.2-codex`.
|
||||
[model id](/docs/config/#models) u vašoj OpenCode konfiguraciji koristi format
|
||||
`opencode/<model-id>`. Na primjer, za GPT 5.3 Codex u konfiguraciji biste
|
||||
koristili `opencode/gpt-5.3-codex`.
|
||||
|
||||
---
|
||||
|
||||
### Modeli
|
||||
|
||||
Pun spisak dostupnih modela i metapodataka mozete preuzeti na:
|
||||
Pun spisak dostupnih modela i njihovih metapodataka možete preuzeti na:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -105,143 +117,163 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## Cijene
|
||||
|
||||
Podrzavamo pay-as-you-go model. Ispod su cijene **po 1M tokena**.
|
||||
Podržavamo pay-as-you-go model. Ispod su cijene **po 1M tokena**.
|
||||
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------ | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
U historiji koristenja mozete primijetiti _Claude Haiku 3.5_. To je [low cost model](/docs/config/#models) koji se koristi za generisanje naslova sesija.
|
||||
Možda ćete primijetiti _Claude Haiku 3.5_ u historiji korištenja. To je [low cost model](/docs/config/#models) koji se koristi za generisanje naslova vaših sesija.
|
||||
|
||||
:::note
|
||||
Naknade kartica se prenose po stvarnom trosku (4.4% + $0.30 po transakciji) i ne naplacujemo nista preko toga.
|
||||
Naknade za kreditne kartice prosljeđujemo po stvarnom trošku (4.4% + $0.30 po transakciji); ne naplaćujemo ništa preko toga.
|
||||
:::
|
||||
|
||||
Besplatni modeli:
|
||||
|
||||
- MiniMax M2.5 Free je dostupan na OpenCode ograniceno vrijeme. Tim koristi taj period za prikupljanje povratnih informacija i poboljsanje modela.
|
||||
- Big Pickle je stealth model koji je besplatan na OpenCode ograniceno vrijeme. Tim koristi taj period za prikupljanje povratnih informacija i poboljsanje modela.
|
||||
- MiniMax M2.5 Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model.
|
||||
- MiMo V2 Pro Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model.
|
||||
- MiMo V2 Omni Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model.
|
||||
- Qwen3.6 Plus Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model.
|
||||
- Nemotron 3 Super Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model.
|
||||
- Big Pickle je stealth model koji je besplatan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model.
|
||||
|
||||
Ako imate pitanja, <a href={email}>kontaktirajte nas</a>.
|
||||
<a href={email}>Kontaktirajte nas</a> ako imate bilo kakvih pitanja.
|
||||
|
||||
---
|
||||
|
||||
### Automatska dopuna
|
||||
### Auto-reload
|
||||
|
||||
Ako vam stanje padne ispod $5, Zen ce automatski dopuniti $20.
|
||||
Ako vam stanje padne ispod $5, Zen će automatski dopuniti $20.
|
||||
|
||||
Iznos auto-reloada mozete promijeniti. Auto-reload mozete i potpuno iskljuciti.
|
||||
Možete promijeniti iznos auto-reload dopune. Auto-reload možete i potpuno
|
||||
isključiti.
|
||||
|
||||
---
|
||||
|
||||
### Mjesečni limiti
|
||||
|
||||
Mozete postaviti mjesecni limit potrosnje za cijeli workspace i za svakog clana tima.
|
||||
Možete postaviti mjesečni limit korištenja za cijeli workspace i za svakog člana
|
||||
vašeg tima.
|
||||
|
||||
Na primjer, ako postavite mjesecni limit na $20, Zen nece potrositi vise od $20 u mjesecu. Ali ako je auto-reload ukljucen, ukupna naplata moze preci $20 ako stanje padne ispod $5.
|
||||
Na primjer, recimo da postavite mjesečni limit korištenja na $20 — Zen neće
|
||||
potrošiti više od $20 u toku mjeseca. Ali ako imate uključen auto-reload, Zen
|
||||
vam ipak može naplatiti više od $20 ako vam stanje padne ispod $5.
|
||||
|
||||
---
|
||||
|
||||
### Zastarjeli modeli
|
||||
|
||||
| Model | Datum ukidanja |
|
||||
| ---------------- | -------------- |
|
||||
| Qwen3 Coder 480B | 6. feb. 2026. |
|
||||
| Kimi K2 Thinking | 6. mart 2026. |
|
||||
| Kimi K2 | 6. mart 2026. |
|
||||
| MiniMax M2.1 | 15. mart 2026. |
|
||||
| GLM 4.7 | 15. mart 2026. |
|
||||
| GLM 4.6 | 15. mart 2026. |
|
||||
| Model | Datum zastarijevanja |
|
||||
| ---------------- | -------------------- |
|
||||
| MiniMax M2.1     | 15. mart 2026.       |
|
||||
| GLM 4.7          | 15. mart 2026.       |
|
||||
| GLM 4.6          | 15. mart 2026.       |
|
||||
| Gemini 3 Pro     | 9. mart 2026.        |
|
||||
| Kimi K2 Thinking | 6. mart 2026.        |
|
||||
| Kimi K2          | 6. mart 2026.        |
|
||||
| Qwen3 Coder 480B | 6. feb. 2026.        |
|
||||
|
||||
---
|
||||
|
||||
## Privatnost
|
||||
|
||||
Svi nasi modeli su hostovani u SAD-u. Provajderi prate zero-retention politiku i ne koriste vase podatke za treniranje modela, uz sljedece izuzetke:
|
||||
Svi naši modeli su hostovani u US. Naši provajderi prate zero-retention politiku
|
||||
i ne koriste vaše podatke za treniranje modela, uz sljedeće izuzetke:
|
||||
|
||||
- Big Pickle: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljsanje modela.
|
||||
- MiniMax M2.5 Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljsanje modela.
|
||||
- OpenAI API-ji: Zahtjevi se cuvaju 30 dana prema [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic API-ji: Zahtjevi se cuvaju 30 dana prema [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- Big Pickle: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
|
||||
- MiniMax M2.5 Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
|
||||
- MiMo V2 Pro Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
|
||||
- MiMo V2 Omni Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
|
||||
- Qwen3.6 Plus Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
|
||||
- Nemotron 3 Super Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
|
||||
- OpenAI API-ji: Zahtjevi se čuvaju 30 dana u skladu s [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic API-ji: Zahtjevi se čuvaju 30 dana u skladu s [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## Za timove
|
||||
|
||||
Zen odlicno radi i za timove. Mozete pozvati clanove tima, dodijeliti uloge, birati modele koje tim koristi i jos mnogo toga.
|
||||
Zen odlično radi i za timove. Možete pozvati saigrače, dodijeliti uloge, birati
|
||||
modele koje vaš tim koristi i još mnogo toga.
|
||||
|
||||
:::note
|
||||
Workspaces su trenutno besplatni za timove kao dio beta faze.
|
||||
:::
|
||||
|
||||
Upravljanje workspace-om je trenutno besplatno za timove tokom beta faze. Vise detalja o cijenama podijelit cemo uskoro.
|
||||
Upravljanje vašim workspace-om je trenutno besplatno za timove kao dio beta
|
||||
faze. Uskoro ćemo podijeliti više detalja o cijenama.
|
||||
|
||||
---
|
||||
|
||||
### Uloge
|
||||
|
||||
Mozete pozvati clanove tima u workspace i dodijeliti uloge:
|
||||
Možete pozvati saigrače u svoj workspace i dodijeliti im uloge:
|
||||
|
||||
- **Admin**: upravlja modelima, clanovima, API kljucevima i billingom
|
||||
- **Member**: upravlja samo svojim API kljucevima
|
||||
- **Admin**: Upravlja modelima, članovima, API ključevima i naplatom
|
||||
- **Member**: Upravlja samo vlastitim API ključevima
|
||||
|
||||
Admini mogu postaviti i mjesecne limite potrosnje po clanu da drze troskove pod kontrolom.
|
||||
Admini mogu postaviti i mjesečne limite potrošnje za svakog člana kako bi držali
|
||||
troškove pod kontrolom.
|
||||
|
||||
---
|
||||
|
||||
### Pristup modelima
|
||||
|
||||
Admini mogu ukljuciti ili iskljuciti odredene modele za workspace. Zahtjevi prema iskljucenom modelu vracaju gresku.
|
||||
Admini mogu uključiti ili isključiti određene modele za workspace. Zahtjevi
|
||||
poslani prema isključenom modelu vratiće grešku.
|
||||
|
||||
Ovo je korisno kada zelite zabraniti model koji prikuplja podatke.
|
||||
Ovo je korisno u slučajevima kada želite onemogućiti korištenje modela koji
|
||||
prikuplja podatke.
|
||||
|
||||
---
|
||||
|
||||
### Donesite vlastiti ključ
|
||||
|
||||
Mozete koristiti vlastite OpenAI ili Anthropic API kljuceve i dalje koristiti ostale modele u Zen-u.
|
||||
Možete koristiti vlastite OpenAI ili Anthropic API ključeve dok i dalje imate
|
||||
pristup drugim modelima u Zen.
|
||||
|
||||
Kada koristite vlastite kljuceve, tokene direktno naplacuje provajder, ne Zen.
|
||||
Kada koristite vlastite ključeve, tokene direktno naplaćuje provajder, a ne Zen.
|
||||
|
||||
Na primjer, vasa organizacija mozda vec ima OpenAI ili Anthropic kljuc i zelite koristiti njega umjesto onog koji daje Zen.
|
||||
Na primjer, vaša organizacija možda već ima OpenAI ili Anthropic ključ i želite
|
||||
koristiti njega umjesto onog koji pruža Zen.
|
||||
|
||||
---
|
||||
|
||||
@@ -249,7 +281,7 @@ Na primjer, vasa organizacija mozda vec ima OpenAI ili Anthropic kljuc i zelite
|
||||
|
||||
OpenCode Zen smo napravili da:
|
||||
|
||||
1. **Benchmarkiramo** najbolje kombinacije model/provajder za coding agente.
|
||||
2. Omogucimo pristup opcijama **najviseg kvaliteta** bez degradacije performansi i preusmjeravanja na jeftinije provajdere.
|
||||
3. Prenesemo svaka **snizenja cijena** prodajom po trosku, tako da je jedini markup pokrice processing naknada.
|
||||
4. Obezbijedimo **bez lock-ina** tako da Zen mozete koristiti sa bilo kojim coding agentom, uz slobodu koristenja drugih provajdera u OpenCode.
|
||||
1. **Benchmarkiramo** najbolje modele/provajdere za coding agente.
|
||||
2. Imamo pristup opcijama **najvišeg kvaliteta** bez snižavanja performansi ili preusmjeravanja na jeftinije provajdere.
|
||||
3. Prenesemo sva **sniženja cijena** prodajom po stvarnom trošku; tako da je jedini markup pokrivanje naših processing naknada.
|
||||
4. Obezbijedimo **bez lock-ina** time što vam omogućavamo da ga koristite s bilo kojim drugim coding agentom. I da vam uvijek omogućimo da koristite bilo koji drugi provajder i u OpenCode.
|
||||
|
||||
@@ -13,33 +13,33 @@ OpenCode Zen er en liste over testede og verificerede modeller leveret af OpenCo
|
||||
OpenCode Zen er i øjeblikket i beta.
|
||||
:::
|
||||
|
||||
Zen fungerer som alle andre udbydere i OpenCode. Du logger på OpenCode Zen og får
|
||||
din API-nøgle. Den er **helt valgfri** og du behøver ikke bruge den for at bruge
|
||||
Zen fungerer som enhver anden udbyder i OpenCode. Du logger ind på OpenCode Zen og får
|
||||
din API-nøgle. Det er **helt valgfrit**, og du behøver ikke bruge det for at bruge
|
||||
OpenCode.
|
||||
|
||||
---
|
||||
|
||||
## Baggrund
|
||||
|
||||
Der er et stort antal modeller derude, men kun få af dem
|
||||
Der findes et stort antal modeller, men kun få af dem
|
||||
fungerer godt som kodeagenter. Derudover er de fleste udbydere
|
||||
konfigureret meget forskelligt, så du får meget forskellig ydeevne og kvalitet.
|
||||
|
||||
:::tip
|
||||
Vi testede en udvalgt gruppe modeller og udbydere, der fungerer godt med OpenCode.
|
||||
Vi testede en udvalgt gruppe modeller og udbydere, som fungerer godt med OpenCode.
|
||||
:::
|
||||
|
||||
Så hvis du bruger en model gennem noget som OpenRouter, kan du aldrig være
|
||||
sikker på, om du får den bedste version af modellen, du ønsker.
|
||||
sikker på, om du får den bedste version af den model, du vil have.
|
||||
|
||||
For at fikse dette gjorde vi et par ting:
|
||||
For at løse det gjorde vi et par ting:
|
||||
|
||||
1. Vi testede en udvalgt gruppe modeller og talte med deres teams om, hvordan
|
||||
man bedst kører dem.
|
||||
2. Vi samarbejdede derefter med nogle få udbydere for at sikre, at disse blev serveret
|
||||
de bedst køres.
|
||||
2. Derefter arbejdede vi sammen med nogle få udbydere for at sikre, at de blev leveret
|
||||
korrekt.
|
||||
3. Til sidst benchmarkede vi kombinationen af model/udbyder og kom frem
|
||||
til en liste, som vi har lyst til at anbefale.
|
||||
3. Til sidst benchmarkede vi kombinationen af model/udbyder og lavede
|
||||
en liste, som vi trygt kan anbefale.
|
||||
|
||||
OpenCode Zen er en AI gateway, der giver dig adgang til disse modeller.
|
||||
|
||||
@@ -47,14 +47,14 @@ OpenCode Zen er en AI gateway, der giver dig adgang til disse modeller.
|
||||
|
||||
## Sådan fungerer det
|
||||
|
||||
OpenCode Zen fungerer som alle andre udbydere i OpenCode.
|
||||
OpenCode Zen fungerer som enhver anden udbyder i OpenCode.
|
||||
|
||||
1. Du logger på **<a href={console}>OpenCode Zen</a>**, tilføjer faktureringsoplysninger
|
||||
og kopierer API-nøglen.
|
||||
2. Du kører kommandoen `/connect` i TUI, vælger OpenCode Zen og indsætter API-nøglen.
|
||||
1. Du logger ind på **<a href={console}>OpenCode Zen</a>**, tilføjer dine faktureringsoplysninger
|
||||
og kopierer din API-nøgle.
|
||||
2. Du kører kommandoen `/connect` i TUI, vælger OpenCode Zen og indsætter din API-nøgle.
|
||||
3. Kør `/models` i TUI for at se listen over modeller, vi anbefaler.
|
||||
|
||||
Du opkræves per anmodning, og du kan tilføje kredit til din konto.
|
||||
Du bliver opkrævet pr. anmodning, og du kan tilføje kredit til din konto.
|
||||
|
||||
---
|
||||
|
||||
@@ -62,51 +62,52 @@ Du opkræves per anmodning, og du kan tilføje kredit til din konto.
|
||||
|
||||
Du kan også få adgang til vores modeller gennem følgende API-endpoints.
|
||||
|
||||
| Model | Model ID | Endpoint | AI SDK Pakke |
|
||||
| ------------------- | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Gratis | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Tenker | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3-koder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Stor sylteagurk | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Model | Model ID | Endpoint | AI SDK-pakke |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
[model-id](/docs/config/#models) i OpenCode-konfigurationen
|
||||
bruger formatet `opencode/<model-id>`. For eksempel, for GPT 5.2 Codex, ville du
|
||||
bruge `opencode/gpt-5.2-codex` i din konfiguration.
|
||||
[model id](/docs/config/#models) i din OpenCode-konfiguration
|
||||
bruger formatet `opencode/<model-id>`. For eksempel ville du for GPT 5.3 Codex
|
||||
bruge `opencode/gpt-5.3-codex` i din konfiguration.
|
||||
|
||||
---
|
||||
|
||||
### Modeller
|
||||
|
||||
Du kan hente hele listen over tilgængelige modeller og deres metadata fra:
|
||||
Du kan hente den fulde liste over tilgængelige modeller og deres metadata fra:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -116,81 +117,84 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## Priser
|
||||
|
||||
Vi støtter en pay-as-you-go-model. Nedenfor er priserne **per 1 million tokens**.
|
||||
Vi understøtter en pay-as-you-go-model. Nedenfor er priserne **pr. 1M tokens**.
|
||||
|
||||
| Model | Input | Output | Cached Læs | Cached Skriv |
|
||||
| --------------------------------- | ------ | ------ | ---------- | ------------ |
|
||||
| Stor sylteagurk | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 Gratis | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 | $0,30 | $1,20 | $0,06 | - |
|
||||
| MiniMax M2.1 | $0,30 | $1,20 | $0,10 | - |
|
||||
| GLM 5 | $1,00 | $3,20 | $0,20 | - |
|
||||
| GLM 4.7 | $0,60 | $2,20 | $0,10 | - |
|
||||
| GLM 4.6 | $0,60 | $2,20 | $0,10 | - |
|
||||
| Kimi K2.5 | $0,60 | $3,00 | $0,08 | - |
|
||||
| Kimi K2 Tenker | $0,40 | $2,50 | - | - |
|
||||
| Kimi K2 | $0,40 | $2,50 | - | - |
|
||||
| Qwen3-koder 480B | $0,45 | $1,50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5,00 | $25,00 | $0,50 | $6,25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10,00 | $37,50 | $1,00 | $12,50 |
|
||||
| Claude Opus 4.5 | $5,00 | $25,00 | $0,50 | $6,25 |
|
||||
| Claude Opus 4.1 | $15,00 | $75,00 | $1,50 | $18,75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3,00 | $15,00 | $0,30 | $3,75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6,00 | $22,50 | $0,60 | $7,50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3,00 | $15,00 | $0,30 | $3,75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6,00 | $22,50 | $0,60 | $7,50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3,00 | $15,00 | $0,30 | $3,75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6,00 | $22,50 | $0,60 | $7,50 |
|
||||
| Claude Haiku 4.5 | $1,00 | $5,00 | $0,10 | $1,25 |
|
||||
| Claude Haiku 3.5 | $0,80 | $4,00 | $0,08 | $1,00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2,00 | $12,00 | $0,20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4,00 | $18,00 | $0,40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2,00 | $12,00 | $0,20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4,00 | $18,00 | $0,40 | - |
|
||||
| Gemini 3 Flash | $0,50 | $3,00 | $0,05 | - |
|
||||
| GPT 5.4 | $2,50 | $15,00 | $0,25 | - |
|
||||
| GPT 5.3 Codex | $1,75 | $14,00 | $0,175 | - |
|
||||
| GPT 5.2 | $1,75 | $14,00 | $0,175 | - |
|
||||
| GPT 5.2 Codex | $1,75 | $14,00 | $0,175 | - |
|
||||
| GPT 5.1 | $1,07 | $8,50 | $0,107 | - |
|
||||
| GPT 5.1 Codex | $1,07 | $8,50 | $0,107 | - |
|
||||
| GPT 5.1 Codex Max | $1,25 | $10,00 | $0,125 | - |
|
||||
| GPT 5.1 Codex Mini | $0,25 | $2,00 | $0,025 | - |
|
||||
| GPT 5 | $1,07 | $8,50 | $0,107 | - |
|
||||
| GPT 5 Codex | $1,07 | $8,50 | $0,107 | - |
|
||||
| GPT 5 Nano | Gratis | Gratis | Gratis | - |
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
Du bemærker måske _Claude Haiku 3.5_ i din brugshistorik. Dette er en [lavprismodel](/docs/config/#models), som bruges til at generere titlerne på dine sessioner.
|
||||
Du vil måske bemærke _Claude Haiku 3.5_ i din brugshistorik. Det er en [lavprismodel](/docs/config/#models), som bruges til at generere titlerne på dine sessioner.
|
||||
|
||||
:::note
|
||||
Kreditkortgebyrer overføres til kostpris (4,4 % + $0,30 per transaktion); vi opkræver ikke noget udover det.
|
||||
Kreditkortgebyrer videregives til kostpris (4.4% + $0.30 pr. transaktion); vi opkræver ikke noget ud over det.
|
||||
:::
|
||||
|
||||
De gratis modeller:
|
||||
|
||||
- MiniMax M2.5 Gratis er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at samle feedback og forbedre modellen.
|
||||
- Stor sylteagurk er en stealth-model som er gratis på OpenCode i en begrænset periode. Teamet bruger denne tid til at samle feedback og forbedre modellen.
|
||||
- MiniMax M2.5 Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen.
|
||||
- MiMo V2 Pro Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen.
|
||||
- MiMo V2 Omni Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen.
|
||||
- Qwen3.6 Plus Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen.
|
||||
- Nemotron 3 Super Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen.
|
||||
- Big Pickle er en stealth-model, som er gratis på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen.
|
||||
|
||||
<a href={email}>Kontakt os</a> hvis du har spørgsmål.
|
||||
<a href={email}>Kontakt os</a>, hvis du har spørgsmål.
|
||||
|
||||
---
|
||||
|
||||
### Automatisk opfyldning
|
||||
### Automatisk genopfyldning
|
||||
|
||||
Hvis din saldo går under $5, vil Zen automatisk laste $20 ind på nytt.
|
||||
Hvis din saldo kommer under $5, genopfylder Zen automatisk med $20.
|
||||
|
||||
Du kan ændre beløbet for automatisk påfyldning. Du kan også deaktivere automatisk genindlæsning helt.
|
||||
Du kan ændre beløbet for automatisk genopfyldning. Du kan også deaktivere automatisk genopfyldning helt.
|
||||
|
||||
---
|
||||
|
||||
### Månedlige grænser
|
||||
|
||||
Du kan også angive en månedlig brugsgrænse for hele arbejdsområdet og for hvert
|
||||
Du kan også sætte en månedlig forbrugsgrænse for hele arbejdsområdet og for hvert
|
||||
medlem af dit team.
|
||||
|
||||
Lad os for eksempel sige, at du sætter en månedlig brugsgrænse til $20, Zen vil ikke bruge
|
||||
mere end $20 på en måned. Men hvis du har automatisk genindlæsning aktiveret, kan Zen ende med
|
||||
at opkræve dig mere end $20, hvis din saldo går under $5.
|
||||
Lad os for eksempel sige, at du sætter en månedlig forbrugsgrænse til $20. Zen vil ikke bruge
|
||||
mere end $20 på en måned. Men hvis du har automatisk genopfyldning aktiveret, kan Zen ende med
|
||||
at opkræve dig mere end $20, hvis din saldo kommer under $5.
|
||||
|
||||
---
|
||||
|
||||
@@ -198,43 +202,48 @@ at opkræve dig mere end $20, hvis din saldo går under $5.
|
||||
|
||||
| Model | Udfasningsdato |
|
||||
| ---------------- | -------------- |
|
||||
| Qwen3-koder 480B | 6. feb. 2026 |
|
||||
| Kimi K2 Tenker | 6. marts 2026 |
|
||||
| Kimi K2 | 6. marts 2026 |
|
||||
| MiniMax M2.1 | 15. marts 2026 |
|
||||
| GLM 4.7 | 15. marts 2026 |
|
||||
| GLM 4.6 | 15. marts 2026 |
|
||||
| MiniMax M2.1 | March 15, 2026 |
|
||||
| GLM 4.7 | March 15, 2026 |
|
||||
| GLM 4.6 | March 15, 2026 |
|
||||
| Gemini 3 Pro | March 9, 2026 |
|
||||
| Kimi K2 Thinking | March 6, 2026 |
|
||||
| Kimi K2 | March 6, 2026 |
|
||||
| Qwen3 Coder 480B | Feb 6, 2026 |
|
||||
|
||||
---
|
||||
|
||||
## Privatliv
|
||||
|
||||
Alle vores modeller er hostet i USA. Vores udbydere følger en nul-opbevaringspolitik og bruger ikke dine data til modeltræning, med følgende undtagelser:
|
||||
Alle vores modeller hostes i US. Vores udbydere følger en nul-opbevaringspolitik og bruger ikke dine data til modeltræning, med følgende undtagelser:
|
||||
|
||||
- Stor sylteagurk: I løbet af gratisperioden kan indsamlede data bruges til at forbedre modellen.
|
||||
- MiniMax M2.5 Gratis: I løbet af gratisperioden kan indsamlede data bruges til at forbedre modellen.
|
||||
- OpenAI API'er: Anmodninger opbevares i 30 dage i overensstemmelse med [OpenAIs datapolitikker](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic API'er: Anmodninger opbevares i 30 dage i overensstemmelse med [Anthropics datapolitikker](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- Big Pickle: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
|
||||
- MiniMax M2.5 Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
|
||||
- MiMo V2 Pro Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
|
||||
- MiMo V2 Omni Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
|
||||
- Qwen3.6 Plus Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
|
||||
- Nemotron 3 Super Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
|
||||
- OpenAI APIs: Anmodninger opbevares i 30 dage i overensstemmelse med [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: Anmodninger opbevares i 30 dage i overensstemmelse med [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## For teams
|
||||
|
||||
Zen fungerer også fremragende for teams. Du kan invitere holdkammerater, tildele roller, kuratere
|
||||
de modeller dit hold bruger, og mere.
|
||||
Zen fungerer også rigtig godt for teams. Du kan invitere teammedlemmer, tildele roller, kuratere
|
||||
de modeller dit team bruger, og mere.
|
||||
|
||||
:::note
|
||||
Arbejdsområder er i øjeblikket gratis for teams som en del af betaversionen.
|
||||
Arbejdsområder er i øjeblikket gratis for teams som en del af betaen.
|
||||
:::
|
||||
|
||||
At administrere dit arbejdsområde er i øjeblikket gratis for teams som en del af betaversionen. Vi vil
|
||||
dele mere information om priserne snart.
|
||||
Det er i øjeblikket gratis for teams at administrere deres arbejdsområde som en del af betaen. Vi deler
|
||||
flere detaljer om priserne snart.
|
||||
|
||||
---
|
||||
|
||||
### Roller
|
||||
|
||||
Du kan invitere holdkammerater til dit arbejdsområde og tildele roller:
|
||||
Du kan invitere teammedlemmer til dit arbejdsområde og tildele roller:
|
||||
|
||||
- **Admin**: Administrer modeller, medlemmer, API-nøgler og fakturering
|
||||
- **Medlem**: Administrer kun sine egne API-nøgler
|
||||
@@ -245,29 +254,29 @@ Administratorer kan også sætte månedlige forbrugsgrænser for hvert medlem fo
|
||||
|
||||
### Modeladgang
|
||||
|
||||
Administratorer kan aktivere eller deaktivere specifikke modeller for arbejdsområdet. Anmodninger til en deaktiveret model vil returnere en fejl.
|
||||
Administratorer kan aktivere eller deaktivere bestemte modeller for arbejdsområdet. Anmodninger til en deaktiveret model returnerer en fejl.
|
||||
|
||||
Dette er nyttigt for tilfælde, hvor du ønsker at deaktivere brugen af en model, som
|
||||
samler ind data.
|
||||
Dette er nyttigt i tilfælde, hvor du vil deaktivere brugen af en model, der
|
||||
indsamler data.
|
||||
|
||||
---
|
||||
|
||||
### Medbring din egen nøgle
|
||||
### Brug din egen nøgle
|
||||
|
||||
Du kan bruge dine egne OpenAI- eller Anthropic API-nøgler, mens du stadig har adgang til andre modeller i Zen.
|
||||
|
||||
Når du bruger dine egne nøgler, faktureres tokens direkte af udbyderen, ikke af Zen.
|
||||
Når du bruger dine egne nøgler, bliver tokens faktureret direkte af udbyderen, ikke af Zen.
|
||||
|
||||
For eksempel kan din organisation allerede have en nøgle for OpenAI eller Anthropic
|
||||
og du vil bruge det i stedet for det Zen giver.
|
||||
For eksempel kan din organisation allerede have en nøgle til OpenAI eller Anthropic,
|
||||
og du vil bruge den i stedet for den, som Zen stiller til rådighed.
|
||||
|
||||
---
|
||||
|
||||
## Mål
|
||||
|
||||
Vi oprettede OpenCode Zen for at:
|
||||
Vi skabte OpenCode Zen for at:
|
||||
|
||||
1. **Benchmarke** de bedste modeller/udbydere for kodningsagenter.
|
||||
2. Have adgang til mulighederne for **højeste kvalitet** og ikke nedgradere ydeevnen eller rute til billigere udbydere.
|
||||
3. Give videre eventuelle **prisfald** ved at sælge til kostpris; så den eneste mark-up er for at dække vores behandlingsgebyrer.
|
||||
4. Have **ingen fastlåsning** ved at lade dig bruge den med en hvilken som helst anden kodeagent. Og lade dig altid bruge en hvilken som helst anden udbyder med OpenCode også.
|
||||
1. **Benchmarke** de bedste modeller/udbydere til kodeagenter.
|
||||
2. Give adgang til muligheder af **højeste kvalitet** uden at nedgradere ydeevnen eller route til billigere udbydere.
|
||||
3. Videreføre eventuelle **prisfald** ved at sælge til kostpris; så den eneste markup er for at dække vores behandlingsgebyrer.
|
||||
4. Have **ingen lock-in** ved at lade dig bruge det med enhver anden kodeagent. Og altid lade dig bruge enhver anden udbyder med OpenCode også.
|
||||
|
||||
@@ -7,98 +7,96 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen ist eine vom OpenCode-Team getestete und verifizierte Modellliste.
|
||||
OpenCode Zen ist eine Liste von getesteten und verifizierten Modellen, die vom OpenCode-Team bereitgestellt wird.
|
||||
|
||||
:::note
|
||||
OpenCode Zen befindet sich aktuell in der Beta.
|
||||
OpenCode Zen befindet sich derzeit in der Beta.
|
||||
:::
|
||||
|
||||
Zen funktioniert wie jeder andere Provider in OpenCode.
|
||||
Du meldest dich bei OpenCode Zen an, holst dir deinen API-Key und nutzt ihn optional.
|
||||
Zen funktioniert wie jeder andere Provider in OpenCode. Du meldest dich bei OpenCode Zen an und erhältst deinen API-Key. Es ist **vollständig optional** und du musst es nicht verwenden, um OpenCode zu nutzen.
|
||||
|
||||
---
|
||||
|
||||
## Hintergrund
|
||||
|
||||
Es gibt sehr viele Modelle, aber nur ein Teil davon eignet sich wirklich gut als Coding-Agent.
|
||||
Ausserdem konfigurieren Provider Modelle sehr unterschiedlich, was die Qualitaet stark beeinflusst.
|
||||
Es gibt eine große Anzahl an Modellen, aber nur wenige davon eignen sich gut als Coding-Agenten. Außerdem sind die meisten Provider sehr unterschiedlich konfiguriert, sodass du sehr unterschiedliche Leistung und Qualität erhältst.
|
||||
|
||||
:::tip
|
||||
Wir haben eine Auswahl aus Modellen und Providern getestet, die gut mit OpenCode funktionieren.
|
||||
Wir haben eine ausgewählte Gruppe von Modellen und Providern getestet, die gut mit OpenCode funktionieren.
|
||||
:::
|
||||
|
||||
Wenn du Modelle ueber Gateways wie OpenRouter nutzt, ist oft unklar, ob du die beste Ausfuehrung eines Modells bekommst.
|
||||
Wenn du also ein Modell über etwas wie OpenRouter nutzt, kannst du nie sicher sein, ob du die beste Version des gewünschten Modells bekommst.
|
||||
|
||||
Um das zu verbessern, haben wir:
|
||||
Um das zu beheben, haben wir ein paar Dinge getan:
|
||||
|
||||
1. Eine Auswahl an Modellen getestet und mit den Teams ueber optimale Laufzeit-Setups gesprochen
|
||||
2. Mit Providern zusammengearbeitet, damit diese Modelle korrekt ausgeliefert werden
|
||||
3. Modell/Provider-Kombinationen gebenchmarkt und eine empfehlenswerte Liste erstellt
|
||||
1. Wir haben eine ausgewählte Gruppe von Modellen getestet und mit ihren Teams darüber gesprochen, wie man sie am besten betreibt.
|
||||
2. Anschließend haben wir mit einigen Providern zusammengearbeitet, um sicherzustellen, dass diese korrekt ausgeliefert werden.
|
||||
3. Schließlich haben wir die Kombination aus Modell und Provider benchmarked und eine Liste erstellt, die wir guten Gewissens empfehlen können.
|
||||
|
||||
OpenCode Zen ist ein AI-Gateway, das dir Zugriff auf genau diese Modelle gibt.
|
||||
OpenCode Zen ist ein AI-Gateway, das dir Zugriff auf diese Modelle gibt.
|
||||
|
||||
---
|
||||
|
||||
## Funktionsweise
|
||||
## So funktioniert es
|
||||
|
||||
OpenCode Zen funktioniert wie jeder andere Provider in OpenCode.
|
||||
|
||||
1. Melde dich bei **<a href={console}>OpenCode Zen</a>** an, hinterlege Zahlungsdaten und kopiere deinen API-Key.
|
||||
2. Fuehre in der TUI `/connect` aus, waehle OpenCode Zen und fuege den API-Key ein.
|
||||
3. Starte `/models` in der TUI, um empfohlene Modelle zu sehen.
|
||||
1. Du meldest dich bei **<a href={console}>OpenCode Zen</a>** an, hinterlegst deine Zahlungsdaten und kopierst deinen API-Key.
|
||||
2. Du führst den Befehl `/connect` in der TUI aus, wählst OpenCode Zen aus und fügst deinen API-Key ein.
|
||||
3. Führe `/models` in der TUI aus, um die Liste der Modelle zu sehen, die wir empfehlen.
|
||||
|
||||
Abgerechnet wird pro Anfrage, Guthaben kannst du jederzeit aufladen.
|
||||
Dir wird pro Anfrage berechnet, und du kannst deinem Konto Guthaben hinzufügen.
|
||||
|
||||
---
|
||||
|
||||
## Endpunkte
|
||||
|
||||
Du kannst unsere Modelle auch ueber die folgenden API-Endpunkte aufrufen.
|
||||
Du kannst auch über die folgenden API-Endpunkte auf unsere Modelle zugreifen.
|
||||
|
||||
| Model | Model ID | Endpoint | AI SDK Package |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Model | Model ID | Endpoint | AI SDK Package |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
Die [Model-ID](/docs/config/#models) in deiner OpenCode-Konfiguration hat das Format `opencode/<model-id>`.
|
||||
Fuer GPT 5.2 Codex verwendest du zum Beispiel `opencode/gpt-5.2-codex`.
|
||||
Die [Model-ID](/docs/config/#models) in deiner OpenCode-Konfiguration verwendet das Format `opencode/<model-id>`. Für GPT 5.3 Codex würdest du zum Beispiel `opencode/gpt-5.3-codex` in deiner Konfiguration verwenden.
|
||||
|
||||
---
|
||||
|
||||
### Models
|
||||
|
||||
Die komplette Liste verfuegbarer Modelle inklusive Metadaten findest du unter:
|
||||
Du kannst die vollständige Liste der verfügbaren Modelle und ihrer Metadaten hier abrufen:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -106,160 +104,161 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
---
|
||||
|
||||
## Preisgestaltung
|
||||
## Preise
|
||||
|
||||
Wir nutzen ein Pay-as-you-go-Modell.
|
||||
Unten siehst du die Preise **pro 1 Mio. Tokens**.
|
||||
Wir unterstützen ein Pay-as-you-go-Modell. Unten findest du die Preise **pro 1M Tokens**.
|
||||
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------ | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
In deinem Verlauf siehst du eventuell _Claude Haiku 3.5_.
|
||||
Das ist ein [guenstiges Modell](/docs/config/#models), das fuer Session-Titel verwendet wird.
|
||||
Möglicherweise siehst du _Claude Haiku 3.5_ in deinem Nutzungsverlauf. Das ist ein [kostengünstiges Modell](/docs/config/#models), das verwendet wird, um die Titel deiner Sessions zu generieren.
|
||||
|
||||
:::note
|
||||
Kreditkartengebuehren geben wir zum Selbstkostenpreis weiter (4,4 % + $0.30 pro Transaktion), ohne Aufschlag.
|
||||
Kreditkartengebühren werden zum Selbstkostenpreis weitergegeben (4,4 % + $0.30 pro Transaktion); wir berechnen darüber hinaus nichts.
|
||||
:::
|
||||
|
||||
Die kostenlosen Modelle:
|
||||
|
||||
- MiniMax M2.5 Free ist fuer begrenzte Zeit auf OpenCode verfuegbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern.
|
||||
- Big Pickle ist ein Stealth-Modell, das fuer begrenzte Zeit kostenlos auf OpenCode verfuegbar ist. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern.
|
||||
- MiniMax M2.5 Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern.
|
||||
- MiMo V2 Pro Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern.
|
||||
- MiMo V2 Omni Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern.
|
||||
- Qwen3.6 Plus Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern.
|
||||
- Nemotron 3 Super Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern.
|
||||
- Big Pickle ist ein Stealth-Modell, das für begrenzte Zeit kostenlos auf OpenCode verfügbar ist. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern.
|
||||
|
||||
Wenn du Fragen hast, <a href={email}>kontaktiere uns</a>.
|
||||
<a href={email}>Kontaktiere uns</a>, wenn du Fragen hast.
|
||||
|
||||
---
|
||||
|
||||
### Auto-reload
|
||||
|
||||
Wenn dein Guthaben unter $5 faellt, laedt Zen automatisch $20 nach.
|
||||
Wenn dein Guthaben unter $5 fällt, lädt Zen automatisch $20 nach.
|
||||
|
||||
Du kannst den Betrag anpassen oder Auto-Reload komplett deaktivieren.
|
||||
Du kannst den Auto-reload-Betrag ändern oder Auto-reload vollständig deaktivieren.
|
||||
|
||||
---
|
||||
|
||||
### Monatslimits
|
||||
### Monatliche Limits
|
||||
|
||||
Du kannst monatliche Limits fuer den gesamten Workspace und pro Teammitglied festlegen.
|
||||
Du kannst auch ein monatliches Nutzungslimit für den gesamten Workspace und für jedes Mitglied deines Teams festlegen.
|
||||
|
||||
Wenn du z. B. ein Monatslimit von $20 setzt, verbraucht Zen nicht mehr als $20 pro Monat.
|
||||
Mit aktiviertem Auto-Reload kann die Abrechnung dennoch darueber liegen, falls das Guthaben unter $5 sinkt.
|
||||
Angenommen, du setzt ein monatliches Nutzungslimit von $20, dann wird Zen in einem Monat nicht mehr als $20 verwenden. Wenn du jedoch Auto-reload aktiviert hast, kann es sein, dass Zen dir mehr als $20 berechnet, wenn dein Guthaben unter $5 fällt.
|
||||
|
||||
---
|
||||
|
||||
### Veraltete Modelle
|
||||
|
||||
| Model | Datum der Abschaltung |
|
||||
| ---------------- | --------------------- |
|
||||
| Qwen3 Coder 480B | 6. Feb. 2026 |
|
||||
| Kimi K2 Thinking | 6. Maerz 2026 |
|
||||
| Kimi K2 | 6. Maerz 2026 |
|
||||
| MiniMax M2.1 | 15. Maerz 2026 |
|
||||
| GLM 4.7 | 15. Maerz 2026 |
|
||||
| GLM 4.6 | 15. Maerz 2026 |
|
||||
| Model | Datum der Abschaltung |
|
||||
| ---------------- | --------------------- |
|
||||
| MiniMax M2.1 | 15. März 2026 |
|
||||
| GLM 4.7 | 15. März 2026 |
|
||||
| GLM 4.6 | 15. März 2026 |
|
||||
| Gemini 3 Pro | 9. März 2026 |
|
||||
| Kimi K2 Thinking | 6. März 2026 |
|
||||
| Kimi K2 | 6. März 2026 |
|
||||
| Qwen3 Coder 480B | 6. Feb. 2026 |
|
||||
|
||||
---
|
||||
|
||||
## Datenschutz
|
||||
|
||||
Alle Modelle werden in den USA gehostet.
|
||||
Unsere Provider arbeiten grundsaetzlich mit Zero-Retention und nutzen deine Daten nicht zum Training, mit folgenden Ausnahmen:
|
||||
Alle unsere Modelle werden in den USA gehostet. Unsere Provider folgen einer Zero-Retention-Richtlinie und verwenden deine Daten nicht zum Trainieren von Modellen, mit den folgenden Ausnahmen:
|
||||
|
||||
- Big Pickle: Waehrend der kostenlosen Phase koennen gesammelte Daten zur Verbesserung des Modells genutzt werden.
|
||||
- MiniMax M2.5 Free: Waehrend der kostenlosen Phase koennen gesammelte Daten zur Verbesserung des Modells genutzt werden.
|
||||
- OpenAI APIs: Anfragen werden fuer 30 Tage gemaess [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) aufbewahrt.
|
||||
- Anthropic APIs: Anfragen werden fuer 30 Tage gemaess [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) aufbewahrt.
|
||||
- Big Pickle: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
|
||||
- MiniMax M2.5 Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
|
||||
- MiMo V2 Pro Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
|
||||
- MiMo V2 Omni Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
|
||||
- Qwen3.6 Plus Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
|
||||
- Nemotron 3 Super Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
|
||||
- OpenAI APIs: Anfragen werden in Übereinstimmung mit [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 30 Tage lang gespeichert.
|
||||
- Anthropic APIs: Anfragen werden in Übereinstimmung mit [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 30 Tage lang gespeichert.
|
||||
|
||||
---
|
||||
|
||||
## Für Teams
|
||||
|
||||
Zen eignet sich auch gut fuer Teams.
|
||||
Du kannst Mitglieder einladen, Rollen vergeben und den Modellzugriff fuer dein Team steuern.
|
||||
Zen funktioniert auch hervorragend für Teams. Du kannst Teammitglieder einladen, Rollen zuweisen, die Modelle kuratieren, die dein Team verwendet, und mehr.
|
||||
|
||||
:::note
|
||||
Workspaces sind fuer Teams waehrend der Beta derzeit kostenlos.
|
||||
Workspaces sind für Teams derzeit als Teil der Beta kostenlos.
|
||||
:::
|
||||
|
||||
Workspace-Verwaltung ist in der Beta kostenlos.
|
||||
Details zur spaeteren Preisgestaltung folgen.
|
||||
Die Verwaltung deines Workspace ist für Teams derzeit als Teil der Beta kostenlos. Wir werden bald mehr Details zur Preisgestaltung teilen.
|
||||
|
||||
---
|
||||
|
||||
### Rollen
|
||||
|
||||
Du kannst Teammitglieder einladen und Rollen vergeben:
|
||||
Du kannst Teammitglieder in deinen Workspace einladen und Rollen zuweisen:
|
||||
|
||||
- **Admin**: Verwalten Modelle, Mitglieder, API-Keys und Abrechnung
|
||||
- **Member**: Verwalten nur eigene API-Keys
|
||||
- **Admin**: Verwaltet Modelle, Mitglieder, API-Keys und Abrechnung
|
||||
- **Member**: Verwaltet nur die eigenen API-Keys
|
||||
|
||||
Admins koennen zusaetzlich monatliche Ausgabenlimits pro Mitglied setzen.
|
||||
Admins können außerdem monatliche Ausgabenlimits für jedes Mitglied festlegen, um die Kosten unter Kontrolle zu halten.
|
||||
|
||||
---
|
||||
|
||||
### Modellzugriff
|
||||
|
||||
Admins koennen einzelne Modelle fuer den Workspace aktivieren oder deaktivieren.
|
||||
Anfragen an deaktivierte Modelle liefern einen Fehler.
|
||||
Admins können bestimmte Modelle für den Workspace aktivieren oder deaktivieren. Anfragen an ein deaktiviertes Modell geben einen Fehler zurück.
|
||||
|
||||
Das ist hilfreich, wenn bestimmte datenverarbeitende Modelle ausgeschlossen werden sollen.
|
||||
Das ist nützlich, wenn du die Verwendung eines Modells deaktivieren möchtest, das Daten sammelt.
|
||||
|
||||
---
|
||||
|
||||
### Eigenen Key mitbringen
|
||||
|
||||
Du kannst eigene OpenAI- oder Anthropic-API-Keys verwenden und trotzdem andere Zen-Modelle nutzen.
|
||||
Du kannst deine eigenen OpenAI- oder Anthropic-API-Keys verwenden und trotzdem auf andere Modelle in Zen zugreifen.
|
||||
|
||||
Bei eigenen Keys erfolgt die Token-Abrechnung direkt ueber den Provider, nicht ueber Zen.
|
||||
Wenn du deine eigenen Keys verwendest, werden Tokens direkt vom Provider abgerechnet, nicht von Zen.
|
||||
|
||||
Zum Beispiel hat deine Organisation vielleicht bereits einen Key fuer OpenAI oder Anthropic und du moechtest diesen anstelle des von Zen bereitgestellten nutzen.
|
||||
Zum Beispiel hat deine Organisation möglicherweise bereits einen Key für OpenAI oder Anthropic und du möchtest diesen anstelle des von Zen bereitgestellten verwenden.
|
||||
|
||||
---
|
||||
|
||||
## Ziele
|
||||
|
||||
Wir haben OpenCode Zen entwickelt, um:
|
||||
Wir haben OpenCode Zen geschaffen, um:
|
||||
|
||||
1. Die besten Modell/Provider-Kombinationen fuer Coding-Agenten zu **benchmarken**
|
||||
2. Stets **hohe Qualitaet** ohne Downgrades oder versteckte Umleitungen auf billigere Provider zu liefern
|
||||
3. **Preissenkungen** zum Selbstkostenpreis weiterzugeben, mit Aufschlag nur fuer Zahlungsgebuehren
|
||||
4. **Kein Lock-in** zu erzwingen, damit du Zen mit anderen Coding-Agents und OpenCode weiter mit anderen Providern nutzen kannst
|
||||
1. die besten Modelle/Provider für Coding-Agenten zu **benchmarken**.
|
||||
2. Zugriff auf die **höchste Qualität** zu haben und die Leistung nicht zu verschlechtern oder an günstigere Provider weiterzuleiten.
|
||||
3. **Preissenkungen** weiterzugeben, indem wir zum Selbstkostenpreis verkaufen; der einzige Aufschlag dient also dazu, unsere Bearbeitungsgebühren abzudecken.
|
||||
4. **keinen Lock-in** zu haben, indem du es mit jedem anderen Coding-Agenten verwenden kannst. Und dir außerdem immer zu erlauben, mit OpenCode auch jeden anderen Provider zu verwenden.
|
||||
|
||||
@@ -7,39 +7,41 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen es una lista de modelos probados y verificados proporcionada por el equipo OpenCode.
|
||||
OpenCode Zen es una lista de modelos probados y verificados proporcionada por el equipo de OpenCode.
|
||||
|
||||
:::note
|
||||
OpenCode Zen se encuentra actualmente en versión beta.
|
||||
OpenCode Zen está actualmente en beta.
|
||||
:::
|
||||
|
||||
Zen funciona como cualquier otro proveedor en OpenCode. Inicias sesion en OpenCode Zen y obtienes
|
||||
tu API key. Es **completamente opcional** y no necesitas usarlo para usar OpenCode.
|
||||
Zen funciona como cualquier otro proveedor en OpenCode. Inicias sesión en OpenCode Zen y obtienes
|
||||
tu API key. Es **completamente opcional** y no necesitas usarlo para usar
|
||||
OpenCode.
|
||||
|
||||
---
|
||||
|
||||
## Contexto
|
||||
|
||||
Hay una gran cantidad de modelos, pero solo unos pocos funcionan bien como agentes de coding.
|
||||
Ademas, la mayoria de proveedores se configura de forma diferente, asi que el rendimiento y la calidad varian mucho.
|
||||
Existe una gran cantidad de modelos, pero solo unos pocos de
|
||||
estos modelos funcionan bien como agentes de coding. Además, la mayoría de los proveedores están
|
||||
configurados de forma muy diferente; por eso obtienes un rendimiento y una calidad muy distintos.
|
||||
|
||||
:::tip
|
||||
Probamos un grupo selecto de modelos y proveedores que funcionan bien con OpenCode.
|
||||
:::
|
||||
|
||||
Si usas un modelo a traves de algo como OpenRouter, nunca puedes estar
|
||||
seguro de que recibes la mejor version del modelo que quieres.
|
||||
Así que, si estás usando un modelo a través de algo como OpenRouter, nunca puedes estar
|
||||
seguro de si estás obteniendo la mejor versión del modelo que quieres.
|
||||
|
||||
Para solucionar este problema, hicimos un par de cosas:
|
||||
Para solucionar esto, hicimos un par de cosas:
|
||||
|
||||
1. Probamos un grupo selecto de modelos y hablamos con sus equipos sobre cómo
|
||||
mejor ejecutarlos.
|
||||
2. Luego trabajamos con algunos proveedores para asegurarnos de que estuvieran siendo atendidos.
|
||||
ejecutarlos de la mejor manera.
|
||||
2. Luego trabajamos con algunos proveedores para asegurarnos de que se estuvieran sirviendo
|
||||
correctamente.
|
||||
3. Finalmente, comparamos la combinación modelo/proveedor y llegamos
|
||||
con una lista que nos sentimos bien recomendando.
|
||||
3. Por último, evaluamos la combinación de modelo/proveedor y elaboramos
|
||||
una lista que creemos que vale la pena recomendar.
|
||||
|
||||
OpenCode Zen es una puerta de enlace de IA que le brinda acceso a estos modelos.
|
||||
OpenCode Zen es un gateway de AI que te da acceso a estos modelos.
|
||||
|
||||
---
|
||||
|
||||
@@ -47,64 +49,65 @@ OpenCode Zen es una puerta de enlace de IA que le brinda acceso a estos modelos.
|
||||
|
||||
OpenCode Zen funciona como cualquier otro proveedor en OpenCode.
|
||||
|
||||
1. Inicias sesión en **<a href={console}>OpenCode Zen</a>**, agregas tu facturación
|
||||
detalles y copie su clave API.
|
||||
2. Ejecuta el comando `/connect` en TUI, selecciona OpenCode Zen y pega tu clave API.
|
||||
3. Ejecute `/models` en TUI para ver la lista de modelos que recomendamos.
|
||||
1. Inicias sesión en **<a href={console}>OpenCode Zen</a>**, agregas tus datos de facturación
|
||||
y copias tu API key.
|
||||
2. Ejecutas el comando `/connect` en la TUI, seleccionas OpenCode Zen y pegas tu API key.
|
||||
3. Ejecuta `/models` en la TUI para ver la lista de modelos que recomendamos.
|
||||
|
||||
Se le cobra por solicitud y puede agregar créditos a su cuenta.
|
||||
Se te cobra por solicitud y puedes agregar créditos a tu cuenta.
|
||||
|
||||
---
|
||||
|
||||
## Puntos finales
|
||||
## Endpoints
|
||||
|
||||
También puede acceder a nuestros modelos a través de los siguientes puntos finales API.
|
||||
También puedes acceder a nuestros modelos a través de los siguientes endpoints de API.
|
||||
|
||||
| Modelo | Model ID | Endpoint | AI SDK package |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Modelo | Model ID | Endpoint | AI SDK Package |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
El [model ID](/docs/config/#models) en tu configuracion de OpenCode
|
||||
usa el formato `opencode/<model-id>`. Por ejemplo, para GPT 5.2 Codex, debes
|
||||
usar `opencode/gpt-5.2-codex` en tu configuracion.
|
||||
El [identificador del modelo](/docs/config/#models) en tu configuración de OpenCode
|
||||
usa el formato `opencode/<model-id>`. Por ejemplo, para GPT 5.3 Codex, usarías
|
||||
`opencode/gpt-5.3-codex` en tu configuración.
|
||||
|
||||
---
|
||||
|
||||
### Modelos
|
||||
|
||||
Puede obtener la lista completa de modelos disponibles y sus metadatos en:
|
||||
Puedes obtener la lista completa de modelos disponibles y sus metadatos desde:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -114,150 +117,158 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## Precios
|
||||
|
||||
Apoyamos un modelo de pago por uso. A continuación se muestran los precios **por 1 millón de tokens**.
|
||||
Admitimos un modelo de pago por uso. A continuación se muestran los precios **por 1M tokens**.
|
||||
|
||||
| Modelo | Entrada | Salida | Lectura en caché | Escritura en caché |
|
||||
| ------------------------------------ | ------- | ------ | ---------------- | ------------------ |
|
||||
| Big Pickle | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 Free | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 | $0,30 | $1,20 | $0,06 | - |
|
||||
| MiniMax M2.1 | $0,30 | $1,20 | $0,10 | - |
|
||||
| GLM 5 | $1,00 | $3,20 | $0,20 | - |
|
||||
| GLM 4.7 | $0,60 | $2.20 | $0,10 | - |
|
||||
| GLM 4.6 | $0,60 | $2.20 | $0,10 | - |
|
||||
| Kimi K2.5 | $0,60 | $3.00 | $0,08 | - |
|
||||
| Kimi K2 Thinking | $0,40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0,40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0,45 | $1,50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200.000 tokens) | $5.00 | $25.00 | $0,50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37,50 | $1.00 | $12,50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0,50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1,50 | $18,75 |
|
||||
| Claude Sonnet 4.6 (≤ 200.000 tokens) | $3.00 | $15.00 | $0,30 | $3,75 |
|
||||
| Claude Sonnet 4.6 (> 200.000 tokens) | $6.00 | $22,50 | $0,60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200.000 tokens) | $3.00 | $15.00 | $0,30 | $3,75 |
|
||||
| Claude Sonnet 4.5 (> 200.000 tokens) | $6.00 | $22,50 | $0,60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0,30 | $3,75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22,50 | $0,60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0,10 | $1,25 |
|
||||
| Claude Haiku 3.5 | $0,80 | $4.00 | $0,08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0,20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0,40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0,20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0,40 | - |
|
||||
| Gemini 3 Flash | $0,50 | $3.00 | $0,05 | - |
|
||||
| GPT 5.4 | $2,50 | $15,00 | $0,25 | - |
|
||||
| GPT 5.3 Codex | $1,75 | $14.00 | $0,175 | - |
|
||||
| GPT 5.2 | $1,75 | $14.00 | $0,175 | - |
|
||||
| GPT 5.2 Codex | $1,75 | $14.00 | $0,175 | - |
|
||||
| GPT 5.1 | $1.07 | $8,50 | $0,107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8,50 | $0,107 | - |
|
||||
| GPT 5.1 Codex Max | $1,25 | $10.00 | $0,125 | - |
|
||||
| GPT 5.1 Codex Mini | $0,25 | $2.00 | $0,025 | - |
|
||||
| GPT 5 | $1.07 | $8,50 | $0,107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8,50 | $0,107 | - |
|
||||
| GPT 5 Nano | Gratis | Gratis | Gratis | - |
|
||||
| Modelo | Entrada | Salida | Lectura en caché | Escritura en caché |
|
||||
| --------------------------------- | ------- | ------- | ---------------- | ------------------ |
|
||||
| Big Pickle | Gratis | Gratis | Gratis | - |
|
||||
| MiMo V2 Pro Free | Gratis | Gratis | Gratis | - |
|
||||
| MiMo V2 Omni Free | Gratis | Gratis | Gratis | - |
|
||||
| Qwen3.6 Plus Free | Gratis | Gratis | Gratis | - |
|
||||
| Nemotron 3 Super Free | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 Free | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Gratis | Gratis | Gratis | - |
|
||||
|
||||
Es posible que notes _Claude Haiku 3.5_ en tu historial de uso. Este es un [modelo de bajo costo](/docs/config/#models) que se utiliza para generar los títulos de tus sesiones.
|
||||
Puede que notes _Claude Haiku 3.5_ en tu historial de uso. Este es un [modelo de bajo costo](/docs/config/#models) que se usa para generar los títulos de tus sesiones.
|
||||
|
||||
:::note
|
||||
Las tarifas de las tarjetas de crédito se trasladan al costo (4,4% + 0,30 dólares por transacción); No cobramos nada más allá de eso.
|
||||
Las comisiones de tarjeta de crédito se trasladan al costo (4.4% + $0.30 por transacción); no cobramos nada más allá de eso.
|
||||
:::
|
||||
|
||||
Los modelos gratuitos:
|
||||
|
||||
- MiniMax M2.5 Free está disponible en OpenCode por tiempo limitado. El equipo está aprovechando este tiempo para recopilar comentarios y mejorar el modelo.
|
||||
- Big Pickle es un modelo sigiloso gratuito en OpenCode por tiempo limitado. El equipo está aprovechando este tiempo para recopilar comentarios y mejorar el modelo.
|
||||
- MiniMax M2.5 Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo.
|
||||
- MiMo V2 Pro Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo.
|
||||
- MiMo V2 Omni Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo.
|
||||
- Qwen3.6 Plus Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo.
|
||||
- Nemotron 3 Super Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo.
|
||||
- Big Pickle es un modelo stealth que es gratuito en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo.
|
||||
|
||||
<a href={email}>Contáctenos</a> si tiene alguna pregunta.
|
||||
<a href={email}>Contáctanos</a> si tienes alguna pregunta.
|
||||
|
||||
---
|
||||
|
||||
### Recarga automática
|
||||
|
||||
Si su saldo es inferior a $5, Zen recargará automáticamente $20.
|
||||
Si tu saldo baja de $5, Zen recargará automáticamente $20.
|
||||
|
||||
Puedes cambiar el monto de la recarga automática. También puedes desactivar la recarga automática por completo.
|
||||
Puedes cambiar el monto de la recarga automática. También puedes desactivar por completo la recarga automática.
|
||||
|
||||
---
|
||||
|
||||
### Límites mensuales
|
||||
|
||||
También puede establecer un límite de uso mensual para todo el espacio de trabajo y para cada
|
||||
También puedes establecer un límite de uso mensual para todo el workspace y para cada
|
||||
miembro de tu equipo.
|
||||
|
||||
Por ejemplo, digamos que establece un límite de uso mensual de $20, Zen no usará
|
||||
Por ejemplo, supongamos que estableces un límite de uso mensual de $20. Zen no usará
|
||||
más de $20 en un mes. Pero si tienes habilitada la recarga automática, Zen podría terminar
|
||||
cobrarle más de $20 si su saldo es inferior a $5.
|
||||
cobrándote más de $20 si tu saldo baja de $5.
|
||||
|
||||
---
|
||||
|
||||
### Modelos obsoletos
|
||||
|
||||
| Modelo | Fecha de retiro |
|
||||
| ---------------- | ------------------- |
|
||||
| Qwen3 Coder 480B | 6 de feb. de 2026 |
|
||||
| Kimi K2 Thinking | 6 de marzo de 2026 |
|
||||
| Kimi K2 | 6 de marzo de 2026 |
|
||||
| MiniMax M2.1 | 15 de marzo de 2026 |
|
||||
| GLM 4.7 | 15 de marzo de 2026 |
|
||||
| GLM 4.6 | 15 de marzo de 2026 |
|
||||
| Modelo | Fecha de retirada |
|
||||
| ---------------- | ----------------- |
|
||||
| MiniMax M2.1 | 15 de marzo de 2026 |
|
||||
| GLM 4.7 | 15 de marzo de 2026 |
|
||||
| GLM 4.6 | 15 de marzo de 2026 |
|
||||
| Gemini 3 Pro | 9 de marzo de 2026 |
|
||||
| Kimi K2 Thinking | 6 de marzo de 2026 |
|
||||
| Kimi K2 | 6 de marzo de 2026 |
|
||||
| Qwen3 Coder 480B | 6 de feb. de 2026 |
|
||||
|
||||
---
|
||||
|
||||
## Privacidad
|
||||
|
||||
Todos nuestros modelos están alojados en los EE. UU. Nuestros proveedores siguen una política de retención cero y no utilizan sus datos para la capacitación de modelos, con las siguientes excepciones:
|
||||
Todos nuestros modelos están alojados en EE. UU. Nuestros proveedores siguen una política de zero-retention y no usan tus datos para el entrenamiento de modelos, con las siguientes excepciones:
|
||||
|
||||
- Big Pickle: Durante su periodo gratuito, los datos recopilados podrán utilizarse para mejorar el modelo.
|
||||
- MiniMax M2.5 Free: Durante su período gratuito, los datos recopilados podrán utilizarse para mejorar el modelo.
|
||||
- API de OpenAI: las solicitudes se conservan durante 30 días de acuerdo con las [Políticas de datos de OpenAI](https://platform.openai.com/docs/guides/your-data).
|
||||
- API de Anthropic: las solicitudes se conservan durante 30 días de acuerdo con las [Políticas de datos de Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- Big Pickle: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
|
||||
- MiniMax M2.5 Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
|
||||
- MiMo V2 Pro Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
|
||||
- MiMo V2 Omni Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
|
||||
- Qwen3.6 Plus Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
|
||||
- Nemotron 3 Super Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
|
||||
- OpenAI APIs: Las solicitudes se conservan durante 30 días de acuerdo con [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: Las solicitudes se conservan durante 30 días de acuerdo con [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## Para equipos
|
||||
|
||||
Zen también funciona muy bien para equipos. Puedes invitar a compañeros de equipo, asignar roles, seleccionar
|
||||
los modelos que utiliza su equipo y más.
|
||||
los modelos que usa tu equipo y más.
|
||||
|
||||
:::note
|
||||
Actualmente, los espacios de trabajo son gratuitos para los equipos como parte de la versión beta.
|
||||
Los workspaces son actualmente gratuitos para equipos como parte de la beta.
|
||||
:::
|
||||
|
||||
Actualmente, administrar su espacio de trabajo es gratuito para equipos como parte de la versión beta. estaremos
|
||||
compartiremos más detalles sobre los precios pronto.
|
||||
Gestionar tu workspace es actualmente gratuito para equipos como parte de la beta. Compartiremos
|
||||
más detalles sobre los precios pronto.
|
||||
|
||||
---
|
||||
|
||||
### Roles
|
||||
|
||||
Puedes invitar a compañeros de equipo a tu espacio de trabajo y asignar roles:
|
||||
Puedes invitar a compañeros de equipo a tu workspace y asignar roles:
|
||||
|
||||
- **Administrador**: administra modelos, miembros, claves API y facturación
|
||||
- **Miembro**: administra solo sus propias claves API
|
||||
- **Admin**: Gestiona modelos, miembros, API keys y facturación
|
||||
- **Member**: Gestiona solo sus propias API keys
|
||||
|
||||
Los administradores también pueden establecer límites de gastos mensuales para cada miembro para mantener los costos bajo control.
|
||||
Los admins también pueden establecer límites mensuales de gasto para cada miembro para mantener los costos bajo control.
|
||||
|
||||
---
|
||||
|
||||
### Acceso al modelo
|
||||
### Acceso a modelos
|
||||
|
||||
Los administradores pueden habilitar o deshabilitar modelos específicos para el espacio de trabajo. Las solicitudes realizadas a un modelo deshabilitado devolverán un error.
|
||||
Los admins pueden habilitar o deshabilitar modelos específicos para el workspace. Las solicitudes realizadas a un modelo deshabilitado devolverán un error.
|
||||
|
||||
Esto es útil para los casos en los que desea desactivar el uso de un modelo que
|
||||
Esto resulta útil en los casos en los que quieres deshabilitar el uso de un modelo que
|
||||
recopila datos.
|
||||
|
||||
---
|
||||
|
||||
### Trae tu propia clave API
|
||||
### Trae tu propia API key
|
||||
|
||||
Puedes usar tus propias API keys de OpenAI o Anthropic mientras accedes a otros modelos en Zen.
|
||||
Puedes usar tus propias API keys de OpenAI o Anthropic mientras sigues accediendo a otros modelos en Zen.
|
||||
|
||||
Cuando utiliza sus propias claves, los tokens los factura directamente el proveedor, no Zen.
|
||||
Cuando usas tus propias keys, los tokens te los factura directamente el proveedor, no Zen.
|
||||
|
||||
Por ejemplo, es posible que su organización ya tenga una clave para OpenAI o Anthropic.
|
||||
y quieres usar ese en lugar del que proporciona Zen.
|
||||
Por ejemplo, tu organización podría ya tener una key para OpenAI o Anthropic
|
||||
y quieres usarla en lugar de la que proporciona Zen.
|
||||
|
||||
---
|
||||
|
||||
@@ -265,7 +276,7 @@ y quieres usar ese en lugar del que proporciona Zen.
|
||||
|
||||
Creamos OpenCode Zen para:
|
||||
|
||||
1. **Evaluar** los mejores modelos/proveedores de agentes de codificación.
|
||||
2. Tener acceso a las opciones de **más alta calidad** y no degradar el rendimiento ni recurrir a proveedores más baratos.
|
||||
3. Transmitir cualquier **bajada de precio** vendiendo al costo; por lo que el único margen de beneficio es para cubrir nuestras tarifas de procesamiento.
|
||||
4. **No tener ningún bloqueo** al permitirle usarlo con cualquier otro agente de codificación. Y siempre le permitirá utilizar cualquier otro proveedor con OpenCode también.
|
||||
1. **Benchmark** de los mejores modelos/proveedores para agentes de coding.
|
||||
2. Tener acceso a las opciones de **más alta calidad** y no degradar el rendimiento ni enrutar a proveedores más baratos.
|
||||
3. Trasladar cualquier **bajada de precio** vendiendo al costo; así que el único margen es para cubrir nuestras comisiones de procesamiento.
|
||||
4. Que no haya **lock-in** al permitirte usarlo con cualquier otro agente de coding. Y permitirte siempre usar también cualquier otro proveedor con OpenCode.
|
||||
|
||||
@@ -13,27 +13,27 @@ OpenCode Zen est une liste de modèles testés et vérifiés fournie par l'équi
|
||||
OpenCode Zen est actuellement en version bêta.
|
||||
:::
|
||||
|
||||
Zen fonctionne comme n'importe quel autre fournisseur dans OpenCode. Vous vous connectez à OpenCode Zen et obtenez votre clé API. C'est **complètement facultatif** et vous n'avez pas besoin de l'utiliser pour utiliser OpenCode.
|
||||
Zen fonctionne comme n'importe quel autre fournisseur dans OpenCode. Vous vous connectez à OpenCode Zen et obtenez votre clé API. C'est **entièrement facultatif** et vous n'avez pas besoin de l'utiliser pour utiliser OpenCode.
|
||||
|
||||
---
|
||||
|
||||
## Arrière-plan
|
||||
|
||||
Il existe un grand nombre de modèles, mais seulement quelques-uns d'entre eux fonctionnent bien comme agents de codage. De plus, la plupart des fournisseurs sont configurés très différemment; vous obtenez donc des performances et une qualité très différentes.
|
||||
Il existe un grand nombre de modèles, mais seuls quelques-uns fonctionnent bien comme agents de codage. De plus, la plupart des fournisseurs sont configurés très différemment ; vous obtenez donc des performances et une qualité très variables.
|
||||
|
||||
:::tip
|
||||
Nous avons testé un groupe sélectionné de modèles et de fournisseurs qui fonctionnent bien avec OpenCode.
|
||||
:::
|
||||
|
||||
Donc, si vous utilisez un modèle via quelque chose comme OpenRouter, vous ne pourrez jamais être sûr que vous obtenez la meilleure version du modèle que vous souhaitez.
|
||||
Donc, si vous utilisez un modèle via quelque chose comme OpenRouter, vous ne pouvez jamais être sûr d'obtenir la meilleure version du modèle que vous voulez.
|
||||
|
||||
Pour résoudre ce problème, nous avons effectué plusieurs opérations :
|
||||
Pour corriger cela, nous avons fait plusieurs choses :
|
||||
|
||||
1. Nous avons testé un groupe sélectionné de modèles et discuté avec leurs équipes de la manière de mieux les exécuter.
|
||||
2. Nous avons ensuite travaillé avec quelques prestataires pour nous assurer qu'ils étaient servis correctement.
|
||||
3. Enfin, nous avons comparé la combinaison modèle/fournisseur et sommes arrivés avec une liste que nous nous ferons un plaisir de recommander.
|
||||
1. Nous avons testé un groupe sélectionné de modèles et discuté avec leurs équipes de la meilleure façon de les faire fonctionner.
|
||||
2. Nous avons ensuite travaillé avec quelques fournisseurs pour nous assurer qu'ils étaient correctement servis.
|
||||
3. Enfin, nous avons comparé la combinaison modèle/fournisseur et établi une liste que nous recommandons en toute confiance.
|
||||
|
||||
OpenCode Zen est une passerelle IA qui vous donne accès à ces modèles.
|
||||
OpenCode Zen est une passerelle AI qui vous donne accès à ces modèles.
|
||||
|
||||
---
|
||||
|
||||
@@ -45,7 +45,7 @@ OpenCode Zen fonctionne comme n'importe quel autre fournisseur dans OpenCode.
|
||||
2. Vous exécutez la commande `/connect` dans le TUI, sélectionnez OpenCode Zen et collez votre clé API.
|
||||
3. Exécutez `/models` dans le TUI pour voir la liste des modèles que nous recommandons.
|
||||
|
||||
Vous êtes facturé par demande et vous pouvez ajouter des crédits à votre compte.
|
||||
La facturation se fait à la requête et vous pouvez ajouter des crédits à votre compte.
|
||||
|
||||
---
|
||||
|
||||
@@ -53,43 +53,44 @@ Vous êtes facturé par demande et vous pouvez ajouter des crédits à votre com
|
||||
|
||||
Vous pouvez également accéder à nos modèles via les points de terminaison API suivants.
|
||||
|
||||
| Modèle | ID du modèle | Point de terminaison | Package SDK IA |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Modèle | ID du modèle | Point de terminaison | Package AI SDK |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
Le [id du modèle](/docs/config/#models) dans votre configuration OpenCode utilise le format `opencode/<model-id>`. Par exemple, pour GPT 5.2 Codex, vous devez utilisez `opencode/gpt-5.2-codex` dans votre configuration.
|
||||
Le [model id](/docs/config/#models) dans votre configuration OpenCode utilise le format `opencode/<model-id>`. Par exemple, pour GPT 5.3 Codex, vous utiliseriez `opencode/gpt-5.3-codex` dans votre configuration.
|
||||
|
||||
---
|
||||
|
||||
@@ -103,62 +104,65 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
---
|
||||
|
||||
## Tarifs
|
||||
## Tarification
|
||||
|
||||
Nous soutenons un modèle de paiement à l'utilisation. Vous trouverez ci-dessous les prix **par 1 million de jetons**.
|
||||
Nous prenons en charge un modèle de paiement à l'utilisation. Vous trouverez ci-dessous les prix **par 1M tokens**.
|
||||
|
||||
| Modèle | Entrée | Sortie | Lecture en cache | Écriture en cache |
|
||||
| --------------------------------- | ------- | ------- | ---------------- | ----------------- |
|
||||
| Big Pickle | Gratuit | Gratuit | Gratuit | - |
|
||||
| MiniMax M2.5 Free | Gratuit | Gratuit | Gratuit | - |
|
||||
| MiniMax M2.5 | 0,30 $ | 1,20 $ | 0,06 $ | - |
|
||||
| MiniMax M2.1 | 0,30 $ | 1,20 $ | 0,10 $ | - |
|
||||
| GLM 5 | 1,00 $ | 3,20 $ | 0,20 $ | - |
|
||||
| GLM 4.7 | 0,60 $ | 2,20 $ | 0,10 $ | - |
|
||||
| GLM 4.6 | 0,60 $ | 2,20 $ | 0,10 $ | - |
|
||||
| Kimi K2.5 | 0,60 $ | 3,00 $ | 0,08 $ | - |
|
||||
| Kimi K2 Thinking | 0,40 $ | 2,50 $ | - | - |
|
||||
| Kimi K2 | 0,40 $ | 2,50 $ | - | - |
|
||||
| Qwen3 Coder 480B | 0,45 $ | 1,50 $ | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K jetons) | 5,00 $ | 25,00 $ | 0,50 $ | 6,25 $ |
|
||||
| Claude Opus 4.6 (> 200K jetons) | 10,00 $ | 37,50 $ | 1,00 $ | 12,50 $ |
|
||||
| Claude Opus 4.5 | 5,00 $ | 25,00 $ | 0,50 $ | 6,25 $ |
|
||||
| Claude Opus 4.1 | 15,00 $ | 75,00 $ | 1,50 $ | 18,75 $ |
|
||||
| Claude Sonnet 4.6 (≤ 200K jetons) | 3,00 $ | 15,00 $ | 0,30 $ | 3,75 $ |
|
||||
| Claude Sonnet 4.6 (> 200K jetons) | 6,00 $ | 22,50 $ | 0,60 $ | 7,50 $ |
|
||||
| Claude Sonnet 4.5 (≤ 200K jetons) | 3,00 $ | 15,00 $ | 0,30 $ | 3,75 $ |
|
||||
| Claude Sonnet 4.5 (> 200K jetons) | 6,00 $ | 22,50 $ | 0,60 $ | 7,50 $ |
|
||||
| Claude Sonnet 4 (≤ 200K jetons) | 3,00 $ | 15,00 $ | 0,30 $ | 3,75 $ |
|
||||
| Claude Sonnet 4 (> 200K jetons) | 6,00 $ | 22,50 $ | 0,60 $ | 7,50 $ |
|
||||
| Claude Haiku 4.5 | 1,00 $ | 5,00 $ | 0,10 $ | 1,25 $ |
|
||||
| Claude Haiku 3.5 | 0,80 $ | 4,00 $ | 0,08 $ | 1,00 $ |
|
||||
| Gemini 3.1 Pro (≤ 200K jetons) | 2,00 $ | 12,00 $ | 0,20 $ | - |
|
||||
| Gemini 3.1 Pro (> 200K jetons) | 4,00 $ | 18,00 $ | 0,40 $ | - |
|
||||
| Gemini 3 Pro (≤ 200K jetons) | 2,00 $ | 12,00 $ | 0,20 $ | - |
|
||||
| Gemini 3 Pro (> 200K jetons) | 4,00 $ | 18,00 $ | 0,40 $ | - |
|
||||
| Gemini 3 Flash | 0,50 $ | 3,00 $ | 0,05 $ | - |
|
||||
| GPT 5.4 | 2,50 $ | 15,00 $ | 0,25 $ | - |
|
||||
| GPT 5.3 Codex | 1,75 $ | 14,00 $ | 0,175 $ | - |
|
||||
| GPT 5.2 | 1,75 $ | 14,00 $ | 0,175 $ | - |
|
||||
| GPT 5.2 Codex | 1,75 $ | 14,00 $ | 0,175 $ | - |
|
||||
| GPT 5.1 | 1,07 $ | 8,50 $ | 0,107 $ | - |
|
||||
| GPT 5.1 Codex | 1,07 $ | 8,50 $ | 0,107 $ | - |
|
||||
| GPT 5.1 Codex Max | 1,25 $ | 10,00 $ | 0,125 $ | - |
|
||||
| GPT 5.1 Codex Mini | 0,25 $ | 2,00 $ | 0,025 $ | - |
|
||||
| GPT 5 | 1,07 $ | 8,50 $ | 0,107 $ | - |
|
||||
| GPT 5 Codex | 1,07 $ | 8,50 $ | 0,107 $ | - |
|
||||
| GPT 5 Nano | Gratuit | Gratuit | Gratuit | - |
|
||||
| Modèle | Entrée | Sortie | Lecture en cache | Écriture en cache |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Gratuit | Gratuit | Gratuit | - |
|
||||
| MiMo V2 Pro Free | Gratuit | Gratuit | Gratuit | - |
|
||||
| MiMo V2 Omni Free | Gratuit | Gratuit | Gratuit | - |
|
||||
| Qwen3.6 Plus Free | Gratuit | Gratuit | Gratuit | - |
|
||||
| Nemotron 3 Super Free | Gratuit | Gratuit | Gratuit | - |
|
||||
| MiniMax M2.5 Free | Gratuit | Gratuit | Gratuit | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Gratuit | Gratuit | Gratuit | - |
|
||||
|
||||
Vous remarquerez peut-être _Claude Haiku 3.5_ dans votre historique d'utilisation. Il s'agit d'un [modèle low cost](/docs/config/#models) qui est utilisé pour générer les titres de vos sessions.
|
||||
Vous remarquerez peut-être _Claude Haiku 3.5_ dans votre historique d'utilisation. Il s'agit d'un [modèle à faible coût](/docs/config/#models) utilisé pour générer les titres de vos sessions.
|
||||
|
||||
:::note
|
||||
Les frais de carte de crédit sont répercutés au prix coûtant (4,4 % + 0,30 $ par transaction) ; nous ne facturons rien au-delà.
|
||||
Les frais de carte de crédit sont répercutés au prix coûtant (4.4% + $0.30 par transaction) ; nous ne facturons rien au-delà.
|
||||
:::
|
||||
|
||||
Les modèles gratuits :
|
||||
|
||||
- MiniMax M2.5 Free est disponible sur OpenCode pour une durée limitée. L’équipe profite de ce temps pour recueillir des commentaires et améliorer le modèle.
|
||||
- Big Pickle est un modèle furtif gratuit sur OpenCode pour une durée limitée. L’équipe profite de ce temps pour recueillir des commentaires et améliorer le modèle.
|
||||
- MiniMax M2.5 Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle.
|
||||
- MiMo V2 Pro Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle.
|
||||
- MiMo V2 Omni Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle.
|
||||
- Qwen3.6 Plus Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle.
|
||||
- Nemotron 3 Super Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle.
|
||||
- Big Pickle est un modèle stealth gratuit sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle.
|
||||
|
||||
<a href={email}>Contactez-nous</a> si vous avez des questions.
|
||||
|
||||
@@ -166,7 +170,7 @@ Les modèles gratuits :
|
||||
|
||||
### Rechargement automatique
|
||||
|
||||
Si votre solde descend en dessous de 5 $, Zen rechargera automatiquement 20 $.
|
||||
Si votre solde passe sous $5, Zen rechargera automatiquement $20.
|
||||
|
||||
Vous pouvez modifier le montant du rechargement automatique. Vous pouvez également désactiver complètement le rechargement automatique.
|
||||
|
||||
@@ -176,7 +180,7 @@ Vous pouvez modifier le montant du rechargement automatique. Vous pouvez égalem
|
||||
|
||||
Vous pouvez également définir une limite d'utilisation mensuelle pour l'ensemble de l'espace de travail et pour chaque membre de votre équipe.
|
||||
|
||||
Par exemple, disons que vous définissez une limite d'utilisation mensuelle de 20 $ : Zen n'utilisera pas plus de 20 $ par mois. Mais si le rechargement automatique est activé, Zen pourrait finir par vous facturer plus de 20 $ si votre solde descend en dessous de 5 $.
|
||||
Par exemple, si vous définissez une limite d'utilisation mensuelle à $20, Zen n'utilisera pas plus de $20 sur un mois. Mais si le rechargement automatique est activé, Zen peut finir par vous facturer plus de $20 si votre solde passe sous $5.
|
||||
|
||||
---
|
||||
|
||||
@@ -184,35 +188,40 @@ Par exemple, disons que vous définissez une limite d'utilisation mensuelle à 2
|
||||
|
||||
| Modèle | Date de dépréciation |
|
||||
| ---------------- | -------------------- |
|
||||
| Qwen3 Coder 480B | 6 février 2026 |
|
||||
| Kimi K2 Thinking | 6 mars 2026 |
|
||||
| Kimi K2 | 6 mars 2026 |
|
||||
| MiniMax M2.1 | 15 mars 2026 |
|
||||
| GLM 4.7 | 15 mars 2026 |
|
||||
| GLM 4.6 | 15 mars 2026 |
|
||||
| MiniMax M2.1 | March 15, 2026 |
|
||||
| GLM 4.7 | March 15, 2026 |
|
||||
| GLM 4.6 | March 15, 2026 |
|
||||
| Gemini 3 Pro | March 9, 2026 |
|
||||
| Kimi K2 Thinking | March 6, 2026 |
|
||||
| Kimi K2 | March 6, 2026 |
|
||||
| Qwen3 Coder 480B | Feb 6, 2026 |
|
||||
|
||||
---
|
||||
|
||||
## Confidentialité
|
||||
|
||||
Tous nos modèles sont hébergés aux États-Unis. Nos fournisseurs suivent une politique de rétention zéro et n'utilisent pas vos données pour la formation de modèles, avec les exceptions suivantes :
|
||||
Tous nos modèles sont hébergés aux US. Nos fournisseurs suivent une politique de rétention zéro et n'utilisent pas vos données pour l'entraînement des modèles, avec les exceptions suivantes :
|
||||
|
||||
- Big Pickle : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
|
||||
- MiniMax M2.5 Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
|
||||
- API OpenAI : Les demandes sont conservées pendant 30 jours conformément aux politiques de données de [OpenAI](https://platform.openai.com/docs/guides/your-data).
|
||||
- API Anthropic : Les demandes sont conservées pendant 30 jours conformément aux [Politiques de données d'Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- MiMo V2 Pro Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
|
||||
- MiMo V2 Omni Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
|
||||
- Qwen3.6 Plus Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
|
||||
- Nemotron 3 Super Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
|
||||
- OpenAI APIs : Les requêtes sont conservées pendant 30 jours conformément à [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs : Les requêtes sont conservées pendant 30 jours conformément à [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## Pour les équipes
|
||||
|
||||
Zen fonctionne également très bien pour les équipes. Vous pouvez inviter des coéquipiers, attribuer des rôles, organiser les modèles utilisés par votre équipe, et bien plus encore.
|
||||
Zen fonctionne aussi très bien pour les équipes. Vous pouvez inviter des coéquipiers, attribuer des rôles, sélectionner les modèles que votre équipe utilise, et plus encore.
|
||||
|
||||
:::note
|
||||
Les espaces de travail sont actuellement gratuits pour les équipes dans le cadre de la version bêta.
|
||||
:::
|
||||
|
||||
La gestion de votre espace de travail est actuellement gratuite pour les équipes dans le cadre de la version bêta. Nous partagerons bientôt plus de détails sur les prix.
|
||||
La gestion de votre espace de travail est actuellement gratuite pour les équipes dans le cadre de la version bêta. Nous partagerons bientôt plus de détails sur la tarification.
|
||||
|
||||
---
|
||||
|
||||
@@ -220,28 +229,28 @@ La gestion de votre espace de travail est actuellement gratuite pour les équipe
|
||||
|
||||
Vous pouvez inviter des coéquipiers dans votre espace de travail et attribuer des rôles :
|
||||
|
||||
- **Administrateur** : gérer les modèles, les membres, les clés API et la facturation.
|
||||
- **Membre** : gérer uniquement ses propres clés API
|
||||
- **Admin** : Gérer les modèles, les membres, les clés API et la facturation
|
||||
- **Member** : Gérer uniquement ses propres clés API
|
||||
|
||||
Les administrateurs peuvent également définir des limites de dépenses mensuelles pour chaque membre afin de garder les coûts sous contrôle.
|
||||
|
||||
---
|
||||
|
||||
### Accès au modèle
|
||||
### Accès aux modèles
|
||||
|
||||
Les administrateurs peuvent activer ou désactiver des modèles spécifiques pour l'espace de travail. Les requêtes adressées à un modèle désactivé renverront une erreur.
|
||||
Les administrateurs peuvent activer ou désactiver des modèles spécifiques pour l'espace de travail. Les requêtes effectuées vers un modèle désactivé renverront une erreur.
|
||||
|
||||
Ceci est utile dans les cas où vous souhaitez désactiver l'utilisation d'un modèle qui collecte des données.
|
||||
Cela est utile si vous souhaitez désactiver l'utilisation d'un modèle qui collecte des données.
|
||||
|
||||
---
|
||||
|
||||
### Apportez votre propre clé
|
||||
|
||||
Vous pouvez utiliser vos propres clés OpenAI ou Anthropic API tout en accédant à d'autres modèles dans Zen.
|
||||
Vous pouvez utiliser vos propres clés API OpenAI ou Anthropic tout en accédant à d'autres modèles dans Zen.
|
||||
|
||||
Lorsque vous utilisez vos propres clés, les tokens sont facturés directement par le fournisseur et non par Zen.
|
||||
Lorsque vous utilisez vos propres clés, les tokens sont facturés directement par le fournisseur, pas par Zen.
|
||||
|
||||
Par exemple, votre organisation dispose peut-être déjà d'une clé pour OpenAI ou Anthropic et vous souhaitez l'utiliser à la place de celle fournie par Zen.
|
||||
Par exemple, votre organisation a peut-être déjà une clé pour OpenAI ou Anthropic et vous souhaitez l'utiliser à la place de celle fournie par Zen.
|
||||
|
||||
---
|
||||
|
||||
@@ -249,7 +258,7 @@ Par exemple, votre organisation dispose peut-être déjà d'une clé pour OpenAI
|
||||
|
||||
Nous avons créé OpenCode Zen pour :
|
||||
|
||||
1. **Benchmark** les meilleurs modèles/fournisseurs d'agents de codage.
|
||||
2. Avoir accès aux options de **la plus haute qualité**, sans dégradation des performances ni redirection vers des fournisseurs moins chers.
|
||||
3. Répercutez toute **baisse de prix** en vendant au prix coûtant ; la seule majoration est donc pour couvrir nos frais de traitement.
|
||||
4. N'ayez **aucun verrouillage** en vous permettant de l'utiliser avec n'importe quel autre agent de codage. Et laissez-vous toujours utiliser n'importe quel autre fournisseur avec OpenCode également.
|
||||
1. **Benchmark** les meilleurs modèles/fournisseurs pour les agents de codage.
|
||||
2. Avoir accès aux options de **la plus haute qualité** sans dégrader les performances ni basculer vers des fournisseurs moins chers.
|
||||
3. Répercuter toute **baisse de prix** en vendant au prix coûtant ; la seule marge sert à couvrir nos frais de traitement.
|
||||
4. N'avoir **aucun lock-in** en vous permettant de l'utiliser avec n'importe quel autre agent de codage. Et toujours vous permettre d'utiliser n'importe quel autre fournisseur avec OpenCode également.
|
||||
|
||||
@@ -7,31 +7,39 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen è una lista di modelli testati e verificati dal team di OpenCode.
|
||||
OpenCode Zen è un elenco di modelli testati e verificati forniti dal team di OpenCode.
|
||||
|
||||
:::note
|
||||
OpenCode Zen è attualmente in beta.
|
||||
:::
|
||||
|
||||
Zen funziona come qualunque altro provider in OpenCode. Accedi a OpenCode Zen e ottieni la tua chiave API. È **completamente opzionale** e non devi usarlo per usare OpenCode.
|
||||
Zen funziona come qualsiasi altro provider in OpenCode. Accedi a OpenCode Zen e ottieni
|
||||
la tua chiave API. È **completamente opzionale** e non ti serve usarlo per usare
|
||||
OpenCode.
|
||||
|
||||
---
|
||||
|
||||
## Contesto
|
||||
|
||||
Ci sono moltissimi modelli, ma solo pochi funzionano bene come agenti di coding. Inoltre, la maggior parte dei provider è configurata in modo molto diverso, quindi prestazioni e qualità possono variare parecchio.
|
||||
Esiste un gran numero di modelli, ma solo pochi di
|
||||
questi modelli funzionano bene come agenti di coding. Inoltre, la maggior parte dei provider è
|
||||
configurata in modo molto diverso; quindi ottieni prestazioni e qualità molto diverse.
|
||||
|
||||
:::tip
|
||||
Abbiamo testato un gruppo selezionato di modelli e provider che funzionano bene con OpenCode.
|
||||
:::
|
||||
|
||||
Quindi, se usi un modello tramite qualcosa come OpenRouter, non puoi mai essere sicuro di ottenere la migliore versione del modello che vuoi.
|
||||
Quindi, se usi un modello tramite qualcosa come OpenRouter, non puoi mai essere
|
||||
sicuro di ottenere la migliore versione del modello che vuoi.
|
||||
|
||||
Per risolvere, abbiamo fatto alcune cose:
|
||||
Per risolvere questo problema, abbiamo fatto un paio di cose:
|
||||
|
||||
1. Abbiamo testato un gruppo selezionato di modelli e parlato coi loro team su come eseguirli al meglio.
|
||||
2. Poi abbiamo lavorato con alcuni provider per assicurarci che venissero serviti correttamente.
|
||||
3. Infine, abbiamo fatto benchmark delle combinazioni modello/provider e creato una lista che ci sentiamo di raccomandare.
|
||||
1. Abbiamo testato un gruppo selezionato di modelli e parlato con i loro team di come
|
||||
farli funzionare al meglio.
|
||||
2. Poi abbiamo collaborato con alcuni provider per assicurarci che fossero serviti
|
||||
correttamente.
|
||||
3. Infine, abbiamo fatto benchmark della combinazione modello/provider e creato
|
||||
un elenco che ci sentiamo di raccomandare.
|
||||
|
||||
OpenCode Zen è un gateway AI che ti dà accesso a questi modelli.
|
||||
|
||||
@@ -39,13 +47,14 @@ OpenCode Zen è un gateway AI che ti dà accesso a questi modelli.
|
||||
|
||||
## Come funziona
|
||||
|
||||
OpenCode Zen funziona come qualunque altro provider in OpenCode.
|
||||
OpenCode Zen funziona come qualsiasi altro provider in OpenCode.
|
||||
|
||||
1. Accedi a **<a href={console}>OpenCode Zen</a>**, aggiungi i dettagli di fatturazione e copia la chiave API.
|
||||
2. Esegui il comando `/connect` nella TUI, seleziona OpenCode Zen e incolla la chiave API.
|
||||
3. Esegui `/models` nella TUI per vedere la lista dei modelli che consigliamo.
|
||||
1. Accedi a **<a href={console}>OpenCode Zen</a>**, aggiungi i dettagli di fatturazione,
|
||||
e copia la tua chiave API.
|
||||
2. Esegui il comando `/connect` nella TUI, seleziona OpenCode Zen, e incolla la tua chiave API.
|
||||
3. Esegui `/models` nella TUI per vedere l'elenco dei modelli che raccomandiamo.
|
||||
|
||||
Paghi per richiesta e puoi aggiungere credito al tuo account.
|
||||
Ti viene addebitata ogni richiesta e puoi aggiungere credito al tuo account.
|
||||
|
||||
---
|
||||
|
||||
@@ -53,7 +62,7 @@ Paghi per richiesta e puoi aggiungere credito al tuo account.
|
||||
|
||||
Puoi anche accedere ai nostri modelli tramite i seguenti endpoint API.
|
||||
|
||||
| Modello | ID modello | Endpoint | Pacchetto AI SDK |
|
||||
| Modello | Model ID | Endpoint | Pacchetto AI SDK |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
@@ -85,16 +94,20 @@ Puoi anche accedere ai nostri modelli tramite i seguenti endpoint API.
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Flash Free | mimo-v2-flash-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
Il [model id](/docs/config/#models) nella config di OpenCode usa il formato `opencode/<model-id>`. Per esempio, per GPT 5.3 Codex, useresti `opencode/gpt-5.3-codex` nella config.
|
||||
Il [model id](/docs/config/#models) nella config di OpenCode
|
||||
usa il formato `opencode/<model-id>`. Per esempio, per GPT 5.3 Codex, useresti
|
||||
`opencode/gpt-5.3-codex` nella tua config.
|
||||
|
||||
---
|
||||
|
||||
### Modelli
|
||||
|
||||
Puoi recuperare la lista completa dei modelli disponibili e i relativi metadati da:
|
||||
Puoi recuperare l'elenco completo dei modelli disponibili e i relativi metadati da:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -106,57 +119,61 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
Supportiamo un modello pay-as-you-go. Qui sotto trovi i prezzi **per 1M token**.
|
||||
|
||||
| Modello | Input | Output | Lettura in cache | Scrittura in cache |
|
||||
| --------------------------------- | ------ | ------- | ---------------- | ------------------ |
|
||||
| Big Pickle | Gratis | Gratis | Gratis | - |
|
||||
| MiMo V2 Flash Free | Gratis | Gratis | Gratis | - |
|
||||
| Nemotron 3 Super Free | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 Free | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Gratis | Gratis | Gratis | - |
|
||||
| Modello | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
Potresti notare _Claude Haiku 3.5_ nella cronologia d'uso. È un [modello a basso costo](/docs/config/#models) usato per generare i titoli delle sessioni.
|
||||
Potresti notare _Claude Haiku 3.5_ nella cronologia di utilizzo. È un [modello a basso costo](/docs/config/#models) usato per generare i titoli delle tue sessioni.
|
||||
|
||||
:::note
|
||||
Le commissioni della carta di credito vengono ribaltate al costo (4.4% + $0.30 per transazione); non addebitiamo nulla oltre a questo.
|
||||
Le commissioni della carta di credito vengono trasferite al costo (4.4% + $0.30 per transazione); non addebitiamo nulla oltre a questo.
|
||||
:::
|
||||
|
||||
I modelli gratuiti:
|
||||
|
||||
- MiniMax M2.5 Free è disponibile su OpenCode per un periodo limitato. Il team usa questo tempo per raccogliere feedback e migliorare il modello.
|
||||
- MiMo V2 Flash Free è disponibile su OpenCode per un periodo limitato. Il team usa questo tempo per raccogliere feedback e migliorare il modello.
|
||||
- Nemotron 3 Super Free è disponibile su OpenCode per un periodo limitato. Il team usa questo tempo per raccogliere feedback e migliorare il modello.
|
||||
- Big Pickle è un modello stealth gratuito su OpenCode per un periodo limitato. Il team usa questo tempo per raccogliere feedback e migliorare il modello.
|
||||
- MiniMax M2.5 Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello.
|
||||
- MiMo V2 Pro Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello.
|
||||
- MiMo V2 Omni Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello.
|
||||
- Qwen3.6 Plus Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello.
|
||||
- Nemotron 3 Super Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello.
|
||||
- Big Pickle è un modello stealth che è gratuito su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello.
|
||||
|
||||
<a href={email}>Contattaci</a> se hai domande.
|
||||
|
||||
@@ -164,17 +181,20 @@ I modelli gratuiti:
|
||||
|
||||
### Ricarica automatica
|
||||
|
||||
Se il saldo scende sotto $5, Zen ricarica automaticamente $20.
|
||||
Se il tuo saldo scende sotto $5, Zen ricarica automaticamente $20.
|
||||
|
||||
Puoi cambiare l'importo della ricarica automatica. Puoi anche disabilitare completamente l'auto-reload.
|
||||
Puoi modificare l'importo della ricarica automatica. Puoi anche disabilitare completamente la ricarica automatica.
|
||||
|
||||
---
|
||||
|
||||
### Limiti mensili
|
||||
|
||||
Puoi anche impostare un limite mensile di utilizzo per l'intero workspace e per ogni membro del team.
|
||||
Puoi anche impostare un limite di utilizzo mensile per l'intero workspace e per ogni
|
||||
membro del tuo team.
|
||||
|
||||
Per esempio, se imposti un limite mensile a $20, Zen non userà più di $20 in un mese. Ma se hai l'auto-reload attivo, Zen potrebbe finire per addebitarti più di $20 se il saldo scende sotto $5.
|
||||
Per esempio, supponiamo che tu imposti un limite di utilizzo mensile di $20, Zen non userà
|
||||
più di $20 in un mese. Ma se hai la ricarica automatica abilitata, Zen potrebbe finire
|
||||
per addebitarti più di $20 se il tuo saldo scende sotto $5.
|
||||
|
||||
---
|
||||
|
||||
@@ -182,67 +202,73 @@ Per esempio, se imposti un limite mensile a $20, Zen non userà più di $20 in u
|
||||
|
||||
| Modello | Data di deprecazione |
|
||||
| ---------------- | -------------------- |
|
||||
| MiniMax M2.1 | 15 mar 2026 |
|
||||
| GLM 4.7 | 15 mar 2026 |
|
||||
| GLM 4.6 | 15 mar 2026 |
|
||||
| Gemini 3 Pro | 9 mar 2026 |
|
||||
| Kimi K2 Thinking | 6 mar 2026 |
|
||||
| Kimi K2 | 6 mar 2026 |
|
||||
| Qwen3 Coder 480B | 6 feb 2026 |
|
||||
| MiniMax M2.1 | March 15, 2026 |
|
||||
| GLM 4.7 | March 15, 2026 |
|
||||
| GLM 4.6 | March 15, 2026 |
|
||||
| Gemini 3 Pro | March 9, 2026 |
|
||||
| Kimi K2 Thinking | March 6, 2026 |
|
||||
| Kimi K2 | March 6, 2026 |
|
||||
| Qwen3 Coder 480B | Feb 6, 2026 |
|
||||
|
||||
---
|
||||
|
||||
## Privacy
|
||||
|
||||
Tutti i nostri modelli sono ospitati negli US. I nostri provider seguono una policy di zero-retention e non usano i tuoi dati per training dei modelli, con le seguenti eccezioni:
|
||||
Tutti i nostri modelli sono ospitati negli US. I nostri provider seguono una policy di zero-retention e non usano i tuoi dati per l'addestramento dei modelli, con le seguenti eccezioni:
|
||||
|
||||
- Big Pickle: durante il periodo gratuito, i dati raccolti potrebbero essere usati per migliorare il modello.
|
||||
- MiniMax M2.5 Free: durante il periodo gratuito, i dati raccolti potrebbero essere usati per migliorare il modello.
|
||||
- MiniMax M2.5 Free: durante il periodo gratuito, i dati raccolti potrebbero essere usati per migliorare il modello.
|
||||
- Nemotron 3 Super Free: durante il periodo gratuito, i dati raccolti potrebbero essere usati per migliorare il modello.
|
||||
- OpenAI APIs: le richieste vengono conservate per 30 giorni in conformità alle [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: le richieste vengono conservate per 30 giorni in conformità alle [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- Big Pickle: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
|
||||
- MiniMax M2.5 Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
|
||||
- MiMo V2 Pro Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
|
||||
- MiMo V2 Omni Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
|
||||
- Qwen3.6 Plus Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
|
||||
- Nemotron 3 Super Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
|
||||
- OpenAI APIs: le richieste vengono conservate per 30 giorni in conformità con [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: le richieste vengono conservate per 30 giorni in conformità con [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## Per i team
|
||||
|
||||
Zen funziona benissimo anche per i team. Puoi invitare colleghi, assegnare ruoli, curare i modelli usati dal team e altro.
|
||||
Zen funziona benissimo anche per i team. Puoi invitare compagni di squadra, assegnare ruoli, curare
|
||||
i modelli che il tuo team usa, e altro ancora.
|
||||
|
||||
:::note
|
||||
I workspace sono attualmente gratuiti per i team come parte della beta.
|
||||
:::
|
||||
|
||||
Gestire il workspace è attualmente gratuito per i team come parte della beta. Condivideremo presto più dettagli sul pricing.
|
||||
La gestione del tuo workspace è attualmente gratuita per i team come parte della beta. Condivideremo presto
|
||||
più dettagli sui prezzi.
|
||||
|
||||
---
|
||||
|
||||
### Ruoli
|
||||
|
||||
Puoi invitare colleghi nel workspace e assegnare ruoli:
|
||||
Puoi invitare compagni di squadra nel tuo workspace e assegnare ruoli:
|
||||
|
||||
- **Admin**: gestisce modelli, membri, chiavi API e fatturazione
|
||||
- **Member**: gestisce solo le proprie chiavi API
|
||||
- **Admin**: Gestisce modelli, membri, chiavi API, e fatturazione
|
||||
- **Member**: Gestisce solo le proprie chiavi API
|
||||
|
||||
Gli admin possono anche impostare limiti mensili di spesa per ogni membro per tenere i costi sotto controllo.
|
||||
Gli admin possono anche impostare limiti di spesa mensili per ogni membro per tenere i costi sotto controllo.
|
||||
|
||||
---
|
||||
|
||||
### Accesso ai modelli
|
||||
|
||||
Gli admin possono abilitare o disabilitare modelli specifici per il workspace. Le richieste verso un modello disabilitato restituiscono un errore.
|
||||
Gli admin possono abilitare o disabilitare modelli specifici per il workspace. Le richieste fatte a un modello disabilitato restituiranno un errore.
|
||||
|
||||
Questo è utile quando vuoi disabilitare l'uso di un modello che raccoglie dati.
|
||||
Questo è utile nei casi in cui vuoi disabilitare l'uso di un modello che
|
||||
raccoglie dati.
|
||||
|
||||
---
|
||||
|
||||
### Usa le tue chiavi
|
||||
### Porta la tua chiave
|
||||
|
||||
Puoi usare le tue chiavi API OpenAI o Anthropic continuando ad accedere agli altri modelli in Zen.
|
||||
|
||||
Quando usi le tue chiavi, i token vengono fatturati direttamente dal provider, non da Zen.
|
||||
|
||||
Per esempio, la tua organizzazione potrebbe avere già una chiave per OpenAI o Anthropic e vuoi usare quella invece di quella fornita da Zen.
|
||||
Per esempio, la tua organizzazione potrebbe avere già una chiave per OpenAI o Anthropic
|
||||
e vuoi usare quella invece di quella fornita da Zen.
|
||||
|
||||
---
|
||||
|
||||
@@ -251,6 +277,6 @@ Per esempio, la tua organizzazione potrebbe avere già una chiave per OpenAI o A
|
||||
Abbiamo creato OpenCode Zen per:
|
||||
|
||||
1. Fare **benchmark** dei migliori modelli/provider per agenti di coding.
|
||||
2. Dare accesso alle opzioni di **massima qualità** senza ridurre le prestazioni o instradare verso provider più economici.
|
||||
3. Trasferire eventuali **riduzioni di prezzo** vendendo al costo; l'unico markup copre le commissioni di elaborazione.
|
||||
4. Evitare **lock-in** permettendoti di usarlo con qualunque altro agente di coding e lasciandoti sempre usare anche altri provider con OpenCode.
|
||||
2. Avere accesso alle opzioni di **massima qualità** e non degradare le prestazioni né instradare verso provider più economici.
|
||||
3. Trasferire eventuali **riduzioni di prezzo** vendendo al costo; quindi l'unico markup serve a coprire le nostre commissioni di elaborazione.
|
||||
4. Non avere **lock-in** permettendoti di usarlo con qualsiasi altro agente di coding. E permettendoti sempre di usare anche qualsiasi altro provider con OpenCode.
|
||||
|
||||
@@ -7,96 +7,96 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen は、OpenCode チームによって提供される、テストおよび検証されたモデルのリストです。
|
||||
OpenCode Zen は、OpenCode チームが提供する、テスト済みかつ検証済みのモデルの一覧です。
|
||||
|
||||
:::note
|
||||
OpenCode Zen は現在ベータ版です。
|
||||
:::
|
||||
Zen は OpenCode の他のプロバイダーと同様に機能します。 OpenCode Zen にログインすると、API キーを取得できます。これは **完全にオプション** であり、OpenCode を使用するために使用する必要はありません。
|
||||
|
||||
Zen は OpenCode のほかのプロバイダーと同じように動作します。OpenCode Zen にログインして API キーを取得できます。これは **完全にオプション** であり、OpenCode を使うために必須ではありません。
|
||||
|
||||
---
|
||||
|
||||
## 背景
|
||||
|
||||
世の中には多数のモデルがありますが、コーディングエージェントとしてうまく機能するのはごく一部です。さらに、ほとんどのプロバイダーは構成が大きく異なるため、パフォーマンスと品質も大きく異なります。
|
||||
世の中には非常に多くのモデルがありますが、コーディングエージェントとして十分に機能するものはごく一部です。さらに、ほとんどのプロバイダーは設定が大きく異なるため、得られるパフォーマンスや品質にも大きな差が出ます。
|
||||
|
||||
:::tip
|
||||
OpenCode で適切に動作する、厳選されたモデルとプロバイダーのグループをテストしました。
|
||||
OpenCode でうまく動作する、厳選したモデルとプロバイダーの組み合わせをテストしました。
|
||||
:::
|
||||
|
||||
OpenRouter などを通じてモデルを使用している場合、必要なモデルの最高のバージョンを取得できているか確信が持てません。
|
||||
そのため、OpenRouter のようなものを通じてモデルを使っている場合、欲しいモデルの最適なバージョンを本当に使えているかを判断するのは困難です。
|
||||
|
||||
これを修正するために、いくつかのことを行いました。
|
||||
これを解決するために、私たちはいくつかのことを行いました。
|
||||
|
||||
1. 選抜したモデルグループをテストし、それらを最適に実行する方法についてチームと話し合いました。
|
||||
2. その後、いくつかのプロバイダーと協力して、これらが確実に提供されるようにしました。
|
||||
3. 最後に、モデルとプロバイダーの組み合わせをベンチマークし、自信を持ってお勧めできるリストを作成しました。
|
||||
1. 厳選したモデル群をテストし、それぞれのチームと、最適な動かし方について話し合いました。
|
||||
2. そのうえで、いくつかのプロバイダーと連携し、それらが正しく提供されるようにしました。
|
||||
3. 最後に、モデルとプロバイダーの組み合わせをベンチマークし、自信を持っておすすめできる一覧を作成しました。
|
||||
|
||||
OpenCode Zen は、これらのモデルへのアクセスを可能にする AI ゲートウェイです。
|
||||
OpenCode Zen は、これらのモデルにアクセスできる AI ゲートウェイです。
|
||||
|
||||
---
|
||||
|
||||
## 仕組み
|
||||
|
||||
OpenCode Zen は、OpenCode の他のプロバイダーと同様に機能します。
|
||||
OpenCode Zen は、OpenCode のほかのプロバイダーと同じように動作します。
|
||||
|
||||
1. **<a href={console}>OpenCode Zen</a>** にサインインし、請求情報を追加して、API キーをコピーします。
|
||||
2. TUI で `/connect` コマンドを実行し、OpenCode Zen を選択して API キーを貼り付けます。
|
||||
3. TUI で `/models` を実行すると、推奨されるモデルのリストが表示されます。
|
||||
2. TUI で `/connect` コマンドを実行し、OpenCode Zen を選んで API キーを貼り付けます。
|
||||
3. TUI で `/models` を実行すると、私たちがおすすめするモデルの一覧を確認できます。
|
||||
|
||||
リクエストごとに料金が請求され、アカウントにクレジットを追加できます。
|
||||
料金はリクエストごとに発生し、アカウントにクレジットを追加できます。
|
||||
|
||||
---
|
||||
|
||||
## エンドポイント
|
||||
|
||||
次の API エンドポイントを通じてモデルにアクセスすることもできます。
|
||||
以下の API エンドポイントを通じて、私たちのモデルにアクセスすることもできます。
|
||||
|
||||
| Model | Model ID | Endpoint | AI SDK Package |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Model | Model ID | Endpoint | AI SDK Package |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
OpenCode 設定の [model id](/docs/config/#models)
|
||||
`opencode/<model-id>` 形式を使用します。たとえば、GPT 5.2 Codex の場合は、
|
||||
設定で `opencode/gpt-5.2-codex` を使用してください。
|
||||
OpenCode 設定で使う [model id](/docs/config/#models) は `opencode/<model-id>` 形式です。たとえば、GPT 5.3 Codex では設定に `opencode/gpt-5.3-codex` を使用します。
|
||||
|
||||
---
|
||||
|
||||
### モデル
|
||||
|
||||
利用可能なモデルとそのメタデータの完全なリストは、次から取得できます。
|
||||
利用可能なモデルとそのメタデータの完全な一覧は、次から取得できます。
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -106,78 +106,81 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## 価格
|
||||
|
||||
当社は従量課金制モデルをサポートしています。以下は **100 万トークンあたりの価格**です。
|
||||
従量課金制をサポートしています。以下は **100万トークンあたり** の価格です。
|
||||
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------ | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
使用履歴に _Claude Haiku 3.5_ が表示されるかもしれません。これは [セッションのタイトルを生成するために使用される低コストモデル](/docs/config/#models) です。
|
||||
使用履歴に _Claude Haiku 3.5_ が表示されることがあります。これはセッションのタイトル生成に使われる [low cost model](/docs/config/#models) です。
|
||||
|
||||
:::note
|
||||
クレジットカード手数料は実費で引き継がれます (4.4% + 取引ごとに 0.30 ドル)。それ以上の料金はかかりません。
|
||||
クレジットカード手数料は実費で転嫁されます(各取引につき 4.4% + $0.30)。それ以外の上乗せはありません。
|
||||
:::
|
||||
|
||||
無料のモデル:
|
||||
無料モデル:
|
||||
|
||||
- MiniMax M2.5 Free は期間限定で OpenCode で利用可能です。チームはこの期間を利用してフィードバックを収集し、モデルを改善します。
|
||||
- Big Pickle は、期間限定で OpenCode で無料で利用できるステルスモデルです。チームはこの期間を利用してフィードバックを収集し、モデルを改善します。
|
||||
- MiniMax M2.5 Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。
|
||||
- MiMo V2 Pro Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。
|
||||
- MiMo V2 Omni Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。
|
||||
- Qwen3.6 Plus Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。
|
||||
- Nemotron 3 Super Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。
|
||||
- Big Pickle はステルスモデルで、期間限定で OpenCode で無料提供されています。チームはこの期間中にフィードバックを集め、モデルを改善しています。
|
||||
|
||||
ご質問がございましたら、<a href={email}>お問い合わせ</a>ください。
|
||||
ご不明な点があれば、<a href={email}>お問い合わせください</a>。
|
||||
|
||||
---
|
||||
|
||||
### 自動リロード
|
||||
|
||||
残高が 5 ドルを下回ると、Zen は自動的に 20 ドルをリロードします。
|
||||
残高が $5 を下回ると、Zen は自動的に $20 をリロードします。
|
||||
|
||||
自動リロード量を変更できます。自動リロードを完全に無効にすることもできます。
|
||||
自動リロード額は変更できます。また、自動リロード自体を完全に無効化することもできます。
|
||||
|
||||
---
|
||||
|
||||
### 月ごとの制限
|
||||
### 月次上限
|
||||
|
||||
ワークスペース全体およびチームの各メンバーの月ごとの使用制限を設定することもできます。
|
||||
ワークスペース全体とチームの各メンバーに対して、月ごとの利用上限を設定することもできます。
|
||||
|
||||
たとえば、毎月の使用制限を 20 ドルに設定したとします。Zen は月に 20 ドル以上を使用しません。ただし、自動リロードを有効にしている場合、残高が 5 ドルを下回ると、Zen が 20 ドル以上の請求を行う可能性があります。
|
||||
たとえば、月次利用上限を $20 に設定した場合、Zen は 1 か月で $20 を超えて使われることはありません。ただし、自動リロードが有効になっていると、残高が $5 を下回った時点で Zen が $20 を自動チャージするため、結果として $20 を超えて請求される可能性があります。
|
||||
|
||||
---
|
||||
|
||||
@@ -185,74 +188,77 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
| Model | Deprecation date |
|
||||
| ---------------- | ---------------- |
|
||||
| Qwen3 Coder 480B | 2026年2月6日 |
|
||||
| Kimi K2 Thinking | 2026年3月6日 |
|
||||
| Kimi K2 | 2026年3月6日 |
|
||||
| MiniMax M2.1 | 2026年3月15日 |
|
||||
| GLM 4.7 | 2026年3月15日 |
|
||||
| GLM 4.6 | 2026年3月15日 |
|
||||
| MiniMax M2.1 | March 15, 2026 |
|
||||
| GLM 4.7 | March 15, 2026 |
|
||||
| GLM 4.6 | March 15, 2026 |
|
||||
| Gemini 3 Pro | March 9, 2026 |
|
||||
| Kimi K2 Thinking | March 6, 2026 |
|
||||
| Kimi K2 | March 6, 2026 |
|
||||
| Qwen3 Coder 480B | Feb 6, 2026 |
|
||||
|
||||
---
|
||||
|
||||
## プライバシー
|
||||
|
||||
すべてのモデルは米国でホストされています。当社のプロバイダーはゼロ保持ポリシーに従い、次の例外を除いて、モデルのトレーニングにデータを使用しません。
|
||||
すべてのモデルは米国でホストされています。私たちのプロバイダーはゼロ保持ポリシーに従っており、以下の例外を除き、モデル学習にあなたのデータを使用しません。
|
||||
|
||||
- Big Pickle: 無料期間中、収集されたデータはモデルの改善に使用される場合があります。
|
||||
- MiniMax M2.5 Free: 無料期間中、収集されたデータはモデルの改善に使用される場合があります。
|
||||
- OpenAI API: リクエストは [OpenAI のデータポリシー](https://platform.openai.com/docs/guides/your-data) に従い、30 日間保持されます。
|
||||
- Anthropic API: リクエストは [Anthropic のデータポリシー](https://docs.anthropic.com/en/docs/claude-code/data-usage) に従い、30 日間保持されます。
|
||||
- Big Pickle: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。
|
||||
- MiniMax M2.5 Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。
|
||||
- MiMo V2 Pro Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。
|
||||
- MiMo V2 Omni Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。
|
||||
- Qwen3.6 Plus Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。
|
||||
- Nemotron 3 Super Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。
|
||||
- OpenAI APIs: リクエストは [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) に従って 30 日間保持されます。
|
||||
- Anthropic APIs: リクエストは [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) に従って 30 日間保持されます。
|
||||
|
||||
---
|
||||
|
||||
## チーム向け
|
||||
|
||||
Zen はチームにも効果的です。チームメイトを招待し、役割を割り当て、チームが使用するモデルをキュレートすることなどができます。
|
||||
Zen はチームでも非常に便利です。チームメイトを招待し、ロールを割り当て、チームで使うモデルを厳選するなど、さまざまなことができます。
|
||||
|
||||
:::note
|
||||
ワークスペースは現在、ベータ版の一部としてチームに無料で提供されています。
|
||||
ワークスペースは現在、ベータの一環としてチーム向けに無料です。
|
||||
:::
|
||||
|
||||
ワークスペースの管理は現在、ベータ版の一部としてチームに無料で提供されています。価格の詳細については近日中にお知らせします。
|
||||
ワークスペース管理は現在、ベータの一環としてチーム向けに無料です。価格についての詳細は近日中に共有します。
|
||||
|
||||
---
|
||||
|
||||
### ロール
|
||||
|
||||
チームメイトをワークスペースに招待し、役割を割り当てることができます。
|
||||
チームメイトをワークスペースに招待し、ロールを割り当てられます。
|
||||
|
||||
- **管理者**: モデル、メンバー、API キー、請求を管理します。
|
||||
- **メンバー**: 自分の API キーのみを管理します
|
||||
- **Admin**: モデル、メンバー、API キー、請求を管理できます
|
||||
- **Member**: 自分自身の API キーのみ管理できます
|
||||
|
||||
管理者は、コストを管理するために各メンバーの毎月の支出制限を設定することもできます。
|
||||
Admin は、コストを抑えるために各メンバーの月次支出上限を設定することもできます。
|
||||
|
||||
---
|
||||
|
||||
### モデルアクセス
|
||||
|
||||
管理者は、ワークスペースの特定のモデルを有効または無効にすることができます。無効なモデルに対してリクエストを行うと、エラーが返されます。
|
||||
Admin は、ワークスペースで特定のモデルを有効化または無効化できます。無効化されたモデルへのリクエストはエラーになります。
|
||||
|
||||
これは、モデルの使用を無効にしたい場合に便利です。
|
||||
データを収集します。
|
||||
これは、データを収集するモデルの利用を無効にしたい場合に便利です。
|
||||
|
||||
---
|
||||
|
||||
### 自分のキーを持ち込む
|
||||
### Bring your own key
|
||||
|
||||
Zen の他のモデルにアクセスしながら、独自の OpenAI または Anthropic API キーを使用できます。
|
||||
Zen のほかのモデルにアクセスしながら、自分の OpenAI または Anthropic API キーを使うこともできます。
|
||||
|
||||
独自のキーを使用する場合、トークンは Zen ではなくプロバイダーによって直接請求されます。
|
||||
自分のキーを使う場合、トークン料金は Zen ではなくプロバイダーから直接請求されます。
|
||||
|
||||
たとえば、組織はすでに OpenAI または Anthropic のキーを持っている可能性があります。
|
||||
Zen が提供するものの代わりにそれを使用したいとします。
|
||||
たとえば、あなたの組織がすでに OpenAI や Anthropic のキーを持っていて、Zen が提供するものではなくそちらを使いたい場合があります。
|
||||
|
||||
---
|
||||
|
||||
## 目標
|
||||
|
||||
私たちは次の目的で OpenCode Zen を作成しました。
|
||||
私たちが OpenCode Zen を作った目的は次のとおりです。
|
||||
|
||||
1. **ベンチマーク** コーディングエージェントに最適なモデル/プロバイダー。
|
||||
2. **最高品質**のオプションにアクセスでき、パフォーマンスをダウングレードしたり、より安価なプロバイダーにルートしたりする必要はありません。
|
||||
3. 原価で販売することで**価格下落**を転嫁します。したがって、唯一のマークアップは処理手数料をカバーすることです。
|
||||
4. 他のコーディングエージェントとの併用を許可することで、**ロックイン**がなくなります。また、常に OpenCode で他のプロバイダーも使用できるようにします。
|
||||
1. コーディングエージェントに最適なモデル/プロバイダーを **Benchmark** すること。
|
||||
2. **highest quality** の選択肢にアクセスできるようにし、性能を落としたり、より安価なプロバイダーへルーティングされたりしないこと。
|
||||
3. 原価で提供することで **price drops** をそのまま反映し、上乗せは処理手数料のカバー分だけにすること。
|
||||
4. ほかのコーディングエージェントでも使えるようにして **no lock-in** を実現し、さらに OpenCode では常にほかのプロバイダーも使えるようにすること。
|
||||
|
||||
@@ -7,96 +7,96 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen은 OpenCode 팀이 제공하는, 테스트 및 검증을 완료한 모델 목록입니다.
|
||||
OpenCode Zen은 OpenCode 팀이 제공하는, 테스트와 검증을 거친 모델 목록입니다.
|
||||
|
||||
:::note
|
||||
OpenCode Zen은 현재 베타(Beta) 단계에 있습니다.
|
||||
OpenCode Zen은 현재 beta입니다.
|
||||
:::
|
||||
|
||||
Zen은 OpenCode 내의 다른 공급자와 동일한 방식으로 작동합니다. 사용자는 OpenCode Zen에 로그인하여 API 키를 발급받을 수 있습니다. 본 서비스는 **전적으로 선택 사항**이며, OpenCode를 사용하기 위해 반드시 OpenCode Zen을 이용할 필요는 없습니다.
|
||||
Zen은 OpenCode의 다른 provider와 똑같이 작동합니다. OpenCode Zen에 로그인해 API 키를 받으면 됩니다. 이는 **완전히 선택 사항**이며, OpenCode를 사용하기 위해 반드시 사용할 필요는 없습니다.
|
||||
|
||||
---
|
||||
|
||||
## 배경
|
||||
|
||||
현재 다양한 모델이 존재하지만, 이 중 코딩 에이전트로서 우수한 성능을 발휘하는 모델은 일부에 불과합니다. 또한 대부분의 공급자는 각기 다른 방식으로 구성되어 있어, 그에 따라 성능과 품질 또한 크게 달라질 수 있습니다.
|
||||
시중에는 매우 많은 모델이 있지만, 이 가운데 코딩 에이전트로 잘 작동하는 모델은 일부뿐입니다. 또한 대부분의 provider는 설정 방식이 매우 달라서, 성능과 품질에도 큰 차이가 생깁니다.
|
||||
|
||||
:::tip
|
||||
OpenCode와 함께 원활하게 작동하는 일부 모델 및 제공자를 선별하여 테스트를 진행하였습니다.
|
||||
OpenCode에서 잘 작동하는 일부 모델과 provider를 직접 테스트했습니다.
|
||||
:::
|
||||
|
||||
따라서 OpenRouter와 같은 서비스를 통해 모델을 사용하는 경우, 사용자가 원하는 모델의 최적 버전을 실제로 사용하고 있는지 확신하기 어렵습니다.
|
||||
따라서 OpenRouter 같은 서비스를 통해 모델을 사용하면, 원하는 모델의 최적 버전을 정말 쓰고 있는지 확신하기 어렵습니다.
|
||||
|
||||
이 문제를 해결하기 위해 다음과 같은 조치를 수행하였습니다.
|
||||
이 문제를 해결하기 위해 다음과 같은 작업을 했습니다.
|
||||
|
||||
1. 일부 모델을 선별하여 테스트를 진행하고, 각 모델 팀과 협력하여 최적의 운영 방안을 논의하였습니다.
|
||||
2. 일부 제공자와 협력하여 해당 모델이 올바르게 제공되도록 구성하였습니다.
|
||||
3. 마지막으로 모델과 제공자 조합에 대한 벤치마크를 수행하여, 신뢰를 바탕으로 권장할 수 있는 목록을 도출하였습니다.
|
||||
1. 일부 모델을 직접 테스트하고, 각 팀과 함께 가장 잘 운영하는 방법을 논의했습니다.
|
||||
2. 이어서 몇몇 provider와 협력해, 이 모델들이 올바르게 제공되도록 했습니다.
|
||||
3. 마지막으로 모델/provider 조합을 벤치마크하고, 자신 있게 추천할 수 있는 목록을 정리했습니다.
|
||||
|
||||
OpenCode Zen은 이러한 모델에 대한 접근을 제공하는 AI 게이트웨이입니다.
|
||||
OpenCode Zen은 이런 모델에 접근할 수 있게 해주는 AI gateway입니다.
|
||||
|
||||
---
|
||||
|
||||
## 어떻게 작동하나요?
|
||||
## 작동 방식
|
||||
|
||||
OpenCode Zen은 OpenCode의 다른 제공자와 동일한 방식으로 작동합니다.
|
||||
OpenCode Zen은 OpenCode의 다른 provider와 똑같이 작동합니다.
|
||||
|
||||
1. **<a href={console}>OpenCode Zen</a>**에 로그인한 후, 결제 정보를 추가하고 API 키를 복사합니다.
|
||||
2. TUI에서 `/connect` 명령어를 실행한 뒤, OpenCode Zen을 선택하고 API 키를 붙여넣습니다.
|
||||
3. TUI에서 `/models` 명령어를 실행하여, 당사가 권장하는 모델 목록을 확인합니다.
|
||||
1. **<a href={console}>OpenCode Zen</a>**에 로그인하고, 결제 정보를 추가한 뒤, API 키를 복사합니다.
|
||||
2. TUI에서 `/connect` 명령을 실행하고, OpenCode Zen을 선택한 다음 API 키를 붙여넣습니다.
|
||||
3. TUI에서 `/models`를 실행해, 저희가 추천하는 모델 목록을 확인합니다.
|
||||
|
||||
요금은 요청 단위로 부과되며, 계정에 크레딧을 추가하여 사용할 수 있습니다.
|
||||
요금은 요청별로 청구되며, 계정에 크레딧을 추가할 수 있습니다.
|
||||
|
||||
---
|
||||
|
||||
## 엔드포인트
|
||||
|
||||
다음 API 엔드포인트를 통해서도 당사의 모델에 접근할 수 있습니다.
|
||||
다음 API 엔드포인트를 통해서도 모델에 접근할 수 있습니다.
|
||||
|
||||
| 모델 | 모델 ID | 엔드포인트 | AI SDK 패키지 |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| 모델 | 모델 ID | 엔드포인트 | AI SDK 패키지 |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
OpenCode 설정 파일에서 사용하는 [모델 ID](/docs/config/#models)는 `opencode/<model-id>` 형식을 따릅니다.
|
||||
예를 들어 GPT 5.2 Codex의 경우 설정에서 `opencode/gpt-5.2-codex`와 같이 사용합니다.
|
||||
OpenCode config에서 사용하는 [모델 ID](/docs/config/#models)는 `opencode/<model-id>` 형식입니다. 예를 들어 GPT 5.3 Codex를 사용하려면 config에서 `opencode/gpt-5.3-codex`를 사용하면 됩니다.
|
||||
|
||||
---
|
||||
|
||||
### 모델
|
||||
|
||||
사용 가능한 전체 모델 목록과 해당 메타데이터는 다음 경로에서 확인할 수 있습니다:
|
||||
사용 가능한 전체 모델 목록과 메타데이터는 다음에서 가져올 수 있습니다.
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -104,154 +104,161 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
---
|
||||
|
||||
## 요금제
|
||||
## 요금
|
||||
|
||||
당사는 종량제(pay-as-you-go) 요금 모델을 지원합니다. 아래는 **1백만 토큰(1M tokens)당** 요금입니다.
|
||||
종량제(pay-as-you-go)를 지원합니다. 아래 가격은 **1M tokens당** 기준입니다.
|
||||
|
||||
| 모델 | 입력 | 출력 | 캐시 읽기 | 캐시 쓰기 |
|
||||
| --------------------------------- | ------ | ------ | --------- | --------- |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
| 모델                              | 입력   | 출력    | 캐시 읽기   | 캐시 쓰기    |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
사용 내역에서 *Claude Haiku 3.5*를 확인하실 수 있습니다. 이는 세션 제목을 생성하는 데 사용되는 [저비용 모델](/docs/config/#models)입니다.
|
||||
사용 기록에서 *Claude Haiku 3.5*를 볼 수 있습니다. 이는 세션 제목을 생성할 때 사용되는 [저비용 모델](/docs/config/#models)입니다.
|
||||
|
||||
:::note
|
||||
신용카드 수수료는 원가 기준(거래당 4.4% + $0.30)으로 그대로 반영되며, 당사는 그 외의 추가 수수료를 부과하지 않습니다.
|
||||
신용카드 수수료는 원가 그대로 전달됩니다(거래당 4.4% + $0.30). 그 외 추가 요금은 부과하지 않습니다.
|
||||
:::
|
||||
|
||||
무료 모델:
|
||||
|
||||
- MiniMax M2.5 Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 해당 기간 동안 팀은 사용자 피드백을 수집하고 모델을 개선할 예정입니다.
|
||||
- Big Pickle은 한정된 기간 동안 OpenCode에서 무료로 제공되는 스텔스 모델입니다. 해당 기간 동안 팀은 사용자 피드백을 수집하고 모델을 개선할 예정입니다.
|
||||
- MiniMax M2.5 Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다.
|
||||
- MiMo V2 Pro Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다.
|
||||
- MiMo V2 Omni Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다.
|
||||
- Qwen3.6 Plus Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다.
|
||||
- Nemotron 3 Super Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다.
|
||||
- Big Pickle은 한정된 기간 동안 OpenCode에서 무료로 제공되는 stealth model입니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다.
|
||||
|
||||
문의 사항이 있으시면 <a href={email}>Contact us</a>를 통해 연락해 주시기 바랍니다.
|
||||
궁금한 점이 있으면 <a href={email}>Contact us</a>로 문의해 주세요.
|
||||
|
||||
---
|
||||
|
||||
### 자동 충전
|
||||
|
||||
잔액이 $5 미만으로 내려가면, Zen은 자동으로 $20을 충전합니다.
|
||||
잔액이 $5 아래로 내려가면 Zen이 자동으로 $20을 충전합니다.
|
||||
|
||||
자동 충전 금액은 변경할 수 있으며, 자동 충전 기능을 완전히 비활성화할 수도 있습니다.
|
||||
자동 충전 금액은 변경할 수 있습니다. 자동 충전을 완전히 비활성화할 수도 있습니다.
|
||||
|
||||
---
|
||||
|
||||
### 월간 사용 한도
|
||||
### 월간 한도
|
||||
|
||||
워크스페이스 전체 및 각 팀 구성원별로 월간 사용 한도를 설정할 수 있습니다.
|
||||
워크스페이스 전체와 팀의 각 구성원에 대해 월간 사용 한도를 설정할 수도 있습니다.
|
||||
|
||||
예를 들어 월간 사용 한도를 $20로 설정한 경우, Zen은 한 달 동안 $20을 초과하여 사용하지 않습니다.
|
||||
다만 자동 충전이 활성화되어 있는 경우, 잔액이 $5 미만으로 내려가면 자동으로 충전이 이루어질 수 있으므로 실제 청구 금액이 $20을 초과할 수 있습니다.
|
||||
예를 들어 월간 사용 한도를 $20로 설정하면, Zen은 한 달에 $20를 초과해 사용하지 않습니다. 하지만 자동 충전이 켜져 있다면 잔액이 $5 아래로 내려갈 때 자동으로 충전되어, 실제 청구 금액은 $20를 넘을 수 있습니다.
|
||||
|
||||
---
|
||||
|
||||
### 지원 중단 모델
|
||||
|
||||
| 모델 | 지원 중단일 |
|
||||
| ---------------- | --------------- |
|
||||
| Qwen3 Coder 480B | 2026년 2월 6일 |
|
||||
| Kimi K2 Thinking | 2026년 3월 6일 |
|
||||
| Kimi K2 | 2026년 3월 6일 |
|
||||
| MiniMax M2.1 | 2026년 3월 15일 |
|
||||
| GLM 4.7 | 2026년 3월 15일 |
|
||||
| GLM 4.6 | 2026년 3월 15일 |
|
||||
| 모델 | 지원 중단 날짜 |
|
||||
| ---------------- | -------------- |
|
||||
| MiniMax M2.1     | 2026년 3월 15일 |
|
||||
| GLM 4.7          | 2026년 3월 15일 |
|
||||
| GLM 4.6          | 2026년 3월 15일 |
|
||||
| Gemini 3 Pro     | 2026년 3월 9일  |
|
||||
| Kimi K2 Thinking | 2026년 3월 6일  |
|
||||
| Kimi K2          | 2026년 3월 6일  |
|
||||
| Qwen3 Coder 480B | 2026년 2월 6일  |
|
||||
|
||||
---
|
||||
|
||||
## 개인정보 보호
|
||||
|
||||
당사의 모든 모델은 미국에서 호스팅됩니다. 당사 제공자는 데이터 무보존(zero-retention) 정책을 따르며, 아래의 예외를 제외하고는 귀하의 데이터를 모델 학습에 사용하지 않습니다.
|
||||
모든 모델은 미국에서 호스팅됩니다. provider는 zero-retention 정책을 따르며, 다음 예외를 제외하고는 데이터를 모델 학습에 사용하지 않습니다.
|
||||
|
||||
- Big Pickle: 무료 제공 기간 동안 수집된 데이터는 모델 개선을 위해 사용될 수 있습니다.
|
||||
- MiniMax M2.5 Free: 무료 제공 기간 동안 수집된 데이터는 모델 개선을 위해 사용될 수 있습니다.
|
||||
- OpenAI APIs: 요청 데이터는 [OpenAI의 데이터 정책](https://platform.openai.com/docs/guides/your-data)에 따라 30일간 보관됩니다.
|
||||
- Anthropic APIs: 요청 데이터는 [Anthropic의 데이터 정책](https://docs.anthropic.com/en/docs/claude-code/data-usage)에 따라 30일간 보관됩니다.
|
||||
- Big Pickle: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
|
||||
- MiniMax M2.5 Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
|
||||
- MiMo V2 Pro Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
|
||||
- MiMo V2 Omni Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
|
||||
- Qwen3.6 Plus Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
|
||||
- Nemotron 3 Super Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
|
||||
- OpenAI APIs: 요청은 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data)에 따라 30일 동안 보관됩니다.
|
||||
- Anthropic APIs: 요청은 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage)에 따라 30일 동안 보관됩니다.
|
||||
|
||||
---
|
||||
|
||||
## 팀을 위한 기능
|
||||
## 팀용 기능
|
||||
|
||||
Zen은 팀 환경에서도 효과적으로 활용할 수 있습니다. 팀원을 초대하고, 역할을 지정하며, 팀에서 사용할 모델을 선별하는 등 다양한 기능을 제공합니다.
|
||||
Zen은 팀에서도 매우 잘 작동합니다. 팀원을 초대하고, 역할을 할당하고, 팀에서 사용할 모델을 선별하는 등 다양한 작업을 할 수 있습니다.
|
||||
|
||||
:::note
|
||||
워크스페이스는 현재 베타 프로그램의 일환으로 팀에 무료로 제공되고 있습니다.
|
||||
워크스페이스는 현재 beta의 일부로 팀에 무료 제공되고 있습니다.
|
||||
:::
|
||||
|
||||
현재 베타 기간 동안 팀의 워크스페이스 관리 기능은 무료로 제공됩니다. 요금 정책에 대한 보다 자세한 내용은 추후 안내될 예정입니다.
|
||||
현재 beta의 일부로 팀의 워크스페이스 관리도 무료입니다. 가격에 대한 더 자세한 내용은 곧 공유할 예정입니다.
|
||||
|
||||
---
|
||||
|
||||
### 역할
|
||||
|
||||
워크스페이스에 팀원을 초대하고 다음과 같은 역할을 부여할 수 있습니다.
|
||||
워크스페이스에 팀원을 초대하고 역할을 할당할 수 있습니다.
|
||||
|
||||
- **Admin**: 모델, 구성원, API 키 및 결제를 관리
|
||||
- **Member**: 본인의 API 키만 관리
|
||||
- **Admin**: 모델, 멤버, API 키, 결제를 관리
|
||||
- **Member**: 자신의 API 키만 관리
|
||||
|
||||
관리자는 비용 통제를 위해 각 구성원별 월간 지출 한도를 설정할 수 있습니다.
|
||||
Admin은 비용을 통제할 수 있도록 각 멤버의 월간 지출 한도도 설정할 수 있습니다.
|
||||
|
||||
---
|
||||
|
||||
### 모델 접근 권한
|
||||
### 모델 접근
|
||||
|
||||
관리자는 워크스페이스에서 특정 모델의 사용을 활성화하거나 비활성화할 수 있습니다. 비활성화된 모델에 대한 요청은 오류를 반환합니다.
|
||||
Admin은 워크스페이스에서 특정 모델을 활성화하거나 비활성화할 수 있습니다. 비활성화된 모델로 보내는 요청은 오류를 반환합니다.
|
||||
|
||||
이는 데이터 수집이 이루어지는 모델의 사용을 제한하려는 경우에 유용합니다.
|
||||
이는 데이터를 수집하는 모델의 사용을 비활성화하고 싶을 때 유용합니다.
|
||||
|
||||
---
|
||||
|
||||
## BYOK (Bring Your Own Key)
|
||||
### Bring your own key
|
||||
|
||||
Zen에서 다른 모델을 계속 이용하면서도, OpenAI 또는 Anthropic의 자체 API 키를 사용할 수 있습니다.
|
||||
Zen의 다른 모델도 계속 사용하면서, 자체 OpenAI 또는 Anthropic API 키를 사용할 수 있습니다.
|
||||
|
||||
자체 키를 사용하는 경우, 토큰 사용 요금은 Zen이 아닌 해당 제공자가 직접 청구합니다.
|
||||
자체 키를 사용하면 토큰 요금은 Zen이 아니라 provider가 직접 청구합니다.
|
||||
|
||||
예를 들어, 귀하의 조직이 이미 OpenAI 또는 Anthropic의 API 키를 보유하고 있는 경우, Zen에서 제공하는 키 대신 해당 키를 사용할 수 있습니다.
|
||||
예를 들어 조직에서 이미 OpenAI나 Anthropic 키를 가지고 있고, Zen이 제공하는 키 대신 그 키를 사용하고 싶을 수 있습니다.
|
||||
|
||||
---
|
||||
|
||||
## 목표
|
||||
|
||||
OpenCode Zen은 다음과 같은 목표를 바탕으로 개발되었습니다.
|
||||
OpenCode Zen을 만든 이유는 다음과 같습니다.
|
||||
|
||||
1. 코딩 에이전트에 적합한 최상의 모델 및 제공자를 **벤치마킹**합니다.
|
||||
2. 성능을 저하시키거나 더 저렴한 제공자로 우회하지 않고, **최고 품질의 옵션**에 접근할 수 있도록 합니다.
|
||||
3. 가격 인하가 있을 경우 이를 원가로 반영하여 제공하며, 당사의 마진은 처리 수수료를 충당하기 위한 최소 수준으로 제한합니다.
|
||||
4. 특정 서비스에 종속되지 않도록 하여, 다른 코딩 에이전트와도 자유롭게 함께 사용할 수 있도록 하며, OpenCode 내에서 다른 제공자 역시 언제든지 사용할 수 있도록 합니다.
|
||||
1. 코딩 에이전트에 가장 적합한 모델/provider를 **벤치마크**하기 위해.
|
||||
2. 성능을 낮추거나 더 저렴한 provider로 라우팅하지 않고, **최고 품질**의 선택지에 접근하기 위해.
|
||||
3. 원가로 판매해 **가격 인하**를 그대로 반영하기 위해. 따라서 붙는 마진은 처리 수수료를 충당하기 위한 수준뿐입니다.
|
||||
4. 다른 코딩 에이전트와도 함께 사용할 수 있게 해 **lock-in이 없도록** 하기 위해. 또한 OpenCode에서 언제나 다른 provider도 사용할 수 있게 하기 위해.
|
||||
|
||||
@@ -13,33 +13,33 @@ OpenCode Zen er en liste over testede og verifiserte modeller levert av OpenCode
|
||||
OpenCode Zen er for øyeblikket i beta.
|
||||
:::
|
||||
|
||||
Zen fungerer som alle andre leverandører i OpenCode. Du logger på OpenCode Zen og får
|
||||
din API nøkkel. Den er **helt valgfri** og du trenger ikke bruke den for å bruke
|
||||
Zen fungerer som enhver annen leverandør i OpenCode. Du logger inn på OpenCode Zen og får
|
||||
API-nøkkelen din. Det er **helt valgfritt**, og du trenger ikke bruke det for å bruke
|
||||
OpenCode.
|
||||
|
||||
---
|
||||
|
||||
## Bakgrunn
|
||||
|
||||
Det er et stort antall modeller der ute, men bare noen få av dem
|
||||
disse modellene fungerer godt som kodeagenter. I tillegg er de fleste tilbydere
|
||||
konfigurert veldig annerledes; slik at du får veldig forskjellig ytelse og kvalitet.
|
||||
Det finnes svært mange modeller der ute, men bare noen få av
|
||||
disse modellene fungerer godt som kodeagenter. I tillegg er de fleste leverandører
|
||||
konfigurert svært ulikt, så du får veldig ulik ytelse og kvalitet.
|
||||
|
||||
:::tip
|
||||
Vi testet en utvalgt gruppe modeller og leverandører som fungerer godt med OpenCode.
|
||||
:::
|
||||
|
||||
Så hvis du bruker en modell gjennom noe som OpenRouter, kan du aldri bli det
|
||||
sikker på om du får den beste versjonen av modellen du ønsker.
|
||||
Så hvis du bruker en modell gjennom noe som OpenRouter, kan du aldri være
|
||||
sikker på om du får den beste versjonen av modellen du vil ha.
|
||||
|
||||
For å fikse dette gjorde vi et par ting:
|
||||
For å løse dette gjorde vi et par ting:
|
||||
|
||||
1. Vi testet en utvalgt gruppe modeller og snakket med teamene deres om hvordan
|
||||
best kjøre dem.
|
||||
2. Vi samarbeidet deretter med noen få leverandører for å sikre at disse ble servert
|
||||
1. Vi testet en utvalgt gruppe modeller og snakket med teamene deres om hvordan de
|
||||
best kunne kjøres.
|
||||
2. Deretter samarbeidet vi med noen få leverandører for å sikre at disse ble levert
|
||||
riktig.
|
||||
3. Til slutt benchmarket vi kombinasjonen av modell/leverandør og kom frem
|
||||
med en liste som vi har lyst til å anbefale.
|
||||
3. Til slutt benchmarket vi kombinasjonen av modell/leverandør og laget
|
||||
en liste vi føler oss trygge på å anbefale.
|
||||
|
||||
OpenCode Zen er en AI gateway som gir deg tilgang til disse modellene.
|
||||
|
||||
@@ -47,14 +47,14 @@ OpenCode Zen er en AI gateway som gir deg tilgang til disse modellene.
|
||||
|
||||
## Slik fungerer det
|
||||
|
||||
OpenCode Zen fungerer som alle andre leverandører i OpenCode.
|
||||
OpenCode Zen fungerer som enhver annen leverandør i OpenCode.
|
||||
|
||||
1. Du logger på **<a href={console}>OpenCode Zen</a>**, legg til fakturering
|
||||
detaljer, og kopier API-nøkkelen.
|
||||
2. Du kjører kommandoen `/connect` i TUI, velger OpenCode Zen og limer inn API-nøkkelen.
|
||||
1. Du logger inn på **<a href={console}>OpenCode Zen</a>**, legger til
|
||||
faktureringsdetaljene dine og kopierer API-nøkkelen din.
|
||||
2. Du kjører kommandoen `/connect` i TUI, velger OpenCode Zen og limer inn API-nøkkelen din.
|
||||
3. Kjør `/models` i TUI for å se listen over modeller vi anbefaler.
|
||||
|
||||
Du belastes per forespørsel, og du kan legge til kreditt på kontoen din.
|
||||
Du blir belastet per forespørsel, og du kan legge til kreditt på kontoen din.
|
||||
|
||||
---
|
||||
|
||||
@@ -62,51 +62,52 @@ Du belastes per forespørsel, og du kan legge til kreditt på kontoen din.
|
||||
|
||||
Du kan også få tilgang til modellene våre gjennom følgende API-endepunkter.
|
||||
|
||||
| Modell | Modell ID | Endepunkt | AI SDK Pakke |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Modell | Modell-ID | Endepunkt | AI SDK-pakke |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
[modell-id](/docs/config/#models) i OpenCode-konfigurasjonen
|
||||
bruker formatet `opencode/<model-id>`. For eksempel, for GPT 5.2 Codex, ville du
|
||||
bruk `opencode/gpt-5.2-codex` i konfigurasjonen din.
|
||||
[modell-id](/docs/config/#models) i OpenCode-konfigurasjonen din
|
||||
bruker formatet `opencode/<model-id>`. For eksempel, for GPT 5.3 Codex, ville du
|
||||
brukt `opencode/gpt-5.3-codex` i konfigurasjonen din.
|
||||
|
||||
---
|
||||
|
||||
### Modeller
|
||||
|
||||
Du kan hente hele listen over tilgjengelige modeller og deres metadata fra:
|
||||
Du kan hente hele listen over tilgjengelige modeller og metadataene deres fra:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -116,150 +117,158 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## Priser
|
||||
|
||||
Vi støtter en pay-as-you-go-modell. Nedenfor er prisene **per 1 million tokens**.
|
||||
Vi støtter en pay-as-you-go-modell. Nedenfor er prisene **per 1M tokens**.
|
||||
|
||||
| Modell | Inndata | Utdata | Bufret lesing | Bufret skriving |
|
||||
| --------------------------------- | ------- | ------ | ------------- | --------------- |
|
||||
| Big Pickle | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 Free | Gratis | Gratis | Gratis | - |
|
||||
| MiniMax M2.5 | $0,30 | $1,20 | $0,06 | $0,375 |
|
||||
| MiniMax M2.1 | $0,30 | $1,20 | $0,10 | - |
|
||||
| GLM 5 | $1,00 | $3,20 | $0,20 | - |
|
||||
| GLM 4.7 | $0,60 | $2,20 | $0,10 | - |
|
||||
| GLM 4.6 | $0,60 | $2,20 | $0,10 | - |
|
||||
| Kimi K2.5 | $0,60 | $3,00 | $0,08 | - |
|
||||
| Kimi K2 Thinking | $0,40 | $2,50 | - | - |
|
||||
| Kimi K2 | $0,40 | $2,50 | - | - |
|
||||
| Qwen3 Coder 480B | $0,45 | $1,50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5,00 | $25,00 | $0,50 | $6,25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10,00 | $37,50 | $1,00 | $12,50 |
|
||||
| Claude Opus 4.5 | $5,00 | $25,00 | $0,50 | $6,25 |
|
||||
| Claude Opus 4.1 | $15,00 | $75,00 | $1,50 | $18,75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3,00 | $15,00 | $0,30 | $3,75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6,00 | $22,50 | $0,60 | $7,50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3,00 | $15,00 | $0,30 | $3,75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6,00 | $22,50 | $0,60 | $7,50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3,00 | $15,00 | $0,30 | $3,75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6,00 | $22,50 | $0,60 | $7,50 |
|
||||
| Claude Haiku 4.5 | $1,00 | $5,00 | $0,10 | $1,25 |
|
||||
| Claude Haiku 3.5 | $0,80 | $4,00 | $0,08 | $1,00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2,00 | $12,00 | $0,20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4,00 | $18,00 | $0,40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2,00 | $12,00 | $0,20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4,00 | $18,00 | $0,40 | - |
|
||||
| Gemini 3 Flash | $0,50 | $3,00 | $0,05 | - |
|
||||
| GPT 5.4 | $2,50 | $15,00 | $0,25 | - |
|
||||
| GPT 5.3 Codex | $1,75 | $14,00 | $0,175 | - |
|
||||
| GPT 5.2 | $1,75 | $14,00 | $0,175 | - |
|
||||
| GPT 5.2 Codex | $1,75 | $14,00 | $0,175 | - |
|
||||
| GPT 5.1 | $1,07 | $8,50 | $0,107 | - |
|
||||
| GPT 5.1 Codex | $1,07 | $8,50 | $0,107 | - |
|
||||
| GPT 5.1 Codex Max | $1,25 | $10,00 | $0,125 | - |
|
||||
| GPT 5.1 Codex Mini | $0,25 | $2,00 | $0,025 | - |
|
||||
| GPT 5 | $1,07 | $8,50 | $0,107 | - |
|
||||
| GPT 5 Codex | $1,07 | $8,50 | $0,107 | - |
|
||||
| GPT 5 Nano | Gratis | Gratis | Gratis | - |
|
||||
| Modell | Inndata | Utdata | Bufret lesing | Bufret skriving |
|
||||
| --------------------------------- | ------- | ------- | ------------- | --------------- |
|
||||
| Big Pickle                        | Gratis  | Gratis  | Gratis        | -               |
|
||||
| MiMo V2 Pro Free                  | Gratis  | Gratis  | Gratis        | -               |
|
||||
| MiMo V2 Omni Free                 | Gratis  | Gratis  | Gratis        | -               |
|
||||
| Qwen3.6 Plus Free                 | Gratis  | Gratis  | Gratis        | -               |
|
||||
| Nemotron 3 Super Free             | Gratis  | Gratis  | Gratis        | -               |
|
||||
| MiniMax M2.5 Free                 | Gratis  | Gratis  | Gratis        | -               |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano                        | Gratis  | Gratis  | Gratis        | -               |
|
||||
|
||||
Du legger kanskje merke til _Claude Haiku 3.5_ i brukshistorikken din. Dette er en [lavprismodell](/docs/config/#models) som brukes til å generere titlene på øktene dine.
|
||||
Du vil kanskje legge merke til _Claude Haiku 3.5_ i brukshistorikken din. Dette er en [lavprismodell](/docs/config/#models) som brukes til å generere titlene på øktene dine.
|
||||
|
||||
:::note
|
||||
Kredittkortgebyrer overføres til kostpris (4,4 % + $0,30 per transaksjon); vi belaster ikke noe utover det.
|
||||
Kredittkortgebyrer videreføres til kostpris (4.4% + $0.30 per transaksjon); vi tar ikke betalt noe utover det.
|
||||
:::
|
||||
|
||||
De gratis modellene:
|
||||
Gratis-modellene:
|
||||
|
||||
- MiniMax M2.5 Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle tilbakemeldinger og forbedre modellen.
|
||||
- Big Pickle er en stealth-modell som er gratis på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle tilbakemeldinger og forbedre modellen.
|
||||
- MiniMax M2.5 Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen.
|
||||
- MiMo V2 Pro Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen.
|
||||
- MiMo V2 Omni Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen.
|
||||
- Qwen3.6 Plus Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen.
|
||||
- Nemotron 3 Super Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen.
|
||||
- Big Pickle er en stealth-modell som er gratis på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen.
|
||||
|
||||
<a href={email}>Kontakt oss</a> hvis du har spørsmål.
|
||||
|
||||
---
|
||||
|
||||
### Automatisk påfylling
|
||||
### Automatisk påfyll
|
||||
|
||||
Hvis saldoen din går under $5, vil Zen automatisk fylle på med $20.
|
||||
Hvis saldoen din går under $5, vil Zen automatisk fylle på $20.
|
||||
|
||||
Du kan endre beløpet for automatisk påfylling. Du kan også deaktivere automatisk påfylling helt.
|
||||
Du kan endre beløpet for automatisk påfyll. Du kan også deaktivere automatisk påfyll helt.
|
||||
|
||||
---
|
||||
|
||||
### Månedlige grenser
|
||||
|
||||
Du kan også angi en månedlig bruksgrense for hele arbeidsområdet og for hver
|
||||
Du kan også sette en månedlig bruksgrense for hele arbeidsområdet og for hvert
|
||||
medlem av teamet ditt.
|
||||
|
||||
La oss for eksempel si at du setter en månedlig bruksgrense til $20, Zen vil ikke bruke
|
||||
mer enn $20 på en måned. Men hvis du har automatisk påfylling aktivert, kan Zen ende opp
|
||||
belaster deg mer enn $20 hvis saldoen din går under $5.
|
||||
La oss for eksempel si at du setter en månedlig bruksgrense på $20. Da vil Zen ikke bruke
|
||||
mer enn $20 i løpet av en måned. Men hvis du har automatisk påfyll aktivert, kan Zen ende opp
|
||||
med å belaste deg mer enn $20 hvis saldoen din går under $5.
|
||||
|
||||
---
|
||||
|
||||
### Utfasede modeller
|
||||
|
||||
| Modell | Utfasingdato |
|
||||
| ---------------- | ------------- |
|
||||
| Qwen3 Coder 480B | 6. feb. 2026 |
|
||||
| Kimi K2 Thinking | 6. mars 2026 |
|
||||
| Kimi K2 | 6. mars 2026 |
|
||||
| MiniMax M2.1 | 15. mars 2026 |
|
||||
| GLM 4.7 | 15. mars 2026 |
|
||||
| GLM 4.6 | 15. mars 2026 |
|
||||
| Modell | Utfasingsdato |
|
||||
| ---------------- | -------------- |
|
||||
| MiniMax M2.1 | March 15, 2026 |
|
||||
| GLM 4.7 | March 15, 2026 |
|
||||
| GLM 4.6 | March 15, 2026 |
|
||||
| Gemini 3 Pro | March 9, 2026 |
|
||||
| Kimi K2 Thinking | March 6, 2026 |
|
||||
| Kimi K2 | March 6, 2026 |
|
||||
| Qwen3 Coder 480B | February 6, 2026 |
|
||||
|
||||
---
|
||||
|
||||
## Personvern
|
||||
|
||||
Alle våre modeller er hostet i USA. Leverandørene våre følger retningslinjer om ingen datalagring og bruker ikke dataene dine til modellopplæring, med følgende unntak:
|
||||
Alle modellene våre hostes i USA. Leverandørene våre følger en policy for zero-retention og bruker ikke dataene dine til modelltrening, med følgende unntak:
|
||||
|
||||
- Big Pickle: I løpet av gratisperioden kan innsamlede data brukes til å forbedre modellen.
|
||||
- MiniMax M2.5 Free: I løpet av gratisperioden kan innsamlede data brukes til å forbedre modellen.
|
||||
- OpenAI APIer: Forespørsler oppbevares i 30 dager i samsvar med [OpenAIs datapolicyer](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIer: Forespørsler oppbevares i 30 dager i samsvar med [Anthropics datapolicyer](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- Big Pickle: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
|
||||
- MiniMax M2.5 Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
|
||||
- MiMo V2 Pro Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
|
||||
- MiMo V2 Omni Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
|
||||
- Qwen3.6 Plus Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
|
||||
- Nemotron 3 Super Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
|
||||
- OpenAI APIs: Forespørsler lagres i 30 dager i samsvar med [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: Forespørsler lagres i 30 dager i samsvar med [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## For lag
|
||||
## For team
|
||||
|
||||
Zen fungerer også utmerket for team. Du kan invitere lagkamerater, tildele roller, kuratere
|
||||
modellene laget ditt bruker, og mer.
|
||||
Zen fungerer også svært godt for team. Du kan invitere teammedlemmer, tildele roller, kuratere
|
||||
modellene teamet ditt bruker og mer.
|
||||
|
||||
:::note
|
||||
Arbeidsområder er for øyeblikket gratis for team som en del av betaversjonen.
|
||||
Arbeidsområder er for øyeblikket gratis for team som en del av betaen.
|
||||
:::
|
||||
|
||||
Å administrere arbeidsområdet ditt er for øyeblikket gratis for team som en del av betaversjonen. Det blir vi
|
||||
deler mer informasjon om prisene snart.
|
||||
Administrasjon av arbeidsområdet ditt er for øyeblikket gratis for team som en del av betaen. Vi kommer til
|
||||
å dele flere detaljer om prising snart.
|
||||
|
||||
---
|
||||
|
||||
### Roller
|
||||
|
||||
Du kan invitere lagkamerater til arbeidsområdet ditt og tildele roller:
|
||||
Du kan invitere teammedlemmer til arbeidsområdet ditt og tildele roller:
|
||||
|
||||
- **Admin**: Administrer modeller, medlemmer, API-nøkler og fakturering
|
||||
- **Medlem**: Administrer kun sine egne API-nøkler
|
||||
- **Admin**: Administrer modeller, medlemmer, API Nøkler og fakturering
|
||||
- **Member**: Administrer bare egne API Nøkler
|
||||
|
||||
Administratorer kan også sette månedlige forbruksgrenser for hvert medlem for å holde kostnadene under kontroll.
|
||||
Admins kan også sette månedlige forbruksgrenser for hvert medlem for å holde kostnadene under kontroll.
|
||||
|
||||
---
|
||||
|
||||
### Modelltilgang
|
||||
|
||||
Administratorer kan aktivere eller deaktivere spesifikke modeller for arbeidsområdet. Forespørsler til en deaktivert modell vil returnere en feil.
|
||||
Admins kan aktivere eller deaktivere bestemte modeller for arbeidsområdet. Forespørsler til en deaktivert modell vil returnere en feil.
|
||||
|
||||
Dette er nyttig for tilfeller der du ønsker å deaktivere bruken av en modell som
|
||||
Dette er nyttig i tilfeller der du vil deaktivere bruken av en modell som
|
||||
samler inn data.
|
||||
|
||||
---
|
||||
|
||||
### Ta med egen nøkkel
|
||||
|
||||
Du kan bruke dine egne OpenAI- eller Anthropic API-nøkler mens du fortsatt har tilgang til andre modeller i Zen.
|
||||
Du kan bruke dine egne OpenAI- eller Anthropic API Nøkler samtidig som du fortsatt har tilgang til andre modeller i Zen.
|
||||
|
||||
Når du bruker dine egne nøkler, faktureres tokens direkte av leverandøren, ikke av Zen.
|
||||
Når du bruker egne nøkler, faktureres tokens direkte av leverandøren, ikke av Zen.
|
||||
|
||||
For eksempel kan organisasjonen din allerede ha en nøkkel for OpenAI eller Anthropic
|
||||
og du vil bruke det i stedet for det Zen gir.
|
||||
For eksempel kan organisasjonen din allerede ha en nøkkel for OpenAI eller Anthropic,
|
||||
og du vil bruke den i stedet for den Zen tilbyr.
|
||||
|
||||
---
|
||||
|
||||
@@ -267,7 +276,7 @@ og du vil bruke det i stedet for det Zen gir.
|
||||
|
||||
Vi opprettet OpenCode Zen for å:
|
||||
|
||||
1. **Benchmark** de beste modellene/leverandørene for kodingsagenter.
|
||||
2. Ha tilgang til alternativene for **høyeste kvalitet** og ikke nedgrader ytelsen eller rute trafikk til billigere leverandører.
|
||||
3. Gi videre eventuelle **prisfall** ved å selge til kostpris; så det eneste påslaget er å dekke behandlingsgebyrene våre.
|
||||
4. Ha **ingen låsing** ved å la deg bruke den med en hvilken som helst annen kodeagent. Og la deg alltid bruke en hvilken som helst annen leverandør med OpenCode også.
|
||||
1. **Benchmarke** de beste modellene/leverandørene for kodeagenter.
|
||||
2. Ha tilgang til alternativene med **høyest kvalitet** og ikke nedgradere ytelsen eller rute til billigere leverandører.
|
||||
3. Videreføre eventuelle **prisreduksjoner** ved å selge til kostpris, slik at det eneste påslaget er for å dekke behandlingsgebyrene våre.
|
||||
4. Ha **ingen lock-in** ved å la deg bruke det med enhver annen kodeagent. Og alltid la deg bruke enhver annen leverandør med OpenCode også.
|
||||
|
||||
@@ -7,41 +7,41 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen to lista przetestowanych i zweryfikowanych modeli udostępniona przez zespół OpenCode.
|
||||
OpenCode Zen to lista przetestowanych i zweryfikowanych modeli udostępniana przez zespół OpenCode.
|
||||
|
||||
:::note
|
||||
OpenCode Zen jest obecnie w wersji beta.
|
||||
:::
|
||||
|
||||
Zen działa jak każdy inny dostawca w OpenCode. Logujesz się do OpenCode Zen i otrzymujesz
|
||||
swój klucz API. Jest to **całkowicie opcjonalne** i nie musisz tego używać, aby korzystać z
|
||||
swój klucz API. To **całkowicie opcjonalne** i nie musisz z tego korzystać, aby używać
|
||||
OpenCode.
|
||||
|
||||
---
|
||||
|
||||
## Tło
|
||||
|
||||
Istnieje ogromna liczba modeli, ale tylko kilka z nich
|
||||
działa dobrze jako agenci kodujący. Dodatkowo większość dostawców jest
|
||||
skonfigurowana bardzo różnie, więc otrzymujesz bardzo różną wydajność i jakość.
|
||||
Istnieje bardzo wiele modeli, ale tylko kilka z nich sprawdza się dobrze jako
|
||||
agenci kodujący. Dodatkowo większość dostawców jest skonfigurowana bardzo różnie,
|
||||
więc otrzymujesz bardzo różną wydajność i jakość.
|
||||
|
||||
:::tip
|
||||
Przetestowaliśmy wybraną grupę modeli i dostawców, którzy dobrze współpracują z OpenCode.
|
||||
:::
|
||||
|
||||
Jeśli więc używasz modelu za pośrednictwem czegoś takiego jak OpenRouter, nigdy nie możesz być
|
||||
pewien, czy otrzymujesz najlepszą wersję modelu, jaki chcesz.
|
||||
Jeśli więc używasz modelu przez coś takiego jak OpenRouter, nigdy nie masz
|
||||
pewności, czy otrzymujesz najlepszą wersję modelu, którego chcesz używać.
|
||||
|
||||
Aby to naprawić, zrobiliśmy kilka rzeczy:
|
||||
|
||||
1. Przetestowaliśmy wybraną grupę modeli i rozmawialiśmy z ich zespołami o tym, jak
|
||||
najlepiej je uruchamiać.
|
||||
2. Następnie współpracowaliśmy z kilkoma dostawcami, aby upewnić się, że są one obsługiwane
|
||||
poprawnie.
|
||||
3. Na koniec sprawdziliśmy wydajność kombinacji modelu/dostawcy i stworzyliśmy
|
||||
listę, którą z czystym sumieniem polecamy.
|
||||
1. Przetestowaliśmy wybraną grupę modeli i rozmawialiśmy z ich zespołami o tym,
|
||||
jak najlepiej je uruchamiać.
|
||||
2. Następnie współpracowaliśmy z kilkoma dostawcami, aby upewnić się, że są
|
||||
serwowane poprawnie.
|
||||
3. Na końcu porównaliśmy wydajność kombinacji model/dostawca i stworzyliśmy
|
||||
listę, którą z przekonaniem polecamy.
|
||||
|
||||
OpenCode Zen to brama AI, która zapewnia dostęp do tych modeli.
|
||||
OpenCode Zen to brama AI, która daje dostęp do tych modeli.
|
||||
|
||||
---
|
||||
|
||||
@@ -49,58 +49,59 @@ OpenCode Zen to brama AI, która zapewnia dostęp do tych modeli.
|
||||
|
||||
OpenCode Zen działa jak każdy inny dostawca w OpenCode.
|
||||
|
||||
1. Logujesz się do **<a href={console}>OpenCode Zen</a>**, dodajesz dane rozliczeniowe
|
||||
i kopiujesz swój klucz API.
|
||||
2. Uruchamiasz polecenie `/connect` w TUI, wybierasz OpenCode Zen i wklejasz klucz API.
|
||||
3. Uruchom `/models` w TUI, aby zobaczyć listę zalecanych przez nas modeli.
|
||||
1. Logujesz się do **<a href={console}>OpenCode Zen</a>**, dodajesz dane
|
||||
rozliczeniowe i kopiujesz swój klucz API.
|
||||
2. Uruchamiasz polecenie `/connect` w TUI, wybierasz OpenCode Zen i wklejasz swój klucz API.
|
||||
3. Uruchamiasz `/models` w TUI, aby zobaczyć listę modeli, które rekomendujemy.
|
||||
|
||||
Opłata jest pobierana za każde żądanie i możesz dodać środki do swojego konta.
|
||||
Płatność jest naliczana za każde żądanie i możesz doładować swoje konto.
|
||||
|
||||
---
|
||||
|
||||
## Punkty końcowe
|
||||
## Endpointy
|
||||
|
||||
Dostęp do naszych modeli można również uzyskać za pośrednictwem następujących punktów końcowych API.
|
||||
Możesz też uzyskać dostęp do naszych modeli przez poniższe endpointy API.
|
||||
|
||||
| Model | Identyfikator modelu | Punkt końcowy | Pakiet SDK AI |
|
||||
| ------------------ | -------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Model | ID modelu | Endpoint | Pakiet AI SDK |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
[Identyfikator modelu](/docs/config/#models) w konfiguracji OpenCode
|
||||
używa formatu `opencode/<model-id>`. Na przykład w przypadku GPT 5.2 Codex użyłbyś
|
||||
`opencode/gpt-5.2-codex` w swojej konfiguracji.
|
||||
[ID modelu](/docs/config/#models) w Twojej konfiguracji OpenCode używa formatu
|
||||
`opencode/<model-id>`. Na przykład dla GPT 5.3 Codex użyjesz w konfiguracji
|
||||
`opencode/gpt-5.3-codex`.
|
||||
|
||||
---
|
||||
|
||||
@@ -116,150 +117,158 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## Cennik
|
||||
|
||||
Wspieramy model pay-as-you-go. Poniżej znajdują się ceny **za 1M tokenów**.
|
||||
Obsługujemy model pay-as-you-go. Poniżej znajdują się ceny **za 1M tokenów**.
|
||||
|
||||
| Model | Wejście | Wyjście | Odczyt w pamięci podręcznej | Zapis w pamięci podręcznej |
|
||||
| --------------------------------- | ------- | ------- | --------------------------- | -------------------------- |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
| Model | Wejście | Wyjście | Odczyt z cache | Zapis do cache |
|
||||
| --------------------------------- | ------- | ------- | -------------- | -------------- |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
Możesz zauważyć _Claude Haiku 3.5_ w swojej historii użytkowania. Jest to [tani model](/docs/config/#models), który jest używany do generowania tytułów Twoich sesji.
|
||||
Możesz zauważyć _Claude Haiku 3.5_ w historii użycia. To [niedrogi model](/docs/config/#models),
|
||||
który służy do generowania tytułów Twoich sesji.
|
||||
|
||||
:::note
|
||||
Opłaty za karty kredytowe są przenoszone po kosztach (4,4% + 0,30 USD za transakcję); nie pobieramy nic poza tym.
|
||||
Opłaty za karty kredytowe są przenoszone po kosztach (4.4% + $0.30 per transaction); nie naliczamy nic ponad to.
|
||||
:::
|
||||
|
||||
Darmowe modele:
|
||||
|
||||
- MiniMax M2.5 Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas na zbieranie opinii i ulepszanie modelu.
|
||||
- Big Pickle to ukryty model, który jest bezpłatny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas na zbieranie opinii i ulepszanie modelu.
|
||||
- MiniMax M2.5 Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu.
|
||||
- MiMo V2 Pro Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu.
|
||||
- MiMo V2 Omni Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu.
|
||||
- Qwen3.6 Plus Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu.
|
||||
- Nemotron 3 Super Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu.
|
||||
- Big Pickle to stealth model, który jest darmowy w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu.
|
||||
|
||||
<a href={email}>Skontaktuj się z nami</a>, jeśli masz jakieś pytania.
|
||||
<a href={email}>Skontaktuj się z nami</a>, jeśli masz pytania.
|
||||
|
||||
---
|
||||
|
||||
### Automatyczne doładowanie
|
||||
### Auto-reload
|
||||
|
||||
Jeśli Twoje saldo spadnie poniżej 5 USD, Zen automatycznie doładuje 20 USD.
|
||||
Jeśli Twoje saldo spadnie poniżej $5, Zen automatycznie doładuje $20.
|
||||
|
||||
Możesz zmienić kwotę automatycznego doładowania. Możesz także całkowicie wyłączyć automatyczne doładowanie.
|
||||
Możesz zmienić kwotę automatycznego doładowania. Możesz też całkowicie wyłączyć auto-reload.
|
||||
|
||||
---
|
||||
|
||||
### Limity miesięczne
|
||||
### Monthly limits
|
||||
|
||||
Możesz także ustawić miesięczny limit użytkowania dla całego obszaru roboczego i dla każdego
|
||||
członka Twojego zespołu.
|
||||
Możesz też ustawić miesięczny limit użycia dla całego workspace i dla każdego
|
||||
członka swojego zespołu.
|
||||
|
||||
Na przykład, jeśli ustawisz miesięczny limit użytkowania na 20 USD, Zen nie zużyje
|
||||
więcej niż 20 dolarów w miesiącu. Ale jeśli masz włączone automatyczne doładowanie, Zen może
|
||||
obciążyć Cię kwotą wyższą niż 20 USD, jeśli saldo spadnie poniżej 5 USD.
|
||||
Na przykład, jeśli ustawisz miesięczny limit użycia na $20, Zen nie zużyje
|
||||
więcej niż $20 w miesiącu. Ale jeśli masz włączony auto-reload, Zen może
|
||||
ostatecznie obciążyć Cię kwotą wyższą niż $20, jeśli Twoje saldo spadnie poniżej $5.
|
||||
|
||||
---
|
||||
|
||||
### Przestarzałe modele
|
||||
### Deprecated models
|
||||
|
||||
| Model | Data wycofania |
|
||||
| ---------------- | -------------- |
|
||||
| Qwen3 Coder 480B | 6 lutego 2026 |
|
||||
| Kimi K2 Thinking | 6 marca 2026 |
|
||||
| Kimi K2 | 6 marca 2026 |
|
||||
| MiniMax M2.1 | 15 marca 2026 |
|
||||
| GLM 4.7 | 15 marca 2026 |
|
||||
| GLM 4.6 | 15 marca 2026 |
|
||||
| Model | Deprecation date |
|
||||
| ---------------- | ---------------- |
|
||||
| MiniMax M2.1 | March 15, 2026 |
|
||||
| GLM 4.7 | March 15, 2026 |
|
||||
| GLM 4.6 | March 15, 2026 |
|
||||
| Gemini 3 Pro | March 9, 2026 |
|
||||
| Kimi K2 Thinking | March 6, 2026 |
|
||||
| Kimi K2 | March 6, 2026 |
|
||||
| Qwen3 Coder 480B | February 6, 2026 |
|
||||
|
||||
---
|
||||
|
||||
## Prywatność
|
||||
|
||||
Wszystkie nasze modele są hostowane w USA. Nasi dostawcy przestrzegają polityki zerowego przechowywania i nie wykorzystują Twoich danych do szkolenia modeli, z następującymi wyjątkami:
|
||||
Wszystkie nasze modele są hostowane w USA. Nasi dostawcy stosują politykę zero-retention i nie wykorzystują Twoich danych do trenowania modeli, z następującymi wyjątkami:
|
||||
|
||||
- Big Pickle: W okresie bezpłatnym zebrane dane mogą zostać wykorzystane do udoskonalenia modelu.
|
||||
- MiniMax M2.5 Free: W okresie bezpłatnym zebrane dane mogą zostać wykorzystane do udoskonalenia modelu.
|
||||
- API OpenAI: Żądania są przechowywane przez 30 dni zgodnie z [Zasadami dotyczącymi danych OpenAI](https://platform.openai.com/docs/guides/your-data).
|
||||
- API Anthropic: Żądania są przechowywane przez 30 dni zgodnie z [Zasadami dotyczącymi danych Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- Big Pickle: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
|
||||
- MiniMax M2.5 Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
|
||||
- MiMo V2 Pro Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
|
||||
- MiMo V2 Omni Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
|
||||
- Qwen3.6 Plus Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
|
||||
- Nemotron 3 Super Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
|
||||
- OpenAI APIs: Żądania są przechowywane przez 30 dni zgodnie z [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: Żądania są przechowywane przez 30 dni zgodnie z [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## Dla zespołów
|
||||
|
||||
Zen działa świetnie także dla zespołów. Możesz zapraszać członków zespołu, przypisywać role, dobierać
|
||||
modele, z których korzysta Twój zespół i nie tylko.
|
||||
Zen świetnie sprawdza się też w zespołach. Możesz zapraszać członków zespołu,
|
||||
przypisywać role, dobierać modele używane przez zespół i nie tylko.
|
||||
|
||||
:::note
|
||||
Obszary robocze są obecnie bezpłatne dla zespołów w ramach wersji beta.
|
||||
Workspaces są obecnie darmowe dla zespołów w ramach wersji beta.
|
||||
:::
|
||||
|
||||
Zarządzanie obszarem roboczym jest obecnie bezpłatne dla zespołów w ramach wersji beta.
|
||||
Wkrótce udostępnimy więcej szczegółów na temat cen.
|
||||
Zarządzanie workspace jest obecnie darmowe dla zespołów w ramach wersji beta. Wkrótce
|
||||
udostępnimy więcej szczegółów dotyczących cen.
|
||||
|
||||
---
|
||||
|
||||
### Role
|
||||
|
||||
Możesz zapraszać członków zespołu do swojego obszaru roboczego i przypisywać role:
|
||||
Możesz zapraszać członków zespołu do swojego workspace i przypisywać role:
|
||||
|
||||
- **Admin**: Zarządzanie modelami, członkami, kluczami API i rozliczeniami
|
||||
- **Członek**: Zarządzanie tylko własnymi kluczami API
|
||||
- **Member**: Zarządzanie tylko własnymi kluczami API
|
||||
|
||||
Administratorzy mogą także ustawić miesięczne limity wydatków dla każdego członka, aby utrzymać koszty pod kontrolą.
|
||||
Admini mogą też ustawiać miesięczne limity wydatków dla każdego członka, aby utrzymać koszty pod kontrolą.
|
||||
|
||||
---
|
||||
|
||||
### Dostęp do modelu
|
||||
### Dostęp do modeli
|
||||
|
||||
Administratorzy mogą włączać i wyłączać określone modele w obszarze roboczym. Żądania skierowane do wyłączonego modelu zwrócą błąd.
|
||||
Admini mogą włączać lub wyłączać konkretne modele dla workspace. Żądania wysłane do wyłączonego modelu zwrócą błąd.
|
||||
|
||||
Jest to przydatne w przypadkach, gdy chcesz wyłączyć korzystanie z modelu, który
|
||||
zbiera dane.
|
||||
To przydaje się wtedy, gdy chcesz wyłączyć używanie modelu, który zbiera dane.
|
||||
|
||||
---
|
||||
|
||||
### Przynieś swój własny klucz
|
||||
### Użyj własnego klucza
|
||||
|
||||
Możesz używać własnych kluczy OpenAI lub Anthropic API, jednocześnie uzyskując dostęp do innych modeli w Zen.
|
||||
Możesz używać własnych kluczy API OpenAI lub Anthropic, a jednocześnie mieć dostęp do innych modeli w Zen.
|
||||
|
||||
Kiedy używasz własnych kluczy, tokeny są rozliczane bezpośrednio przez dostawcę, a nie przez Zen.
|
||||
Gdy używasz własnych kluczy, tokeny są rozliczane bezpośrednio przez dostawcę, a nie przez Zen.
|
||||
|
||||
Na przykład Twoja organizacja może już mieć klucz do OpenAI lub Anthropic
|
||||
i chcesz go używać zamiast tego, który zapewnia Zen.
|
||||
Na przykład Twoja organizacja może już mieć klucz do OpenAI lub Anthropic i
|
||||
chcesz używać go zamiast tego, który udostępnia Zen.
|
||||
|
||||
---
|
||||
|
||||
@@ -267,7 +276,7 @@ i chcesz go używać zamiast tego, który zapewnia Zen.
|
||||
|
||||
Stworzyliśmy OpenCode Zen, aby:
|
||||
|
||||
1. **Testować** (Benchmark) najlepsze modele/dostawców dla agentów kodujących.
|
||||
2. Mieć dostęp do opcji **najwyższej jakości**, a nie obniżać wydajności ani nie kierować do tańszych dostawców.
|
||||
3. Przekazywać wszelkie **obniżki cen**, sprzedając po kosztach; więc jedyną marżą jest pokrycie naszych opłat manipulacyjnych.
|
||||
4. Nie **mieć blokady** (no lock-in), umożliwiając używanie go z dowolnym innym agentem kodującym. I zawsze pozwalać na korzystanie z dowolnego innego dostawcy w OpenCode.
|
||||
1. **Benchmarkować** najlepsze modele/dostawców dla agentów kodujących.
|
||||
2. Mieć dostęp do opcji o **najwyższej jakości** i nie obniżać wydajności ani nie kierować ruchu do tańszych dostawców.
|
||||
3. Przekazywać wszelkie **obniżki cen**, sprzedając po kosztach; jedyna marża pokrywa nasze opłaty za przetwarzanie.
|
||||
4. Nie tworzyć **lock-in**, pozwalając używać tego z dowolnym innym agentem kodującym. I zawsze pozwalać Ci używać w OpenCode także dowolnego innego dostawcy.
|
||||
|
||||
@@ -13,27 +13,27 @@ O OpenCode Zen é uma lista de modelos testados e verificados fornecidos pela eq
|
||||
O OpenCode Zen está atualmente em beta.
|
||||
:::
|
||||
|
||||
O Zen funciona como qualquer outro provedor no OpenCode. Você faz login no OpenCode Zen e obtém sua chave de API. É **completamente opcional** e você não precisa usá-lo para utilizar o OpenCode.
|
||||
O Zen funciona como qualquer outro provedor no OpenCode. Você faz login no OpenCode Zen e recebe sua chave de API. É **completamente opcional** e você não precisa usá-lo para usar o OpenCode.
|
||||
|
||||
---
|
||||
|
||||
## Contexto
|
||||
|
||||
Existe um grande número de modelos disponíveis, mas apenas alguns desses modelos funcionam bem como agentes de codificação. Além disso, a maioria dos provedores é configurada de maneira muito diferente; portanto, você obtém desempenhos e qualidades muito diferentes.
|
||||
Existe um grande número de modelos disponíveis, mas apenas alguns deles funcionam bem como agentes de codificação. Além disso, a maioria dos provedores é configurada de formas muito diferentes; portanto, você obtém desempenho e qualidade muito diferentes.
|
||||
|
||||
:::tip
|
||||
Testamos um grupo selecionado de modelos e provedores que funcionam bem com o OpenCode.
|
||||
:::
|
||||
|
||||
Portanto, se você estiver usando um modelo através de algo como OpenRouter, você nunca pode ter certeza se está obtendo a melhor versão do modelo que deseja.
|
||||
Então, se você estiver usando um modelo por meio de algo como OpenRouter, nunca pode ter certeza de que está obtendo a melhor versão do modelo que deseja.
|
||||
|
||||
Para resolver isso, fizemos algumas coisas:
|
||||
Para corrigir isso, fizemos algumas coisas:
|
||||
|
||||
1. Testamos um grupo selecionado de modelos e conversamos com suas equipes sobre como executá-los da melhor forma.
|
||||
2. Trabalhamos com alguns provedores para garantir que esses modelos estivessem sendo servidos corretamente.
|
||||
3. Finalmente, realizamos benchmarks da combinação modelo/provedor e elaboramos uma lista que nos sentimos bem em recomendar.
|
||||
1. Testamos um grupo selecionado de modelos e conversamos com suas equipes sobre a melhor forma de executá-los.
|
||||
2. Depois, trabalhamos com alguns provedores para garantir que esses modelos fossem servidos corretamente.
|
||||
3. Por fim, fizemos benchmark da combinação modelo/provedor e chegamos a uma lista que nos sentimos confortáveis em recomendar.
|
||||
|
||||
O OpenCode Zen é um gateway de IA que lhe dá acesso a esses modelos.
|
||||
O OpenCode Zen é um gateway de AI que dá a você acesso a esses modelos.
|
||||
|
||||
---
|
||||
|
||||
@@ -51,45 +51,46 @@ Você é cobrado por solicitação e pode adicionar créditos à sua conta.
|
||||
|
||||
## Endpoints
|
||||
|
||||
Você também pode acessar nossos modelos através dos seguintes endpoints da API.
|
||||
Você também pode acessar nossos modelos pelos seguintes endpoints de API.
|
||||
|
||||
| Modelo | ID do Modelo | Endpoint | Pacote AI SDK |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Modelo | ID do modelo | Endpoint | Pacote AI SDK |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
O [id do modelo](/docs/config/#models) na sua configuração do OpenCode usa o formato `opencode/<model-id>`. Por exemplo, para GPT 5.2 Codex, você usaria `opencode/gpt-5.2-codex` na sua configuração.
|
||||
O [model id](/docs/config/#models) na sua configuração do OpenCode usa o formato `opencode/<model-id>`. Por exemplo, para GPT 5.3 Codex, você usaria `opencode/gpt-5.3-codex` na sua configuração.
|
||||
|
||||
---
|
||||
|
||||
@@ -105,78 +106,81 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## Preços
|
||||
|
||||
Nós suportamos um modelo de pagamento conforme o uso. Abaixo estão os preços **por 1M de tokens**.
|
||||
Oferecemos um modelo pay-as-you-go. Abaixo estão os preços **por 1M de tokens**.
|
||||
|
||||
| Modelo | Entrada | Saída | Leitura em Cache | Escrita em Cache |
|
||||
| --------------------------------- | ------- | ------ | ---------------- | ---------------- |
|
||||
| Big Pickle | Grátis | Grátis | Grátis | - |
|
||||
| MiniMax M2.5 Free | Grátis | Grátis | Grátis | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Grátis | Grátis | Grátis | - |
|
||||
| Modelo | Entrada | Saída | Leitura em cache | Escrita em cache |
|
||||
| --------------------------------- | ------- | ------- | ---------------- | ---------------- |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
Você pode notar _Claude Haiku 3.5_ em seu histórico de uso. Este é um [modelo de baixo custo](/docs/config/#models) que é usado para gerar os títulos de suas sessões.
|
||||
Você pode notar _Claude Haiku 3.5_ no seu histórico de uso. Este é um [low cost model](/docs/config/#models) usado para gerar os títulos das suas sessões.
|
||||
|
||||
:::note
|
||||
As taxas de cartão de crédito são repassadas ao custo (4,4% + $0,30 por transação); não cobramos nada além disso.
|
||||
As taxas de cartão de crédito são repassadas a preço de custo (4.4% + $0.30 por transação); não cobramos nada além disso.
|
||||
:::
|
||||
|
||||
Os modelos gratuitos:
|
||||
|
||||
- MiniMax M2.5 Free está disponível no OpenCode por tempo limitado. A equipe está usando esse tempo para coletar feedback e melhorar o modelo.
|
||||
- Big Pickle é um modelo oculto que está gratuito no OpenCode por tempo limitado. A equipe está usando esse tempo para coletar feedback e melhorar o modelo.
|
||||
- MiniMax M2.5 Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo.
|
||||
- MiMo V2 Pro Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo.
|
||||
- MiMo V2 Omni Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo.
|
||||
- Qwen3.6 Plus Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo.
|
||||
- Nemotron 3 Super Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo.
|
||||
- Big Pickle é um modelo stealth que está gratuito no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo.
|
||||
|
||||
<a href={email}>Entre em contato conosco</a> se você tiver alguma dúvida.
|
||||
<a href={email}>Entre em contato</a> se você tiver alguma dúvida.
|
||||
|
||||
---
|
||||
|
||||
### Recarga automática
|
||||
|
||||
Se seu saldo cair abaixo de $5, o Zen recarregará automaticamente $20.
|
||||
Se o seu saldo cair abaixo de $5, o Zen recarregará automaticamente $20.
|
||||
|
||||
Você pode alterar o valor da recarga automática. Você também pode desativar a recarga automática completamente.
|
||||
Você pode alterar o valor da recarga automática. Também pode desativar a recarga automática por completo.
|
||||
|
||||
---
|
||||
|
||||
### Limites mensais
|
||||
|
||||
Você também pode definir um limite de uso mensal para todo o espaço de trabalho e para cada membro de sua equipe.
|
||||
Você também pode definir um limite mensal de uso para todo o workspace e para cada membro da sua equipe.
|
||||
|
||||
Por exemplo, digamos que você defina um limite de uso mensal de $20, o Zen não usará mais de $20 em um mês. Mas se você tiver a recarga automática ativada, o Zen pode acabar cobrando mais de $20 se seu saldo cair abaixo de $5.
|
||||
Por exemplo, digamos que você defina um limite mensal de uso de $20; o Zen não usará mais de $20 em um mês. Mas, se você tiver a recarga automática ativada, o Zen pode acabar cobrando mais de $20 se o seu saldo cair abaixo de $5.
|
||||
|
||||
---
|
||||
|
||||
@@ -184,64 +188,69 @@ Por exemplo, digamos que você defina um limite de uso mensal de $20, o Zen não
|
||||
|
||||
| Modelo | Data de descontinuação |
|
||||
| ---------------- | ---------------------- |
|
||||
| Qwen3 Coder 480B | 6 de fev. de 2026 |
|
||||
| Kimi K2 Thinking | 6 de mar. de 2026 |
|
||||
| Kimi K2 | 6 de mar. de 2026 |
|
||||
| MiniMax M2.1 | 15 de mar. de 2026 |
|
||||
| GLM 4.7 | 15 de mar. de 2026 |
|
||||
| GLM 4.6 | 15 de mar. de 2026 |
|
||||
| MiniMax M2.1 | March 15, 2026 |
|
||||
| GLM 4.7 | March 15, 2026 |
|
||||
| GLM 4.6 | March 15, 2026 |
|
||||
| Gemini 3 Pro | March 9, 2026 |
|
||||
| Kimi K2 Thinking | March 6, 2026 |
|
||||
| Kimi K2 | March 6, 2026 |
|
||||
| Qwen3 Coder 480B | Feb 6, 2026 |
|
||||
|
||||
---
|
||||
|
||||
## Privacidade
|
||||
|
||||
Todos os nossos modelos estão hospedados nos EUA. Nossos provedores seguem uma política de zero retenção e não usam seus dados para treinamento de modelos, com as seguintes exceções:
|
||||
Todos os nossos modelos são hospedados nos EUA. Nossos provedores seguem uma política de zero-retention e não usam seus dados para treinamento de modelos, com as seguintes exceções:
|
||||
|
||||
- Big Pickle: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
|
||||
- MiniMax M2.5 Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
|
||||
- APIs da OpenAI: As solicitações são retidas por 30 dias de acordo com as [Políticas de Dados da OpenAI](https://platform.openai.com/docs/guides/your-data).
|
||||
- APIs da Anthropic: As solicitações são retidas por 30 dias de acordo com as [Políticas de Dados da Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- MiMo V2 Pro Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
|
||||
- MiMo V2 Omni Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
|
||||
- Qwen3.6 Plus Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
|
||||
- Nemotron 3 Super Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
|
||||
- OpenAI APIs: As solicitações são retidas por 30 dias de acordo com [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: As solicitações são retidas por 30 dias de acordo com [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## Para Equipes
|
||||
## Para equipes
|
||||
|
||||
O Zen também funciona muito bem para equipes. Você pode convidar colegas de equipe, atribuir funções, selecionar os modelos que sua equipe usa e muito mais.
|
||||
O Zen também funciona muito bem para equipes. Você pode convidar colegas de equipe, atribuir papéis, selecionar os modelos que sua equipe usa e muito mais.
|
||||
|
||||
:::note
|
||||
Os espaços de trabalho estão atualmente gratuitos para equipes como parte do beta.
|
||||
Workspaces estão atualmente gratuitos para equipes como parte do beta.
|
||||
:::
|
||||
|
||||
Gerenciar seu espaço de trabalho é atualmente gratuito para equipes como parte do beta. Em breve, compartilharemos mais detalhes sobre os preços.
|
||||
Gerenciar seu workspace está atualmente gratuito para equipes como parte do beta. Em breve, compartilharemos mais detalhes sobre os preços.
|
||||
|
||||
---
|
||||
|
||||
### Funções
|
||||
### Papéis
|
||||
|
||||
Você pode convidar colegas de equipe para seu espaço de trabalho e atribuir funções:
|
||||
Você pode convidar colegas de equipe para o seu workspace e atribuir papéis:
|
||||
|
||||
- **Admin**: Gerenciar modelos, membros, chaves de API e cobrança
|
||||
- **Membro**: Gerenciar apenas suas próprias chaves de API
|
||||
- **Member**: Gerenciar apenas as próprias chaves de API
|
||||
|
||||
Os administradores também podem definir limites de gastos mensais para cada membro para manter os custos sob controle.
|
||||
Admins também podem definir limites mensais de gastos para cada membro para manter os custos sob controle.
|
||||
|
||||
---
|
||||
|
||||
### Acesso ao modelo
|
||||
### Acesso a modelos
|
||||
|
||||
Os administradores podem habilitar ou desabilitar modelos específicos para o espaço de trabalho. Solicitações feitas a um modelo desabilitado retornarão um erro.
|
||||
Admins podem habilitar ou desabilitar modelos específicos para o workspace. Solicitações feitas a um modelo desabilitado retornarão um erro.
|
||||
|
||||
Isso é útil para casos em que você deseja desabilitar o uso de um modelo que coleta dados.
|
||||
Isso é útil em casos nos quais você quer desabilitar o uso de um modelo que coleta dados.
|
||||
|
||||
---
|
||||
|
||||
### Traga sua própria chave
|
||||
|
||||
Você pode usar suas próprias chaves de API da OpenAI ou Anthropic enquanto ainda acessa outros modelos no Zen.
|
||||
Você pode usar suas próprias chaves de API da OpenAI ou Anthropic e ainda acessar outros modelos no Zen.
|
||||
|
||||
Quando você usa suas próprias chaves, os tokens são cobrados diretamente pelo provedor, não pelo Zen.
|
||||
|
||||
Por exemplo, sua organização pode já ter uma chave para OpenAI ou Anthropic e você deseja usar essa em vez da que o Zen fornece.
|
||||
Por exemplo, sua organização pode já ter uma chave para OpenAI ou Anthropic e você quer usar essa em vez da que o Zen fornece.
|
||||
|
||||
---
|
||||
|
||||
@@ -249,7 +258,7 @@ Por exemplo, sua organização pode já ter uma chave para OpenAI ou Anthropic e
|
||||
|
||||
Criamos o OpenCode Zen para:
|
||||
|
||||
1. **Benchmark** os melhores modelos/provedores para agentes de codificação.
|
||||
2. Ter acesso às opções de **mais alta qualidade** e não degradar o desempenho ou redirecionar para provedores mais baratos.
|
||||
3. Repassar quaisquer **reduções de preço** vendendo ao custo; assim, a única margem é para cobrir nossas taxas de processamento.
|
||||
4. Não ter **vinculação** permitindo que você o use com qualquer outro agente de codificação. E sempre permitir que você use qualquer outro provedor com o OpenCode também.
|
||||
1. Fazer **benchmark** dos melhores modelos/provedores para agentes de codificação.
|
||||
2. Ter acesso às opções de **mais alta qualidade** e não degradar o desempenho nem encaminhar para provedores mais baratos.
|
||||
3. Repassar quaisquer **quedas de preço** vendendo a preço de custo; assim, a única margem é para cobrir nossas taxas de processamento.
|
||||
4. Não ter **lock-in**, permitindo que você o use com qualquer outro agente de codificação. E sempre permitir que você use qualquer outro provedor com o OpenCode também.
|
||||
|
||||
@@ -10,50 +10,51 @@ export const email = `mailto:${config.email}`
|
||||
OpenCode Zen — это список протестированных и проверенных моделей, предоставленный командой OpenCode.
|
||||
|
||||
:::note
|
||||
OpenCode Zen в настоящее время находится в стадии бета-тестирования.
|
||||
OpenCode Zen сейчас находится в бета-версии.
|
||||
:::
|
||||
|
||||
Zen работает как любой другой провайдер в OpenCode. Вы входите в OpenCode Zen и получаете
|
||||
ваш ключ API. Это **совершенно необязательно**, и вам не обязательно использовать его для использования
|
||||
OpenCode.
|
||||
свой ключ API. Это **полностью необязательно**, и вам не нужно использовать его,
|
||||
чтобы пользоваться OpenCode.
|
||||
|
||||
---
|
||||
|
||||
## Предыстория
|
||||
|
||||
Существует большое количество моделей, но лишь некоторые из них
|
||||
хорошо работают в качестве кодинг-агентов. Кроме того, большинство провайдеров
|
||||
настроены совсем по-другому; так что вы получите совсем другую производительность и качество.
|
||||
Существует огромное количество моделей, но только немногие из
|
||||
них хорошо работают как агенты для программирования. Кроме того, большинство провайдеров
|
||||
настроены очень по-разному, поэтому производительность и качество могут сильно отличаться.
|
||||
|
||||
:::tip
|
||||
Мы протестировали избранную группу моделей и поставщиков, которые хорошо работают с opencode.
|
||||
Мы протестировали выбранную группу моделей и провайдеров, которые хорошо работают с OpenCode.
|
||||
:::
|
||||
|
||||
Поэтому, если вы используете модель через что-то вроде OpenRouter, вы никогда не сможете
|
||||
уверен, что вы получаете лучшую версию модели, которую хотите.
|
||||
Поэтому, если вы используете модель через что-то вроде OpenRouter, вы никогда не можете
|
||||
быть уверены, что получаете лучшую версию нужной вам модели.
|
||||
|
||||
Чтобы это исправить, мы сделали пару вещей:
|
||||
Чтобы это исправить, мы сделали несколько вещей:
|
||||
|
||||
1. Мы протестировали избранную группу моделей и поговорили с их командами о том, как
|
||||
лучше всего запустить их.
|
||||
2. Затем мы поработали с несколькими поставщиками услуг, чтобы убедиться, что они обслуживаются правильно.
|
||||
3. Наконец, мы сравнили комбинацию модель/провайдер и составили
|
||||
список, который мы с удовольствием рекомендуем.
|
||||
1. Мы протестировали выбранную группу моделей и обсудили с их командами, как
|
||||
лучше всего их запускать.
|
||||
2. Затем мы поработали с несколькими провайдерами, чтобы убедиться, что эти модели
|
||||
отдаются корректно.
|
||||
3. Наконец, мы сравнили комбинации модель/провайдер и составили
|
||||
список, который готовы рекомендовать.
|
||||
|
||||
OpenCode Zen — это шлюз искусственного интеллекта, который дает вам доступ к этим моделям.
|
||||
OpenCode Zen — это AI-шлюз, который дает вам доступ к этим моделям.
|
||||
|
||||
---
|
||||
|
||||
## Как это работает
|
||||
|
||||
OpenCode Zen работает так же, как и любой другой поставщик OpenCode.
|
||||
OpenCode Zen работает как любой другой провайдер в OpenCode.
|
||||
|
||||
1. Вы входите в систему **<a href={console}>OpenCode Zen</a>**, добавляете платежные
|
||||
1. Вы входите в **<a href={console}>OpenCode Zen</a>**, добавляете платежные
|
||||
данные и копируете свой ключ API.
|
||||
2. Вы запускаете команду `/connect` в TUI, выбираете OpenCode Zen и вставляете свой ключ API.
|
||||
3. Запустите `/models` в TUI, чтобы просмотреть список рекомендуемых нами моделей.
|
||||
3. Запустите `/models` в TUI, чтобы увидеть список моделей, которые мы рекомендуем.
|
||||
|
||||
С вас взимается плата за каждый запрос, и вы можете добавить кредиты на свой счет.
|
||||
Плата взимается за каждый запрос, и вы можете пополнять баланс своего аккаунта.
|
||||
|
||||
---
|
||||
|
||||
@@ -61,51 +62,52 @@ OpenCode Zen работает так же, как и любой другой п
|
||||
|
||||
Вы также можете получить доступ к нашим моделям через следующие конечные точки API.
|
||||
|
||||
| Модель | Идентификатор модели | Конечная точка | Пакет AI SDK |
|
||||
| ------------------ | -------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Модель | Идентификатор модели | Конечная точка | Пакет AI SDK |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
[модель id](/docs/config/#models) в вашей конфигурации opencode
|
||||
использует формат `opencode/<model-id>`. Например, для Кодекса GPT 5.2 вы должны
|
||||
используйте `opencode/gpt-5.2-codex` в вашей конфигурации.
|
||||
[идентификатор модели](/docs/config/#models) в вашей конфигурации OpenCode
|
||||
использует формат `opencode/<model-id>`. Например, для GPT 5.3 Codex вам нужно
|
||||
использовать `opencode/gpt-5.3-codex` в своей конфигурации.
|
||||
|
||||
---
|
||||
|
||||
### Модели
|
||||
|
||||
Полный список доступных моделей и их метаданные можно получить по адресу:
|
||||
Вы можете получить полный список доступных моделей и их метаданных по адресу:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -115,60 +117,63 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## Цены
|
||||
|
||||
Мы поддерживаем модель оплаты по мере использования. Ниже приведены цены **за 1 миллион токенов**.
|
||||
Мы поддерживаем оплату по мере использования. Ниже указаны цены **за 1M токенов**.
|
||||
|
||||
| Модель | Вход | Выход | Кэшированное чтение | Кэшированная запись |
|
||||
| -------------------------------------- | --------- | --------- | ------------------- | ------------------- |
|
||||
| Big Pickle | Бесплатно | Бесплатно | Бесплатно | - |
|
||||
| MiniMax M2.5 Free | Бесплатно | Бесплатно | Бесплатно | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200 тыс. токенов) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200 тыс. токенов) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200 тыс. токенов) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200 тыс. токенов) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200 тыс. токенов) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200 тыс. токенов) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200 тыс. токенов) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200 тыс. токенов) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200 тыс. токенов) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200 тыс. токенов) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200 тыс. токенов) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200 тыс. токенов) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Бесплатно | Бесплатно | Бесплатно | - |
|
||||
| Модель | Вход | Выход | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
Вы можете заметить _Claude Haiku 3.5_ в своей истории использования. Это [недорогая модель](/docs/config/#models), которая используется для создания заголовков ваших сеансов.
|
||||
Вы можете заметить _Claude Haiku 3.5_ в истории использования. Это [недорогая модель](/docs/config/#models), которая используется для генерации заголовков ваших сессий.
|
||||
|
||||
:::note
|
||||
Комиссии по кредитной карте учитываются по себестоимости (4,4% + 0,30 доллара США за транзакцию); мы не взимаем ничего сверх этого.
|
||||
Комиссии по кредитным картам передаются по себестоимости (4.4% + $0.30 за транзакцию); мы ничего не начисляем сверх этого.
|
||||
:::
|
||||
|
||||
Бесплатные модели:
|
||||
|
||||
- MiniMax M2.5 Free доступен на OpenCode в течение ограниченного времени. Команда использует это время для сбора отзывов и улучшения модели.
|
||||
- Big Pickle — это стелс-модель, которая доступна бесплатно на OpenCode в течение ограниченного времени. Команда использует это время для сбора отзывов и улучшения модели.
|
||||
- MiniMax M2.5 Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель.
|
||||
- MiMo V2 Pro Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель.
|
||||
- MiMo V2 Omni Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель.
|
||||
- Qwen3.6 Plus Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель.
|
||||
- Nemotron 3 Super Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель.
|
||||
- Big Pickle — это скрытая модель, которая доступна бесплатно в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель.
|
||||
|
||||
<a href={email}>Свяжитесь с нами</a>, если у вас есть вопросы.
|
||||
|
||||
@@ -176,88 +181,93 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
### Автопополнение
|
||||
|
||||
Если ваш баланс упадет ниже 5 долларов, Zen автоматически пополнит 20 долларов.
|
||||
Если ваш баланс опустится ниже $5, Zen автоматически пополнит его на $20.
|
||||
|
||||
Вы можете изменить сумму автопополнения. Вы также можете полностью отключить автопополнение.
|
||||
Вы можете изменить сумму автопополнения. Также можно полностью отключить автопополнение.
|
||||
|
||||
---
|
||||
|
||||
### Ежемесячные лимиты
|
||||
|
||||
Вы также можете установить месячный лимит использования для всего рабочего пространства и для каждого
|
||||
член вашей команды.
|
||||
Вы также можете установить ежемесячный лимит использования для всего рабочего пространства и для каждого
|
||||
участника вашей команды.
|
||||
|
||||
Например, предположим, что вы установили ежемесячный лимит использования в размере 20 долларов США, Zen не будет использовать
|
||||
более 20 долларов в месяц. Но если у вас включено автопополнение, Zen может
|
||||
взимать с вас более 20 долларов США, если ваш баланс опускается ниже 5 долларов США.
|
||||
Например, предположим, что вы установили ежемесячный лимит использования в $20. Zen не будет
|
||||
использовать больше $20 в месяц. Но если у вас включено автопополнение, Zen может
|
||||
в итоге списать с вас больше $20, если ваш баланс опустится ниже $5.
|
||||
|
||||
---
|
||||
|
||||
### Устаревшие модели
|
||||
|
||||
| Модель | Дата отключения |
|
||||
| Модель | Дата устаревания |
|
||||
| ---------------- | ---------------- |
|
||||
| Qwen3 Coder 480B | 6 февр. 2026 г. |
|
||||
| Kimi K2 Thinking | 6 марта 2026 г. |
|
||||
| Kimi K2 | 6 марта 2026 г. |
|
||||
| MiniMax M2.1 | 15 марта 2026 г. |
|
||||
| GLM 4.7 | 15 марта 2026 г. |
|
||||
| GLM 4.6 | 15 марта 2026 г. |
|
||||
| MiniMax M2.1     | 15 марта 2026 г. |
|
||||
| GLM 4.7          | 15 марта 2026 г. |
|
||||
| GLM 4.6          | 15 марта 2026 г. |
|
||||
| Gemini 3 Pro     | 9 марта 2026 г.  |
|
||||
| Kimi K2 Thinking | 6 марта 2026 г.  |
|
||||
| Kimi K2          | 6 марта 2026 г.  |
|
||||
| Qwen3 Coder 480B | 6 февр. 2026 г.  |
|
||||
|
||||
---
|
||||
|
||||
## Конфиденциальность
|
||||
|
||||
Все наши модели размещены в США. Наши поставщики придерживаются политики нулевого хранения и не используют ваши данные для обучения моделей, за следующими исключениями:
|
||||
Все наши модели размещены в США. Наши провайдеры придерживаются политики нулевого хранения и не используют ваши данные для обучения моделей, за следующими исключениями:
|
||||
|
||||
- Big Pickle: во время бесплатного периода собранные данные могут быть использованы для улучшения модели.
|
||||
- MiniMax M2.5 Free: в течение бесплатного периода собранные данные могут использоваться для улучшения модели.
|
||||
- API OpenAI: запросы хранятся в течение 30 дней в соответствии с [Политикой данных OpenAI](https://platform.openai.com/docs/guides/your-data).
|
||||
- API-интерфейсы Anthropic: запросы хранятся в течение 30 дней в соответствии с [Политикой данных Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
- Big Pickle: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
|
||||
- MiniMax M2.5 Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
|
||||
- MiMo V2 Pro Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
|
||||
- MiMo V2 Omni Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
|
||||
- Qwen3.6 Plus Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
|
||||
- Nemotron 3 Super Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
|
||||
- OpenAI APIs: запросы хранятся 30 дней в соответствии с [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: запросы хранятся 30 дней в соответствии с [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## Для команд
|
||||
|
||||
Zen также отлично подходит для команд. Вы можете приглашать товарищей по команде, назначать роли, выбирать
|
||||
модели, которые использует ваша команда, и многое другое.
|
||||
Zen также отлично подходит для команд. Вы можете приглашать коллег, назначать роли, выбирать,
|
||||
какие модели использует ваша команда, и многое другое.
|
||||
|
||||
:::note
|
||||
Рабочие пространства в настоящее время бесплатны для команд в рамках бета-тестирования.
|
||||
Рабочие пространства сейчас бесплатны для команд в рамках бета-версии.
|
||||
:::
|
||||
|
||||
Управление вашим рабочим пространством в настоящее время бесплатно для команд в рамках бета-тестирования. Мы вскоре
|
||||
Управление вашим рабочим пространством сейчас бесплатно для команд в рамках бета-версии. Скоро мы
|
||||
поделимся более подробной информацией о ценах.
|
||||
|
||||
---
|
||||
|
||||
### Роли
|
||||
|
||||
Вы можете приглашать товарищей по команде в свое рабочее пространство и распределять роли:
|
||||
Вы можете приглашать коллег в свое рабочее пространство и назначать роли:
|
||||
|
||||
- **Администратор**: управляйте моделями, участниками, ключами API и выставлением счетов.
|
||||
- **Участник**: Управляйте только своими собственными ключами API.
|
||||
- **Admin**: управляет моделями, участниками, ключами API и выставлением счетов
|
||||
- **Member**: управляет только своими ключами API
|
||||
|
||||
Администраторы также могут установить ежемесячные лимиты расходов для каждого участника, чтобы держать расходы под контролем.
|
||||
Администраторы также могут устанавливать ежемесячные лимиты расходов для каждого участника, чтобы держать затраты под контролем.
|
||||
|
||||
---
|
||||
|
||||
### Доступ к модели
|
||||
### Доступ к моделям
|
||||
|
||||
Администраторы могут включать или отключать определенные модели для рабочей области. Запросы, сделанные к отключенной модели, вернут ошибку.
|
||||
Администраторы могут включать или отключать определенные модели для рабочего пространства. Запросы к отключенной модели будут возвращать ошибку.
|
||||
|
||||
Это полезно в случаях, когда вы хотите отключить использование модели, которая
|
||||
собирает данные.
|
||||
|
||||
---
|
||||
|
||||
### Использование собственных API-ключей
|
||||
### Использование собственного ключа
|
||||
|
||||
Вы можете использовать свои собственные ключи API OpenAI или Anthropic, сохраняя при этом доступ к другим моделям в Zen.
|
||||
Вы можете использовать свои собственные ключи API OpenAI или Anthropic, сохраняя доступ к другим моделям в Zen.
|
||||
|
||||
Когда вы используете свои собственные ключи, счета за токены взимаются непосредственно провайдером, а не Zen.
|
||||
Когда вы используете собственные ключи, токены тарифицируются напрямую провайдером, а не Zen.
|
||||
|
||||
Например, у вашей организации уже может быть ключ для OpenAI или Anthropic.
|
||||
Например, у вашей организации уже может быть ключ для OpenAI или Anthropic,
|
||||
и вы хотите использовать его вместо того, который предоставляет Zen.
|
||||
|
||||
---
|
||||
@@ -266,7 +276,7 @@ Zen также отлично подходит для команд. Вы мож
|
||||
|
||||
Мы создали OpenCode Zen, чтобы:
|
||||
|
||||
1. **Сравнить** лучшие модели/поставщики кодинг-агентов.
|
||||
2. Получить доступ к вариантам **наивысшего качества**, не снижая производительность и не обращаясь к более дешевым поставщикам.
|
||||
3. Передавать **снижение цен**, продавая по себестоимости; поэтому единственная наценка предназначена для покрытия наших комиссий за обработку.
|
||||
4. Исключить **привязку**, позволяя использовать его с любым другим кодинг-агентом. И всегда позволяя вам использовать любого другого провайдера с OpenCode.
|
||||
1. **Сравнить** лучшие комбинации модель/провайдер для агентов для программирования.
|
||||
2. Иметь доступ к вариантам **наивысшего качества** и не снижать производительность, а также не маршрутизировать запросы к более дешевым провайдерам.
|
||||
3. Передавать любые **снижения цен**, продавая по себестоимости; так что единственная наценка нужна для покрытия наших комиссий за обработку.
|
||||
4. Обеспечить **отсутствие привязки**, позволяя вам использовать его с любым другим агентом для программирования. И всегда позволять вам использовать любого другого провайдера с OpenCode.
|
||||
|
||||
@@ -7,106 +7,98 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen คือรายการโมเดลที่ได้รับการทดสอบและตรวจสอบแล้วโดยทีมงาน OpenCode
|
||||
OpenCode Zen คือรายการโมเดลที่ผ่านการทดสอบและยืนยันแล้วโดยทีม OpenCode
|
||||
|
||||
:::note
|
||||
OpenCode Zen ปัจจุบันอยู่ในช่วงเบต้า
|
||||
OpenCode Zen อยู่ในช่วงเบต้าในขณะนี้
|
||||
:::
|
||||
|
||||
Zen ทำงานเหมือนกับผู้ให้บริการรายอื่นๆ ใน OpenCode คุณเข้าสู่ระบบ OpenCode Zen และรับ
|
||||
รหัส API ของคุณ มันเป็น **ทางเลือกโดยสมบูรณ์** และคุณไม่จำเป็นต้องใช้มันเพื่อใช้งาน
|
||||
โอเพ่นโค้ด
|
||||
Zen ทำงานเหมือน provider อื่น ๆ ใน OpenCode คุณล็อกอินเข้า OpenCode Zen แล้วรับ
|
||||
API key ของคุณได้เลย มันเป็น **ทางเลือกทั้งหมด** และคุณไม่จำเป็นต้องใช้มันเพื่อใช้งาน
|
||||
OpenCode
|
||||
|
||||
---
|
||||
|
||||
## ภูมิหลัง
|
||||
## ข้อมูลเบื้องหลัง
|
||||
|
||||
มีหลายรุ่นครับ แต่มีเพียงไม่กี่รุ่นเท่านั้น
|
||||
โมเดลเหล่านี้ทำงานได้ดีในฐานะตัวแทนการเข้ารหัส นอกจากนี้ผู้ให้บริการส่วนใหญ่ก็มี
|
||||
กำหนดค่าแตกต่างกันมาก ดังนั้นคุณจึงได้รับประสิทธิภาพและคุณภาพที่แตกต่างกันมาก
|
||||
ปัจจุบันมีโมเดลจำนวนมาก แต่มีเพียงไม่กี่โมเดลเท่านั้นที่ทำงานได้ดีในฐานะ coding agent นอกจากนี้ provider ส่วนใหญ่ยังถูกตั้งค่าแตกต่างกันมาก จึงทำให้ประสิทธิภาพและคุณภาพที่ได้ต่างกันมากเช่นกัน
|
||||
|
||||
:::tip
|
||||
เราได้ทดสอบกลุ่มโมเดลและผู้ให้บริการที่เลือกซึ่งทำงานได้ดีกับ OpenCode
|
||||
เราได้ทดสอบโมเดลและ provider ที่คัดมาแล้วกลุ่มหนึ่งซึ่งทำงานได้ดีกับ OpenCode
|
||||
:::
|
||||
|
||||
ดังนั้นหากคุณใช้โมเดลผ่าน OpenRouter คุณจะไม่มีวันเป็นเช่นนั้น
|
||||
แน่ใจว่าคุณได้รับรุ่นที่ดีที่สุดของรุ่นที่คุณต้องการหรือไม่
|
||||
ดังนั้นหากคุณใช้โมเดลผ่านอะไรอย่าง OpenRouter คุณจะไม่มีทางแน่ใจได้ว่าคุณกำลังได้โมเดลเวอร์ชันที่ดีที่สุดที่คุณต้องการจริง ๆ
|
||||
|
||||
เพื่อแก้ไขปัญหานี้ เราได้ทำสองสิ่ง:
|
||||
เพื่อแก้ปัญหานี้ เราทำอยู่สองสามอย่าง:
|
||||
|
||||
1. เราได้ทดสอบกลุ่มโมเดลที่ได้รับการคัดเลือกและพูดคุยกับทีมของพวกเขาเกี่ยวกับวิธีการ
|
||||
ดีที่สุดเรียกใช้พวกเขา
|
||||
2. จากนั้นเราทำงานร่วมกับผู้ให้บริการบางรายเพื่อให้แน่ใจว่าผู้ให้บริการเหล่านี้ได้รับบริการแล้ว
|
||||
อย่างถูกต้อง
|
||||
3. ในที่สุด เราก็เปรียบเทียบการรวมกันของ model/provider และได้ผลลัพธ์ออกมา
|
||||
กับรายการที่เรารู้สึกดีมาแนะนำ
|
||||
1. เราทดสอบโมเดลที่คัดมาแล้วกลุ่มหนึ่ง และพูดคุยกับทีมของพวกเขาเกี่ยวกับวิธีรันให้ดีที่สุด
|
||||
2. จากนั้นเราทำงานร่วมกับ provider บางรายเพื่อให้แน่ใจว่าโมเดลเหล่านี้ถูกเสิร์ฟอย่างถูกต้อง
|
||||
3. สุดท้าย เรา benchmark ชุดจับคู่ model/provider และสรุปออกมาเป็นรายการที่เรามั่นใจพอจะแนะนำ
|
||||
|
||||
OpenCode Zen เป็นเกตเวย์ AI ที่ให้คุณเข้าถึงโมเดลเหล่านี้
|
||||
OpenCode Zen คือ AI gateway ที่ให้คุณเข้าถึงโมเดลเหล่านี้
|
||||
|
||||
---
|
||||
|
||||
## มันทำงานอย่างไร
|
||||
## วิธีการทำงาน
|
||||
|
||||
OpenCode Zen ทำงานเหมือนกับผู้ให้บริการรายอื่นๆ ใน OpenCode
|
||||
OpenCode Zen ทำงานเหมือน provider อื่น ๆ ใน OpenCode
|
||||
|
||||
1. คุณลงชื่อเข้าใช้ **<a href={console}>OpenCode Zen</a>** เพิ่มการเรียกเก็บเงินของคุณ
|
||||
รายละเอียดและคัดลอกรหัส API ของคุณ
|
||||
2. คุณรันคำสั่ง `/connect` ใน TUI เลือก OpenCode Zen และวางคีย์ API ของคุณ
|
||||
3. เรียกใช้ `/models` ใน TUI เพื่อดูรายการรุ่นที่เราแนะนำ
|
||||
1. คุณลงชื่อเข้าใช้ **<a href={console}>OpenCode Zen</a>** เพิ่มรายละเอียดการเรียกเก็บเงิน แล้วคัดลอก API key ของคุณ
|
||||
2. รันคำสั่ง `/connect` ใน TUI เลือก OpenCode Zen แล้ววาง API key ของคุณ
|
||||
3. รัน `/models` ใน TUI เพื่อดูรายการโมเดลที่เราแนะนำ
|
||||
|
||||
คุณจะถูกเรียกเก็บเงินตามคำขอและคุณสามารถเพิ่มเครดิตให้กับบัญชีของคุณได้
|
||||
คุณจะถูกคิดค่าบริการตามแต่ละ request และสามารถเติมเครดิตเข้าบัญชีได้
|
||||
|
||||
---
|
||||
|
||||
## จุดสิ้นสุด
|
||||
## Endpoints
|
||||
|
||||
คุณยังสามารถเข้าถึงโมเดลของเราผ่านทางจุดสิ้นสุด API ต่อไปนี้
|
||||
คุณยังสามารถเข้าถึงโมเดลของเราผ่าน API endpoints ต่อไปนี้ได้
|
||||
|
||||
| Model | Model ID | Endpoint | แพ็คเกจ AI SDK |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Model | Model ID | Endpoint | AI SDK Package |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
[model id](/docs/config/#models) ในการกำหนดค่า OpenCode ของคุณ
|
||||
ใช้รูปแบบ `opencode/<model-id>` ตัวอย่างเช่น สำหรับ GPT 5.2 Codex คุณจะต้อง
|
||||
ใช้ `opencode/gpt-5.2-codex` ในการกำหนดค่าของคุณ
|
||||
[model id](/docs/config/#models) ใน OpenCode config ของคุณใช้รูปแบบ `opencode/<model-id>` ตัวอย่างเช่น สำหรับ GPT 5.3 Codex คุณจะใช้ `opencode/gpt-5.3-codex` ใน config ของคุณ
|
||||
|
||||
---
|
||||
|
||||
### โมเดล
|
||||
### Models
|
||||
|
||||
คุณสามารถดึงรายชื่อรุ่นที่มีจำหน่ายและข้อมูลเมตาทั้งหมดได้จาก:
|
||||
คุณสามารถดึงรายการโมเดลทั้งหมดที่พร้อมใช้งานและ metadata ของมันได้จาก:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -116,158 +108,159 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## ราคา
|
||||
|
||||
เราสนับสนุนรูปแบบการจ่ายเงินตามการใช้งาน ด้านล่างนี้คือราคา **ต่อ 1M tokens**
|
||||
เรารองรับรูปแบบจ่ายตามการใช้งาน ด้านล่างคือราคา **ต่อ 1M tokens**
|
||||
|
||||
| Model | ป้อนข้อมูล | เอาท์พุต | แคชอ่าน | เขียนในแคช |
|
||||
| --------------------------------- | ---------- | -------- | ------- | ---------- |
|
||||
| Big Pickle | ฟรี | ฟรี | ฟรี | - |
|
||||
| MiniMax M2.5 Free | ฟรี | ฟรี | ฟรี | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | ฟรี | ฟรี | ฟรี | - |
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
คุณอาจสังเกตเห็น _Claude Haiku 3.5_ ในประวัติการใช้งานของคุณ นี่คือ [โมเดลราคาประหยัด](/docs/config/#models) ที่ใช้ในการสร้างชื่อเซสชันของคุณ
|
||||
คุณอาจสังเกตเห็น _Claude Haiku 3.5_ ในประวัติการใช้งานของคุณ นี่คือ [low cost model](/docs/config/#models) ที่ใช้สร้างชื่อ session ของคุณ
|
||||
|
||||
:::note
|
||||
ค่าธรรมเนียมบัตรเครดิตจะถูกส่งต่อในราคาต้นทุน (4.4% + 0.30 ดอลลาร์สหรัฐฯ ต่อธุรกรรม) เราไม่คิดค่าใช้จ่ายใดๆ นอกเหนือจากนั้น
|
||||
ค่าธรรมเนียมบัตรเครดิตจะถูกส่งต่อในราคาทุน (4.4% + $0.30 ต่อธุรกรรม); เราไม่ได้คิดค่าใช้จ่ายเพิ่มจากนั้น
|
||||
:::
|
||||
|
||||
รุ่นฟรี:
|
||||
โมเดลฟรี:
|
||||
|
||||
- MiniMax M2.5 Free พร้อมใช้งานบน OpenCode ในระยะเวลาจำกัด ทีมงานใช้เวลานี้เพื่อรวบรวมคำติชมและปรับปรุงโมเดล
|
||||
- Big Pickle เป็นโมเดลล่องหนที่ให้บริการฟรีบน OpenCode ในระยะเวลาจำกัด ทีมงานใช้เวลานี้เพื่อรวบรวมคำติชมและปรับปรุงโมเดล
|
||||
- MiniMax M2.5 Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล
|
||||
- MiMo V2 Pro Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล
|
||||
- MiMo V2 Omni Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล
|
||||
- Qwen3.6 Plus Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล
|
||||
- Nemotron 3 Super Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล
|
||||
- Big Pickle เป็น stealth model ที่ใช้งานฟรีบน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล
|
||||
|
||||
<a href={email}>Contact us</a> if you have any questions.
|
||||
<a href={email}>ติดต่อเรา</a> หากคุณมีคำถาม
|
||||
|
||||
---
|
||||
|
||||
### โหลดซ้ำอัตโนมัติ
|
||||
### Auto-reload
|
||||
|
||||
หากยอดคงเหลือของคุณต่ำกว่า $5 Zen จะโหลด $20 อีกครั้งโดยอัตโนมัติ
|
||||
หากยอดคงเหลือของคุณต่ำกว่า $5 Zen จะเติมเงิน $20 ให้อัตโนมัติ
|
||||
|
||||
คุณสามารถเปลี่ยนจำนวนการโหลดอัตโนมัติได้ คุณยังสามารถปิดการโหลดอัตโนมัติทั้งหมดได้อีกด้วย
|
||||
คุณสามารถเปลี่ยนจำนวนเงินสำหรับ auto-reload ได้ และยังสามารถปิด auto-reload ทั้งหมดได้ด้วย
|
||||
|
||||
---
|
||||
|
||||
### ขีดจำกัดรายเดือน
|
||||
### Monthly limits
|
||||
|
||||
คุณยังสามารถกำหนดขีดจำกัดการใช้งานรายเดือนสำหรับพื้นที่ทำงานทั้งหมดและสำหรับแต่ละรายการได้
|
||||
สมาชิกในทีมของคุณ
|
||||
คุณยังสามารถตั้งขีดจำกัดการใช้งานรายเดือนสำหรับทั้ง workspace และสำหรับสมาชิกแต่ละคนในทีมของคุณได้
|
||||
|
||||
ตัวอย่างเช่น สมมติว่าคุณกำหนดขีดจำกัดการใช้งานรายเดือนไว้ที่ 20 ดอลลาร์ Zen จะไม่ใช้
|
||||
มากกว่า $20 ในหนึ่งเดือน แต่ถ้าคุณเปิดใช้งานการโหลดซ้ำอัตโนมัติ Zen อาจจะจบลง
|
||||
เรียกเก็บเงินคุณมากกว่า $20 หากยอดคงเหลือของคุณต่ำกว่า $5
|
||||
ตัวอย่างเช่น หากคุณตั้งขีดจำกัดการใช้งานรายเดือนไว้ที่ $20 Zen จะไม่ใช้งานเกิน $20 ในหนึ่งเดือน แต่ถ้าคุณเปิด auto-reload ไว้ Zen อาจลงเอยด้วยการเรียกเก็บเงินคุณเกิน $20 หากยอดคงเหลือของคุณต่ำกว่า $5
|
||||
|
||||
---
|
||||
|
||||
### โมเดลที่เลิกใช้แล้ว
|
||||
### Deprecated models
|
||||
|
||||
| Model | วันที่เลิกใช้ |
|
||||
| ---------------- | ------------- |
|
||||
| Qwen3 Coder 480B | 6 ก.พ. 2026 |
|
||||
| Kimi K2 Thinking | 6 มี.ค. 2026 |
|
||||
| Kimi K2 | 6 มี.ค. 2026 |
|
||||
| MiniMax M2.1 | 15 มี.ค. 2026 |
|
||||
| GLM 4.7 | 15 มี.ค. 2026 |
|
||||
| GLM 4.6 | 15 มี.ค. 2026 |
|
||||
| Model | Deprecation date |
|
||||
| ---------------- | ---------------- |
|
||||
| MiniMax M2.1 | March 15, 2026 |
|
||||
| GLM 4.7 | March 15, 2026 |
|
||||
| GLM 4.6 | March 15, 2026 |
|
||||
| Gemini 3 Pro | March 9, 2026 |
|
||||
| Kimi K2 Thinking | March 6, 2026 |
|
||||
| Kimi K2 | March 6, 2026 |
|
||||
| Qwen3 Coder 480B | Feb 6, 2026 |
|
||||
|
||||
---
|
||||
|
||||
## ความเป็นส่วนตัว
|
||||
|
||||
โมเดลทั้งหมดของเราโฮสต์ในสหรัฐอเมริกา ผู้ให้บริการของเราปฏิบัติตามนโยบายการเก็บรักษาเป็นศูนย์ และไม่ใช้ข้อมูลของคุณสำหรับการฝึกโมเดล โดยมีข้อยกเว้นต่อไปนี้:
|
||||
โมเดลทั้งหมดของเราโฮสต์อยู่ใน US provider ของเราปฏิบัติตามนโยบาย zero-retention และจะไม่ใช้ข้อมูลของคุณเพื่อฝึกโมเดล ยกเว้นกรณีต่อไปนี้:
|
||||
|
||||
- Big Pickle: ในช่วงระยะเวลาฟรี ข้อมูลที่รวบรวมอาจนำไปใช้ในการปรับปรุงโมเดลได้
|
||||
- MiniMax M2.5 Free: ในช่วงระยะเวลาฟรี ข้อมูลที่รวบรวมอาจนำไปใช้ในการปรับปรุงโมเดล
|
||||
- OpenAI API: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [นโยบายข้อมูลของ OpenAI](https://platform.openai.com/docs/guides/your-data)
|
||||
- Anthropic API: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [นโยบายข้อมูลของ Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage)
|
||||
- Big Pickle: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
|
||||
- MiniMax M2.5 Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
|
||||
- MiMo V2 Pro Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
|
||||
- MiMo V2 Omni Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
|
||||
- Qwen3.6 Plus Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
|
||||
- Nemotron 3 Super Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
|
||||
- OpenAI APIs: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
---
|
||||
|
||||
## สำหรับทีม
|
||||
|
||||
Zen ยังใช้งานได้ดีกับทีมอีกด้วย คุณสามารถเชิญเพื่อนร่วมทีม กำหนดบทบาท ดูแลจัดการได้
|
||||
โมเดลที่ทีมของคุณใช้ และอื่นๆ
|
||||
Zen ทำงานได้ดีกับทีมเช่นกัน คุณสามารถเชิญเพื่อนร่วมทีม กำหนด roles คัดเลือกโมเดลที่ทีมของคุณใช้ และทำอย่างอื่นได้อีกมาก
|
||||
|
||||
:::note
|
||||
ขณะนี้พื้นที่ทำงานให้บริการฟรีสำหรับทีมโดยเป็นส่วนหนึ่งของเวอร์ชันเบต้า
|
||||
Workspace สำหรับทีมยังใช้งานได้ฟรีในช่วงเบต้า
|
||||
:::
|
||||
|
||||
ขณะนี้การจัดการพื้นที่ทำงานของคุณให้บริการฟรีสำหรับทีมโดยเป็นส่วนหนึ่งของเวอร์ชันเบต้า เราจะเป็น
|
||||
แบ่งปันรายละเอียดเพิ่มเติมเกี่ยวกับราคาเร็ว ๆ นี้
|
||||
การจัดการ workspace ของคุณยังใช้งานได้ฟรีสำหรับทีมในช่วงเบต้า เราจะแชร์รายละเอียดเรื่องราคาเพิ่มเติมเร็ว ๆ นี้
|
||||
|
||||
---
|
||||
|
||||
### บทบาท
|
||||
### Roles
|
||||
|
||||
คุณสามารถเชิญเพื่อนร่วมทีมเข้ามาในพื้นที่ทำงานของคุณและมอบหมายบทบาทได้:
|
||||
คุณสามารถเชิญเพื่อนร่วมทีมเข้ามาใน workspace และกำหนด roles ได้:
|
||||
|
||||
- **ผู้ดูแลระบบ**: จัดการโมเดล สมาชิก คีย์ API และการเรียกเก็บเงิน
|
||||
- **สมาชิก**: จัดการเฉพาะคีย์ API ของตนเองเท่านั้น
|
||||
- **Admin**: จัดการโมเดล สมาชิก API keys และการเรียกเก็บเงิน
|
||||
- **Member**: จัดการได้เฉพาะ API keys ของตัวเอง
|
||||
|
||||
ผู้ดูแลระบบยังสามารถกำหนดวงเงินการใช้จ่ายรายเดือนสำหรับสมาชิกแต่ละคนเพื่อควบคุมค่าใช้จ่ายได้
|
||||
Admins ยังสามารถตั้งขีดจำกัดค่าใช้จ่ายรายเดือนสำหรับสมาชิกแต่ละคน เพื่อช่วยควบคุมต้นทุนได้ด้วย
|
||||
|
||||
---
|
||||
|
||||
### การเข้าถึงโมเดล
|
||||
### Model access
|
||||
|
||||
ผู้ดูแลระบบสามารถเปิดหรือปิดใช้งานโมเดลเฉพาะสำหรับพื้นที่ทำงานได้ คำขอที่ทำกับโมเดลที่ถูกปิดใช้งานจะส่งคืนข้อผิดพลาด
|
||||
Admins สามารถเปิดหรือปิดโมเดลบางตัวสำหรับ workspace ได้ request ที่ส่งไปยังโมเดลที่ถูกปิดจะส่งกลับเป็น error
|
||||
|
||||
สิ่งนี้มีประโยชน์สำหรับกรณีที่คุณต้องการปิดการใช้งานโมเดลนั้น
|
||||
รวบรวมข้อมูล
|
||||
สิ่งนี้มีประโยชน์ในกรณีที่คุณต้องการปิดการใช้งานโมเดลที่มีการเก็บข้อมูล
|
||||
|
||||
---
|
||||
|
||||
### นำคีย์ของคุณมาเอง
|
||||
### Bring your own key
|
||||
|
||||
คุณสามารถใช้คีย์ OpenAI หรือ Anthropic API ของคุณเองในขณะที่ยังเข้าถึงรุ่นอื่นๆ ใน Zen ได้
|
||||
คุณสามารถใช้ OpenAI หรือ Anthropic API keys ของคุณเองได้ ขณะเดียวกันก็ยังเข้าถึงโมเดลอื่น ๆ ใน Zen ได้อยู่
|
||||
|
||||
เมื่อคุณใช้คีย์ของคุณเอง tokens จะถูกเรียกเก็บเงินโดยตรงจากผู้ให้บริการ ไม่ใช่โดย Zen
|
||||
เมื่อคุณใช้ keys ของคุณเอง ค่า tokens จะถูกคิดโดย provider โดยตรง ไม่ได้คิดโดย Zen
|
||||
|
||||
ตัวอย่างเช่น องค์กรของคุณอาจมีคีย์สำหรับ OpenAI หรือ Anthropic อยู่แล้ว
|
||||
และคุณต้องการใช้สิ่งนั้นแทนอันที่ Zen มอบให้
|
||||
ตัวอย่างเช่น องค์กรของคุณอาจมี key สำหรับ OpenAI หรือ Anthropic อยู่แล้ว และคุณต้องการใช้ key นั้นแทน key ที่ Zen จัดให้
|
||||
|
||||
---
|
||||
|
||||
## เป้าหมาย
|
||||
|
||||
เราสร้าง OpenCode Zen เพื่อ:
|
||||
เราสร้าง OpenCode Zen ขึ้นมาเพื่อ:
|
||||
|
||||
1. **เกณฑ์มาตรฐาน** โมเดล/providers ที่ดีที่สุดสำหรับเอเจนต์เขียนโค้ด
|
||||
2. เข้าถึงตัวเลือก **คุณภาพสูงสุด** และไม่ดาวน์เกรดประสิทธิภาพหรือเปลี่ยนเส้นทางไปยังผู้ให้บริการที่ราคาถูกกว่า
|
||||
3. ส่งต่อ **ราคาที่ลดลง** โดยการขายในราคาต้นทุน ดังนั้นมาร์กอัปเพียงอย่างเดียวคือครอบคลุมค่าธรรมเนียมการดำเนินการของเรา
|
||||
4. **ไม่มีการล็อคอิน** โดยอนุญาตให้คุณใช้กับเอเจนต์การเขียนโค้ดอื่นๆ และให้คุณใช้ผู้ให้บริการรายอื่นกับ OpenCode ได้เช่นกัน
|
||||
1. **Benchmark** โมเดล/provider ที่ดีที่สุดสำหรับ coding agents
|
||||
2. เข้าถึงตัวเลือกที่มี **คุณภาพสูงสุด** โดยไม่ลดประสิทธิภาพหรือ route ไปยัง provider ที่ถูกกว่า
|
||||
3. ส่งต่อ **การลดราคา** ด้วยการขายในราคาทุน ดังนั้น markup เพียงอย่างเดียวคือเพื่อครอบคลุมค่าธรรมเนียมการประมวลผลของเรา
|
||||
4. ทำให้เกิด **no lock-in** โดยให้คุณใช้มันร่วมกับ coding agent อื่น ๆ ได้ และยังเปิดให้คุณใช้ provider อื่น ๆ กับ OpenCode ได้เสมอ
|
||||
|
||||
@@ -7,31 +7,31 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen, opencode ekibi tarafından test edilip doğrulanmış modellerin bir listesidir.
|
||||
OpenCode Zen, OpenCode ekibi tarafından test edilip doğrulanmış modellerin bir listesidir.
|
||||
|
||||
:::note
|
||||
OpenCode Zen şu anda beta aşamasındadır.
|
||||
OpenCode Zen şu anda beta olarak mevcut.
|
||||
:::
|
||||
|
||||
Zen, opencode'daki diğer sağlayıcılar gibi çalışır. OpenCode Zen'e giriş yapar ve API anahtarınızı alırsınız. Tamamen isteğe bağlıdır ve opencode kullanmak için bunu kullanmanıza gerek yoktur.
|
||||
Zen, OpenCode'daki diğer sağlayıcılar gibi çalışır. OpenCode Zen'de oturum açıp API anahtarınızı alırsınız. Bu **tamamen isteğe bağlıdır** ve OpenCode kullanmak için buna ihtiyacınız yoktur.
|
||||
|
||||
---
|
||||
|
||||
## Arka plan
|
||||
|
||||
Piyasada çok sayıda model var ancak bu modellerden sadece birkaçı kodlama ajanı olarak iyi çalışır. Ayrıca çoğu sağlayıcı çok farklı şekilde yapılandırılmıştır; bu nedenle çok farklı performans ve kalite elde edersiniz.
|
||||
Piyasada çok sayıda model var, ancak bunların yalnızca birkaçı kodlama ajanı olarak iyi çalışıyor. Ayrıca çoğu sağlayıcı çok farklı şekilde yapılandırılıyor; bu yüzden performans ve kalite de ciddi ölçüde değişiyor.
|
||||
|
||||
:::tip
|
||||
opencode ile iyi çalışan seçkin bir grup model ve sağlayıcıyı test ettik.
|
||||
OpenCode ile iyi çalışan seçili bir model ve sağlayıcı grubunu test ettik.
|
||||
:::
|
||||
|
||||
Bu nedenle, OpenRouter gibi bir şey üzerinden bir model kullanıyorsanız, istediğiniz modelin en iyi sürümünü alıp almadığınızdan asla emin olamazsınız.
|
||||
Bu yüzden, OpenRouter gibi bir şey üzerinden model kullanıyorsanız istediğiniz modelin en iyi sürümünü alıp almadığınızdan asla emin olamazsınız.
|
||||
|
||||
Bunu düzeltmek için birkaç şey yaptık:
|
||||
|
||||
1. Seçkin bir grup modeli test ettik ve ekipleriyle bunları en iyi nasıl çalıştıracakları hakkında konuştuk.
|
||||
2. Daha sonra bunların doğru şekilde sunulduğundan emin olmak için birkaç sağlayıcıyla çalıştık.
|
||||
3. Son olarak model/sağlayıcı kombinasyonunu karşılaştırdık ve önermekten memnuniyet duyduğumuz bir liste oluşturduk.
|
||||
1. Seçili bir model grubunu test ettik ve bunları en iyi şekilde nasıl çalıştıracağımız konusunda ekipleriyle görüştük.
|
||||
2. Ardından, bunların doğru şekilde sunulduğundan emin olmak için birkaç sağlayıcıyla birlikte çalıştık.
|
||||
3. Son olarak, model/sağlayıcı kombinasyonlarını benchmark edip gönül rahatlığıyla önerebileceğimiz bir liste çıkardık.
|
||||
|
||||
OpenCode Zen, bu modellere erişmenizi sağlayan bir AI ağ geçididir.
|
||||
|
||||
@@ -39,9 +39,9 @@ OpenCode Zen, bu modellere erişmenizi sağlayan bir AI ağ geçididir.
|
||||
|
||||
## Nasıl çalışır
|
||||
|
||||
OpenCode Zen, opencode'daki diğer sağlayıcılar gibi çalışır.
|
||||
OpenCode Zen, OpenCode'daki diğer sağlayıcılar gibi çalışır.
|
||||
|
||||
1. **<a href={console}>OpenCode Zen</a>**'de oturum açın, fatura ayrıntılarınızı ekleyin ve API anahtarınızı kopyalayın.
|
||||
1. **<a href={console}>OpenCode Zen</a>**'de oturum açın, faturalandırma bilgilerinizi ekleyin ve API anahtarınızı kopyalayın.
|
||||
2. TUI'de `/connect` komutunu çalıştırın, OpenCode Zen'i seçin ve API anahtarınızı yapıştırın.
|
||||
3. Önerdiğimiz modellerin listesini görmek için TUI'de `/models` komutunu çalıştırın.
|
||||
|
||||
@@ -49,53 +49,54 @@ OpenCode Zen, opencode'daki diğer sağlayıcılar gibi çalışır.
|
||||
|
||||
---
|
||||
|
||||
## Uç Noktalar
|
||||
## Uç noktalar
|
||||
|
||||
Modellerimize aşağıdaki API uç noktaları aracılığıyla da erişebilirsiniz.
|
||||
|
||||
| Model | Model ID | Endpoint | AI SDK Package |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Model | Model ID | Endpoint | AI SDK Package |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
opencode yapılandırmanızdaki [model kimliği](/docs/config/#models) `opencode/<model-id>` biçimini kullanır. Örneğin, GPT 5.2 Codex için yapılandırmanızda `opencode/gpt-5.2-codex` kullanırsınız.
|
||||
OpenCode yapılandırmanızdaki [model id](/docs/config/#models) `opencode/<model-id>` biçimini kullanır. Örneğin, GPT 5.3 Codex için yapılandırmanızda `opencode/gpt-5.3-codex` kullanırsınız.
|
||||
|
||||
---
|
||||
|
||||
### Modeller
|
||||
|
||||
Mevcut modellerin tam listesini ve meta verilerini şuradan alabilirsiniz:
|
||||
Mevcut modellerin tam listesini ve metadata'larını şuradan alabilirsiniz:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -105,59 +106,62 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## Fiyatlandırma
|
||||
|
||||
Kullandıkça öde modelini destekliyoruz. Aşağıda **1 milyon token başına** fiyatlar verilmiştir.
|
||||
Kullandıkça öde modelini destekliyoruz. Aşağıda **1M token başına** fiyatlar yer alıyor.
|
||||
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------ | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
| Model | Input | Output | Cached Read | Cached Write |
|
||||
| --------------------------------- | ------ | ------- | ----------- | ------------ |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
Kullanım geçmişinizde _Claude Haiku 3.5_ fark edebilirsiniz. Bu, oturumlarınızın başlıklarını oluşturmak için kullanılan [düşük maliyetli bir modeldir](/docs/config/#models).
|
||||
Kullanım geçmişinizde _Claude Haiku 3.5_ görebilirsiniz. Bu, oturum başlıklarınızı oluşturmak için kullanılan [düşük maliyetli bir modeldir](/docs/config/#models).
|
||||
|
||||
:::note
|
||||
Kredi kartı ücretleri maliyetine yansıtılır (işlem başına %4,4 + 0,30$); bunun ötesinde hiçbir ücret talep etmiyoruz.
|
||||
Kredi kartı ücretleri maliyet üzerinden yansıtılır (%4.4 + işlem başına $0.30); bunun dışında ekstra bir ücret almıyoruz.
|
||||
:::
|
||||
|
||||
Ücretsiz modeller:
|
||||
|
||||
- MiniMax M2.5 Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor.
|
||||
- MiMo V2 Pro Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor.
|
||||
- MiMo V2 Omni Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor.
|
||||
- Qwen3.6 Plus Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor.
|
||||
- Nemotron 3 Super Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor.
|
||||
- Big Pickle, sınırlı bir süre için OpenCode'da ücretsiz olan gizli bir modeldir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor.
|
||||
|
||||
Sorularınız varsa <a href={email}>bizimle iletişime geçin</a>.
|
||||
@@ -166,7 +170,7 @@ Sorularınız varsa <a href={email}>bizimle iletişime geçin</a>.
|
||||
|
||||
### Otomatik yükleme
|
||||
|
||||
Bakiyeniz 5$'ın altına düşerse, Zen otomatik olarak 20$ yükler.
|
||||
Bakiyeniz $5'in altına düşerse, Zen otomatik olarak $20 yükler.
|
||||
|
||||
Otomatik yükleme tutarını değiştirebilirsiniz. Otomatik yüklemeyi tamamen devre dışı da bırakabilirsiniz.
|
||||
|
||||
@@ -174,9 +178,9 @@ Otomatik yükleme tutarını değiştirebilirsiniz. Otomatik yüklemeyi tamamen
|
||||
|
||||
### Aylık limitler
|
||||
|
||||
Ayrıca tüm çalışma alanı ve ekibinizin her üyesi için aylık kullanım limiti belirleyebilirsiniz.
|
||||
Ayrıca tüm çalışma alanı için ve ekibinizdeki her üye için aylık kullanım limiti belirleyebilirsiniz.
|
||||
|
||||
Örneğin, aylık kullanım limitini 20$ olarak ayarladığınızı varsayalım, Zen bir ayda 20$'dan fazla kullanmaz. Ancak otomatik yüklemeyi etkinleştirdiyseniz, bakiyeniz 5$'ın altına düşerse Zen sizden 20$'dan fazla ücret alabilir.
|
||||
Örneğin, aylık kullanım limitini $20 olarak ayarladığınızı düşünelim; Zen bir ay içinde $20'den fazla kullanmaz. Ancak otomatik yükleme açıksa, bakiyeniz $5'in altına düşerse Zen sizden $20'den fazla ücret çekebilir.
|
||||
|
||||
---
|
||||
|
||||
@@ -184,27 +188,32 @@ Ayrıca tüm çalışma alanı ve ekibinizin her üyesi için aylık kullanım l
|
||||
|
||||
| Model | Kullanımdan kaldırılma tarihi |
|
||||
| ---------------- | ----------------------------- |
|
||||
| Qwen3 Coder 480B | 6 Şub 2026 |
|
||||
| Kimi K2 Thinking | 6 Mar 2026 |
|
||||
| Kimi K2 | 6 Mar 2026 |
|
||||
| MiniMax M2.1 | 15 Mar 2026 |
|
||||
| GLM 4.7 | 15 Mar 2026 |
|
||||
| GLM 4.6 | 15 Mar 2026 |
|
||||
| MiniMax M2.1 | 15 Mar 2026 |
|
||||
| GLM 4.7 | 15 Mar 2026 |
|
||||
| GLM 4.6 | 15 Mar 2026 |
|
||||
| Gemini 3 Pro | 9 Mar 2026 |
|
||||
| Kimi K2 Thinking | 6 Mar 2026 |
|
||||
| Kimi K2 | 6 Mar 2026 |
|
||||
| Qwen3 Coder 480B | 6 Şub 2026 |
|
||||
|
||||
---
|
||||
|
||||
## Gizlilik
|
||||
|
||||
Tüm modellerimiz ABD'de barındırılmaktadır. Sağlayıcılarımız sıfır saklama politikasını izler ve aşağıdaki istisnalar dışında verilerinizi model eğitimi için kullanmaz:
|
||||
Tüm modellerimiz ABD'de barındırılıyor. Sağlayıcılarımız sıfır saklama (zero-retention) politikası uygular ve aşağıdaki istisnalar dışında verilerinizi model eğitimi için kullanmaz:
|
||||
|
||||
- Big Pickle: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
|
||||
- MiniMax M2.5 Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
|
||||
- OpenAI API'leri: İstekler [OpenAI'nin Veri Politikaları](https://platform.openai.com/docs/guides/your-data) uyarınca 30 gün boyunca saklanır.
|
||||
- Anthropic API'leri: İstekler [Anthropic'in Veri Politikaları](https://docs.anthropic.com/en/docs/claude-code/data-usage) uyarınca 30 gün boyunca saklanır.
|
||||
- MiMo V2 Pro Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
|
||||
- MiMo V2 Omni Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
|
||||
- Qwen3.6 Plus Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
|
||||
- Nemotron 3 Super Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
|
||||
- OpenAI API'leri: İstekler [OpenAI'nin Veri Politikaları](https://platform.openai.com/docs/guides/your-data) uyarınca 30 gün boyunca saklanır.
|
||||
- Anthropic API'leri: İstekler [Anthropic'in Veri Politikaları](https://docs.anthropic.com/en/docs/claude-code/data-usage) uyarınca 30 gün boyunca saklanır.
|
||||
|
||||
---
|
||||
|
||||
## Ekipler İçin
|
||||
## Ekipler için
|
||||
|
||||
Zen ekipler için de harika çalışır. Ekip arkadaşlarınızı davet edebilir, roller atayabilir, ekibinizin kullandığı modelleri düzenleyebilir ve daha fazlasını yapabilirsiniz.
|
||||
|
||||
@@ -212,26 +221,26 @@ Zen ekipler için de harika çalışır. Ekip arkadaşlarınızı davet edebilir
|
||||
Çalışma alanları şu anda beta'nın bir parçası olarak ekipler için ücretsizdir.
|
||||
:::
|
||||
|
||||
Çalışma alanınızı yönetmek şu anda beta'nın bir parçası olarak ekipler için ücretsizdir. Yakında fiyatlandırma hakkında daha fazla ayrıntı paylaşacağız.
|
||||
Çalışma alanınızı yönetmek şu anda beta'nın bir parçası olarak ekipler için ücretsizdir. Fiyatlandırma hakkında daha fazla ayrıntıyı yakında paylaşacağız.
|
||||
|
||||
---
|
||||
|
||||
### Roller
|
||||
|
||||
Ekip arkadaşlarınızı çalışma alanınıza davet edebilir ve roller atayabilirsiniz:
|
||||
Ekip arkadaşlarınızı çalışma alanınıza davet edip rol atayabilirsiniz:
|
||||
|
||||
- **Admin**: Modelleri, üyeleri, API anahtarlarını ve faturalandırmayı yönetin
|
||||
- **Member**: Yalnızca kendi API anahtarlarını yönetin
|
||||
- **Admin**: Modelleri, üyeleri, API anahtarlarını ve faturalandırmayı yönetir
|
||||
- **Member**: Yalnızca kendi API anahtarlarını yönetir
|
||||
|
||||
Yöneticiler, maliyetleri kontrol altında tutmak için her üye için aylık harcama limitleri de belirleyebilir.
|
||||
Admin'ler ayrıca maliyetleri kontrol altında tutmak için her üye için aylık harcama limiti belirleyebilir.
|
||||
|
||||
---
|
||||
|
||||
### Model erişimi
|
||||
|
||||
Yöneticiler çalışma alanı için belirli modelleri etkinleştirebilir veya devre dışı bırakabilir. Devre dışı bırakılmış bir modele yapılan istekler bir hata döndürür.
|
||||
Admin'ler çalışma alanı için belirli modelleri etkinleştirebilir veya devre dışı bırakabilir. Devre dışı bırakılmış bir modele yapılan istekler hata döndürür.
|
||||
|
||||
Bu, veri toplayan bir modelin kullanımını devre dışı bırakmak istediğiniz durumlarda kullanışlıdır.
|
||||
Bu, veri toplayan bir modelin kullanımını kapatmak istediğiniz durumlarda kullanışlıdır.
|
||||
|
||||
---
|
||||
|
||||
@@ -239,17 +248,17 @@ Bu, veri toplayan bir modelin kullanımını devre dışı bırakmak istediğini
|
||||
|
||||
Zen'deki diğer modellere erişmeye devam ederken kendi OpenAI veya Anthropic API anahtarlarınızı kullanabilirsiniz.
|
||||
|
||||
Kendi anahtarlarınızı kullandığınızda, tokenler Zen tarafından değil, doğrudan sağlayıcı tarafından faturalandırılır.
|
||||
Kendi anahtarlarınızı kullandığınızda token'lar Zen tarafından değil, doğrudan sağlayıcı tarafından faturalandırılır.
|
||||
|
||||
Örneğin, kuruluşunuzun halihazırda OpenAI veya Anthropic için bir anahtarı olabilir ve Zen'in sağladığı anahtar yerine onu kullanmak isteyebilirsiniz.
|
||||
Örneğin, kuruluşunuzun OpenAI veya Anthropic için zaten bir anahtarı olabilir ve Zen'in sağladığı anahtar yerine onu kullanmak isteyebilirsiniz.
|
||||
|
||||
---
|
||||
|
||||
## Hedefler
|
||||
|
||||
OpenCode Zen'i şu amaçlarla oluşturduk:
|
||||
OpenCode Zen'i şunlar için oluşturduk:
|
||||
|
||||
1. Kodlama ajanları için en iyi modelleri/sağlayıcıları **kıyaslamak**.
|
||||
2. **En yüksek kaliteli** seçeneklere erişmek ve performansı düşürmemek veya daha ucuz sağlayıcılara yönlendirmemek.
|
||||
3. Maliyetine satış yaparak herhangi bir **fiyat düşüşünü** yansıtmak; böylece tek kâr marjı işlem ücretlerimizi karşılamaktır.
|
||||
4. Başka bir kodlama ajanıyla kullanmanıza izin vererek **kilitlenmeyi önlemek**. Ve her zaman OpenCode ile başka bir sağlayıcıyı kullanmanıza izin vermek.
|
||||
2. **En yüksek kaliteli** seçeneklere erişmek ve performansı düşürmemek ya da daha ucuz sağlayıcılara yönlendirmemek.
|
||||
3. Maliyetine satış yaparak tüm **fiyat düşüşlerini** yansıtmak; yani tek ek ücretimiz işlem ücretlerimizi karşılamak içindir.
|
||||
4. Başka herhangi bir kodlama ajanıyla kullanmanıza izin vererek **kilitlenme olmamasını** sağlamak. Ayrıca OpenCode ile her zaman başka herhangi bir sağlayıcıyı da kullanabilmenizi sağlamak.
|
||||
|
||||
@@ -96,6 +96,7 @@ You can also access our models through the following API endpoints.
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
The [model id](/docs/config/#models) in your OpenCode config
|
||||
@@ -123,6 +124,7 @@ We support a pay-as-you-go model. Below are the prices **per 1M tokens**.
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
@@ -169,6 +171,7 @@ The free models:
|
||||
- MiniMax M2.5 Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
|
||||
- MiMo V2 Pro Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
|
||||
- MiMo V2 Omni Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
|
||||
- Qwen3.6 Plus Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
|
||||
- Nemotron 3 Super Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
|
||||
- Big Pickle is a stealth model that's free on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
|
||||
|
||||
@@ -217,6 +220,7 @@ All our models are hosted in the US. Our providers follow a zero-retention polic
|
||||
- MiniMax M2.5 Free: During its free period, collected data may be used to improve the model.
|
||||
- MiMo V2 Pro Free: During its free period, collected data may be used to improve the model.
|
||||
- MiMo V2 Omni Free: During its free period, collected data may be used to improve the model.
|
||||
- Qwen3.6 Plus Free: During its free period, collected data may be used to improve the model.
|
||||
- Nemotron 3 Super Free: During its free period, collected data may be used to improve the model.
|
||||
- OpenAI APIs: Requests are retained for 30 days in accordance with [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
|
||||
- Anthropic APIs: Requests are retained for 30 days in accordance with [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
|
||||
|
||||
@@ -7,31 +7,31 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen 是由 OpenCode 团队提供的一组经过测试和验证的模型列表。
|
||||
OpenCode Zen 是由 OpenCode 团队提供的一组经过测试和验证的模型。
|
||||
|
||||
:::note
|
||||
OpenCode Zen 目前处于测试阶段。
|
||||
OpenCode Zen 目前处于测试版阶段。
|
||||
:::
|
||||
|
||||
Zen 的工作方式与 OpenCode 中的任何其他提供商相同。你只需登录 OpenCode Zen 并获取你的 API 密钥。它是**完全可选的**,你无需使用它也能正常使用 OpenCode。
|
||||
Zen 的工作方式与 OpenCode 中的任何其他提供商相同。你登录 OpenCode Zen 并获取 API 密钥。它是**完全可选的**,即使不用它,你也可以照常使用 OpenCode。
|
||||
|
||||
---
|
||||
|
||||
## 背景
|
||||
|
||||
市面上有大量的模型,但其中只有少数能够很好地充当编码代理。此外,大多数提供商的配置方式差异很大,因此你获得的性能和质量也会截然不同。
|
||||
现在市面上有大量模型,但其中只有少数模型适合作为编码代理使用。此外,大多数提供商的配置方式差异很大,因此你获得的性能和质量也会非常不同。
|
||||
|
||||
:::tip
|
||||
我们测试了一组与 OpenCode 配合良好的精选模型和提供商。
|
||||
:::
|
||||
|
||||
所以如果你通过 OpenRouter 之类的服务使用模型,你永远无法确定是否获得了你想要的模型的最佳版本。
|
||||
所以,如果你通过 OpenRouter 之类的服务使用模型,你无法确定自己拿到的是否是目标模型的最佳版本。
|
||||
|
||||
为了解决这个问题,我们做了以下几件事:
|
||||
为了解决这个问题,我们做了几件事:
|
||||
|
||||
1. 我们测试了一组精选的模型,并与它们的团队讨论了最佳运行方式。
|
||||
2. 然后我们与几家提供商合作,确保这些模型能被正确地提供服务。
|
||||
3. 最后,我们对模型与提供商的组合进行了基准测试,整理出了一份我们有信心推荐的列表。
|
||||
1. 我们测试了一组选定的模型,并与它们的团队讨论了如何以最佳方式运行这些模型。
|
||||
2. 然后我们与几家提供商合作,确保这些模型被正确提供。
|
||||
3. 最后,我们对模型和提供商的组合进行了基准测试,并整理出了一份我们认为值得推荐的列表。
|
||||
|
||||
OpenCode Zen 是一个 AI 网关,让你可以访问这些模型。
|
||||
|
||||
@@ -43,53 +43,54 @@ OpenCode Zen 的工作方式与 OpenCode 中的任何其他提供商相同。
|
||||
|
||||
1. 登录 **<a href={console}>OpenCode Zen</a>**,添加你的账单信息,然后复制你的 API 密钥。
|
||||
2. 在 TUI 中运行 `/connect` 命令,选择 OpenCode Zen,然后粘贴你的 API 密钥。
|
||||
3. 在 TUI 中运行 `/models` 查看我们推荐的模型列表。
|
||||
3. 在 TUI 中运行 `/models`,查看我们推荐的模型列表。
|
||||
|
||||
你按请求付费,并且可以向你的账户中充值。
|
||||
你按请求付费,也可以向账户充值。
|
||||
|
||||
---
|
||||
|
||||
## 端点
|
||||
|
||||
你还可以通过以下 API 端点访问我们的模型。
|
||||
你也可以通过以下 API 端点访问我们的模型。
|
||||
|
||||
| 模型 | 模型 ID | 端点 | AI SDK 包 |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| 模型 | 模型 ID | 端点 | AI SDK 包 |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
在 OpenCode 配置中,[模型 ID](/docs/config/#models) 使用 `opencode/<model-id>` 格式。例如,对于 GPT 5.2 Codex,你需要在配置中使用 `opencode/gpt-5.2-codex`。
|
||||
在你的 OpenCode 配置中,[模型 ID](/docs/config/#models) 使用 `opencode/<model-id>` 格式。例如,对于 GPT 5.3 Codex,你需要在配置中使用 `opencode/gpt-5.3-codex`。
|
||||
|
||||
---
|
||||
|
||||
@@ -105,62 +106,65 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## 定价
|
||||
|
||||
我们支持按量付费模式。以下是**每 100 万 Token** 的价格。
|
||||
我们支持按量付费模式。以下是**每 1M tokens** 的价格。
|
||||
|
||||
| 模型 | 输入 | 输出 | 缓存读取 | 缓存写入 |
|
||||
| --------------------------------- | ------ | ------ | -------- | -------- |
|
||||
| Big Pickle | 免费 | 免费 | 免费 | - |
|
||||
| MiniMax M2.5 Free | 免费 | 免费 | 免费 | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | 免费 | 免费 | 免费 | - |
|
||||
| 模型 | 输入 | 输出 | 缓存读取 | 缓存写入 |
|
||||
| --------------------------------- | ------ | ------- | -------- | -------- |
|
||||
| Big Pickle | 免费 | 免费 | 免费 | - |
|
||||
| MiMo V2 Pro Free | 免费 | 免费 | 免费 | - |
|
||||
| MiMo V2 Omni Free | 免费 | 免费 | 免费 | - |
|
||||
| Qwen3.6 Plus Free | 免费 | 免费 | 免费 | - |
|
||||
| Nemotron 3 Super Free | 免费 | 免费 | 免费 | - |
|
||||
| MiniMax M2.5 Free | 免费 | 免费 | 免费 | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | 免费 | 免费 | 免费 | - |
|
||||
|
||||
你可能会在使用记录中看到 _Claude Haiku 3.5_。这是一个[低成本模型](/docs/config/#models),用于生成会话标题。
|
||||
|
||||
:::note
|
||||
信用卡手续费按成本转嫁(每笔交易 4.4% + $0.30);除此之外我们不收取任何额外费用。
|
||||
信用卡手续费按成本转嫁(每笔交易 4.4% + $0.30);除此之外我们不会额外收费。
|
||||
:::
|
||||
|
||||
免费模型说明:
|
||||
免费模型:
|
||||
|
||||
- MiniMax M2.5 Free 在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。
|
||||
- Big Pickle 是一个隐身模型,在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。
|
||||
- MiniMax M2.5 Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。
|
||||
- MiMo V2 Pro Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。
|
||||
- MiMo V2 Omni Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。
|
||||
- Qwen3.6 Plus Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。
|
||||
- Nemotron 3 Super Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。
|
||||
- Big Pickle 是一个隐身模型,目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。
|
||||
|
||||
如有任何疑问,请<a href={email}>联系我们</a>。
|
||||
如果你有任何问题,请<a href={email}>联系我们</a>。
|
||||
|
||||
---
|
||||
|
||||
@@ -168,88 +172,93 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
如果你的余额低于 $5,Zen 将自动充值 $20。
|
||||
|
||||
你可以更改自动充值的金额,也可以完全禁用自动充值功能。
|
||||
你可以更改自动充值金额,也可以完全禁用自动充值。
|
||||
|
||||
---
|
||||
|
||||
### 月度限额
|
||||
|
||||
你还可以为整个工作区以及团队中的每个成员设置月度使用限额。
|
||||
你还可以为整个工作区以及团队中的每位成员设置月度使用限额。
|
||||
|
||||
例如,假设你将月度使用限额设为 $20,Zen 在一个月内的使用量将不会超过 $20。但如果你启用了自动充值,当余额低于 $5 时,Zen 可能会向你收取超过 $20 的费用。
|
||||
例如,假设你将月度使用限额设置为 $20,那么 Zen 在一个月内的使用金额不会超过 $20。但如果你启用了自动充值,当余额低于 $5 时,Zen 最终向你收取的金额可能会超过 $20。
|
||||
|
||||
---
|
||||
|
||||
### 已弃用模型
|
||||
|
||||
| 模型 | 弃用日期 |
|
||||
| ---------------- | ------------------ |
|
||||
| Qwen3 Coder 480B | 2026 年 2 月 6 日 |
|
||||
| Kimi K2 Thinking | 2026 年 3 月 6 日 |
|
||||
| Kimi K2 | 2026 年 3 月 6 日 |
|
||||
| MiniMax M2.1 | 2026 年 3 月 15 日 |
|
||||
| GLM 4.7 | 2026 年 3 月 15 日 |
|
||||
| GLM 4.6 | 2026 年 3 月 15 日 |
|
||||
| 模型 | 弃用日期 |
|
||||
| ---------------- | -------------- |
|
||||
| MiniMax M2.1     | 2026 年 3 月 15 日 |
|
||||
| GLM 4.7          | 2026 年 3 月 15 日 |
|
||||
| GLM 4.6          | 2026 年 3 月 15 日 |
|
||||
| Gemini 3 Pro     | 2026 年 3 月 9 日  |
|
||||
| Kimi K2 Thinking | 2026 年 3 月 6 日  |
|
||||
| Kimi K2          | 2026 年 3 月 6 日  |
|
||||
| Qwen3 Coder 480B | 2026 年 2 月 6 日  |
|
||||
|
||||
---
|
||||
|
||||
## 隐私
|
||||
|
||||
我们所有的模型都托管在美国。我们的提供商遵循零保留政策,不会将你的数据用于模型训练,但以下情况除外:
|
||||
我们所有模型都托管在美国。我们的提供商遵循零保留政策,不会将你的数据用于模型训练,但以下情况除外:
|
||||
|
||||
- Big Pickle:在免费期间,收集的数据可能会被用于改进模型。
|
||||
- MiniMax M2.5 Free:在免费期间,收集的数据可能会被用于改进模型。
|
||||
- OpenAI API:请求会根据 [OpenAI 数据政策](https://platform.openai.com/docs/guides/your-data)保留 30 天。
|
||||
- Anthropic API:请求会根据 [Anthropic 数据政策](https://docs.anthropic.com/en/docs/claude-code/data-usage)保留 30 天。
|
||||
- MiMo V2 Pro Free:在免费期间,收集的数据可能会被用于改进模型。
|
||||
- MiMo V2 Omni Free:在免费期间,收集的数据可能会被用于改进模型。
|
||||
- Qwen3.6 Plus Free:在免费期间,收集的数据可能会被用于改进模型。
|
||||
- Nemotron 3 Super Free:在免费期间,收集的数据可能会被用于改进模型。
|
||||
- OpenAI APIs:请求会根据 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 保留 30 天。
|
||||
- Anthropic APIs:请求会根据 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 保留 30 天。
|
||||
|
||||
---
|
||||
|
||||
## 团队版
|
||||
## 团队
|
||||
|
||||
Zen 也非常适合团队使用。你可以邀请队友、分配角色、管理团队使用的模型等。
|
||||
Zen 也非常适合团队使用。你可以邀请队友、分配角色、管理团队使用的模型,等等。
|
||||
|
||||
:::note
|
||||
作为测试版的一部分,工作区功能目前对团队免费开放。
|
||||
作为测试版的一部分,工作区目前对团队免费开放。
|
||||
:::
|
||||
|
||||
作为测试版的一部分,管理工作区目前对团队免费。我们将很快公布更多定价详情。
|
||||
作为测试版的一部分,团队目前可以免费管理工作区。我们很快会分享更多定价细节。
|
||||
|
||||
---
|
||||
|
||||
### 角色
|
||||
|
||||
你可以邀请团队成员加入你的工作区并分配角色:
|
||||
你可以邀请队友加入工作区并分配角色:
|
||||
|
||||
- **管理员**:管理模型、成员、API 密钥和账单
|
||||
- **成员**:仅管理自己的 API 密钥
|
||||
- **Admin**:管理模型、成员、API 密钥和账单
|
||||
- **Member**:仅管理自己的 API 密钥
|
||||
|
||||
管理员还可以为每个成员设置月度支出限额,以控制成本。
|
||||
Admin 还可以为每位成员设置月度支出限额,以便控制成本。
|
||||
|
||||
---
|
||||
|
||||
### 模型访问
|
||||
|
||||
管理员可以启用或禁用工作区中的特定模型。对已禁用模型发出的请求将返回错误。
|
||||
Admin 可以为工作区启用或禁用特定模型。向已禁用模型发出的请求会返回错误。
|
||||
|
||||
这在你想要禁用某个会收集数据的模型时非常有用。
|
||||
这在你想禁用会收集数据的模型时很有用。
|
||||
|
||||
---
|
||||
|
||||
### 自带密钥
|
||||
|
||||
你可以使用自己的 OpenAI 或 Anthropic API 密钥,同时仍然可以访问 Zen 中的其他模型。
|
||||
你可以使用自己的 OpenAI 或 Anthropic API 密钥,同时仍然访问 Zen 中的其他模型。
|
||||
|
||||
当你使用自己的密钥时,Token 费用由提供商直接计费,而非通过 Zen 计费。
|
||||
当你使用自己的密钥时,tokens 由提供商直接计费,而不是由 Zen 计费。
|
||||
|
||||
例如,你的组织可能已经拥有 OpenAI 或 Anthropic 的密钥,你希望使用它们而不是 Zen 提供的密钥。
|
||||
例如,你的组织可能已经拥有 OpenAI 或 Anthropic 的密钥,并且你想使用它,而不是使用 Zen 提供的密钥。
|
||||
|
||||
---
|
||||
|
||||
## 目标
|
||||
|
||||
我们创建 OpenCode Zen 的目的是:
|
||||
我们创建 OpenCode Zen,是为了:
|
||||
|
||||
1. 为编码代理**基准测试**最佳的模型和提供商组合。
|
||||
2. 提供**最高质量**的选项,不降低性能或路由到更廉价的提供商。
|
||||
3. 以成本价销售来传递任何**降价优惠**;唯一的加价仅用于覆盖我们的处理费用。
|
||||
4. **无锁定**,允许你将其与任何其他编码代理配合使用,同时也始终允许你在 OpenCode 中使用任何其他提供商。
|
||||
1. 为编码代理**基准测试**最佳模型和提供商。
|
||||
2. 提供**最高质量**的选项,而不是降低性能或路由到更便宜的提供商。
|
||||
3. 通过按成本销售来传递任何**降价**;因此唯一的加价只是为了覆盖我们的处理费用。
|
||||
4. 保持**无锁定**,允许你将它与任何其他编码代理一起使用。同时也始终允许你在 OpenCode 中使用任何其他提供商。
|
||||
|
||||
@@ -7,95 +7,101 @@ import config from "../../../../config.mjs"
|
||||
export const console = config.console
|
||||
export const email = `mailto:${config.email}`
|
||||
|
||||
OpenCode Zen 是由 OpenCode 團隊提供的一組經過測試和驗證的模型列表。
|
||||
OpenCode Zen 是由 OpenCode 團隊提供、經過測試與驗證的模型清單。
|
||||
|
||||
:::note
|
||||
OpenCode Zen 目前處於測試階段。
|
||||
OpenCode Zen 目前仍處於 beta。
|
||||
:::
|
||||
|
||||
Zen 的工作方式與 OpenCode 中的任何其他供應商相同。你只需登入 OpenCode Zen 並取得你的 API 金鑰。它是**完全選用的**,你無需使用它也能正常使用 OpenCode。
|
||||
Zen 的運作方式和 OpenCode 中的其他供應商一樣。你登入 OpenCode Zen 並取得
|
||||
你的 API 金鑰。它是 **完全可選的**,你不需要使用它也能使用
|
||||
OpenCode。
|
||||
|
||||
---
|
||||
|
||||
## 背景
|
||||
|
||||
市面上有大量的模型,但其中只有少數能夠很好地充當編碼代理。此外,大多數供應商的設定方式差異很大,因此你獲得的效能和品質也會截然不同。
|
||||
市面上有大量模型,但其中只有少數能作為編碼代理良好運作。此外,大多數供應商的
|
||||
設定方式差異很大,因此你得到的效能與品質也會非常不同。
|
||||
|
||||
:::tip
|
||||
我們測試了一組與 OpenCode 配合良好的精選模型和供應商。
|
||||
我們測試了一組與 OpenCode 搭配良好的精選模型與供應商。
|
||||
:::
|
||||
|
||||
所以如果你透過 OpenRouter 之類的服務使用模型,你永遠無法確定是否獲得了你想要的模型的最佳版本。
|
||||
所以如果你是透過 OpenRouter 之類的服務使用模型,你永遠無法確定自己拿到的是不是
|
||||
你想要那個模型的最佳版本。
|
||||
|
||||
為了解決這個問題,我們做了以下幾件事:
|
||||
為了改善這件事,我們做了幾件事:
|
||||
|
||||
1. 我們測試了一組精選的模型,並與它們的團隊討論了最佳執行方式。
|
||||
2. 然後我們與幾家供應商合作,確保這些模型能被正確地提供服務。
|
||||
3. 最後,我們對模型與供應商的組合進行了基準測試,整理出了一份我們有信心推薦的列表。
|
||||
1. 我們測試了一組精選模型,並和它們的團隊討論如何以最佳方式執行。
|
||||
2. 接著我們與幾家供應商合作,確保這些模型能被正確提供。
|
||||
3. 最後,我們針對模型與供應商的組合進行基準測試,整理出一份我們有信心推薦的清單。
|
||||
|
||||
OpenCode Zen 是一個 AI 閘道,讓你可以存取這些模型。
|
||||
|
||||
---
|
||||
|
||||
## 工作原理
|
||||
## 運作方式
|
||||
|
||||
OpenCode Zen 的工作方式與 OpenCode 中的任何其他供應商相同。
|
||||
OpenCode Zen 的運作方式和 OpenCode 中的其他供應商一樣。
|
||||
|
||||
1. 登入 **<a href={console}>OpenCode Zen</a>**,新增你的帳單資訊,然後複製你的 API 金鑰。
|
||||
2. 在 TUI 中執行 `/connect` 指令,選擇 OpenCode Zen,然後貼上你的 API 金鑰。
|
||||
3. 在 TUI 中執行 `/models` 查看我們推薦的模型列表。
|
||||
1. 你登入 **<a href={console}>OpenCode Zen</a>**,加入帳單資訊,然後複製你的 API 金鑰。
|
||||
2. 你在 TUI 中執行 `/connect` 指令,選擇 OpenCode Zen,然後貼上你的 API 金鑰。
|
||||
3. 在 TUI 中執行 `/models`,查看我們推薦的模型清單。
|
||||
|
||||
你按請求付費,並且可以向你的帳戶中儲值。
|
||||
你會依請求計費,也可以為帳戶儲值。
|
||||
|
||||
---
|
||||
|
||||
## 端點
|
||||
|
||||
你還可以透過以下 API 端點存取我們的模型。
|
||||
你也可以透過以下 API 端點存取我們的模型。
|
||||
|
||||
| 模型 | 模型 ID | 端點 | AI SDK 套件 |
|
||||
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| 模型 | Model ID | 端點 | AI SDK Package |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- | --------------------------- |
|
||||
| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Mini | gpt-5.4-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.4 Nano | gpt-5.4-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
|
||||
| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
|
||||
| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
|
||||
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
|
||||
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
|
||||
|
||||
在 OpenCode 設定中,[模型 ID](/docs/config/#models) 使用 `opencode/<model-id>` 格式。例如,對於 GPT 5.2 Codex,你需要在設定中使用 `opencode/gpt-5.2-codex`。
|
||||
OpenCode 設定中的 [模型 ID](/docs/config/#models) 會使用 `opencode/<model-id>`
|
||||
格式。例如,如果是 GPT 5.3 Codex,你會在設定中使用 `opencode/gpt-5.3-codex`。
|
||||
|
||||
---
|
||||
|
||||
### 模型
|
||||
|
||||
你可以從以下位址取得可用模型及其中繼資料的完整列表:
|
||||
你可以從以下位置取得所有可用模型及其中繼資料的完整清單:
|
||||
|
||||
```
|
||||
https://opencode.ai/zen/v1/models
|
||||
@@ -105,151 +111,161 @@ https://opencode.ai/zen/v1/models
|
||||
|
||||
## 定價
|
||||
|
||||
我們支援按量付費模式。以下是**每 100 萬 Token** 的價格。
|
||||
我們支援按量付費模式。以下是 **每 1M tokens** 的價格。
|
||||
|
||||
| 模型 | 輸入 | 輸出 | 快取讀取 | 快取寫入 |
|
||||
| -------------------------------- | ------ | ------ | -------- | -------- |
|
||||
| Big Pickle | 免費 | 免費 | 免費 | - |
|
||||
| MiniMax M2.5 Free | 免費 | 免費 | 免費 | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
|
||||
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
|
||||
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
|
||||
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
|
||||
| Kimi K2 | $0.40 | $2.50 | - | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 (≤ 200K Token) | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.6 (> 200K Token) | $10.00 | $37.50 | $1.00 | $12.50 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 (≤ 200K Token) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.6 (> 200K Token) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4.5 (≤ 200K Token) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K Token) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K Token) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K Token) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K Token) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K Token) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Pro (≤ 200K Token) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3 Pro (> 200K Token) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | 免費 | 免費 | 免費 | - |
|
||||
| 模型 | 輸入 | 輸出 | 快取讀取 | 快取寫入 |
|
||||
| --------------------------------- | ------ | ------- | -------- | -------- |
|
||||
| Big Pickle | Free | Free | Free | - |
|
||||
| MiMo V2 Pro Free | Free | Free | Free | - |
|
||||
| MiMo V2 Omni Free | Free | Free | Free | - |
|
||||
| Qwen3.6 Plus Free | Free | Free | Free | - |
|
||||
| Nemotron 3 Super Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 Free | Free | Free | Free | - |
|
||||
| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
|
||||
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
|
||||
| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
|
||||
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
|
||||
| Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
|
||||
| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
|
||||
| Claude Sonnet 4.6 | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
|
||||
| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
|
||||
| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
|
||||
| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
|
||||
| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
|
||||
| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
|
||||
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
|
||||
| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
|
||||
| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
|
||||
| GPT 5.4 Mini | $0.75 | $4.50 | $0.075 | - |
|
||||
| GPT 5.4 Nano | $0.20 | $1.25 | $0.02 | - |
|
||||
| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
|
||||
| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
|
||||
| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
|
||||
| GPT 5 | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
|
||||
| GPT 5 Nano | Free | Free | Free | - |
|
||||
|
||||
你可能會在使用記錄中看到 _Claude Haiku 3.5_。這是一個[低成本模型](/docs/config/#models),用於產生工作階段標題。
|
||||
你可能會在使用紀錄中看到 _Claude Haiku 3.5_。這是一個[低成本模型](/docs/config/#models),
|
||||
會用來產生工作階段的標題。
|
||||
|
||||
:::note
|
||||
信用卡手續費按成本轉嫁(每筆交易 4.4% + $0.30);除此之外我們不收取任何額外費用。
|
||||
信用卡手續費會以成本轉嫁(每筆交易 4.4% + $0.30);除此之外,我們不會額外收費。
|
||||
:::
|
||||
|
||||
免費模型說明:
|
||||
免費模型:
|
||||
|
||||
- MiniMax M2.5 Free 在 OpenCode 上限時免費提供。團隊正在利用這段時間收集回饋並改進模型。
|
||||
- MiniMax M2.5 Free 在 OpenCode 上限時免費提供。團隊正在利用這段時間收集回饋並改進模型。
|
||||
- MiMo V2 Pro Free 在 OpenCode 上限時免費提供。團隊正在利用這段時間收集回饋並改進模型。
|
||||
- MiMo V2 Omni Free 在 OpenCode 上限時免費提供。團隊正在利用這段時間收集回饋並改進模型。
|
||||
- Qwen3.6 Plus Free 在 OpenCode 上限時免費提供。團隊正在利用這段時間收集回饋並改進模型。
|
||||
- Nemotron 3 Super Free 在 OpenCode 上限時免費提供。團隊正在利用這段時間收集回饋並改進模型。
|
||||
- Big Pickle 是一個隱身模型,在 OpenCode 上限時免費提供。團隊正在利用這段時間收集回饋並改進模型。
|
||||
|
||||
如有任何疑問,請<a href={email}>聯絡我們</a>。
|
||||
如果你有任何問題,請<a href={email}>聯絡我們</a>。
|
||||
|
||||
---
|
||||
|
||||
### 自動儲值
|
||||
|
||||
如果你的餘額低於 $5,Zen 將自動儲值 $20。
|
||||
如果你的餘額低於 $5,Zen 會自動儲值 $20。
|
||||
|
||||
你可以更改自動儲值的金額,也可以完全停用自動儲值功能。
|
||||
你可以調整自動儲值金額,也可以完全停用自動儲值。
|
||||
|
||||
---
|
||||
|
||||
### 月度限額
|
||||
### 每月限額
|
||||
|
||||
你還可以為整個工作區以及團隊中的每個成員設定月度使用限額。
|
||||
你也可以為整個工作區以及團隊中的每位成員設定每月使用限額。
|
||||
|
||||
例如,假設你將月度使用限額設為 $20,Zen 在一個月內的使用量將不會超過 $20。但如果你啟用了自動儲值,當餘額低於 $5 時,Zen 可能會向你收取超過 $20 的費用。
|
||||
例如,假設你把每月使用限額設為 $20,Zen 在一個月內就不會使用超過 $20。
|
||||
但如果你啟用了自動儲值,當餘額低於 $5 時,Zen 最終仍可能向你收取超過 $20。
|
||||
|
||||
---
|
||||
|
||||
### 已棄用的模型
|
||||
### 已棄用模型
|
||||
|
||||
| 模型 | 棄用日期 |
|
||||
| ---------------- | ------------------ |
|
||||
| Qwen3 Coder 480B | 2026 年 2 月 6 日 |
|
||||
| Kimi K2 Thinking | 2026 年 3 月 6 日 |
|
||||
| Kimi K2 | 2026 年 3 月 6 日 |
|
||||
| MiniMax M2.1 | 2026 年 3 月 15 日 |
|
||||
| GLM 4.7 | 2026 年 3 月 15 日 |
|
||||
| GLM 4.6 | 2026 年 3 月 15 日 |
|
||||
| 模型 | 棄用日期 |
|
||||
| ---------------- | -------------- |
|
||||
| MiniMax M2.1     | 2026 年 3 月 15 日 |
|
||||
| GLM 4.7          | 2026 年 3 月 15 日 |
|
||||
| GLM 4.6          | 2026 年 3 月 15 日 |
|
||||
| Gemini 3 Pro     | 2026 年 3 月 9 日  |
|
||||
| Kimi K2 Thinking | 2026 年 3 月 6 日  |
|
||||
| Kimi K2          | 2026 年 3 月 6 日  |
|
||||
| Qwen3 Coder 480B | 2026 年 2 月 6 日  |
|
||||
|
||||
---
|
||||
|
||||
## 隱私
|
||||
|
||||
我們所有的模型都託管在美國。我們的供應商遵循零保留政策,不會將你的資料用於模型訓練,但以下情況除外:
|
||||
我們所有模型都託管於美國。我們的供應商遵循零保留政策,且不會將你的資料用於模型訓練,但以下情況除外:
|
||||
|
||||
- Big Pickle:在免費期間,收集的資料可能會被用於改進模型。
|
||||
- MiniMax M2.5 Free:在免費期間,收集的資料可能會被用於改進模型。
|
||||
- OpenAI API:請求會根據 [OpenAI 資料政策](https://platform.openai.com/docs/guides/your-data)保留 30 天。
|
||||
- Anthropic API:請求會根據 [Anthropic 資料政策](https://docs.anthropic.com/en/docs/claude-code/data-usage)保留 30 天。
|
||||
- Big Pickle: 在免費期間,收集到的資料可能會用於改進模型。
|
||||
- MiniMax M2.5 Free: 在免費期間,收集到的資料可能會用於改進模型。
|
||||
- MiMo V2 Pro Free: 在免費期間,收集到的資料可能會用於改進模型。
|
||||
- MiMo V2 Omni Free: 在免費期間,收集到的資料可能會用於改進模型。
|
||||
- Qwen3.6 Plus Free: 在免費期間,收集到的資料可能會用於改進模型。
|
||||
- Nemotron 3 Super Free: 在免費期間,收集到的資料可能會用於改進模型。
|
||||
- OpenAI APIs: 請求會依據 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 保留 30 天。
|
||||
- Anthropic APIs: 請求會依據 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 保留 30 天。
|
||||
|
||||
---
|
||||
|
||||
## 團隊版
|
||||
## 團隊
|
||||
|
||||
Zen 也非常適合團隊使用。你可以邀請隊友、分配角色、管理團隊使用的模型等。
|
||||
Zen 也很適合團隊使用。你可以邀請隊友、指派角色、管理團隊要使用的模型等等。
|
||||
|
||||
:::note
|
||||
作為測試版的一部分,工作區功能目前對團隊免費開放。
|
||||
作為 beta 的一部分,工作區目前對團隊免費。
|
||||
:::
|
||||
|
||||
作為測試版的一部分,管理工作區目前對團隊免費。我們將很快公布更多定價詳情。
|
||||
作為 beta 的一部分,管理你的工作區目前對團隊免費。我們很快會分享更多定價細節。
|
||||
|
||||
---
|
||||
|
||||
### 角色
|
||||
|
||||
你可以邀請團隊成員加入你的工作區並分配角色:
|
||||
你可以邀請隊友加入你的工作區,並指派角色:
|
||||
|
||||
- **管理員**:管理模型、成員、API 金鑰和帳單
|
||||
- **成員**:僅管理自己的 API 金鑰
|
||||
- **Admin**:管理模型、成員、API 金鑰與帳單
|
||||
- **Member**:只能管理自己的 API 金鑰
|
||||
|
||||
管理員還可以為每個成員設定月度支出限額,以控制成本。
|
||||
Admin 也可以為每位成員設定每月支出上限,協助控制成本。
|
||||
|
||||
---
|
||||
|
||||
### 模型存取
|
||||
|
||||
管理員可以啟用或停用工作區中的特定模型。對已停用模型發出的請求將回傳錯誤。
|
||||
Admin 可以為工作區啟用或停用特定模型。對已停用模型發出的請求會回傳錯誤。
|
||||
|
||||
這在你想要停用某個會收集資料的模型時非常有用。
|
||||
這在你想停用會收集資料的模型時很有用。
|
||||
|
||||
---
|
||||
|
||||
### 自帶金鑰
|
||||
### 自備金鑰
|
||||
|
||||
你可以使用自己的 OpenAI 或 Anthropic API 金鑰,同時仍然可以存取 Zen 中的其他模型。
|
||||
你可以使用自己的 OpenAI 或 Anthropic API 金鑰,同時仍然存取 Zen 中的其他模型。
|
||||
|
||||
當你使用自己的金鑰時,Token 費用由供應商直接計費,而非透過 Zen 計費。
|
||||
當你使用自己的金鑰時,Token 會直接由供應商計費,而不是由 Zen 計費。
|
||||
|
||||
例如,你的組織可能已經擁有 OpenAI 或 Anthropic 的金鑰,你希望使用它們而不是 Zen 提供的金鑰。
|
||||
例如,你的組織可能已經有 OpenAI 或 Anthropic 的金鑰,而你想改用那把金鑰,而不是 Zen 提供的金鑰。
|
||||
|
||||
---
|
||||
|
||||
## 目標
|
||||
|
||||
我們建立 OpenCode Zen 的目的是:
|
||||
我們建立 OpenCode Zen,是為了:
|
||||
|
||||
1. 為編碼代理**基準測試**最佳的模型和供應商組合。
|
||||
2. 提供**最高品質**的選項,不降低效能或路由到更廉價的供應商。
|
||||
3. 以成本價銷售來傳遞任何**降價優惠**;唯一的加價僅用於覆蓋我們的處理費用。
|
||||
4. **無鎖定**,允許你將其與任何其他編碼代理配合使用,同時也始終允許你在 OpenCode 中使用任何其他供應商。
|
||||
1. 為編碼代理對最佳模型與供應商進行 **基準測試**。
|
||||
2. 取得 **最高品質** 的選項,而不是降低效能或改路由到更便宜的供應商。
|
||||
3. 透過以成本價銷售,把任何 **降價** 回饋給你;唯一的加價只用來支付我們的處理費。
|
||||
4. 保持 **無鎖定**,讓你能把它與任何其他編碼代理一起使用,同時也始終能在 OpenCode 中使用任何其他供應商。
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"name": "opencode",
|
||||
"displayName": "opencode",
|
||||
"description": "opencode for VS Code",
|
||||
"version": "1.3.7",
|
||||
"version": "1.3.8",
|
||||
"publisher": "sst-dev",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
|
||||
Reference in New Issue
Block a user