Compare commits

..

10 Commits

Author SHA1 Message Date
Ryan Vogel
3a47b0ed90 fix(tui): remove feature flag, fix stale suggestion, limit to 110 chars
- Remove OPENCODE_EXPERIMENTAL_NEXT_PROMPT gate so suggest always runs
- Use reconcile() in sync store to clear stale suggestion on status change
- Limit suggestion to 110 characters and instruct model to be concise
- Remove temporary debug sidebar panel and plumbing
2026-04-06 14:31:03 +00:00
Ryan Vogel
722904fe4f feat(tui): add sidebar debug panel for suggest feature status
Adds a temporary debug indicator in the sidebar showing the suggest
lifecycle: generating, done, refused, error. Helps diagnose whether
the suggestion pipeline is running and what results it produces.
2026-04-06 14:15:21 +00:00
Ryan Vogel
93cef701c0 fix(tui): address review feedback for next-prompt suggestion
- Send full chat history + system prompt instead of last 8 messages for
  prompt-cache hit on the conversation prefix
- Use the same model (not small) so the KV cache is shared
- Add SessionStatus.suggest() that publishes Status event without firing
  the Idle hook, avoiding spurious plugin notifications
2026-04-06 02:38:48 +00:00
Ryan Vogel
0c3ff84f44 fix(tui): remove invalid renderer config option 2026-04-06 02:33:46 +00:00
Ryan Vogel
ba2e3c16b2 feat(tui): add experimental next-prompt suggestion
Generate an ephemeral user-style next step suggestion after assistant responses and let users accept it with Right Arrow in the prompt. Keep suggestions out of message history and support NO_SUGGESTION refusal.
2026-04-06 02:33:46 +00:00
Gautier DI FOLCO
4712c18a58 feat(tui): make the mouse disablable (#6824, #7926) (#13748) 2026-04-05 21:14:11 -05:00
opencode-agent[bot]
9e156ea168 chore: update nix node_modules hashes 2026-04-06 01:18:03 +00:00
Luke Parker
68f4aa220e fix(plugin): parse package specifiers with npm-package-arg and sanitize win32 cache paths (#21135) 2026-04-06 00:26:40 +00:00
Aiden Cline
3a0e00dd7f tweak: add newline between <content> and first line of read tool output to prevent confusion (#21070) 2026-04-05 04:55:22 +00:00
Frank
66b4e5e020 doc: update doc 2026-04-05 00:35:40 -04:00
29 changed files with 811 additions and 656 deletions

View File

@@ -371,6 +371,7 @@
"jsonc-parser": "3.3.1",
"mime-types": "3.0.2",
"minimatch": "10.0.3",
"npm-package-arg": "13.0.2",
"open": "10.1.2",
"opencode-gitlab-auth": "2.0.1",
"opencode-poe-auth": "0.0.1",
@@ -412,6 +413,7 @@
"@types/bun": "catalog:",
"@types/cross-spawn": "catalog:",
"@types/mime-types": "3.0.1",
"@types/npm-package-arg": "6.1.4",
"@types/npmcli__arborist": "6.3.3",
"@types/semver": "^7.5.8",
"@types/turndown": "5.0.5",

View File

@@ -1,8 +1,8 @@
{
"nodeModules": {
"x86_64-linux": "sha256-0jwPCu2Lod433GPQLHN8eEkhfpPviDFfkFJmuvkRdlE=",
"aarch64-linux": "sha256-Qi0IkGkaIBKZsPLTO8kaTbCVL0cEfVOm/Y/6VUVI9TY=",
"aarch64-darwin": "sha256-1eZBBLgYVkjg5RYN/etR1Mb5UjU3VelElBB5ug5hQdc=",
"x86_64-darwin": "sha256-jdXgA+kZb/foFHR40UiPif6rsA2GDVCCVHnJR3jBUGI="
"x86_64-linux": "sha256-LRhPPrOKCGUSCEWTpAxPdWKTKVNkg82WrvD25cP3jts=",
"aarch64-linux": "sha256-sbNxkil47n+B7v6ds5EYFybLytXUyRlu0Cpka0ZmDx4=",
"aarch64-darwin": "sha256-5+99gtpIHGygMW3VBAexNhmaORgI8LCxPk/Gf1fW/ds=",
"x86_64-darwin": "sha256-LqnvZGGnQaRxIoowOr5gf6lFgDhbgQhVPiAcRTtU6fE="
}
}

View File

@@ -54,6 +54,7 @@
"@types/bun": "catalog:",
"@types/cross-spawn": "catalog:",
"@types/mime-types": "3.0.1",
"@types/npm-package-arg": "6.1.4",
"@types/npmcli__arborist": "6.3.3",
"@types/semver": "^7.5.8",
"@types/turndown": "5.0.5",
@@ -135,6 +136,7 @@
"jsonc-parser": "3.3.1",
"mime-types": "3.0.2",
"minimatch": "10.0.3",
"npm-package-arg": "13.0.2",
"open": "10.1.2",
"opencode-gitlab-auth": "2.0.1",
"opencode-poe-auth": "0.0.1",

View File

@@ -125,14 +125,16 @@ import type { EventSource } from "./context/sdk"
import { DialogVariant } from "./component/dialog-variant"
function rendererConfig(_config: TuiConfig.Info): CliRendererConfig {
const mouseEnabled = !Flag.OPENCODE_DISABLE_MOUSE && (_config.mouse ?? true)
return {
externalOutputMode: "passthrough",
targetFps: 60,
gatherStats: false,
exitOnCtrlC: false,
useKittyKeyboard: { events: process.platform === "win32" },
autoFocus: false,
openConsoleOnError: false,
useMouse: mouseEnabled,
consoleOptions: {
keyBindings: [{ name: "y", ctrl: true, action: "copy-selection" }],
onCopySelection: (text) => {

View File

@@ -841,8 +841,20 @@ export function Prompt(props: PromptProps) {
return !!current
})
const suggestion = createMemo(() => {
if (!props.sessionID) return
if (store.mode !== "normal") return
if (store.prompt.input) return
const current = status()
if (current.type !== "idle") return
const value = current.suggestion?.trim()
if (!value) return
return value
})
const placeholderText = createMemo(() => {
if (props.showPlaceholder === false) return undefined
if (suggestion()) return suggestion()
if (store.mode === "shell") {
if (!shell().length) return undefined
const example = shell()[store.placeholder % shell().length]
@@ -933,6 +945,16 @@ export function Prompt(props: PromptProps) {
e.preventDefault()
return
}
if (!store.prompt.input && e.name === "right" && !e.ctrl && !e.meta && !e.shift && !e.super) {
const value = suggestion()
if (value) {
input.setText(value)
setStore("prompt", "input", value)
input.gotoBufferEnd()
e.preventDefault()
return
}
}
// Check clipboard for images before terminal-handled paste runs.
// This helps terminals that forward Ctrl+V to the app; Windows
// Terminal 1.25+ usually handles Ctrl+V before this path.

View File

@@ -233,7 +233,7 @@ export const { use: useSync, provider: SyncProvider } = createSimpleContext({
}
case "session.status": {
setStore("session_status", event.properties.sessionID, event.properties.status)
setStore("session_status", event.properties.sessionID, reconcile(event.properties.status))
break
}

View File

@@ -22,6 +22,7 @@ export const TuiOptions = z.object({
.enum(["auto", "stacked"])
.optional()
.describe("Control diff rendering style: 'auto' adapts to terminal width, 'stacked' always shows single column"),
mouse: z.boolean().optional().describe("Enable or disable mouse capture (default: true)"),
})
export const TuiInfo = z

View File

@@ -1,10 +1,10 @@
import { Cause, Deferred, Effect, Exit, Fiber, Schema, Scope, SynchronizedRef } from "effect"
import { Cause, Deferred, Effect, Exit, Fiber, Option, Schema, Scope, SynchronizedRef } from "effect"
export interface Runner<A, E = never> {
readonly state: Runner.State<A, E>
readonly busy: boolean
readonly ensureRunning: (work: Effect.Effect<A, E>) => Effect.Effect<A, E>
readonly startShell: (work: Effect.Effect<A, E>) => Effect.Effect<A, E>
readonly startShell: (work: (signal: AbortSignal) => Effect.Effect<A, E>) => Effect.Effect<A, E>
readonly cancel: Effect.Effect<void>
}
@@ -20,6 +20,7 @@ export namespace Runner {
interface ShellHandle<A, E> {
id: number
fiber: Fiber.Fiber<A, E>
abort: AbortController
}
interface PendingHandle<A, E> {
@@ -101,7 +102,9 @@ export namespace Runner {
const stopShell = (shell: ShellHandle<A, E>) =>
Effect.gen(function* () {
yield* Fiber.interrupt(shell.fiber)
shell.abort.abort()
const exit = yield* Fiber.await(shell.fiber).pipe(Effect.timeoutOption("100 millis"))
if (Option.isNone(exit)) yield* Fiber.interrupt(shell.fiber)
yield* Fiber.await(shell.fiber).pipe(Effect.exit, Effect.asVoid)
})
@@ -135,7 +138,7 @@ export namespace Runner {
),
)
const startShell = (work: Effect.Effect<A, E>) =>
const startShell = (work: (signal: AbortSignal) => Effect.Effect<A, E>) =>
SynchronizedRef.modifyEffect(
ref,
Effect.fnUntraced(function* (st) {
@@ -150,8 +153,9 @@ export namespace Runner {
}
yield* busy
const id = next()
const fiber = yield* work.pipe(Effect.ensuring(finishShell(id)), Effect.forkChild)
const shell = { id, fiber } satisfies ShellHandle<A, E>
const abort = new AbortController()
const fiber = yield* work(abort.signal).pipe(Effect.ensuring(finishShell(id)), Effect.forkChild)
const shell = { id, fiber, abort } satisfies ShellHandle<A, E>
return [
Effect.gen(function* () {
const exit = yield* Fiber.await(fiber)

View File

@@ -31,6 +31,7 @@ export namespace Flag {
export const OPENCODE_ENABLE_EXPERIMENTAL_MODELS = truthy("OPENCODE_ENABLE_EXPERIMENTAL_MODELS")
export const OPENCODE_DISABLE_AUTOCOMPACT = truthy("OPENCODE_DISABLE_AUTOCOMPACT")
export const OPENCODE_DISABLE_MODELS_FETCH = truthy("OPENCODE_DISABLE_MODELS_FETCH")
export const OPENCODE_DISABLE_MOUSE = truthy("OPENCODE_DISABLE_MOUSE")
export const OPENCODE_DISABLE_CLAUDE_CODE = truthy("OPENCODE_DISABLE_CLAUDE_CODE")
export const OPENCODE_DISABLE_CLAUDE_CODE_PROMPT =
OPENCODE_DISABLE_CLAUDE_CODE || truthy("OPENCODE_DISABLE_CLAUDE_CODE_PROMPT")
@@ -72,6 +73,7 @@ export namespace Flag {
export const OPENCODE_EXPERIMENTAL_PLAN_MODE = OPENCODE_EXPERIMENTAL || truthy("OPENCODE_EXPERIMENTAL_PLAN_MODE")
export const OPENCODE_EXPERIMENTAL_WORKSPACES = OPENCODE_EXPERIMENTAL || truthy("OPENCODE_EXPERIMENTAL_WORKSPACES")
export const OPENCODE_EXPERIMENTAL_MARKDOWN = !falsy("OPENCODE_EXPERIMENTAL_MARKDOWN")
export const OPENCODE_EXPERIMENTAL_NEXT_PROMPT = truthy("OPENCODE_EXPERIMENTAL_NEXT_PROMPT")
export const OPENCODE_MODELS_URL = process.env["OPENCODE_MODELS_URL"]
export const OPENCODE_MODELS_PATH = process.env["OPENCODE_MODELS_PATH"]
export const OPENCODE_DISABLE_EMBEDDED_WEB_UI = truthy("OPENCODE_DISABLE_EMBEDDED_WEB_UI")

View File

@@ -11,6 +11,7 @@ import { Arborist } from "@npmcli/arborist"
export namespace Npm {
const log = Log.create({ service: "npm" })
const illegal = process.platform === "win32" ? new Set(["<", ">", ":", '"', "|", "?", "*"]) : undefined
export const InstallFailedError = NamedError.create(
"NpmInstallFailedError",
@@ -19,8 +20,13 @@ export namespace Npm {
}),
)
export function sanitize(pkg: string) {
if (!illegal) return pkg
return Array.from(pkg, (char) => (illegal.has(char) || char.charCodeAt(0) < 32 ? "_" : char)).join("")
}
function directory(pkg: string) {
return path.join(Global.Path.cache, "packages", pkg)
return path.join(Global.Path.cache, "packages", sanitize(pkg))
}
function resolveEntryPoint(name: string, dir: string) {

View File

@@ -1,5 +1,6 @@
import path from "path"
import { fileURLToPath, pathToFileURL } from "url"
import npa from "npm-package-arg"
import semver from "semver"
import { Npm } from "@/npm"
import { Filesystem } from "@/util/filesystem"
@@ -12,11 +13,24 @@ export function isDeprecatedPlugin(spec: string) {
return DEPRECATED_PLUGIN_PACKAGES.some((pkg) => spec.includes(pkg))
}
function parse(spec: string) {
try {
return npa(spec)
} catch {}
}
export function parsePluginSpecifier(spec: string) {
const lastAt = spec.lastIndexOf("@")
const pkg = lastAt > 0 ? spec.substring(0, lastAt) : spec
const version = lastAt > 0 ? spec.substring(lastAt + 1) : "latest"
return { pkg, version }
const hit = parse(spec)
if (hit?.type === "alias" && !hit.name) {
const sub = (hit as npa.AliasResult).subSpec
if (sub?.name) {
const version = !sub.rawSpec || sub.rawSpec === "*" ? "latest" : sub.rawSpec
return { pkg: sub.name, version }
}
}
if (!hit?.name) return { pkg: spec, version: "" }
if (hit.raw === hit.name) return { pkg: hit.name, version: "latest" }
return { pkg: hit.name, version: hit.rawSpec }
}
export type PluginSource = "file" | "npm"
@@ -190,9 +204,11 @@ export async function checkPluginCompatibility(target: string, opencodeVersion:
}
}
export async function resolvePluginTarget(spec: string, parsed = parsePluginSpecifier(spec)) {
export async function resolvePluginTarget(spec: string) {
if (isPathPluginSpec(spec)) return resolvePathPluginTarget(spec)
const result = await Npm.add(parsed.pkg + "@" + parsed.version)
const hit = parse(spec)
const pkg = hit?.name && hit.raw === hit.name ? `${hit.name}@latest` : spec
const result = await Npm.add(pkg)
return result.directory
}

View File

@@ -20,10 +20,12 @@ import { Plugin } from "../plugin"
import PROMPT_PLAN from "../session/prompt/plan.txt"
import BUILD_SWITCH from "../session/prompt/build-switch.txt"
import MAX_STEPS from "../session/prompt/max-steps.txt"
import PROMPT_SUGGEST_NEXT from "../session/prompt/suggest-next.txt"
import { ToolRegistry } from "../tool/registry"
import { Runner } from "@/effect/runner"
import { MCP } from "../mcp"
import { LSP } from "../lsp"
import { ReadTool } from "../tool/read"
import { FileTime } from "../file/time"
import { Flag } from "../flag/flag"
import { ulid } from "ulid"
@@ -32,11 +34,11 @@ import * as CrossSpawnSpawner from "@/effect/cross-spawn-spawner"
import * as Stream from "effect/Stream"
import { Command } from "../command"
import { pathToFileURL, fileURLToPath } from "url"
import { Config } from "../config/config"
import { ConfigMarkdown } from "../config/markdown"
import { SessionSummary } from "./summary"
import { NamedError } from "@opencode-ai/util/error"
import { SessionProcessor } from "./processor"
import { TaskTool } from "@/tool/task"
import { Tool } from "@/tool/tool"
import { Permission } from "@/permission"
import { SessionStatus } from "./status"
@@ -46,8 +48,6 @@ import { AppFileSystem } from "@/filesystem"
import { Truncate } from "@/tool/truncate"
import { decodeDataUrl } from "@/util/data-url"
import { Process } from "@/util/process"
import { run as read } from "@/tool/read"
import { output as subtaskOutput, run as subtask } from "@/tool/subtask"
import { Cause, Effect, Exit, Layer, Option, Scope, ServiceMap } from "effect"
import { InstanceState } from "@/effect/instance-state"
import { makeRuntime } from "@/effect/run-service"
@@ -102,7 +102,6 @@ export namespace SessionPrompt {
const spawner = yield* ChildProcessSpawner.ChildProcessSpawner
const scope = yield* Scope.Scope
const instruction = yield* Instruction.Service
const llm = yield* LLM.Service
const state = yield* InstanceState.make(
Effect.fn("SessionPrompt.state")(function* () {
@@ -220,29 +219,26 @@ export namespace SessionPrompt {
const msgs = onlySubtasks
? [{ role: "user" as const, content: subtasks.map((p) => p.prompt).join("\n") }]
: yield* MessageV2.toModelMessagesEffect(context, mdl)
const text = yield* llm
.stream({
const text = yield* Effect.promise(async (signal) => {
const result = await LLM.stream({
agent: ag,
user: firstInfo,
system: [],
small: true,
tools: {},
model: mdl,
abort: signal,
sessionID: input.session.id,
retries: 2,
messages: [{ role: "user", content: "Generate a title for this conversation:\n" }, ...msgs],
})
.pipe(
Stream.runFold(
() => "",
(text: string, event: LLM.Event) => (event.type === "text-delta" ? text + event.text : text),
),
)
return result.text
})
const cleaned = text
.replace(/<think>[\s\S]*?<\/think>\s*/g, "")
.split("\n")
.map((line: string) => line.trim())
.find((line: string) => line.length > 0)
.map((line) => line.trim())
.find((line) => line.length > 0)
if (!cleaned) return
const t = cleaned.length > 100 ? cleaned.substring(0, 97) + "..." : cleaned
yield* sessions
@@ -254,6 +250,80 @@ export namespace SessionPrompt {
)
})
const suggest = Effect.fn("SessionPrompt.suggest")(function* (input: {
session: Session.Info
sessionID: SessionID
message: MessageV2.WithParts
}) {
if (input.session.parentID) return
const message = input.message.info
if (message.role !== "assistant") return
if (message.error) return
if (!message.finish) return
if (["tool-calls", "unknown"].includes(message.finish)) return
if ((yield* status.get(input.sessionID)).type !== "idle") return
// Use the same model for prompt-cache hit on the conversation prefix
const model = yield* Effect.promise(async () =>
Provider.getModel(message.providerID, message.modelID).catch(() => undefined),
)
if (!model) return
const ag = yield* agents.get(message.agent ?? "code")
if (!ag) return
// Full message history so the cached KV from the main conversation is reused
const msgs = yield* MessageV2.filterCompactedEffect(input.sessionID)
const real = (item: MessageV2.WithParts) =>
item.info.role === "user" && !item.parts.every((part) => "synthetic" in part && part.synthetic)
const parent = msgs.find((item) => item.info.id === message.parentID)
const user = parent && real(parent) ? parent.info : msgs.findLast((item) => real(item))?.info
if (!user || user.role !== "user") return
// Rebuild system prompt identical to the main loop for cache hit
const skills = yield* Effect.promise(() => SystemPrompt.skills(ag))
const env = yield* Effect.promise(() => SystemPrompt.environment(model))
const instructions = yield* instruction.system().pipe(Effect.orDie)
const modelMsgs = yield* Effect.promise(() => MessageV2.toModelMessages(msgs, model))
const system = [...env, ...(skills ? [skills] : []), ...instructions]
const text = yield* Effect.promise(async (signal) => {
const result = await LLM.stream({
agent: ag,
user,
system,
small: false,
tools: {},
model,
abort: signal,
sessionID: input.sessionID,
retries: 1,
toolChoice: "none",
// Append suggestion instruction after the full conversation
messages: [...modelMsgs, { role: "user" as const, content: PROMPT_SUGGEST_NEXT }],
})
return result.text
})
const line = text
.replace(/<think>[\s\S]*?<\/think>\s*/g, "")
.split("\n")
.map((item) => item.trim())
.find((item) => item.length > 0)
?.replace(/^["'`]+|["'`]+$/g, "")
if (!line) return
const tag = line
.toUpperCase()
.replace(/[\s-]+/g, "_")
.replace(/[^A-Z_]/g, "")
if (tag === "NO_SUGGESTION") return
const suggestion = line.length > 110 ? line.slice(0, 107) + "..." : line
if ((yield* status.get(input.sessionID)).type !== "idle") return
yield* status.suggest(input.sessionID, suggestion)
})
const insertReminders = Effect.fn("SessionPrompt.insertReminders")(function* (input: {
messages: MessageV2.WithParts[]
agent: Agent.Info
@@ -402,42 +472,41 @@ NOTE: At any point in time through this workflow you should feel free to ask the
using _ = log.time("resolveTools")
const tools: Record<string, AITool> = {}
const context = (args: any, options: ToolExecutionOptions): Tool.Context =>
Tool.context({
abort: options.abortSignal,
callID: options.toolCallId,
sessionID: input.session.id,
messageID: input.processor.message.id,
extra: { model: input.model, bypassAgentCheck: input.bypassAgentCheck },
agent: input.agent.name,
messages: input.messages,
metadata: (val) =>
Effect.runPromise(
Effect.gen(function* () {
const match = input.processor.partFromToolCall(options.toolCallId)
if (!match || !["running", "pending"].includes(match.state.status)) return
yield* sessions.updatePart({
...match,
state: {
title: val.title,
metadata: val.metadata,
status: "running",
input: args,
time: { start: Date.now() },
},
})
}),
),
ask: (req) =>
Effect.runPromise(
permission.ask({
...req,
sessionID: input.session.id,
tool: { messageID: input.processor.message.id, callID: options.toolCallId },
ruleset: Permission.merge(input.agent.permission, input.session.permission ?? []),
}),
),
})
const context = (args: any, options: ToolExecutionOptions): Tool.Context => ({
sessionID: input.session.id,
abort: options.abortSignal!,
messageID: input.processor.message.id,
callID: options.toolCallId,
extra: { model: input.model, bypassAgentCheck: input.bypassAgentCheck },
agent: input.agent.name,
messages: input.messages,
metadata: (val) =>
Effect.runPromise(
Effect.gen(function* () {
const match = input.processor.partFromToolCall(options.toolCallId)
if (!match || !["running", "pending"].includes(match.state.status)) return
yield* sessions.updatePart({
...match,
state: {
title: val.title,
metadata: val.metadata,
status: "running",
input: args,
time: { start: Date.now() },
},
})
}),
),
ask: (req) =>
Effect.runPromise(
permission.ask({
...req,
sessionID: input.session.id,
tool: { messageID: input.processor.message.id, callID: options.toolCallId },
ruleset: Permission.merge(input.agent.permission, input.session.permission ?? []),
}),
),
})
for (const item of yield* registry.tools(
{ modelID: ModelID.make(input.model.api.id), providerID: input.model.providerID },
@@ -561,20 +630,13 @@ NOTE: At any point in time through this workflow you should feel free to ask the
model: Provider.Model
lastUser: MessageV2.User
sessionID: SessionID
session: Session.Info
msgs: MessageV2.WithParts[]
}) {
const { task, model, lastUser, sessionID } = input
const { task, model, lastUser, sessionID, session, msgs } = input
const ctx = yield* InstanceState.context
const taskAgent = yield* agents.get(task.agent)
if (!taskAgent) {
const available = (yield* agents.list()).filter((a) => !a.hidden).map((a) => a.name)
const hint = available.length ? ` Available agents: ${available.join(", ")}` : ""
const error = new NamedError.Unknown({ message: `Agent not found: "${task.agent}".${hint}` })
yield* bus.publish(Session.Event.Error, { sessionID, error: error.toObject() })
throw error
}
const taskTool = yield* Effect.promise(() => registry.named.task.init())
const taskModel = task.model ? yield* getModel(task.model.providerID, task.model.modelID, sessionID) : model
const taskRef = { providerID: taskModel.providerID, modelID: taskModel.id }
const assistantMessage: MessageV2.Assistant = yield* sessions.updateMessage({
id: MessageID.ascending(),
role: "assistant",
@@ -614,71 +676,57 @@ NOTE: At any point in time through this workflow you should feel free to ask the
subagent_type: task.agent,
command: task.command,
}
yield* plugin.trigger(
"tool.execute.before",
{ tool: "task", sessionID, callID: part.callID },
{ args: taskArgs },
)
yield* plugin.trigger("tool.execute.before", { tool: "task", sessionID, callID: part.id }, { args: taskArgs })
const taskAgent = yield* agents.get(task.agent)
if (!taskAgent) {
const available = (yield* agents.list()).filter((a) => !a.hidden).map((a) => a.name)
const hint = available.length ? ` Available agents: ${available.join(", ")}` : ""
const error = new NamedError.Unknown({ message: `Agent not found: "${task.agent}".${hint}` })
yield* bus.publish(Session.Event.Error, { sessionID, error: error.toObject() })
throw error
}
let child: SessionID | undefined
let error: Error | undefined
const result = yield* subtask(
{
cfg: Effect.promise(() => Config.get()),
get: (taskID) => sessions.get(SessionID.make(taskID)).pipe(Effect.catch(() => Effect.succeed(undefined))),
create: (input) => sessions.create(input),
resolve: resolvePromptParts,
prompt: (input) => prompt({ ...input, messageID: MessageID.ascending() }),
},
{
parentID: sessionID,
description: task.description,
prompt: task.prompt,
agent: taskAgent,
model: taskRef,
start(sessionID, model) {
child = sessionID
const metadata = { sessionId: sessionID, model }
return Effect.runPromise(
sessions.updatePart({
...part,
state: {
status: "running",
input: part.state.input,
time: part.state.status === "running" ? part.state.time : { start: Date.now() },
title: task.description,
metadata,
},
} satisfies MessageV2.ToolPart),
).then((next) => {
part = next
})
},
},
const result = yield* Effect.promise((signal) =>
taskTool
.execute(taskArgs, {
agent: task.agent,
messageID: assistantMessage.id,
sessionID,
abort: signal,
callID: part.callID,
extra: { bypassAgentCheck: true },
messages: msgs,
metadata(val: { title?: string; metadata?: Record<string, any> }) {
return Effect.runPromise(
Effect.gen(function* () {
part = yield* sessions.updatePart({
...part,
type: "tool",
state: { ...part.state, ...val },
} satisfies MessageV2.ToolPart)
}),
)
},
ask(req: any) {
return Effect.runPromise(
permission.ask({
...req,
sessionID,
ruleset: Permission.merge(taskAgent.permission, session.permission ?? []),
}),
)
},
})
.catch((e) => {
error = e instanceof Error ? e : new Error(String(e))
log.error("subtask execution failed", { error, agent: task.agent, description: task.description })
return undefined
}),
).pipe(
Effect.flatMap((sub) =>
truncate.output(subtaskOutput(sub.sessionID, sub.text), {}).pipe(
Effect.map((truncated) => ({
title: task.description,
metadata: {
sessionId: sub.sessionID,
model: sub.model,
truncated: truncated.truncated,
...(truncated.truncated && { outputPath: truncated.outputPath }),
},
output: truncated.content,
})),
),
),
Effect.catchCause((cause) => {
const err = Cause.squash(cause)
error = err instanceof Error ? err : new Error(String(err))
log.error("subtask execution failed", { error, agent: task.agent, description: task.description })
return Effect.succeed(undefined)
}),
Effect.onInterrupt(() =>
Effect.gen(function* () {
if (child) yield* cancel(child)
assistantMessage.finish = "tool-calls"
assistantMessage.time.completed = Date.now()
yield* sessions.updateMessage(assistantMessage)
@@ -698,9 +746,16 @@ NOTE: At any point in time through this workflow you should feel free to ask the
),
)
const attachments = result?.attachments?.map((attachment) => ({
...attachment,
id: PartID.ascending(),
sessionID,
messageID: assistantMessage.id,
}))
yield* plugin.trigger(
"tool.execute.after",
{ tool: "task", sessionID, callID: part.callID, args: taskArgs },
{ tool: "task", sessionID, callID: part.id, args: taskArgs },
result,
)
@@ -717,6 +772,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
title: result.title,
metadata: result.metadata,
output: result.output,
attachments,
time: { ...part.state.time, end: Date.now() },
},
} satisfies MessageV2.ToolPart)
@@ -759,7 +815,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
} satisfies MessageV2.TextPart)
})
const shellImpl = Effect.fn("SessionPrompt.shellImpl")(function* (input: ShellInput) {
const shellImpl = Effect.fn("SessionPrompt.shellImpl")(function* (input: ShellInput, signal: AbortSignal) {
const ctx = yield* InstanceState.context
const session = yield* sessions.get(input.sessionID)
if (session.revert) {
@@ -1092,15 +1148,6 @@ NOTE: At any point in time through this workflow you should feel free to ask the
log.info("file", { mime: part.mime })
const filepath = fileURLToPath(part.url)
if (yield* fsys.isDir(filepath)) part.mime = "application/x-directory"
const readCtx = Tool.context({
sessionID: input.sessionID,
agent: info.agent,
messageID: info.id,
extra: { bypassCwdCheck: true },
messages: [],
metadata: () => {},
ask: async () => {},
})
if (part.mime === "text/plain") {
let offset: number | undefined
@@ -1138,13 +1185,29 @@ NOTE: At any point in time through this workflow you should feel free to ask the
text: `Called the Read tool with the following input: ${JSON.stringify(args)}`,
},
]
const readResult = yield* read(
{ fs: fsys, instruction, lsp, time: filetime, scope },
args,
readCtx,
).pipe(Effect.exit)
if (Exit.isSuccess(readResult)) {
const result = readResult.value
const read = yield* Effect.promise(() => registry.named.read.init()).pipe(
Effect.flatMap((t) =>
provider.getModel(info.model.providerID, info.model.modelID).pipe(
Effect.flatMap((mdl) =>
Effect.promise(() =>
t.execute(args, {
sessionID: input.sessionID,
abort: new AbortController().signal,
agent: input.agent!,
messageID: info.id,
extra: { bypassCwdCheck: true, model: mdl },
messages: [],
metadata: async () => {},
ask: async () => {},
}),
),
),
),
),
Effect.exit,
)
if (Exit.isSuccess(read)) {
const result = read.value
pieces.push({
messageID: info.id,
sessionID: input.sessionID,
@@ -1157,7 +1220,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
...result.attachments.map((a) => ({
...a,
synthetic: true,
filename: "filename" in a && typeof a.filename === "string" ? a.filename : part.filename,
filename: a.filename ?? part.filename,
messageID: info.id,
sessionID: input.sessionID,
})),
@@ -1166,7 +1229,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
pieces.push({ ...part, messageID: info.id, sessionID: input.sessionID })
}
} else {
const error = Cause.squash(readResult.cause)
const error = Cause.squash(read.cause)
log.error("failed to read file", { error })
const message = error instanceof Error ? error.message : String(error)
yield* bus.publish(Session.Event.Error, {
@@ -1186,8 +1249,21 @@ NOTE: At any point in time through this workflow you should feel free to ask the
if (part.mime === "application/x-directory") {
const args = { filePath: filepath }
const result = yield* read({ fs: fsys, instruction, lsp, time: filetime, scope }, args, readCtx).pipe(
Effect.orDie,
const result = yield* Effect.promise(() => registry.named.read.init()).pipe(
Effect.flatMap((t) =>
Effect.promise(() =>
t.execute(args, {
sessionID: input.sessionID,
abort: new AbortController().signal,
agent: input.agent!,
messageID: info.id,
extra: { bypassCwdCheck: true },
messages: [],
metadata: async () => {},
ask: async () => {},
}),
),
),
)
return [
{
@@ -1318,7 +1394,13 @@ NOTE: At any point in time through this workflow you should feel free to ask the
}
if (input.noReply === true) return message
return yield* loop({ sessionID: input.sessionID })
const result = yield* loop({ sessionID: input.sessionID })
yield* suggest({
session,
sessionID: input.sessionID,
message: result,
}).pipe(Effect.ignore, Effect.forkIn(scope))
return result
},
)
@@ -1331,7 +1413,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
}
if (latest) return latest
throw new Error("Impossible")
}).pipe(Effect.orDie)
})
const runLoop: (sessionID: SessionID) => Effect.Effect<MessageV2.WithParts> = Effect.fn("SessionPrompt.run")(
function* (sessionID: SessionID) {
@@ -1392,7 +1474,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
const task = tasks.pop()
if (task?.type === "subtask") {
yield* handleSubtask({ task, model, lastUser, sessionID })
yield* handleSubtask({ task, model, lastUser, sessionID, session, msgs })
continue
}
@@ -1576,7 +1658,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
function* (input: ShellInput) {
const s = yield* InstanceState.get(state)
const runner = getRunner(s.runners, input.sessionID)
return yield* runner.startShell(shellImpl(input))
return yield* runner.startShell((signal) => shellImpl(input, signal))
},
)
@@ -1721,7 +1803,6 @@ NOTE: At any point in time through this workflow you should feel free to ask the
Layer.provide(FileTime.defaultLayer),
Layer.provide(ToolRegistry.defaultLayer),
Layer.provide(Truncate.layer),
Layer.provide(LLM.defaultLayer),
Layer.provide(Provider.defaultLayer),
Layer.provide(Instruction.defaultLayer),
Layer.provide(AppFileSystem.defaultLayer),

View File

@@ -0,0 +1,21 @@
You are generating a suggested next user message for the current conversation.
Goal:
- Suggest a useful next step that keeps momentum.
Rules:
- Output exactly one line, 110 characters max. Be concise.
- Write as the user speaking to the assistant (for example: "Can you...", "Help me...", "Let's...").
- Match the user's tone and language; keep it natural and human.
- Prefer a concrete action over a broad question.
- If the conversation is vague or small-talk, steer toward a practical starter request.
- If there is no meaningful or appropriate next step to suggest, output exactly: NO_SUGGESTION
- Avoid corporate or robotic phrasing.
- Avoid asking multiple discovery questions in one sentence.
- Do not include quotes, labels, markdown, or explanations.
Examples:
- Greeting context -> "Can you scan this repo and suggest the best first task to tackle?"
- Bug-fix context -> "Can you reproduce this bug and propose the smallest safe fix?"
- Feature context -> "Let's implement this incrementally; start with the MVP version first."
- Conversation is complete -> "NO_SUGGESTION"

View File

@@ -11,6 +11,7 @@ export namespace SessionStatus {
.union([
z.object({
type: z.literal("idle"),
suggestion: z.string().optional(),
}),
z.object({
type: z.literal("retry"),
@@ -48,6 +49,7 @@ export namespace SessionStatus {
readonly get: (sessionID: SessionID) => Effect.Effect<Info>
readonly list: () => Effect.Effect<Map<SessionID, Info>>
readonly set: (sessionID: SessionID, status: Info) => Effect.Effect<void>
readonly suggest: (sessionID: SessionID, suggestion: string) => Effect.Effect<void>
}
export class Service extends ServiceMap.Service<Service, Interface>()("@opencode/SessionStatus") {}
@@ -81,7 +83,17 @@ export namespace SessionStatus {
data.set(sessionID, status)
})
return Service.of({ get, list, set })
// Publish an ephemeral "next prompt" suggestion for an idle session.
// NOTE(review): the suggestion is only broadcast on the bus — it is never
// written back into `data` via set(), so Service.get() will not return it;
// confirm that is intentional (the TUI appears to consume the Status event
// directly).
const suggest = Effect.fn("SessionStatus.suggest")(function* (sessionID: SessionID, suggestion: string) {
const data = yield* InstanceState.get(state)
const current = data.get(sessionID)
// A suggestion only makes sense while the session is idle; if the session
// has moved on (busy/retry/etc.), drop it silently.
if (current && current.type !== "idle") return
const status: Info = { type: "idle", suggestion }
// only publish Status so the TUI sees the suggestion;
// skip Event.Idle to avoid spurious plugin notifications
yield* bus.publish(Event.Status, { sessionID, status })
})
return Service.of({ get, list, set, suggest })
}),
)
@@ -99,4 +111,8 @@ export namespace SessionStatus {
export async function set(sessionID: SessionID, status: Info) {
return runPromise((svc) => svc.set(sessionID, status))
}
// Promise-based convenience wrapper around Service.suggest for callers
// outside the Effect runtime; mirrors the other static helpers above.
export async function suggest(sessionID: SessionID, suggestion: string) {
return runPromise((svc) => svc.suggest(sessionID, suggestion))
}
}

View File

@@ -11,9 +11,7 @@ type Options = {
kind?: Kind
}
type Ctx = Pick<Tool.Context, "ask">
export async function assertExternalDirectory(ctx: Ctx, target?: string, options?: Options) {
export async function assertExternalDirectory(ctx: Tool.Context, target?: string, options?: Options) {
if (!target) return
if (options?.bypass) return
@@ -40,7 +38,7 @@ export async function assertExternalDirectory(ctx: Ctx, target?: string, options
}
export const assertExternalDirectoryEffect = Effect.fn("Tool.assertExternalDirectory")(function* (
ctx: Ctx,
ctx: Tool.Context,
target?: string,
options?: Options,
) {

View File

@@ -25,197 +25,6 @@ const parameters = z.object({
limit: z.coerce.number().describe("The maximum number of lines to read (defaults to 2000)").optional(),
})
type Ctx = Omit<Tool.Context, "abort">
type Deps = {
fs: AppFileSystem.Interface
instruction: Instruction.Interface
lsp: LSP.Interface
time: FileTime.Interface
scope: Scope.Scope
}
export const run = Effect.fn("ReadTool.run")(function* (deps: Deps, params: z.infer<typeof parameters>, ctx: Ctx) {
const miss = Effect.fn("ReadTool.miss")(function* (filepath: string) {
const dir = path.dirname(filepath)
const base = path.basename(filepath)
const items = yield* deps.fs.readDirectory(dir).pipe(
Effect.map((items) =>
items
.filter(
(item) =>
item.toLowerCase().includes(base.toLowerCase()) || base.toLowerCase().includes(item.toLowerCase()),
)
.map((item) => path.join(dir, item))
.slice(0, 3),
),
Effect.catch(() => Effect.succeed([] as string[])),
)
if (items.length > 0) {
return yield* Effect.fail(
new Error(`File not found: ${filepath}\n\nDid you mean one of these?\n${items.join("\n")}`),
)
}
return yield* Effect.fail(new Error(`File not found: ${filepath}`))
})
const list = Effect.fn("ReadTool.list")(function* (filepath: string) {
const items = yield* deps.fs.readDirectoryEntries(filepath)
return yield* Effect.forEach(
items,
Effect.fnUntraced(function* (item) {
if (item.type === "directory") return item.name + "/"
if (item.type !== "symlink") return item.name
const target = yield* deps.fs
.stat(path.join(filepath, item.name))
.pipe(Effect.catch(() => Effect.succeed(undefined)))
if (target?.type === "Directory") return item.name + "/"
return item.name
}),
{ concurrency: "unbounded" },
).pipe(Effect.map((items: string[]) => items.sort((a, b) => a.localeCompare(b))))
})
const warm = Effect.fn("ReadTool.warm")(function* (filepath: string, sessionID: Ctx["sessionID"]) {
yield* deps.lsp.touchFile(filepath, false).pipe(Effect.ignore, Effect.forkIn(deps.scope))
yield* deps.time.read(sessionID, filepath)
})
if (params.offset !== undefined && params.offset < 1) {
return yield* Effect.fail(new Error("offset must be greater than or equal to 1"))
}
let filepath = params.filePath
if (!path.isAbsolute(filepath)) {
filepath = path.resolve(Instance.directory, filepath)
}
if (process.platform === "win32") {
filepath = AppFileSystem.normalizePath(filepath)
}
const title = path.relative(Instance.worktree, filepath)
const stat = yield* deps.fs.stat(filepath).pipe(
Effect.catchIf(
(err) => "reason" in err && err.reason._tag === "NotFound",
() => Effect.succeed(undefined),
),
)
yield* assertExternalDirectoryEffect(ctx, filepath, {
bypass: Boolean(ctx.extra?.["bypassCwdCheck"]),
kind: stat?.type === "Directory" ? "directory" : "file",
})
yield* Effect.promise(() =>
ctx.ask({
permission: "read",
patterns: [filepath],
always: ["*"],
metadata: {},
}),
)
if (!stat) return yield* miss(filepath)
if (stat.type === "Directory") {
const items = yield* list(filepath)
const limit = params.limit ?? DEFAULT_READ_LIMIT
const offset = params.offset ?? 1
const start = offset - 1
const sliced = items.slice(start, start + limit)
const truncated = start + sliced.length < items.length
return {
title,
output: [
`<path>${filepath}</path>`,
`<type>directory</type>`,
`<entries>`,
sliced.join("\n"),
truncated
? `\n(Showing ${sliced.length} of ${items.length} entries. Use 'offset' parameter to read beyond entry ${offset + sliced.length})`
: `\n(${items.length} entries)`,
`</entries>`,
].join("\n"),
metadata: {
preview: sliced.slice(0, 20).join("\n"),
truncated,
loaded: [] as string[],
},
}
}
const loaded = yield* deps.instruction.resolve(ctx.messages, filepath, ctx.messageID)
const mime = AppFileSystem.mimeType(filepath)
const isImage = mime.startsWith("image/") && mime !== "image/svg+xml" && mime !== "image/vnd.fastbidsheet"
const isPdf = mime === "application/pdf"
if (isImage || isPdf) {
const msg = `${isImage ? "Image" : "PDF"} read successfully`
return {
title,
output: msg,
metadata: {
preview: msg,
truncated: false,
loaded: loaded.map((item) => item.filepath),
},
attachments: [
{
type: "file" as const,
mime,
url: `data:${mime};base64,${Buffer.from(yield* deps.fs.readFile(filepath)).toString("base64")}`,
},
],
}
}
if (yield* Effect.promise(() => isBinaryFile(filepath, Number(stat.size)))) {
return yield* Effect.fail(new Error(`Cannot read binary file: ${filepath}`))
}
const file = yield* Effect.promise(() =>
lines(filepath, { limit: params.limit ?? DEFAULT_READ_LIMIT, offset: params.offset ?? 1 }),
)
if (file.count < file.offset && !(file.count === 0 && file.offset === 1)) {
return yield* Effect.fail(new Error(`Offset ${file.offset} is out of range for this file (${file.count} lines)`))
}
let output = [`<path>${filepath}</path>`, `<type>file</type>`, "<content>"].join("\n")
output += file.raw.map((line, i) => `${i + file.offset}: ${line}`).join("\n")
const last = file.offset + file.raw.length - 1
const next = last + 1
const truncated = file.more || file.cut
if (file.cut) {
output += `\n\n(Output capped at ${MAX_BYTES_LABEL}. Showing lines ${file.offset}-${last}. Use offset=${next} to continue.)`
} else if (file.more) {
output += `\n\n(Showing lines ${file.offset}-${last} of ${file.count}. Use offset=${next} to continue.)`
} else {
output += `\n\n(End of file - total ${file.count} lines)`
}
output += "\n</content>"
yield* warm(filepath, ctx.sessionID)
if (loaded.length > 0) {
output += `\n\n<system-reminder>\n${loaded.map((item) => item.content).join("\n\n")}\n</system-reminder>`
}
return {
title,
output,
metadata: {
preview: file.raw.slice(0, 20).join("\n"),
truncated,
loaded: loaded.map((item) => item.filepath),
},
}
})
export const ReadTool = Tool.defineEffect(
"read",
Effect.gen(function* () {
@@ -224,13 +33,195 @@ export const ReadTool = Tool.defineEffect(
const lsp = yield* LSP.Service
const time = yield* FileTime.Service
const scope = yield* Scope.Scope
const deps = { fs, instruction, lsp, time, scope } satisfies Deps
// Build a "file not found" failure, suggesting up to three sibling entries
// whose names fuzzily match the requested basename.
const miss = Effect.fn("ReadTool.miss")(function* (filepath: string) {
const dir = path.dirname(filepath)
const base = path.basename(filepath)
// Case-insensitive containment in either direction catches typos and
// partial names; a failed directory read degrades to "no suggestions".
const items = yield* fs.readDirectory(dir).pipe(
Effect.map((items) =>
items
.filter(
(item) =>
item.toLowerCase().includes(base.toLowerCase()) || base.toLowerCase().includes(item.toLowerCase()),
)
.map((item) => path.join(dir, item))
.slice(0, 3),
),
Effect.catch(() => Effect.succeed([] as string[])),
)
if (items.length > 0) {
return yield* Effect.fail(
new Error(`File not found: ${filepath}\n\nDid you mean one of these?\n${items.join("\n")}`),
)
}
return yield* Effect.fail(new Error(`File not found: ${filepath}`))
})
// List a directory's entries sorted by name, suffixing directories — and
// symlinks that resolve to directories — with "/".
const list = Effect.fn("ReadTool.list")(function* (filepath: string) {
const items = yield* fs.readDirectoryEntries(filepath)
return yield* Effect.forEach(
items,
Effect.fnUntraced(function* (item) {
if (item.type === "directory") return item.name + "/"
if (item.type !== "symlink") return item.name
// Symlink: stat the target to decide whether to mark it as a directory;
// a broken link (stat failure) is shown as a plain entry.
const target = yield* fs
.stat(path.join(filepath, item.name))
.pipe(Effect.catch(() => Effect.succeed(undefined)))
if (target?.type === "Directory") return item.name + "/"
return item.name
}),
{ concurrency: "unbounded" },
).pipe(Effect.map((items: string[]) => items.sort((a, b) => a.localeCompare(b))))
})
// Post-read bookkeeping: warm the LSP for this file in the background
// (errors ignored, forked into the tool scope) and record the read time.
const warm = Effect.fn("ReadTool.warm")(function* (filepath: string, sessionID: Tool.Context["sessionID"]) {
yield* lsp.touchFile(filepath, false).pipe(Effect.ignore, Effect.forkIn(scope))
yield* time.read(sessionID, filepath)
})
// Execute a read request: validate the offset, resolve and normalize the
// path, enforce directory/permission checks, then return one of three
// results — a paginated directory listing, an image/PDF attachment, or
// line-numbered file content with pagination hints.
const run = Effect.fn("ReadTool.execute")(function* (params: z.infer<typeof parameters>, ctx: Tool.Context) {
// Offsets are 1-based; reject anything below 1 up front.
if (params.offset !== undefined && params.offset < 1) {
return yield* Effect.fail(new Error("offset must be greater than or equal to 1"))
}
let filepath = params.filePath
if (!path.isAbsolute(filepath)) {
filepath = path.resolve(Instance.directory, filepath)
}
// Normalize Windows paths so later comparisons and cache keys agree.
if (process.platform === "win32") {
filepath = AppFileSystem.normalizePath(filepath)
}
const title = path.relative(Instance.worktree, filepath)
// A missing file surfaces as undefined (not a failure) so we can produce
// "did you mean" suggestions via miss() further down.
const stat = yield* fs.stat(filepath).pipe(
Effect.catchIf(
(err) => "reason" in err && err.reason._tag === "NotFound",
() => Effect.succeed(undefined),
),
)
// Guard reads outside the working directory unless explicitly bypassed.
yield* assertExternalDirectoryEffect(ctx, filepath, {
bypass: Boolean(ctx.extra?.["bypassCwdCheck"]),
kind: stat?.type === "Directory" ? "directory" : "file",
})
yield* Effect.promise(() =>
ctx.ask({
permission: "read",
patterns: [filepath],
always: ["*"],
metadata: {},
}),
)
if (!stat) return yield* miss(filepath)
// Directory read: paginated entry listing reusing offset/limit semantics.
if (stat.type === "Directory") {
const items = yield* list(filepath)
const limit = params.limit ?? DEFAULT_READ_LIMIT
const offset = params.offset ?? 1
const start = offset - 1
const sliced = items.slice(start, start + limit)
const truncated = start + sliced.length < items.length
return {
title,
output: [
`<path>${filepath}</path>`,
`<type>directory</type>`,
`<entries>`,
sliced.join("\n"),
truncated
? `\n(Showing ${sliced.length} of ${items.length} entries. Use 'offset' parameter to read beyond entry ${offset + sliced.length})`
: `\n(${items.length} entries)`,
`</entries>`,
].join("\n"),
metadata: {
preview: sliced.slice(0, 20).join("\n"),
truncated,
loaded: [] as string[],
},
}
}
// Resolve any instruction files this read pulls in for the session.
const loaded = yield* instruction.resolve(ctx.messages, filepath, ctx.messageID)
const mime = AppFileSystem.mimeType(filepath)
// svg and vnd.fastbidsheet are excluded from attachment handling —
// presumably treated as text despite the image/* mime; confirm.
const isImage = mime.startsWith("image/") && mime !== "image/svg+xml" && mime !== "image/vnd.fastbidsheet"
const isPdf = mime === "application/pdf"
if (isImage || isPdf) {
const msg = `${isImage ? "Image" : "PDF"} read successfully`
return {
title,
output: msg,
metadata: {
preview: msg,
truncated: false,
loaded: loaded.map((item) => item.filepath),
},
attachments: [
{
type: "file" as const,
mime,
url: `data:${mime};base64,${Buffer.from(yield* fs.readFile(filepath)).toString("base64")}`,
},
],
}
}
if (yield* Effect.promise(() => isBinaryFile(filepath, Number(stat.size)))) {
return yield* Effect.fail(new Error(`Cannot read binary file: ${filepath}`))
}
const file = yield* Effect.promise(() =>
lines(filepath, { limit: params.limit ?? DEFAULT_READ_LIMIT, offset: params.offset ?? 1 }),
)
// Offset 1 on an empty file is allowed; any other out-of-range offset fails.
if (file.count < file.offset && !(file.count === 0 && file.offset === 1)) {
return yield* Effect.fail(
new Error(`Offset ${file.offset} is out of range for this file (${file.count} lines)`),
)
}
// The extra "\n" after <content> keeps the first numbered line from being
// misread as part of the tag by the model.
let output = [`<path>${filepath}</path>`, `<type>file</type>`, "<content>" + "\n"].join("\n")
output += file.raw.map((line, i) => `${i + file.offset}: ${line}`).join("\n")
const last = file.offset + file.raw.length - 1
const next = last + 1
const truncated = file.more || file.cut
if (file.cut) {
output += `\n\n(Output capped at ${MAX_BYTES_LABEL}. Showing lines ${file.offset}-${last}. Use offset=${next} to continue.)`
} else if (file.more) {
output += `\n\n(Showing lines ${file.offset}-${last} of ${file.count}. Use offset=${next} to continue.)`
} else {
output += `\n\n(End of file - total ${file.count} lines)`
}
output += "\n</content>"
yield* warm(filepath, ctx.sessionID)
if (loaded.length > 0) {
output += `\n\n<system-reminder>\n${loaded.map((item) => item.content).join("\n\n")}\n</system-reminder>`
}
return {
title,
output,
metadata: {
preview: file.raw.slice(0, 20).join("\n"),
truncated,
loaded: loaded.map((item) => item.filepath),
},
}
})
return {
description: DESCRIPTION,
parameters,
async execute(params: z.infer<typeof parameters>, ctx) {
return Effect.runPromise(run(deps, params, ctx).pipe(Effect.orDie))
return Effect.runPromise(run(params, ctx).pipe(Effect.orDie))
},
}
}),

View File

@@ -1,102 +0,0 @@
import type { Agent } from "../agent/agent"
import { Config } from "../config/config"
import { Session } from "../session"
import { SessionPrompt } from "../session/prompt"
import { SessionID } from "../session/schema"
import type { ModelID, ProviderID } from "../provider/schema"
import { Effect } from "effect"
type Ref = {
providerID: ProviderID
modelID: ModelID
}
type Parts = Awaited<ReturnType<typeof SessionPrompt.resolvePromptParts>>
type Reply = Awaited<ReturnType<typeof SessionPrompt.prompt>>
type Deps = {
cfg: Effect.Effect<Config.Info>
get: (taskID: string) => Effect.Effect<Session.Info | undefined>
create: (input: { parentID: SessionID; title: string }) => Effect.Effect<Session.Info>
resolve: (prompt: string) => Effect.Effect<Parts>
prompt: (input: {
sessionID: SessionID
model: Ref
agent: string
tools: Record<string, boolean>
parts: Parts
}) => Effect.Effect<Reply>
}
type Input = {
parentID: SessionID
taskID?: string
description: string
prompt: string
agent: Agent.Info
model: Ref
abort?: AbortSignal
cancel?: (sessionID: SessionID) => Promise<void> | void
start?: (sessionID: SessionID, model: Ref) => Promise<void> | void
}
/**
 * Build the tool-override map for a subagent run. Tools the agent lacks a
 * permission rule for ("task", "todowrite"), plus any configured
 * experimental primary tools, are disabled (mapped to false); everything
 * else is left untouched by the caller's spread.
 */
export function tools(agent: Agent.Info, cfg: Config.Info) {
  const granted = new Set(agent.permission.map((rule) => rule.permission))
  const disabled: Record<string, boolean> = {}
  if (!granted.has("todowrite")) disabled["todowrite"] = false
  if (!granted.has("task")) disabled["task"] = false
  for (const tool of cfg.experimental?.primary_tools ?? []) disabled[tool] = false
  return disabled
}
/**
 * Wrap a subtask's final text in the envelope returned to the parent
 * session, leading with the task id so the task can be resumed later.
 */
export function output(sessionID: SessionID, text: string) {
  const header = `task_id: ${sessionID} (for resuming to continue this task if needed)`
  return `${header}\n\n<task_result>\n${text}\n</task_result>`
}
// Create (or resume) a subagent session and run a prompt in it. Returns
// the child session id, the model actually used, and the final assistant
// text (empty string when no text part was produced).
export const run = Effect.fn("Subtask.run")(function* (deps: Deps, input: Input) {
const cfg = yield* deps.cfg
// A model pinned on the agent definition wins over the caller-supplied one.
const model = input.agent.model ?? input.model
// Session lookup/creation is wrapped in uninterruptibleMask so an abort
// cannot strand a half-registered session; the individual lookups are
// restored (interruptible) inside the mask.
const session = yield* Effect.uninterruptibleMask((restore) =>
Effect.gen(function* () {
const found = input.taskID ? yield* restore(deps.get(input.taskID)) : undefined
const session = found
? found
: yield* restore(
deps.create({
parentID: input.parentID,
title: input.description + ` (@${input.agent.name} subagent)`,
}),
)
// Notify the caller (metadata/cancel wiring) before any work starts.
const start = input.start?.(session.id, model)
if (start) yield* Effect.promise(() => Promise.resolve(start))
return session
}),
)
// If the caller aborted while the session was being created, cancel the
// child session.
// NOTE(review): execution still falls through to deps.prompt after cancel —
// confirm prompting a cancelled session returns promptly.
if (input.abort?.aborted) {
const cancel = input.cancel?.(session.id)
if (cancel) yield* Effect.promise(() => Promise.resolve(cancel))
}
const result = yield* deps.prompt({
sessionID: session.id,
model,
agent: input.agent.name,
tools: tools(input.agent, cfg),
parts: yield* deps.resolve(input.prompt),
})
return {
sessionID: session.id,
model,
// The last text part is treated as the subagent's final answer.
text: result.parts.findLast((part) => part.type === "text")?.text ?? "",
}
})

View File

@@ -1,17 +1,16 @@
import { Tool } from "./tool"
import DESCRIPTION from "./task.txt"
import z from "zod"
import { Effect } from "effect"
import { Config } from "../config/config"
import { Session } from "../session"
import { SessionPrompt } from "../session/prompt"
import { SessionID, MessageID } from "../session/schema"
import { MessageV2 } from "../session/message-v2"
import { Identifier } from "../id/id"
import { Agent } from "../agent/agent"
import type { SessionID } from "../session/schema"
import { MessageID, SessionID as SessionRef } from "../session/schema"
import { SessionPrompt } from "../session/prompt"
import { iife } from "@/util/iife"
import { defer } from "@/util/defer"
import { Config } from "../config/config"
import { Permission } from "@/permission"
import { output, run } from "./subtask"
const parameters = z.object({
description: z.string().describe("A short (3-5 words) description of the task"),
@@ -46,6 +45,8 @@ export const TaskTool = Tool.define("task", async (ctx) => {
description,
parameters,
async execute(params: z.infer<typeof parameters>, ctx) {
const config = await Config.get()
// Skip permission check when user explicitly invoked via @ or command subtask
if (!ctx.extra?.bypassAgentCheck) {
await ctx.ask({
@@ -61,60 +62,104 @@ export const TaskTool = Tool.define("task", async (ctx) => {
const agent = await Agent.get(params.subagent_type)
if (!agent) throw new Error(`Unknown agent type: ${params.subagent_type} is not a valid agent type`)
const hasTaskPermission = agent.permission.some((rule) => rule.permission === "task")
const hasTodoWritePermission = agent.permission.some((rule) => rule.permission === "todowrite")
const session = await iife(async () => {
if (params.task_id) {
const found = await Session.get(SessionID.make(params.task_id)).catch(() => {})
if (found) return found
}
return await Session.create({
parentID: ctx.sessionID,
title: params.description + ` (@${agent.name} subagent)`,
permission: [
...(hasTodoWritePermission
? []
: [
{
permission: "todowrite" as const,
pattern: "*" as const,
action: "deny" as const,
},
]),
...(hasTaskPermission
? []
: [
{
permission: "task" as const,
pattern: "*" as const,
action: "deny" as const,
},
]),
...(config.experimental?.primary_tools?.map((t) => ({
pattern: "*",
action: "allow" as const,
permission: t,
})) ?? []),
],
})
})
const msg = await MessageV2.get({ sessionID: ctx.sessionID, messageID: ctx.messageID })
if (msg.info.role !== "assistant") throw new Error("Not an assistant message")
let child: SessionID | undefined
const cancel = () => {
if (!child) return
SessionPrompt.cancel(child)
const model = agent.model ?? {
modelID: msg.info.modelID,
providerID: msg.info.providerID,
}
ctx.metadata({
title: params.description,
metadata: {
sessionId: session.id,
model,
},
})
const messageID = MessageID.ascending()
function cancel() {
SessionPrompt.cancel(session.id)
}
ctx.abort.addEventListener("abort", cancel)
using _ = defer(() => ctx.abort.removeEventListener("abort", cancel))
const promptParts = await SessionPrompt.resolvePromptParts(params.prompt)
const task = await Effect.runPromise(
run(
{
cfg: Effect.promise(() => Config.get()),
get: (taskID) => Effect.promise(() => Session.get(SessionRef.make(taskID)).catch(() => undefined)),
create: (input) => Effect.promise(() => Session.create(input)),
resolve: (prompt) => Effect.promise(() => SessionPrompt.resolvePromptParts(prompt)),
prompt: (input) =>
Effect.promise(() => SessionPrompt.prompt({ ...input, messageID: MessageID.ascending() })),
},
{
parentID: ctx.sessionID,
taskID: params.task_id,
description: params.description,
prompt: params.prompt,
agent,
abort: ctx.abort,
cancel: SessionPrompt.cancel,
model: {
modelID: msg.info.modelID,
providerID: msg.info.providerID,
},
start(sessionID, model) {
child = sessionID
ctx.metadata({
title: params.description,
metadata: {
sessionId: sessionID,
model,
},
})
},
},
),
)
const result = await SessionPrompt.prompt({
messageID,
sessionID: session.id,
model: {
modelID: model.modelID,
providerID: model.providerID,
},
agent: agent.name,
tools: {
...(hasTodoWritePermission ? {} : { todowrite: false }),
...(hasTaskPermission ? {} : { task: false }),
...Object.fromEntries((config.experimental?.primary_tools ?? []).map((t) => [t, false])),
},
parts: promptParts,
})
const text = result.parts.findLast((x) => x.type === "text")?.text ?? ""
const output = [
`task_id: ${session.id} (for resuming to continue this task if needed)`,
"",
"<task_result>",
text,
"</task_result>",
].join("\n")
return {
title: params.description,
metadata: {
sessionId: task.sessionID,
model: task.model,
sessionId: session.id,
model,
},
output: output(task.sessionID, task.text),
output,
}
},
}

View File

@@ -26,20 +26,6 @@ export namespace Tool {
metadata(input: { title?: string; metadata?: M }): void
ask(input: Omit<Permission.Request, "id" | "sessionID" | "tool">): Promise<void>
}
/**
 * Build a complete Tool.Context from a partial one, defaulting the abort
 * signal to a fresh (never-fired) AbortController signal when the caller
 * did not supply one. callID stays undefined unless provided.
 */
export function context<M extends Metadata = Metadata>(
  input: Omit<Context<M>, "abort" | "callID"> & {
    abort?: AbortSignal
    callID?: string
  },
): Context<M> {
  const abort = input.abort ?? new AbortController().signal
  const { callID } = input
  return { ...input, abort, callID }
}
export interface Def<Parameters extends z.ZodType = z.ZodType, M extends Metadata = Metadata> {
description: string
parameters: Parameters

View File

@@ -250,7 +250,7 @@ describe("Runner", () => {
Effect.gen(function* () {
const s = yield* Scope.Scope
const runner = Runner.make<string>(s)
const result = yield* runner.startShell(Effect.succeed("shell-done"))
const result = yield* runner.startShell((_signal) => Effect.succeed("shell-done"))
expect(result).toBe("shell-done")
expect(runner.busy).toBe(false)
}),
@@ -264,7 +264,7 @@ describe("Runner", () => {
const fiber = yield* runner.ensureRunning(Effect.never.pipe(Effect.as("x"))).pipe(Effect.forkChild)
yield* Effect.sleep("10 millis")
const exit = yield* runner.startShell(Effect.succeed("nope")).pipe(Effect.exit)
const exit = yield* runner.startShell((_s) => Effect.succeed("nope")).pipe(Effect.exit)
expect(Exit.isFailure(exit)).toBe(true)
yield* runner.cancel
@@ -279,10 +279,12 @@ describe("Runner", () => {
const runner = Runner.make<string>(s)
const gate = yield* Deferred.make<void>()
const sh = yield* runner.startShell(Deferred.await(gate).pipe(Effect.as("first"))).pipe(Effect.forkChild)
const sh = yield* runner
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("first")))
.pipe(Effect.forkChild)
yield* Effect.sleep("10 millis")
const exit = yield* runner.startShell(Effect.succeed("second")).pipe(Effect.exit)
const exit = yield* runner.startShell((_s) => Effect.succeed("second")).pipe(Effect.exit)
expect(Exit.isFailure(exit)).toBe(true)
yield* Deferred.succeed(gate, undefined)
@@ -300,26 +302,37 @@ describe("Runner", () => {
},
})
const sh = yield* runner.startShell(Effect.never.pipe(Effect.as("aborted"))).pipe(Effect.forkChild)
const sh = yield* runner
.startShell((signal) =>
Effect.promise(
() =>
new Promise<string>((resolve) => {
signal.addEventListener("abort", () => resolve("aborted"), { once: true })
}),
),
)
.pipe(Effect.forkChild)
yield* Effect.sleep("10 millis")
const exit = yield* runner.startShell(Effect.succeed("second")).pipe(Effect.exit)
const exit = yield* runner.startShell((_s) => Effect.succeed("second")).pipe(Effect.exit)
expect(Exit.isFailure(exit)).toBe(true)
yield* runner.cancel
const done = yield* Fiber.await(sh)
expect(Exit.isFailure(done)).toBe(true)
expect(Exit.isSuccess(done)).toBe(true)
}),
)
it.live(
"cancel interrupts shell",
"cancel interrupts shell that ignores abort signal",
Effect.gen(function* () {
const s = yield* Scope.Scope
const runner = Runner.make<string>(s)
const gate = yield* Deferred.make<void>()
const sh = yield* runner.startShell(Deferred.await(gate).pipe(Effect.as("ignored"))).pipe(Effect.forkChild)
const sh = yield* runner
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("ignored")))
.pipe(Effect.forkChild)
yield* Effect.sleep("10 millis")
const stop = yield* runner.cancel.pipe(Effect.forkChild)
@@ -343,7 +356,9 @@ describe("Runner", () => {
const runner = Runner.make<string>(s)
const gate = yield* Deferred.make<void>()
const sh = yield* runner.startShell(Deferred.await(gate).pipe(Effect.as("shell-result"))).pipe(Effect.forkChild)
const sh = yield* runner
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("shell-result")))
.pipe(Effect.forkChild)
yield* Effect.sleep("10 millis")
expect(runner.state._tag).toBe("Shell")
@@ -369,7 +384,9 @@ describe("Runner", () => {
const calls = yield* Ref.make(0)
const gate = yield* Deferred.make<void>()
const sh = yield* runner.startShell(Deferred.await(gate).pipe(Effect.as("shell"))).pipe(Effect.forkChild)
const sh = yield* runner
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("shell")))
.pipe(Effect.forkChild)
yield* Effect.sleep("10 millis")
const work = Effect.gen(function* () {
@@ -397,7 +414,16 @@ describe("Runner", () => {
const runner = Runner.make<string>(s)
const gate = yield* Deferred.make<void>()
const sh = yield* runner.startShell(Effect.never.pipe(Effect.as("aborted"))).pipe(Effect.forkChild)
const sh = yield* runner
.startShell((signal) =>
Effect.promise(
() =>
new Promise<string>((resolve) => {
signal.addEventListener("abort", () => resolve("aborted"), { once: true })
}),
),
)
.pipe(Effect.forkChild)
yield* Effect.sleep("10 millis")
const run = yield* runner.ensureRunning(Effect.succeed("y")).pipe(Effect.forkChild)
@@ -452,7 +478,7 @@ describe("Runner", () => {
const runner = Runner.make<string>(s, {
onBusy: Ref.update(count, (n) => n + 1),
})
yield* runner.startShell(Effect.succeed("done"))
yield* runner.startShell((_signal) => Effect.succeed("done"))
expect(yield* Ref.get(count)).toBe(1)
}),
)
@@ -483,7 +509,9 @@ describe("Runner", () => {
const runner = Runner.make<string>(s)
const gate = yield* Deferred.make<void>()
const fiber = yield* runner.startShell(Deferred.await(gate).pipe(Effect.as("ok"))).pipe(Effect.forkChild)
const fiber = yield* runner
.startShell((_signal) => Deferred.await(gate).pipe(Effect.as("ok")))
.pipe(Effect.forkChild)
yield* Effect.sleep("10 millis")
expect(runner.busy).toBe(true)

View File

@@ -0,0 +1,18 @@
import { describe, expect, test } from "bun:test"
import { Npm } from "../src/npm"
const win = process.platform === "win32"
// Npm.sanitize rewrites characters that are invalid in win32 cache paths;
// on other platforms specs pass through unchanged.
describe("Npm.sanitize", () => {
test("keeps normal scoped package specs unchanged", () => {
expect(Npm.sanitize("@opencode/acme")).toBe("@opencode/acme")
expect(Npm.sanitize("@opencode/acme@1.0.0")).toBe("@opencode/acme@1.0.0")
expect(Npm.sanitize("prettier")).toBe("prettier")
})
test("handles git https specs", () => {
const spec = "acme@git+https://github.com/opencode/acme.git"
// On win32 only the ":" after the protocol is rewritten to "_"; the rest
// of the spec is preserved byte-for-byte.
const expected = win ? "acme@git+https_//github.com/opencode/acme.git" : spec
expect(Npm.sanitize(spec)).toBe(expected)
})
})

View File

@@ -0,0 +1,88 @@
import { describe, expect, test } from "bun:test"
import { parsePluginSpecifier } from "../../src/plugin/shared"
// Covers npm-package-arg-backed specifier parsing: plain and scoped names,
// optional versions, git+https/git+ssh URLs (including an embedded "@"),
// bare git URLs, and npm: aliases/protocol specifiers.
describe("parsePluginSpecifier", () => {
test("parses standard npm package without version", () => {
expect(parsePluginSpecifier("acme")).toEqual({
pkg: "acme",
version: "latest",
})
})
test("parses standard npm package with version", () => {
expect(parsePluginSpecifier("acme@1.0.0")).toEqual({
pkg: "acme",
version: "1.0.0",
})
})
test("parses scoped npm package without version", () => {
expect(parsePluginSpecifier("@opencode/acme")).toEqual({
pkg: "@opencode/acme",
version: "latest",
})
})
test("parses scoped npm package with version", () => {
expect(parsePluginSpecifier("@opencode/acme@1.0.0")).toEqual({
pkg: "@opencode/acme",
version: "1.0.0",
})
})
test("parses package with git+https url", () => {
expect(parsePluginSpecifier("acme@git+https://github.com/opencode/acme.git")).toEqual({
pkg: "acme",
version: "git+https://github.com/opencode/acme.git",
})
})
test("parses scoped package with git+https url", () => {
expect(parsePluginSpecifier("@opencode/acme@git+https://github.com/opencode/acme.git")).toEqual({
pkg: "@opencode/acme",
version: "git+https://github.com/opencode/acme.git",
})
})
// The git+ssh URLs below contain a second "@" (git@github.com) — the parser
// must split on the name/version boundary, not the last "@".
test("parses package with git+ssh url containing another @", () => {
expect(parsePluginSpecifier("acme@git+ssh://git@github.com/opencode/acme.git")).toEqual({
pkg: "acme",
version: "git+ssh://git@github.com/opencode/acme.git",
})
})
test("parses scoped package with git+ssh url containing another @", () => {
expect(parsePluginSpecifier("@opencode/acme@git+ssh://git@github.com/opencode/acme.git")).toEqual({
pkg: "@opencode/acme",
version: "git+ssh://git@github.com/opencode/acme.git",
})
})
// Unaliased git URL: the whole spec becomes the package, with no version.
test("parses unaliased git+ssh url", () => {
expect(parsePluginSpecifier("git+ssh://git@github.com/opencode/acme.git")).toEqual({
pkg: "git+ssh://git@github.com/opencode/acme.git",
version: "",
})
})
test("parses npm alias using the alias name", () => {
expect(parsePluginSpecifier("acme@npm:@opencode/acme@1.0.0")).toEqual({
pkg: "acme",
version: "npm:@opencode/acme@1.0.0",
})
})
// Bare "npm:" protocol (no alias): resolves to the target package itself.
test("parses bare npm protocol specifier using the target package", () => {
expect(parsePluginSpecifier("npm:@opencode/acme@1.0.0")).toEqual({
pkg: "@opencode/acme",
version: "1.0.0",
})
})
test("parses unversioned npm protocol specifier", () => {
expect(parsePluginSpecifier("npm:@opencode/acme")).toEqual({
pkg: "@opencode/acme",
version: "latest",
})
})
})

View File

@@ -1,7 +1,8 @@
import { NodeFileSystem } from "@effect/platform-node"
import { expect } from "bun:test"
import { expect, spyOn } from "bun:test"
import { Cause, Effect, Exit, Fiber, Layer } from "effect"
import path from "path"
import z from "zod"
import { Agent as AgentSvc } from "../../src/agent/agent"
import { Bus } from "../../src/bus"
import { Command } from "../../src/command"
@@ -28,6 +29,7 @@ import { MessageID, PartID, SessionID } from "../../src/session/schema"
import { SessionStatus } from "../../src/session/status"
import { Shell } from "../../src/shell/shell"
import { Snapshot } from "../../src/snapshot"
import { TaskTool } from "../../src/tool/task"
import { ToolRegistry } from "../../src/tool/registry"
import { Truncate } from "../../src/tool/truncate"
import { Log } from "../../src/util/log"
@@ -627,26 +629,41 @@ it.live(
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const { prompt, chat, sessions } = yield* boot()
const llm = yield* TestLLMServer
yield* llm.hang
const ready = defer<void>()
const aborted = defer<void>()
const init = spyOn(TaskTool, "init").mockImplementation(async () => ({
description: "task",
parameters: z.object({
description: z.string(),
prompt: z.string(),
subagent_type: z.string(),
task_id: z.string().optional(),
command: z.string().optional(),
}),
execute: async (_args, ctx) => {
ready.resolve()
ctx.abort.addEventListener("abort", () => aborted.resolve(), { once: true })
await new Promise<void>(() => {})
return {
title: "",
metadata: {
sessionId: SessionID.make("task"),
model: ref,
},
output: "",
}
},
}))
yield* Effect.addFinalizer(() => Effect.sync(() => init.mockRestore()))
const { prompt, chat } = yield* boot()
const msg = yield* user(chat.id, "hello")
yield* addSubtask(chat.id, msg.id)
const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
yield* Effect.gen(function* () {
while (true) {
const [child] = yield* sessions.children(chat.id)
if (child) {
const msgs = yield* sessions.messages({ sessionID: child.id })
if (msgs.some((msg) => msg.info.role === "assistant")) return
}
yield* Effect.sleep("10 millis")
}
})
yield* Effect.promise(() => ready.promise)
yield* prompt.cancel(chat.id)
yield* Effect.promise(() => aborted.promise)
const exit = yield* Fiber.await(fiber)
expect(Exit.isSuccess(exit)).toBe(true)

View File

@@ -1,28 +1,13 @@
import { afterEach, describe, expect, mock, spyOn, test } from "bun:test"
import { afterEach, describe, expect, test } from "bun:test"
import { Agent } from "../../src/agent/agent"
import { Config } from "../../src/config/config"
import { Instance } from "../../src/project/instance"
import { ModelID, ProviderID } from "../../src/provider/schema"
import { MessageV2 } from "../../src/session/message-v2"
import { SessionPrompt } from "../../src/session/prompt"
import { MessageID, SessionID } from "../../src/session/schema"
import { Session } from "../../src/session"
import { TaskTool } from "../../src/tool/task"
import { tmpdir } from "../fixture/fixture"
afterEach(async () => {
mock.restore()
await Instance.disposeAll()
})
/**
 * Builds a deferred: a promise paired with the `done` callback that
 * settles it. Tests use it to pause execution until explicitly released.
 */
function wait<T>() {
  let settle!: (value: T | PromiseLike<T>) => void
  const promise = new Promise<T>((resolve) => {
    settle = resolve
  })
  return { promise, done: settle }
}
describe("tool.task", () => {
test("description sorts subagents by name and is stable across calls", async () => {
await using tmp = await tmpdir({
@@ -61,73 +46,4 @@ describe("tool.task", () => {
},
})
})
// Regression test: if the tool-call abort signal fires while the child
// session is still being created, TaskTool must cancel the child prompt
// rather than leave it running.
test("cancels child session when aborted during creation", async () => {
// Deferred gates used to pause Session.create mid-flight so the test
// can abort at a deterministic point.
const started = wait<void>()
const gate = wait<void>()
const parent = SessionID.make("parent")
const child = SessionID.make("child")
const messageID = MessageID.ascending()
const abort = new AbortController()
// Minimal subagent definition the task tool resolves by name.
const agent: Agent.Info = {
name: "general",
description: "General agent",
mode: "subagent",
options: {},
permission: [],
}
const ref = {
providerID: ProviderID.make("test"),
modelID: ModelID.make("test-model"),
}
// Stub out everything TaskTool touches so only the cancellation path
// is exercised.
spyOn(Agent, "list").mockResolvedValue([agent])
spyOn(Agent, "get").mockResolvedValue(agent)
spyOn(Config, "get").mockResolvedValue({ experimental: {} } as Awaited<ReturnType<typeof Config.get>>)
spyOn(MessageV2, "get").mockResolvedValue({
info: {
role: "assistant",
providerID: ref.providerID,
modelID: ref.modelID,
},
} as Awaited<ReturnType<typeof MessageV2.get>>)
// Force the "create a new session" path (Session.get fails), and block
// inside Session.create until the test releases `gate`.
spyOn(Session, "get").mockRejectedValue(new Error("missing"))
spyOn(Session, "create").mockImplementation(async () => {
started.done()
await gate.promise
return { id: child } as Awaited<ReturnType<typeof Session.create>>
})
// The assertion target: cancellation of the child's prompt.
const cancel = spyOn(SessionPrompt, "cancel").mockResolvedValue()
spyOn(SessionPrompt, "resolvePromptParts").mockResolvedValue(
[] as Awaited<ReturnType<typeof SessionPrompt.resolvePromptParts>>,
)
spyOn(SessionPrompt, "prompt").mockResolvedValue({
parts: [{ type: "text", text: "done" }],
} as Awaited<ReturnType<typeof SessionPrompt.prompt>>)
const tool = await TaskTool.init()
// Start the tool; it will block inside the mocked Session.create.
const run = tool.execute(
{
description: "inspect bug",
prompt: "check it",
subagent_type: "general",
},
{
sessionID: parent,
messageID,
agent: "build",
abort: abort.signal,
messages: [],
metadata: () => {},
ask: async () => {},
},
)
// Abort while creation is in flight, then release the gate so the tool
// observes the already-aborted signal and runs its cleanup.
await started.promise
abort.abort()
gate.done()
await run
// The aborted run must have cancelled the freshly created child session.
expect(cancel).toHaveBeenCalledWith(child)
})
})

View File

@@ -126,6 +126,7 @@ export type EventPermissionReplied = {
export type SessionStatus =
| {
type: "idle"
suggestion?: string
}
| {
type: "retry"

View File

@@ -573,6 +573,7 @@ OpenCode can be configured using environment variables.
| `OPENCODE_DISABLE_CLAUDE_CODE_PROMPT` | boolean | Disable reading `~/.claude/CLAUDE.md` |
| `OPENCODE_DISABLE_CLAUDE_CODE_SKILLS` | boolean | Disable loading `.claude/skills` |
| `OPENCODE_DISABLE_MODELS_FETCH` | boolean | Disable fetching models from remote sources |
| `OPENCODE_DISABLE_MOUSE` | boolean | Disable mouse capture in the TUI |
| `OPENCODE_FAKE_VCS` | string | Fake VCS provider for testing purposes |
| `OPENCODE_DISABLE_FILETIME_CHECK` | boolean | Disable file modification-time checks (a performance optimization) |
| `OPENCODE_CLIENT` | string | Client identifier (defaults to `cli`) |

View File

@@ -272,7 +272,8 @@ Use a dedicated `tui.json` (or `tui.jsonc`) file for TUI-specific settings.
"scroll_acceleration": {
"enabled": true
},
"diff_style": "auto"
"diff_style": "auto",
"mouse": true
}
```
@@ -280,8 +281,6 @@ Use `OPENCODE_TUI_CONFIG` to point to a custom TUI config file.
Legacy `theme`, `keybinds`, and `tui` keys in `opencode.json` are deprecated and automatically migrated when possible.
[Learn more about TUI configuration here](/docs/tui#configure).
---
### Server

View File

@@ -368,7 +368,8 @@ You can customize TUI behavior through `tui.json` (or `tui.jsonc`).
"scroll_acceleration": {
"enabled": true
},
"diff_style": "auto"
"diff_style": "auto",
"mouse": true
}
```
@@ -381,6 +382,7 @@ This is separate from `opencode.json`, which configures server/runtime behavior.
- `scroll_acceleration.enabled` - Enable macOS-style scroll acceleration for smooth, natural scrolling. When enabled, scroll speed increases with rapid scrolling gestures and stays precise for slower movements. **This setting takes precedence over `scroll_speed` and overrides it when enabled.**
- `scroll_speed` - Controls how fast the TUI scrolls when using scroll commands (minimum: `0.001`, supports decimal values). Defaults to `3`. **Note: This is ignored if `scroll_acceleration.enabled` is set to `true`.**
- `diff_style` - Controls diff rendering. `"auto"` adapts to terminal width, `"stacked"` always shows a single-column layout.
- `mouse` - Enable or disable mouse capture in the TUI (default: `true`). When disabled, the terminal's native mouse selection/scrolling behavior is preserved.
Use `OPENCODE_TUI_CONFIG` to load a custom TUI config path.

View File

@@ -94,8 +94,6 @@ You can also access our models through the following API endpoints.
| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| Qwen3.6 Plus Free | qwen3.6-plus-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
@@ -122,8 +120,6 @@ We support a pay-as-you-go model. Below are the prices **per 1M tokens**.
| Model | Input | Output | Cached Read | Cached Write |
| --------------------------------- | ------ | ------- | ----------- | ------------ |
| Big Pickle | Free | Free | Free | - |
| MiMo V2 Pro Free | Free | Free | Free | - |
| MiMo V2 Omni Free | Free | Free | Free | - |
| Qwen3.6 Plus Free | Free | Free | Free | - |
| Nemotron 3 Super Free | Free | Free | Free | - |
| MiniMax M2.5 Free | Free | Free | Free | - |
@@ -169,8 +165,6 @@ Credit card fees are passed along at cost (4.4% + $0.30 per transaction); we don
The free models:
- MiniMax M2.5 Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
- MiMo V2 Pro Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
- MiMo V2 Omni Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
- Qwen3.6 Plus Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
- Nemotron 3 Super Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
- Big Pickle is a stealth model that's free on OpenCode for a limited time. The team is using this time to collect feedback and improve the model.
@@ -218,8 +212,6 @@ All our models are hosted in the US. Our providers follow a zero-retention polic
- Big Pickle: During its free period, collected data may be used to improve the model.
- MiniMax M2.5 Free: During its free period, collected data may be used to improve the model.
- MiMo V2 Pro Free: During its free period, collected data may be used to improve the model.
- MiMo V2 Omni Free: During its free period, collected data may be used to improve the model.
- Qwen3.6 Plus Free: During its free period, collected data may be used to improve the model.
- Nemotron 3 Super Free: During its free period, collected data may be used to improve the model.
- OpenAI APIs: Requests are retained for 30 days in accordance with [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).