Compare commits

...

16 Commits

Author SHA1 Message Date
Kit Langton
a95b5c7979 effectify createUserMessage: move into layer as createMessage, delete old 400-line async version 2026-03-28 21:16:05 -04:00
Kit Langton
ea147ae63d fix type errors after rebase: add casts for Session.updateMessage/updatePart returns 2026-03-28 21:09:10 -04:00
Kit Langton
2a2cf121e4 extract getModel helper, simplify Promise.resolve wrapper 2026-03-28 21:06:23 -04:00
Kit Langton
7ab4630d85 simplify title: consolidate Effect.promise calls, use fiber signal, shorten names 2026-03-28 21:06:23 -04:00
Kit Langton
04109e4d03 effectify ensureTitle: move into layer, use agents/sessions services, remove dead code 2026-03-28 21:06:21 -04:00
Kit Langton
524669f59a remove dead resolveCommand and resolvePromptPartsImpl 2026-03-28 21:06:21 -04:00
Kit Langton
2ac16d0616 effectify resolveCommand: use Command.Service, Agent, Plugin directly in layer 2026-03-28 21:06:20 -04:00
Kit Langton
1046895112 effectify resolvePromptParts: use AppFileSystem and Agent service directly 2026-03-28 21:06:19 -04:00
Kit Langton
23fabc9fd5 use SessionCompaction.Service and Plugin.Service directly in layer 2026-03-28 21:06:18 -04:00
Kit Langton
4744913855 use shorthand properties in Service.of, Fiber.interruptAll in finalizer 2026-03-28 21:06:18 -04:00
Kit Langton
818eb49de1 add Effect-based prompt tests covering loop lifecycle, cancel, concurrency
12 tests using testEffect + TestLLM pattern (no HTTP server):
- loop exits on stop finish, calls LLM for new messages, continues on tool-calls
- loop sets status busy→idle
- cancel interrupts cleanly, returns last assistant, records MessageAbortedError
- cancel with queued callers resolves all cleanly
- concurrent callers get same result via Deferred queue
- concurrent callers receive same error on failure
- assertNotBusy throws BusyError when running, succeeds when idle
- shell rejects with BusyError when loop running
2026-03-28 21:06:17 -04:00
Kit Langton
e26193408f use SessionProcessor.Service, Session.Service, Agent.Service directly in layer
Yield effectified services instead of going through async facades. Eliminates
Effect→Promise→Effect double-bounce for processor.create, processor.process,
Session.get/touch/setPermission/updateMessage, and Agent.get/list. Await cancel
in session route. Remove redundant InstanceState.get in shellE ensuring block.
2026-03-28 21:06:16 -04:00
Kit Langton
d7ae7609cd use Session.Service and Agent.Service directly instead of Effect.promise wrappers
Yield the effectified services in the layer and call their methods
directly, eliminating the double Effect→Promise→Effect bounce through
async facades. Layer.unwrap(Effect.sync(...)) breaks the circular
import. Also improves the assertNotBusy test with a proper gate/spy
so it deterministically catches the busy state.
2026-03-28 21:06:15 -04:00
Kit Langton
3a3dee7212 refactor(session): effectify SessionPrompt service
Migrate SessionPrompt to the Effect service pattern (Interface, Service,
Layer, InstanceState, makeRuntime + async facades).

Key design decisions:

- Fiber-based cancellation replaces manual AbortController management.
  Effect.promise((signal) => ...) derives AbortSignals automatically;
  cancel() interrupts fibers and signals propagate to the AI SDK,
  shell processes, and tool execution.

- Deferred queue replaces Promise callback queue. Concurrent loop()
  callers get a Deferred that resolves when the running fiber finishes.
  On cancel or error, queued callers now receive proper errors instead
  of hanging forever.

- Separate loops/shells maps in InstanceState replace the single shared
  state object, with shell-to-loop handoff preserved: if callers queue
  a loop while a shell is running, shellE cleanup starts the loop.

- Heavy helper functions (createUserMessage, handleSubtask, shellImpl,
  resolveCommand, insertReminders, ensureTitle) stay as plain async
  functions called via Effect.promise, keeping the migration incremental.

- resolveTools and createStructuredOutputTool are unchanged (deeply tied
  to AI SDK tool callbacks).
2026-03-28 21:06:14 -04:00
opencode
f0a9075fdf release: v1.3.4 2026-03-29 01:00:44 +00:00
Luke Parker
fee1e25ab4 ci: cancel stale nix-hashes runs (#19571) 2026-03-29 10:39:02 +10:00
25 changed files with 2177 additions and 1359 deletions

View File

@@ -17,6 +17,10 @@ on:
- "patches/**"
- ".github/workflows/nix-hashes.yml"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# Native runners required: bun install cross-compilation flags (--os/--cpu)
# do not produce byte-identical node_modules as native installs.

View File

@@ -26,7 +26,7 @@
},
"packages/app": {
"name": "@opencode-ai/app",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@kobalte/core": "catalog:",
"@opencode-ai/sdk": "workspace:*",
@@ -79,7 +79,7 @@
},
"packages/console/app": {
"name": "@opencode-ai/console-app",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@cloudflare/vite-plugin": "1.15.2",
"@ibm/plex": "6.4.1",
@@ -113,7 +113,7 @@
},
"packages/console/core": {
"name": "@opencode-ai/console-core",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@aws-sdk/client-sts": "3.782.0",
"@jsx-email/render": "1.1.1",
@@ -140,7 +140,7 @@
},
"packages/console/function": {
"name": "@opencode-ai/console-function",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@ai-sdk/anthropic": "3.0.64",
"@ai-sdk/openai": "3.0.48",
@@ -164,7 +164,7 @@
},
"packages/console/mail": {
"name": "@opencode-ai/console-mail",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@jsx-email/all": "2.2.3",
"@jsx-email/cli": "1.4.3",
@@ -188,7 +188,7 @@
},
"packages/desktop": {
"name": "@opencode-ai/desktop",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@opencode-ai/app": "workspace:*",
"@opencode-ai/ui": "workspace:*",
@@ -221,7 +221,7 @@
},
"packages/desktop-electron": {
"name": "@opencode-ai/desktop-electron",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@opencode-ai/app": "workspace:*",
"@opencode-ai/ui": "workspace:*",
@@ -252,7 +252,7 @@
},
"packages/enterprise": {
"name": "@opencode-ai/enterprise",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@opencode-ai/ui": "workspace:*",
"@opencode-ai/util": "workspace:*",
@@ -281,7 +281,7 @@
},
"packages/function": {
"name": "@opencode-ai/function",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@octokit/auth-app": "8.0.1",
"@octokit/rest": "catalog:",
@@ -297,7 +297,7 @@
},
"packages/opencode": {
"name": "opencode",
"version": "1.3.3",
"version": "1.3.4",
"bin": {
"opencode": "./bin/opencode",
},
@@ -422,7 +422,7 @@
},
"packages/plugin": {
"name": "@opencode-ai/plugin",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@opencode-ai/sdk": "workspace:*",
"zod": "catalog:",
@@ -456,7 +456,7 @@
},
"packages/sdk/js": {
"name": "@opencode-ai/sdk",
"version": "1.3.3",
"version": "1.3.4",
"devDependencies": {
"@hey-api/openapi-ts": "0.90.10",
"@tsconfig/node22": "catalog:",
@@ -467,7 +467,7 @@
},
"packages/slack": {
"name": "@opencode-ai/slack",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@opencode-ai/sdk": "workspace:*",
"@slack/bolt": "^3.17.1",
@@ -502,7 +502,7 @@
},
"packages/ui": {
"name": "@opencode-ai/ui",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@kobalte/core": "catalog:",
"@opencode-ai/sdk": "workspace:*",
@@ -549,7 +549,7 @@
},
"packages/util": {
"name": "@opencode-ai/util",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"zod": "catalog:",
},
@@ -560,7 +560,7 @@
},
"packages/web": {
"name": "@opencode-ai/web",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@astrojs/cloudflare": "12.6.3",
"@astrojs/markdown-remark": "6.3.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/app",
"version": "1.3.3",
"version": "1.3.4",
"description": "",
"type": "module",
"exports": {

View File

@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/console-app",
"version": "1.3.3",
"version": "1.3.4",
"type": "module",
"license": "MIT",
"scripts": {

View File

@@ -1,7 +1,7 @@
{
"$schema": "https://json.schemastore.org/package.json",
"name": "@opencode-ai/console-core",
"version": "1.3.3",
"version": "1.3.4",
"private": true,
"type": "module",
"license": "MIT",

View File

@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/console-function",
"version": "1.3.3",
"version": "1.3.4",
"$schema": "https://json.schemastore.org/package.json",
"private": true,
"type": "module",

View File

@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/console-mail",
"version": "1.3.3",
"version": "1.3.4",
"dependencies": {
"@jsx-email/all": "2.2.3",
"@jsx-email/cli": "1.4.3",

View File

@@ -1,7 +1,7 @@
{
"name": "@opencode-ai/desktop-electron",
"private": true,
"version": "1.3.3",
"version": "1.3.4",
"type": "module",
"license": "MIT",
"homepage": "https://opencode.ai",

View File

@@ -1,7 +1,7 @@
{
"name": "@opencode-ai/desktop",
"private": true,
"version": "1.3.3",
"version": "1.3.4",
"type": "module",
"license": "MIT",
"scripts": {

View File

@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/enterprise",
"version": "1.3.3",
"version": "1.3.4",
"private": true,
"type": "module",
"license": "MIT",

View File

@@ -1,7 +1,7 @@
id = "opencode"
name = "OpenCode"
description = "The open source coding agent."
version = "1.3.3"
version = "1.3.4"
schema_version = 1
authors = ["Anomaly"]
repository = "https://github.com/anomalyco/opencode"
@@ -11,26 +11,26 @@ name = "OpenCode"
icon = "./icons/opencode.svg"
[agent_servers.opencode.targets.darwin-aarch64]
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.3/opencode-darwin-arm64.zip"
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.4/opencode-darwin-arm64.zip"
cmd = "./opencode"
args = ["acp"]
[agent_servers.opencode.targets.darwin-x86_64]
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.3/opencode-darwin-x64.zip"
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.4/opencode-darwin-x64.zip"
cmd = "./opencode"
args = ["acp"]
[agent_servers.opencode.targets.linux-aarch64]
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.3/opencode-linux-arm64.tar.gz"
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.4/opencode-linux-arm64.tar.gz"
cmd = "./opencode"
args = ["acp"]
[agent_servers.opencode.targets.linux-x86_64]
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.3/opencode-linux-x64.tar.gz"
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.4/opencode-linux-x64.tar.gz"
cmd = "./opencode"
args = ["acp"]
[agent_servers.opencode.targets.windows-x86_64]
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.3/opencode-windows-x64.zip"
archive = "https://github.com/anomalyco/opencode/releases/download/v1.3.4/opencode-windows-x64.zip"
cmd = "./opencode.exe"
args = ["acp"]

View File

@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/function",
"version": "1.3.3",
"version": "1.3.4",
"$schema": "https://json.schemastore.org/package.json",
"private": true,
"type": "module",

View File

@@ -1,6 +1,6 @@
{
"$schema": "https://json.schemastore.org/package.json",
"version": "1.3.3",
"version": "1.3.4",
"name": "opencode",
"type": "module",
"license": "MIT",

View File

@@ -381,7 +381,7 @@ export const SessionRoutes = lazy(() =>
}),
),
async (c) => {
SessionPrompt.cancel(c.req.valid("param").sessionID)
await SessionPrompt.cancel(c.req.valid("param").sessionID)
return c.json(true)
},
)
@@ -699,7 +699,7 @@ export const SessionRoutes = lazy(() =>
),
async (c) => {
const params = c.req.valid("param")
SessionPrompt.assertNotBusy(params.sessionID)
await SessionPrompt.assertNotBusy(params.sessionID)
await Session.removeMessage({
sessionID: params.sessionID,
messageID: params.messageID,

File diff suppressed because it is too large Load Diff

View File

@@ -21,7 +21,7 @@ export namespace SessionRevert {
export type RevertInput = z.infer<typeof RevertInput>
export async function revert(input: RevertInput) {
SessionPrompt.assertNotBusy(input.sessionID)
await SessionPrompt.assertNotBusy(input.sessionID)
const all = await Session.messages({ sessionID: input.sessionID })
let lastUser: MessageV2.User | undefined
const session = await Session.get(input.sessionID)
@@ -80,7 +80,7 @@ export namespace SessionRevert {
export async function unrevert(input: { sessionID: SessionID }) {
log.info("unreverting", input)
SessionPrompt.assertNotBusy(input.sessionID)
await SessionPrompt.assertNotBusy(input.sessionID)
const session = await Session.get(input.sessionID)
if (!session.revert) return session
if (session.revert.snapshot) await Snapshot.restore(session.revert.snapshot)

View File

@@ -0,0 +1,247 @@
import { describe, expect, spyOn, test } from "bun:test"
import { Instance } from "../../src/project/instance"
import { Provider } from "../../src/provider/provider"
import { Session } from "../../src/session"
import { MessageV2 } from "../../src/session/message-v2"
import { SessionPrompt } from "../../src/session/prompt"
import { SessionStatus } from "../../src/session/status"
import { MessageID, PartID, SessionID } from "../../src/session/schema"
import { Log } from "../../src/util/log"
import { tmpdir } from "../fixture/fixture"
Log.init({ print: false })
function deferred() {
let resolve!: () => void
const promise = new Promise<void>((done) => {
resolve = done
})
return { promise, resolve }
}
// Helper: seed a session with a user message + finished assistant message
// so loop() exits immediately without calling any LLM
async function seed(sessionID: SessionID) {
const userMsg: MessageV2.Info = {
id: MessageID.ascending(),
role: "user",
sessionID,
time: { created: Date.now() },
agent: "build",
model: { providerID: "openai" as any, modelID: "gpt-5.2" as any },
}
await Session.updateMessage(userMsg)
await Session.updatePart({
id: PartID.ascending(),
messageID: userMsg.id,
sessionID,
type: "text",
text: "hello",
})
const assistantMsg: MessageV2.Info = {
id: MessageID.ascending(),
role: "assistant",
parentID: userMsg.id,
sessionID,
mode: "build",
agent: "build",
cost: 0,
path: { cwd: "/tmp", root: "/tmp" },
tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
modelID: "gpt-5.2" as any,
providerID: "openai" as any,
time: { created: Date.now(), completed: Date.now() },
finish: "stop",
}
await Session.updateMessage(assistantMsg)
await Session.updatePart({
id: PartID.ascending(),
messageID: assistantMsg.id,
sessionID,
type: "text",
text: "hi there",
})
return { userMsg, assistantMsg }
}
describe("session.prompt concurrency", () => {
test("loop returns assistant message and sets status to idle", async () => {
await using tmp = await tmpdir({ git: true })
await Instance.provide({
directory: tmp.path,
fn: async () => {
const session = await Session.create({})
await seed(session.id)
const result = await SessionPrompt.loop({ sessionID: session.id })
expect(result.info.role).toBe("assistant")
if (result.info.role === "assistant") expect(result.info.finish).toBe("stop")
const status = await SessionStatus.get(session.id)
expect(status.type).toBe("idle")
},
})
})
test("concurrent loop callers get the same result", async () => {
await using tmp = await tmpdir({ git: true })
await Instance.provide({
directory: tmp.path,
fn: async () => {
const session = await Session.create({})
await seed(session.id)
const [a, b] = await Promise.all([
SessionPrompt.loop({ sessionID: session.id }),
SessionPrompt.loop({ sessionID: session.id }),
])
expect(a.info.id).toBe(b.info.id)
expect(a.info.role).toBe("assistant")
},
})
})
test("assertNotBusy throws when loop is running", async () => {
await using tmp = await tmpdir({ git: true })
await Instance.provide({
directory: tmp.path,
fn: async () => {
const session = await Session.create({})
const userMsg: MessageV2.Info = {
id: MessageID.ascending(),
role: "user",
sessionID: session.id,
time: { created: Date.now() },
agent: "build",
model: { providerID: "openai" as any, modelID: "gpt-5.2" as any },
}
await Session.updateMessage(userMsg)
await Session.updatePart({
id: PartID.ascending(),
messageID: userMsg.id,
sessionID: session.id,
type: "text",
text: "hello",
})
const ready = deferred()
const gate = deferred()
const getModel = spyOn(Provider, "getModel").mockImplementation(async () => {
ready.resolve()
await gate.promise
throw new Error("test stop")
})
try {
const loopPromise = SessionPrompt.loop({ sessionID: session.id }).catch(() => undefined)
await ready.promise
await expect(SessionPrompt.assertNotBusy(session.id)).rejects.toBeInstanceOf(Session.BusyError)
gate.resolve()
await loopPromise
} finally {
gate.resolve()
getModel.mockRestore()
}
// After loop completes, assertNotBusy should succeed
await SessionPrompt.assertNotBusy(session.id)
},
})
})
test("cancel sets status to idle", async () => {
await using tmp = await tmpdir({ git: true })
await Instance.provide({
directory: tmp.path,
fn: async () => {
const session = await Session.create({})
// Seed only a user message — loop must call getModel to proceed
const userMsg: MessageV2.Info = {
id: MessageID.ascending(),
role: "user",
sessionID: session.id,
time: { created: Date.now() },
agent: "build",
model: { providerID: "openai" as any, modelID: "gpt-5.2" as any },
}
await Session.updateMessage(userMsg)
await Session.updatePart({
id: PartID.ascending(),
messageID: userMsg.id,
sessionID: session.id,
type: "text",
text: "hello",
})
// Also seed an assistant message so lastAssistant() fallback can find it
const assistantMsg: MessageV2.Info = {
id: MessageID.ascending(),
role: "assistant",
parentID: userMsg.id,
sessionID: session.id,
mode: "build",
agent: "build",
cost: 0,
path: { cwd: "/tmp", root: "/tmp" },
tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
modelID: "gpt-5.2" as any,
providerID: "openai" as any,
time: { created: Date.now() },
}
await Session.updateMessage(assistantMsg)
await Session.updatePart({
id: PartID.ascending(),
messageID: assistantMsg.id,
sessionID: session.id,
type: "text",
text: "hi there",
})
const ready = deferred()
const gate = deferred()
const getModel = spyOn(Provider, "getModel").mockImplementation(async () => {
ready.resolve()
await gate.promise
throw new Error("test stop")
})
try {
// Start loop — it will block in getModel (assistant has no finish, so loop continues)
const loopPromise = SessionPrompt.loop({ sessionID: session.id })
await ready.promise
await SessionPrompt.cancel(session.id)
const status = await SessionStatus.get(session.id)
expect(status.type).toBe("idle")
// loop should resolve cleanly, not throw "All fibers interrupted"
const result = await loopPromise
expect(result.info.role).toBe("assistant")
expect(result.info.id).toBe(assistantMsg.id)
} finally {
gate.resolve()
getModel.mockRestore()
}
},
})
}, 10000)
test("cancel on idle session just sets idle", async () => {
await using tmp = await tmpdir({ git: true })
await Instance.provide({
directory: tmp.path,
fn: async () => {
const session = await Session.create({})
await SessionPrompt.cancel(session.id)
const status = await SessionStatus.get(session.id)
expect(status.type).toBe("idle")
},
})
})
})

View File

@@ -0,0 +1,769 @@
import { NodeFileSystem } from "@effect/platform-node"
import { expect } from "bun:test"
import { Cause, Deferred, Effect, Exit, Fiber, Layer, ServiceMap } from "effect"
import * as Stream from "effect/Stream"
import path from "path"
import type { Agent } from "../../src/agent/agent"
import { Agent as AgentSvc } from "../../src/agent/agent"
import { Bus } from "../../src/bus"
import { Command } from "../../src/command"
import { Config } from "../../src/config/config"
import { Permission } from "../../src/permission"
import { Plugin } from "../../src/plugin"
import type { Provider } from "../../src/provider/provider"
import { ModelID, ProviderID } from "../../src/provider/schema"
import { Session } from "../../src/session"
import { LLM } from "../../src/session/llm"
import { MessageV2 } from "../../src/session/message-v2"
import { AppFileSystem } from "../../src/filesystem"
import { SessionCompaction } from "../../src/session/compaction"
import { SessionProcessor } from "../../src/session/processor"
import { SessionPrompt } from "../../src/session/prompt"
import { MessageID, PartID, SessionID } from "../../src/session/schema"
import { SessionStatus } from "../../src/session/status"
import { Snapshot } from "../../src/snapshot"
import { Log } from "../../src/util/log"
import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
import { provideTmpdirInstance } from "../fixture/fixture"
import { testEffect } from "../lib/effect"
Log.init({ print: false })
const ref = {
providerID: ProviderID.make("test"),
modelID: ModelID.make("test-model"),
}
type Script = Stream.Stream<LLM.Event, unknown> | ((input: LLM.StreamInput) => Stream.Stream<LLM.Event, unknown>)
class TestLLM extends ServiceMap.Service<
TestLLM,
{
readonly push: (stream: Script) => Effect.Effect<void>
readonly reply: (...items: LLM.Event[]) => Effect.Effect<void>
readonly calls: Effect.Effect<number>
readonly inputs: Effect.Effect<LLM.StreamInput[]>
}
>()("@test/PromptLLM") {}
function stream(...items: LLM.Event[]) {
return Stream.make(...items)
}
function usage(input = 1, output = 1, total = input + output) {
return {
inputTokens: input,
outputTokens: output,
totalTokens: total,
inputTokenDetails: {
noCacheTokens: undefined,
cacheReadTokens: undefined,
cacheWriteTokens: undefined,
},
outputTokenDetails: {
textTokens: undefined,
reasoningTokens: undefined,
},
}
}
function start(): LLM.Event {
return { type: "start" }
}
function textStart(id = "t"): LLM.Event {
return { type: "text-start", id }
}
function textDelta(id: string, text: string): LLM.Event {
return { type: "text-delta", id, text }
}
function textEnd(id = "t"): LLM.Event {
return { type: "text-end", id }
}
function finishStep(): LLM.Event {
return {
type: "finish-step",
finishReason: "stop",
rawFinishReason: "stop",
response: { id: "res", modelId: "test-model", timestamp: new Date() },
providerMetadata: undefined,
usage: usage(),
}
}
function finish(): LLM.Event {
return { type: "finish", finishReason: "stop", rawFinishReason: "stop", totalUsage: usage() }
}
function wait(abort: AbortSignal) {
return Effect.promise(
() =>
new Promise<void>((done) => {
abort.addEventListener("abort", () => done(), { once: true })
}),
)
}
function hang(input: LLM.StreamInput, ...items: LLM.Event[]) {
return stream(...items).pipe(
Stream.concat(
Stream.unwrap(wait(input.abort).pipe(Effect.as(Stream.fail(new DOMException("Aborted", "AbortError"))))),
),
)
}
function defer<T>() {
let resolve!: (value: T | PromiseLike<T>) => void
const promise = new Promise<T>((done) => {
resolve = done
})
return { promise, resolve }
}
const llm = Layer.unwrap(
Effect.gen(function* () {
const queue: Script[] = []
const inputs: LLM.StreamInput[] = []
let calls = 0
const push = Effect.fn("TestLLM.push")((item: Script) => {
queue.push(item)
return Effect.void
})
const reply = Effect.fn("TestLLM.reply")((...items: LLM.Event[]) => push(stream(...items)))
return Layer.mergeAll(
Layer.succeed(
LLM.Service,
LLM.Service.of({
stream: (input) => {
calls += 1
inputs.push(input)
const item = queue.shift() ?? Stream.empty
return typeof item === "function" ? item(input) : item
},
}),
),
Layer.succeed(
TestLLM,
TestLLM.of({
push,
reply,
calls: Effect.sync(() => calls),
inputs: Effect.sync(() => [...inputs]),
}),
),
)
}),
)
const status = SessionStatus.layer.pipe(Layer.provideMerge(Bus.layer))
const infra = Layer.mergeAll(NodeFileSystem.layer, CrossSpawnSpawner.defaultLayer)
const deps = Layer.mergeAll(
Session.defaultLayer,
Snapshot.defaultLayer,
AgentSvc.defaultLayer,
Command.defaultLayer,
Permission.layer,
Plugin.defaultLayer,
Config.defaultLayer,
AppFileSystem.defaultLayer,
status,
llm,
).pipe(Layer.provideMerge(infra))
const proc = SessionProcessor.layer.pipe(Layer.provideMerge(deps))
const compact = SessionCompaction.layer.pipe(Layer.provideMerge(proc), Layer.provideMerge(deps))
const env = SessionPrompt.layer.pipe(Layer.provideMerge(compact), Layer.provideMerge(proc), Layer.provideMerge(deps))
const it = testEffect(env)
// Config that registers a custom "test" provider with a "test-model" model
// so Provider.getModel("test", "test-model") succeeds inside the loop.
const cfg = {
provider: {
test: {
name: "Test",
id: "test",
env: [],
npm: "@ai-sdk/openai-compatible",
models: {
"test-model": {
id: "test-model",
name: "Test Model",
attachment: false,
reasoning: false,
temperature: false,
tool_call: true,
release_date: "2025-01-01",
limit: { context: 100000, output: 10000 },
cost: { input: 0, output: 0 },
options: {},
},
},
options: {
apiKey: "test-key",
baseURL: "http://localhost:1/v1",
},
},
},
}
const user = Effect.fn("test.user")(function* (sessionID: SessionID, text: string) {
const session = yield* Session.Service
const msg = yield* session.updateMessage({
id: MessageID.ascending(),
role: "user",
sessionID,
agent: "build",
model: ref,
time: { created: Date.now() },
})
yield* session.updatePart({
id: PartID.ascending(),
messageID: msg.id,
sessionID,
type: "text",
text,
})
return msg
})
const seed = Effect.fn("test.seed")(function* (sessionID: SessionID, opts?: { finish?: string }) {
const session = yield* Session.Service
const msg = yield* user(sessionID, "hello")
const assistant: MessageV2.Assistant = {
id: MessageID.ascending(),
role: "assistant",
parentID: msg.id,
sessionID,
mode: "build",
agent: "build",
cost: 0,
path: { cwd: "/tmp", root: "/tmp" },
tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
modelID: ref.modelID,
providerID: ref.providerID,
time: { created: Date.now() },
...(opts?.finish ? { finish: opts.finish } : {}),
}
yield* session.updateMessage(assistant)
yield* session.updatePart({
id: PartID.ascending(),
messageID: assistant.id,
sessionID,
type: "text",
text: "hi there",
})
return { user: msg, assistant }
})
// Priority 1: Loop lifecycle
it.effect("loop exits immediately when last assistant has stop finish", () =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const test = yield* TestLLM
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
const chat = yield* sessions.create({})
yield* seed(chat.id, { finish: "stop" })
const result = yield* prompt.loop({ sessionID: chat.id })
expect(result.info.role).toBe("assistant")
if (result.info.role === "assistant") expect(result.info.finish).toBe("stop")
expect(yield* test.calls).toBe(0)
}),
{ git: true },
),
)
it.effect("loop calls LLM and returns assistant message", () =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const test = yield* TestLLM
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
yield* test.reply(start(), textStart(), textDelta("t", "world"), textEnd(), finishStep(), finish())
const chat = yield* sessions.create({})
yield* user(chat.id, "hello")
const result = yield* prompt.loop({ sessionID: chat.id })
expect(result.info.role).toBe("assistant")
const parts = result.parts.filter((p) => p.type === "text")
expect(parts.some((p) => p.type === "text" && p.text === "world")).toBe(true)
expect(yield* test.calls).toBe(1)
}),
{ git: true, config: cfg },
),
)
it.effect("loop continues when finish is tool-calls", () =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const test = yield* TestLLM
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
// First reply finishes with tool-calls, second with stop
yield* test.reply(
start(),
textStart(),
textDelta("t", "first"),
textEnd(),
{
type: "finish-step",
finishReason: "tool-calls",
rawFinishReason: "tool_calls",
response: { id: "res", modelId: "test-model", timestamp: new Date() },
providerMetadata: undefined,
usage: usage(),
},
{ type: "finish", finishReason: "tool-calls", rawFinishReason: "tool_calls", totalUsage: usage() },
)
yield* test.reply(start(), textStart(), textDelta("t", "second"), textEnd(), finishStep(), finish())
const chat = yield* sessions.create({})
yield* user(chat.id, "hello")
const result = yield* prompt.loop({ sessionID: chat.id })
expect(yield* test.calls).toBe(2)
expect(result.info.role).toBe("assistant")
}),
{ git: true, config: cfg },
),
)
it.effect("loop sets status to busy then idle", () =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const test = yield* TestLLM
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
const bus = yield* Bus.Service
yield* test.reply(start(), textStart(), textDelta("t", "ok"), textEnd(), finishStep(), finish())
const chat = yield* sessions.create({})
yield* user(chat.id, "hi")
const types: string[] = []
const idle = defer<void>()
const off = yield* bus.subscribeCallback(SessionStatus.Event.Status, (evt) => {
if (evt.properties.sessionID !== chat.id) return
types.push(evt.properties.status.type)
if (evt.properties.status.type === "idle") idle.resolve()
})
yield* prompt.loop({ sessionID: chat.id })
yield* Effect.promise(() => idle.promise)
off()
expect(types).toContain("busy")
expect(types[types.length - 1]).toBe("idle")
}),
{ git: true, config: cfg },
),
)
// Priority 2: Cancel safety
it.effect(
"cancel interrupts loop and returns last assistant",
() =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const test = yield* TestLLM
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
const chat = yield* sessions.create({})
yield* seed(chat.id)
// Make LLM hang so the loop blocks
yield* test.push((input) => hang(input, start()))
// Seed a new user message so the loop enters the LLM path
yield* user(chat.id, "more")
const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
// Give the loop time to start
yield* Effect.promise(() => new Promise<void>((r) => setTimeout(r, 200)))
yield* prompt.cancel(chat.id)
const exit = yield* Fiber.await(fiber)
expect(Exit.isSuccess(exit)).toBe(true)
if (Exit.isSuccess(exit)) {
expect(exit.value.info.role).toBe("assistant")
}
}),
{ git: true, config: cfg },
),
30_000,
)
it.effect(
"cancel records MessageAbortedError on interrupted process",
() =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const ready = defer<void>()
const test = yield* TestLLM
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
yield* test.push((input) =>
hang(input, start()).pipe(
Stream.tap((event) => (event.type === "start" ? Effect.sync(() => ready.resolve()) : Effect.void)),
),
)
const chat = yield* sessions.create({})
yield* user(chat.id, "hello")
const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
yield* Effect.promise(() => ready.promise)
yield* prompt.cancel(chat.id)
const exit = yield* Fiber.await(fiber)
expect(Exit.isSuccess(exit)).toBe(true)
if (Exit.isSuccess(exit)) {
const info = exit.value.info
if (info.role === "assistant") {
expect(info.error?.name).toBe("MessageAbortedError")
}
}
}),
{ git: true, config: cfg },
),
30_000,
)
// Cancelling while one caller's loop is live and a second caller is queued
// behind it must settle every waiter without surfacing a defect.
it.effect(
  "cancel with queued callers resolves all cleanly",
  () =>
    provideTmpdirInstance(
      (dir) =>
        Effect.gen(function* () {
          // Latch resolved once the mock LLM stream emits its first event.
          const ready = defer<void>()
          const test = yield* TestLLM
          const prompt = yield* SessionPrompt.Service
          const sessions = yield* Session.Service
          // Stream that hangs after "start", keeping the first loop busy.
          yield* test.push((input) =>
            hang(input, start()).pipe(
              Stream.tap((event) => (event.type === "start" ? Effect.sync(() => ready.resolve()) : Effect.void)),
            ),
          )
          const chat = yield* sessions.create({})
          yield* user(chat.id, "hello")
          const a = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
          yield* Effect.promise(() => ready.promise)
          // Queue a second caller
          const b = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
          // Raw setTimeout pause (real wall-clock time) so caller B is
          // enqueued before we cancel.
          yield* Effect.promise(() => new Promise<void>((r) => setTimeout(r, 50)))
          yield* prompt.cancel(chat.id)
          const [exitA, exitB] = yield* Effect.all([Fiber.await(a), Fiber.await(b)])
          // Both should resolve (success or interrupt, not error)
          expect(Exit.isFailure(exitA) && !Cause.hasInterruptsOnly(exitA.cause)).toBe(false)
          expect(Exit.isFailure(exitB) && !Cause.hasInterruptsOnly(exitB.cause)).toBe(false)
        }),
      { git: true, config: cfg },
    ),
  30_000,
)
// Priority 3: Deferred queue
// Concurrent loop requests for one session must be deduplicated: both
// callers resolve with the same assistant message.
it.effect("concurrent loop callers get same result", () =>
  provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        // NOTE(review): the previous version also bound `yield* TestLLM` to a
        // local that was never referenced; `seed` drives the mock LLM itself.
        const prompt = yield* SessionPrompt.Service
        const sessions = yield* Session.Service
        const chat = yield* sessions.create({})
        // Seed a scripted reply that ends with a "stop" finish.
        yield* seed(chat.id, { finish: "stop" })
        const [a, b] = yield* Effect.all([prompt.loop({ sessionID: chat.id }), prompt.loop({ sessionID: chat.id })], {
          concurrency: "unbounded",
        })
        // Same message identity for both callers, and it is the assistant's.
        expect(a.info.id).toBe(b.info.id)
        expect(a.info.role).toBe("assistant")
      }),
    { git: true },
  ),
)
it.effect("concurrent loop callers all receive same error result", () =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const test = yield* TestLLM
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
// Push a stream that fails — the loop records the error on the assistant message
yield* test.push(Stream.fail(new Error("boom")))
const chat = yield* sessions.create({})
yield* user(chat.id, "hello")
const [a, b] = yield* Effect.all([prompt.loop({ sessionID: chat.id }), prompt.loop({ sessionID: chat.id })], {
concurrency: "unbounded",
})
// Both callers get the same assistant with an error recorded
expect(a.info.id).toBe(b.info.id)
expect(a.info.role).toBe("assistant")
if (a.info.role === "assistant") {
expect(a.info.error).toBeDefined()
}
}),
{ git: true, config: cfg },
),
)
it.effect(
"assertNotBusy throws BusyError when loop running",
() =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const ready = defer<void>()
const test = yield* TestLLM
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
yield* test.push((input) =>
hang(input, start()).pipe(
Stream.tap((event) => (event.type === "start" ? Effect.sync(() => ready.resolve()) : Effect.void)),
),
)
const chat = yield* sessions.create({})
yield* user(chat.id, "hi")
const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
yield* Effect.promise(() => ready.promise)
const exit = yield* prompt.assertNotBusy(chat.id).pipe(Effect.exit)
expect(Exit.isFailure(exit)).toBe(true)
yield* prompt.cancel(chat.id)
yield* Fiber.await(fiber)
}),
{ git: true, config: cfg },
),
30_000,
)
it.effect("assertNotBusy succeeds when idle", () =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
const chat = yield* sessions.create({})
const exit = yield* prompt.assertNotBusy(chat.id).pipe(Effect.exit)
expect(Exit.isSuccess(exit)).toBe(true)
}),
{ git: true },
),
)
// Priority 4: Shell basics
it.effect(
"shell rejects with BusyError when loop running",
() =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const ready = defer<void>()
const test = yield* TestLLM
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
yield* test.push((input) =>
hang(input, start()).pipe(
Stream.tap((event) => (event.type === "start" ? Effect.sync(() => ready.resolve()) : Effect.void)),
),
)
const chat = yield* sessions.create({})
yield* user(chat.id, "hi")
const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
yield* Effect.promise(() => ready.promise)
const exit = yield* prompt.shell({ sessionID: chat.id, agent: "build", command: "echo hi" }).pipe(Effect.exit)
expect(Exit.isFailure(exit)).toBe(true)
yield* prompt.cancel(chat.id)
yield* Fiber.await(fiber)
}),
{ git: true, config: cfg },
),
30_000,
)
// Shell and loop appear mutually exclusive here: a loop requested while a
// shell runs makes no LLM call until the shell exits, then proceeds normally.
it.effect(
  "loop waits while shell runs and starts after shell exits",
  () =>
    provideTmpdirInstance(
      (dir) =>
        Effect.gen(function* () {
          const test = yield* TestLLM
          const prompt = yield* SessionPrompt.Service
          const sessions = yield* Session.Service
          // Scripted LLM reply the loop will consume once it finally runs.
          yield* test.reply(start(), textStart(), textDelta("t", "after-shell"), textEnd(), finishStep(), finish())
          const chat = yield* sessions.create({})
          // Occupy the session with a short shell command.
          const sh = yield* prompt
            .shell({ sessionID: chat.id, agent: "build", command: "sleep 0.2" })
            .pipe(Effect.forkChild)
          // Raw setTimeout pauses (real wall-clock time): first lets the shell
          // start before the loop is requested, second lets the loop settle.
          yield* Effect.promise(() => new Promise<void>((done) => setTimeout(done, 50)))
          const run = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
          yield* Effect.promise(() => new Promise<void>((done) => setTimeout(done, 50)))
          // While the shell is still running, the LLM must not have been called.
          expect(yield* test.calls).toBe(0)
          yield* Fiber.await(sh)
          const exit = yield* Fiber.await(run)
          expect(Exit.isSuccess(exit)).toBe(true)
          if (Exit.isSuccess(exit)) {
            // The loop completed with the scripted assistant reply.
            expect(exit.value.info.role).toBe("assistant")
            expect(exit.value.parts.some((part) => part.type === "text" && part.text === "after-shell")).toBe(true)
          }
          // Exactly one LLM call — made only after the shell exited.
          expect(yield* test.calls).toBe(1)
        }),
      { git: true, config: cfg },
    ),
  30_000,
)
// With multiple loop callers queued behind a running shell, shell completion
// must resume them all; they resolve to one shared assistant message.
it.effect(
  "shell completion resumes queued loop callers",
  () =>
    provideTmpdirInstance(
      (dir) =>
        Effect.gen(function* () {
          const test = yield* TestLLM
          const prompt = yield* SessionPrompt.Service
          const sessions = yield* Session.Service
          // Scripted reply consumed by the single loop run after the shell.
          yield* test.reply(start(), textStart(), textDelta("t", "done"), textEnd(), finishStep(), finish())
          const chat = yield* sessions.create({})
          const sh = yield* prompt
            .shell({ sessionID: chat.id, agent: "build", command: "sleep 0.2" })
            .pipe(Effect.forkChild)
          // Raw setTimeout pause (real time) so the shell is running before
          // the loop callers are queued.
          yield* Effect.promise(() => new Promise<void>((done) => setTimeout(done, 50)))
          const a = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
          const b = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild)
          yield* Effect.promise(() => new Promise<void>((done) => setTimeout(done, 50)))
          // Both callers blocked: no LLM call while the shell runs.
          expect(yield* test.calls).toBe(0)
          yield* Fiber.await(sh)
          const [ea, eb] = yield* Effect.all([Fiber.await(a), Fiber.await(b)])
          expect(Exit.isSuccess(ea)).toBe(true)
          expect(Exit.isSuccess(eb)).toBe(true)
          if (Exit.isSuccess(ea) && Exit.isSuccess(eb)) {
            // Deduped: both callers resolve with the same assistant message.
            expect(ea.value.info.id).toBe(eb.value.info.id)
            expect(ea.value.info.role).toBe("assistant")
          }
          // The queued callers shared a single LLM call.
          expect(yield* test.calls).toBe(1)
        }),
      { git: true, config: cfg },
    ),
  30_000,
)
it.effect(
"cancel interrupts shell and resolves cleanly",
() =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
const chat = yield* sessions.create({})
const sh = yield* prompt
.shell({ sessionID: chat.id, agent: "build", command: "sleep 30" })
.pipe(Effect.forkChild)
yield* Effect.promise(() => new Promise<void>((done) => setTimeout(done, 50)))
yield* prompt.cancel(chat.id)
const exit = yield* Fiber.await(sh)
expect(Exit.isSuccess(exit)).toBe(true)
if (Exit.isSuccess(exit)) {
expect(exit.value.info.role).toBe("assistant")
expect(exit.value.parts.some((part) => part.type === "tool")).toBe(true)
}
const status = yield* SessionStatus.Service
expect((yield* status.get(chat.id)).type).toBe("idle")
const busy = yield* prompt.assertNotBusy(chat.id).pipe(Effect.exit)
expect(Exit.isSuccess(busy)).toBe(true)
}),
{ git: true, config: cfg },
),
30_000,
)
it.effect(
"shell rejects when another shell is already running",
() =>
provideTmpdirInstance(
(dir) =>
Effect.gen(function* () {
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
const chat = yield* sessions.create({})
const a = yield* prompt
.shell({ sessionID: chat.id, agent: "build", command: "sleep 30" })
.pipe(Effect.forkChild)
yield* Effect.promise(() => new Promise<void>((done) => setTimeout(done, 50)))
const exit = yield* prompt.shell({ sessionID: chat.id, agent: "build", command: "echo hi" }).pipe(Effect.exit)
expect(Exit.isFailure(exit)).toBe(true)
yield* prompt.cancel(chat.id)
yield* Fiber.await(a)
}),
{ git: true, config: cfg },
),
30_000,
)

View File

@@ -1,7 +1,7 @@
{
"$schema": "https://json.schemastore.org/package.json",
"name": "@opencode-ai/plugin",
"version": "1.3.3",
"version": "1.3.4",
"type": "module",
"license": "MIT",
"scripts": {

View File

@@ -1,7 +1,7 @@
{
"$schema": "https://json.schemastore.org/package.json",
"name": "@opencode-ai/sdk",
"version": "1.3.3",
"version": "1.3.4",
"type": "module",
"license": "MIT",
"scripts": {

View File

@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/slack",
"version": "1.3.3",
"version": "1.3.4",
"type": "module",
"license": "MIT",
"scripts": {

View File

@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/ui",
"version": "1.3.3",
"version": "1.3.4",
"type": "module",
"license": "MIT",
"exports": {

View File

@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/util",
"version": "1.3.3",
"version": "1.3.4",
"private": true,
"type": "module",
"license": "MIT",

View File

@@ -2,7 +2,7 @@
"name": "@opencode-ai/web",
"type": "module",
"license": "MIT",
"version": "1.3.3",
"version": "1.3.4",
"scripts": {
"dev": "astro dev",
"dev:remote": "VITE_API_URL=https://api.opencode.ai astro dev",

View File

@@ -2,7 +2,7 @@
"name": "opencode",
"displayName": "opencode",
"description": "opencode for VS Code",
"version": "1.3.3",
"version": "1.3.4",
"publisher": "sst-dev",
"repository": {
"type": "git",