Mirror of https://github.com/anomalyco/opencode.git (synced 2026-02-01 22:48:16 +00:00)
LLM cleanup (#5462)
Co-authored-by: GitHub Action <action@github.com>
Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com>
@@ -10,4 +10,5 @@
"options": {},
},
},
"mcp": {},
}

@@ -78,7 +78,7 @@ export const { use: useLocal, provider: LocalProvider } = createSimpleContext({
})

const agent = (() => {
const list = createMemo(() => sync.data.agent.filter((x) => x.mode !== "subagent"))
const list = createMemo(() => sync.data.agent.filter((x) => x.mode !== "subagent" && !x.hidden))
const [store, setStore] = createStore<{
current: string
}>({

@@ -2,18 +2,24 @@ import { Config } from "../config/config"
import z from "zod"
import { Provider } from "../provider/provider"
import { generateObject, type ModelMessage } from "ai"
import PROMPT_GENERATE from "./generate.txt"
import { SystemPrompt } from "../session/system"
import { Instance } from "../project/instance"
import { mergeDeep } from "remeda"

import PROMPT_GENERATE from "./generate.txt"
import PROMPT_COMPACTION from "./prompt/compaction.txt"
import PROMPT_EXPLORE from "./prompt/explore.txt"
import PROMPT_SUMMARY from "./prompt/summary.txt"
import PROMPT_TITLE from "./prompt/title.txt"

export namespace Agent {
export const Info = z
.object({
name: z.string(),
description: z.string().optional(),
mode: z.enum(["subagent", "primary", "all"]),
builtIn: z.boolean(),
native: z.boolean().optional(),
hidden: z.boolean().optional(),
topP: z.number().optional(),
temperature: z.number().optional(),
color: z.string().optional(),
@@ -112,7 +118,8 @@ export namespace Agent {
options: {},
permission: agentPermission,
mode: "subagent",
builtIn: true,
native: true,
hidden: true,
},
explore: {
name: "explore",
@@ -124,30 +131,23 @@ export namespace Agent {
...defaultTools,
},
description: `Fast agent specialized for exploring codebases. Use this when you need to quickly find files by patterns (eg. "src/components/**/*.tsx"), search code for keywords (eg. "API endpoints"), or answer questions about the codebase (eg. "how do API endpoints work?"). When calling this agent, specify the desired thoroughness level: "quick" for basic searches, "medium" for moderate exploration, or "very thorough" for comprehensive analysis across multiple locations and naming conventions.`,
prompt: [
`You are a file search specialist. You excel at thoroughly navigating and exploring codebases.`,
``,
`Your strengths:`,
`- Rapidly finding files using glob patterns`,
`- Searching code and text with powerful regex patterns`,
`- Reading and analyzing file contents`,
``,
`Guidelines:`,
`- Use Glob for broad file pattern matching`,
`- Use Grep for searching file contents with regex`,
`- Use Read when you know the specific file path you need to read`,
`- Use Bash for file operations like copying, moving, or listing directory contents`,
`- Adapt your search approach based on the thoroughness level specified by the caller`,
`- Return file paths as absolute paths in your final response`,
`- For clear communication, avoid using emojis`,
`- Do not create any files, or run bash commands that modify the user's system state in any way`,
``,
`Complete the user's search request efficiently and report your findings clearly.`,
].join("\n"),
prompt: PROMPT_EXPLORE,
options: {},
permission: agentPermission,
mode: "subagent",
builtIn: true,
native: true,
},
compaction: {
name: "compaction",
mode: "primary",
native: true,
hidden: true,
prompt: PROMPT_COMPACTION,
tools: {
"*": false,
},
options: {},
permission: agentPermission,
},
build: {
name: "build",
@@ -155,7 +155,27 @@ export namespace Agent {
options: {},
permission: agentPermission,
mode: "primary",
builtIn: true,
native: true,
},
title: {
name: "title",
mode: "primary",
options: {},
native: true,
hidden: true,
permission: agentPermission,
prompt: PROMPT_TITLE,
tools: {},
},
summary: {
name: "summary",
mode: "primary",
options: {},
native: true,
hidden: true,
permission: agentPermission,
prompt: PROMPT_SUMMARY,
tools: {},
},
plan: {
name: "plan",
@@ -165,7 +185,7 @@ export namespace Agent {
...defaultTools,
},
mode: "primary",
builtIn: true,
native: true,
},
}
for (const [key, value] of Object.entries(cfg.agent ?? {})) {
@@ -181,7 +201,7 @@ export namespace Agent {
permission: agentPermission,
options: {},
tools: {},
builtIn: false,
native: false,
}
const {
name,

packages/opencode/src/agent/prompt/explore.txt (new file, 18 lines)
@@ -0,0 +1,18 @@
You are a file search specialist. You excel at thoroughly navigating and exploring codebases.

Your strengths:
- Rapidly finding files using glob patterns
- Searching code and text with powerful regex patterns
- Reading and analyzing file contents

Guidelines:
- Use Glob for broad file pattern matching
- Use Grep for searching file contents with regex
- Use Read when you know the specific file path you need to read
- Use Bash for file operations like copying, moving, or listing directory contents
- Adapt your search approach based on the thoroughness level specified by the caller
- Return file paths as absolute paths in your final response
- For clear communication, avoid using emojis
- Do not create any files, or run bash commands that modify the user's system state in any way

Complete the user's search request efficiently and report your findings clearly.
@@ -22,8 +22,8 @@ Your output must be:
- The title should NEVER include "summarizing" or "generating" when generating a title
- DO NOT SAY YOU CANNOT GENERATE A TITLE OR COMPLAIN ABOUT THE INPUT
- Always output something meaningful, even if the input is minimal.
- If the user message is short or conversational (e.g. “hello”, “lol”, “whats up”, “hey”):
→ create a title that reflects the user’s tone or intent (such as Greeting, Quick check-in, Light chat, Intro message, etc.)
- If the user message is short or conversational (e.g. "hello", "lol", "whats up", "hey"):
→ create a title that reflects the user's tone or intent (such as Greeting, Quick check-in, Light chat, Intro message, etc.)
</rules>

<examples>
@@ -227,8 +227,8 @@ const AgentListCommand = cmd({
async fn() {
const agents = await Agent.list()
const sortedAgents = agents.sort((a, b) => {
if (a.builtIn !== b.builtIn) {
return a.builtIn ? -1 : 1
if (a.native !== b.native) {
return a.native ? -1 : 1
}
return a.name.localeCompare(b.name)
})

@@ -12,7 +12,7 @@ export function DialogAgent() {
return {
value: item.name,
title: item.name,
description: item.builtIn ? "native" : item.description,
description: item.native ? "native" : item.description,
}
}),
)

@@ -184,7 +184,7 @@ export function Autocomplete(props: {
const agents = createMemo(() => {
const agents = sync.data.agent
return agents
.filter((agent) => !agent.builtIn && agent.mode !== "primary")
.filter((agent) => !agent.hidden && agent.mode !== "primary")
.map(
(agent): AutocompleteOption => ({
display: "@" + agent.name,

@@ -52,7 +52,7 @@ export const { use: useLocal, provider: LocalProvider } = createSimpleContext({
})

const agent = iife(() => {
const agents = createMemo(() => sync.data.agent.filter((x) => x.mode !== "subagent"))
const agents = createMemo(() => sync.data.agent.filter((x) => x.mode !== "subagent" && !x.hidden))
const [agentStore, setAgentStore] = createStore<{
current: string
}>({

@@ -858,7 +858,7 @@ export namespace Provider {
return info
}

export async function getLanguage(model: Model) {
export async function getLanguage(model: Model): Promise<LanguageModelV2> {
const s = await state()
const key = `${model.providerID}/${model.id}`
if (s.models.has(key)) return s.models.get(key)!

@@ -1,22 +1,18 @@
|
||||
import { BusEvent } from "@/bus/bus-event"
|
||||
import { Bus } from "@/bus"
|
||||
import { wrapLanguageModel, type ModelMessage } from "ai"
|
||||
import { Session } from "."
|
||||
import { Identifier } from "../id/id"
|
||||
import { Instance } from "../project/instance"
|
||||
import { Provider } from "../provider/provider"
|
||||
import { MessageV2 } from "./message-v2"
|
||||
import { SystemPrompt } from "./system"
|
||||
import z from "zod"
|
||||
import { SessionPrompt } from "./prompt"
|
||||
import { Flag } from "../flag/flag"
|
||||
import { Token } from "../util/token"
|
||||
import { Config } from "../config/config"
|
||||
import { Log } from "../util/log"
|
||||
import { ProviderTransform } from "@/provider/transform"
|
||||
import { SessionProcessor } from "./processor"
|
||||
import { fn } from "@/util/fn"
|
||||
import { mergeDeep, pipe } from "remeda"
|
||||
import { Agent } from "@/agent/agent"
|
||||
|
||||
export namespace SessionCompaction {
|
||||
const log = Log.create({ service: "session.compaction" })
|
||||
@@ -90,24 +86,21 @@ export namespace SessionCompaction {
|
||||
parentID: string
|
||||
messages: MessageV2.WithParts[]
|
||||
sessionID: string
|
||||
model: {
|
||||
providerID: string
|
||||
modelID: string
|
||||
}
|
||||
agent: string
|
||||
abort: AbortSignal
|
||||
auto: boolean
|
||||
}) {
|
||||
const cfg = await Config.get()
|
||||
const model = await Provider.getModel(input.model.providerID, input.model.modelID)
|
||||
const language = await Provider.getLanguage(model)
|
||||
const system = [...SystemPrompt.compaction(model.providerID)]
|
||||
const userMessage = input.messages.findLast((m) => m.info.id === input.parentID)!.info as MessageV2.User
|
||||
const agent = await Agent.get("compaction")
|
||||
const model = agent.model
|
||||
? await Provider.getModel(agent.model.providerID, agent.model.modelID)
|
||||
: await Provider.getModel(userMessage.model.providerID, userMessage.model.modelID)
|
||||
const msg = (await Session.updateMessage({
|
||||
id: Identifier.ascending("message"),
|
||||
role: "assistant",
|
||||
parentID: input.parentID,
|
||||
sessionID: input.sessionID,
|
||||
mode: input.agent,
|
||||
mode: "compaction",
|
||||
agent: "compaction",
|
||||
summary: true,
|
||||
path: {
|
||||
cwd: Instance.directory,
|
||||
@@ -120,7 +113,7 @@ export namespace SessionCompaction {
|
||||
reasoning: 0,
|
||||
cache: { read: 0, write: 0 },
|
||||
},
|
||||
modelID: input.model.modelID,
|
||||
modelID: model.id,
|
||||
providerID: model.providerID,
|
||||
time: {
|
||||
created: Date.now(),
|
||||
@@ -129,46 +122,18 @@ export namespace SessionCompaction {
|
||||
const processor = SessionProcessor.create({
|
||||
assistantMessage: msg,
|
||||
sessionID: input.sessionID,
|
||||
model: model,
|
||||
model,
|
||||
abort: input.abort,
|
||||
})
|
||||
const result = await processor.process({
|
||||
onError(error) {
|
||||
log.error("stream error", {
|
||||
error,
|
||||
})
|
||||
},
|
||||
// set to 0, we handle loop
|
||||
maxRetries: 0,
|
||||
providerOptions: ProviderTransform.providerOptions(
|
||||
model,
|
||||
pipe({}, mergeDeep(ProviderTransform.options(model, input.sessionID)), mergeDeep(model.options)),
|
||||
),
|
||||
headers: model.headers,
|
||||
abortSignal: input.abort,
|
||||
tools: model.capabilities.toolcall ? {} : undefined,
|
||||
user: userMessage,
|
||||
agent,
|
||||
abort: input.abort,
|
||||
sessionID: input.sessionID,
|
||||
tools: {},
|
||||
system: [],
|
||||
messages: [
|
||||
...system.map(
|
||||
(x): ModelMessage => ({
|
||||
role: "system",
|
||||
content: x,
|
||||
}),
|
||||
),
|
||||
...MessageV2.toModelMessage(
|
||||
input.messages.filter((m) => {
|
||||
if (m.info.role !== "assistant" || m.info.error === undefined) {
|
||||
return true
|
||||
}
|
||||
if (
|
||||
MessageV2.AbortedError.isInstance(m.info.error) &&
|
||||
m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning")
|
||||
) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}),
|
||||
),
|
||||
...MessageV2.toModelMessage(input.messages),
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
@@ -179,28 +144,9 @@ export namespace SessionCompaction {
|
||||
],
|
||||
},
|
||||
],
|
||||
model: wrapLanguageModel({
|
||||
model: language,
|
||||
middleware: [
|
||||
{
|
||||
async transformParams(args) {
|
||||
if (args.type === "stream") {
|
||||
// @ts-expect-error
|
||||
args.params.prompt = ProviderTransform.message(args.params.prompt, model)
|
||||
}
|
||||
return args.params
|
||||
},
|
||||
},
|
||||
],
|
||||
}),
|
||||
experimental_telemetry: {
|
||||
isEnabled: cfg.experimental?.openTelemetry,
|
||||
metadata: {
|
||||
userId: cfg.username ?? "unknown",
|
||||
sessionId: input.sessionID,
|
||||
},
|
||||
},
|
||||
model,
|
||||
})
|
||||
|
||||
if (result === "continue" && input.auto) {
|
||||
const continueMsg = await Session.updateMessage({
|
||||
id: Identifier.ascending("message"),
|
||||
@@ -209,8 +155,8 @@ export namespace SessionCompaction {
|
||||
time: {
|
||||
created: Date.now(),
|
||||
},
|
||||
agent: input.agent,
|
||||
model: input.model,
|
||||
agent: userMessage.agent,
|
||||
model: userMessage.model,
|
||||
})
|
||||
await Session.updatePart({
|
||||
id: Identifier.ascending("part"),
|
||||
|
||||
184
packages/opencode/src/session/llm.ts
Normal file
184
packages/opencode/src/session/llm.ts
Normal file
@@ -0,0 +1,184 @@
|
||||
import { Provider } from "@/provider/provider"
|
||||
import { Log } from "@/util/log"
|
||||
import { streamText, wrapLanguageModel, type ModelMessage, type StreamTextResult, type Tool, type ToolSet } from "ai"
|
||||
import { mergeDeep, pipe } from "remeda"
|
||||
import { ProviderTransform } from "@/provider/transform"
|
||||
import { Config } from "@/config/config"
|
||||
import { Instance } from "@/project/instance"
|
||||
import type { Agent } from "@/agent/agent"
|
||||
import type { MessageV2 } from "./message-v2"
|
||||
import { Plugin } from "@/plugin"
|
||||
import { SystemPrompt } from "./system"
|
||||
import { ToolRegistry } from "@/tool/registry"
|
||||
import { Flag } from "@/flag/flag"
|
||||
|
||||
export namespace LLM {
|
||||
const log = Log.create({ service: "llm" })
|
||||
|
||||
export const OUTPUT_TOKEN_MAX = 32_000
|
||||
|
||||
export type StreamInput = {
|
||||
user: MessageV2.User
|
||||
sessionID: string
|
||||
model: Provider.Model
|
||||
agent: Agent.Info
|
||||
system: string[]
|
||||
abort: AbortSignal
|
||||
messages: ModelMessage[]
|
||||
small?: boolean
|
||||
tools: Record<string, Tool>
|
||||
retries?: number
|
||||
}
|
||||
|
||||
export type StreamOutput = StreamTextResult<ToolSet, unknown>
|
||||
|
||||
export async function stream(input: StreamInput) {
|
||||
const l = log
|
||||
.clone()
|
||||
.tag("providerID", input.model.providerID)
|
||||
.tag("modelID", input.model.id)
|
||||
.tag("sessionID", input.sessionID)
|
||||
.tag("small", (input.small ?? false).toString())
|
||||
.tag("agent", input.agent.name)
|
||||
l.info("stream", {
|
||||
modelID: input.model.id,
|
||||
providerID: input.model.providerID,
|
||||
})
|
||||
const [language, cfg] = await Promise.all([Provider.getLanguage(input.model), Config.get()])
|
||||
|
||||
const system = SystemPrompt.header(input.model.providerID)
|
||||
system.push(
|
||||
[
|
||||
// use agent prompt otherwise provider prompt
|
||||
...(input.agent.prompt ? [input.agent.prompt] : SystemPrompt.provider(input.model)),
|
||||
// any custom prompt passed into this call
|
||||
...input.system,
|
||||
// any custom prompt from last user message
|
||||
...(input.user.system ? [input.user.system] : []),
|
||||
]
|
||||
.filter((x) => x)
|
||||
.join("\n"),
|
||||
)
|
||||
|
||||
const params = await Plugin.trigger(
|
||||
"chat.params",
|
||||
{
|
||||
sessionID: input.sessionID,
|
||||
agent: input.agent,
|
||||
model: input.model,
|
||||
provider: Provider.getProvider(input.model.providerID),
|
||||
message: input.user,
|
||||
},
|
||||
{
|
||||
temperature: input.model.capabilities.temperature
|
||||
? (input.agent.temperature ?? ProviderTransform.temperature(input.model))
|
||||
: undefined,
|
||||
topP: input.agent.topP ?? ProviderTransform.topP(input.model),
|
||||
options: pipe(
|
||||
{},
|
||||
mergeDeep(ProviderTransform.options(input.model, input.sessionID)),
|
||||
input.small ? mergeDeep(ProviderTransform.smallOptions(input.model)) : mergeDeep({}),
|
||||
mergeDeep(input.model.options),
|
||||
mergeDeep(input.agent.options),
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
l.info("params", {
|
||||
params,
|
||||
})
|
||||
|
||||
const maxOutputTokens = ProviderTransform.maxOutputTokens(
|
||||
input.model.api.npm,
|
||||
params.options,
|
||||
input.model.limit.output,
|
||||
OUTPUT_TOKEN_MAX,
|
||||
)
|
||||
|
||||
const tools = await resolveTools(input)
|
||||
|
||||
return streamText({
|
||||
onError(error) {
|
||||
l.error("stream error", {
|
||||
error,
|
||||
})
|
||||
},
|
||||
async experimental_repairToolCall(failed) {
|
||||
const lower = failed.toolCall.toolName.toLowerCase()
|
||||
if (lower !== failed.toolCall.toolName && tools[lower]) {
|
||||
l.info("repairing tool call", {
|
||||
tool: failed.toolCall.toolName,
|
||||
repaired: lower,
|
||||
})
|
||||
return {
|
||||
...failed.toolCall,
|
||||
toolName: lower,
|
||||
}
|
||||
}
|
||||
return {
|
||||
...failed.toolCall,
|
||||
input: JSON.stringify({
|
||||
tool: failed.toolCall.toolName,
|
||||
error: failed.error.message,
|
||||
}),
|
||||
toolName: "invalid",
|
||||
}
|
||||
},
|
||||
temperature: params.temperature,
|
||||
topP: params.topP,
|
||||
providerOptions: ProviderTransform.providerOptions(input.model, params.options),
|
||||
activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
|
||||
tools,
|
||||
maxOutputTokens,
|
||||
abortSignal: input.abort,
|
||||
headers: {
|
||||
...(input.model.providerID.startsWith("opencode")
|
||||
? {
|
||||
"x-opencode-project": Instance.project.id,
|
||||
"x-opencode-session": input.sessionID,
|
||||
"x-opencode-request": input.user.id,
|
||||
"x-opencode-client": Flag.OPENCODE_CLIENT,
|
||||
}
|
||||
: undefined),
|
||||
...input.model.headers,
|
||||
},
|
||||
maxRetries: input.retries ?? 0,
|
||||
messages: [
|
||||
...system.map(
|
||||
(x): ModelMessage => ({
|
||||
role: "system",
|
||||
content: x,
|
||||
}),
|
||||
),
|
||||
...input.messages,
|
||||
],
|
||||
model: wrapLanguageModel({
|
||||
model: language,
|
||||
middleware: [
|
||||
{
|
||||
async transformParams(args) {
|
||||
if (args.type === "stream") {
|
||||
// @ts-expect-error
|
||||
args.params.prompt = ProviderTransform.message(args.params.prompt, input.model)
|
||||
}
|
||||
return args.params
|
||||
},
|
||||
},
|
||||
],
|
||||
}),
|
||||
experimental_telemetry: { isEnabled: cfg.experimental?.openTelemetry },
|
||||
})
|
||||
}
|
||||
|
||||
async function resolveTools(input: Pick<StreamInput, "tools" | "agent" | "user">) {
|
||||
const enabled = pipe(
|
||||
input.agent.tools,
|
||||
mergeDeep(await ToolRegistry.enabled(input.agent)),
|
||||
mergeDeep(input.user.tools ?? {}),
|
||||
)
|
||||
for (const [key, value] of Object.entries(enabled)) {
|
||||
if (value === false) delete input.tools[key]
|
||||
}
|
||||
return input.tools
|
||||
}
|
||||
}
|
||||
@@ -348,7 +348,11 @@ export namespace MessageV2 {
|
||||
parentID: z.string(),
|
||||
modelID: z.string(),
|
||||
providerID: z.string(),
|
||||
/**
|
||||
* @deprecated
|
||||
*/
|
||||
mode: z.string(),
|
||||
agent: z.string(),
|
||||
path: z.object({
|
||||
cwd: z.string(),
|
||||
root: z.string(),
|
||||
@@ -412,12 +416,7 @@ export namespace MessageV2 {
|
||||
})
|
||||
export type WithParts = z.infer<typeof WithParts>
|
||||
|
||||
export function toModelMessage(
|
||||
input: {
|
||||
info: Info
|
||||
parts: Part[]
|
||||
}[],
|
||||
): ModelMessage[] {
|
||||
export function toModelMessage(input: WithParts[]): ModelMessage[] {
|
||||
const result: UIMessage[] = []
|
||||
|
||||
for (const msg of input) {
|
||||
@@ -461,6 +460,15 @@ export namespace MessageV2 {
|
||||
}
|
||||
|
||||
if (msg.info.role === "assistant") {
|
||||
if (
|
||||
msg.info.error &&
|
||||
!(
|
||||
MessageV2.AbortedError.isInstance(msg.info.error) &&
|
||||
msg.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning")
|
||||
)
|
||||
) {
|
||||
continue
|
||||
}
|
||||
const assistantMessage: UIMessage = {
|
||||
id: msg.info.id,
|
||||
role: "assistant",
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import { MessageV2 } from "./message-v2"
|
||||
import { streamText } from "ai"
|
||||
import { Log } from "@/util/log"
|
||||
import { Identifier } from "@/id/id"
|
||||
import { Session } from "."
|
||||
@@ -12,6 +11,7 @@ import { SessionRetry } from "./retry"
|
||||
import { SessionStatus } from "./status"
|
||||
import { Plugin } from "@/plugin"
|
||||
import type { Provider } from "@/provider/provider"
|
||||
import { LLM } from "./llm"
|
||||
import { Config } from "@/config/config"
|
||||
|
||||
export namespace SessionProcessor {
|
||||
@@ -21,15 +21,6 @@ export namespace SessionProcessor {
|
||||
export type Info = Awaited<ReturnType<typeof create>>
|
||||
export type Result = Awaited<ReturnType<Info["process"]>>
|
||||
|
||||
export type StreamInput = Parameters<typeof streamText>[0]
|
||||
|
||||
export type TBD = {
|
||||
model: {
|
||||
modelID: string
|
||||
providerID: string
|
||||
}
|
||||
}
|
||||
|
||||
export function create(input: {
|
||||
assistantMessage: MessageV2.Assistant
|
||||
sessionID: string
|
||||
@@ -48,14 +39,14 @@ export namespace SessionProcessor {
|
||||
partFromToolCall(toolCallID: string) {
|
||||
return toolcalls[toolCallID]
|
||||
},
|
||||
async process(streamInput: StreamInput) {
|
||||
async process(streamInput: LLM.StreamInput) {
|
||||
log.info("process")
|
||||
const shouldBreak = (await Config.get()).experimental?.continue_loop_on_deny !== true
|
||||
while (true) {
|
||||
try {
|
||||
let currentText: MessageV2.TextPart | undefined
|
||||
let reasoningMap: Record<string, MessageV2.ReasoningPart> = {}
|
||||
const stream = streamText(streamInput)
|
||||
const stream = await LLM.stream(streamInput)
|
||||
|
||||
for await (const value of stream.fullStream) {
|
||||
input.abort.throwIfAborted()
|
||||
|
||||
@@ -5,32 +5,22 @@ import z from "zod"
|
||||
import { Identifier } from "../id/id"
|
||||
import { MessageV2 } from "./message-v2"
|
||||
import { Log } from "../util/log"
|
||||
import { Flag } from "../flag/flag"
|
||||
import { SessionRevert } from "./revert"
|
||||
import { Session } from "."
|
||||
import { Agent } from "../agent/agent"
|
||||
import { Provider } from "../provider/provider"
|
||||
import {
|
||||
generateText,
|
||||
type ModelMessage,
|
||||
type Tool as AITool,
|
||||
tool,
|
||||
wrapLanguageModel,
|
||||
stepCountIs,
|
||||
jsonSchema,
|
||||
} from "ai"
|
||||
import { type Tool as AITool, tool, jsonSchema } from "ai"
|
||||
import { SessionCompaction } from "./compaction"
|
||||
import { Instance } from "../project/instance"
|
||||
import { Bus } from "../bus"
|
||||
import { ProviderTransform } from "../provider/transform"
|
||||
import { SystemPrompt } from "./system"
|
||||
import { Plugin } from "../plugin"
|
||||
|
||||
import PROMPT_PLAN from "../session/prompt/plan.txt"
|
||||
import BUILD_SWITCH from "../session/prompt/build-switch.txt"
|
||||
import MAX_STEPS from "../session/prompt/max-steps.txt"
|
||||
import { defer } from "../util/defer"
|
||||
import { clone, mergeDeep, pipe } from "remeda"
|
||||
import { mergeDeep, pipe } from "remeda"
|
||||
import { ToolRegistry } from "../tool/registry"
|
||||
import { Wildcard } from "../util/wildcard"
|
||||
import { MCP } from "../mcp"
|
||||
@@ -44,12 +34,13 @@ import { Command } from "../command"
|
||||
import { $, fileURLToPath } from "bun"
|
||||
import { ConfigMarkdown } from "../config/markdown"
|
||||
import { SessionSummary } from "./summary"
|
||||
import { Config } from "../config/config"
|
||||
import { NamedError } from "@opencode-ai/util/error"
|
||||
import { fn } from "@/util/fn"
|
||||
import { SessionProcessor } from "./processor"
|
||||
import { TaskTool } from "@/tool/task"
|
||||
import { SessionStatus } from "./status"
|
||||
import { LLM } from "./llm"
|
||||
import { iife } from "@/util/iife"
|
||||
import { Shell } from "@/shell/shell"
|
||||
|
||||
// @ts-ignore
|
||||
@@ -96,8 +87,8 @@ export namespace SessionPrompt {
|
||||
.optional(),
|
||||
agent: z.string().optional(),
|
||||
noReply: z.boolean().optional(),
|
||||
system: z.string().optional(),
|
||||
tools: z.record(z.string(), z.boolean()).optional(),
|
||||
system: z.string().optional(),
|
||||
parts: z.array(
|
||||
z.discriminatedUnion("type", [
|
||||
MessageV2.TextPart.omit({
|
||||
@@ -145,6 +136,20 @@ export namespace SessionPrompt {
|
||||
})
|
||||
export type PromptInput = z.infer<typeof PromptInput>
|
||||
|
||||
export const prompt = fn(PromptInput, async (input) => {
|
||||
const session = await Session.get(input.sessionID)
|
||||
await SessionRevert.cleanup(session)
|
||||
|
||||
const message = await createUserMessage(input)
|
||||
await Session.touch(input.sessionID)
|
||||
|
||||
if (input.noReply === true) {
|
||||
return message
|
||||
}
|
||||
|
||||
return loop(input.sessionID)
|
||||
})
|
||||
|
||||
export async function resolvePromptParts(template: string): Promise<PromptInput["parts"]> {
|
||||
const parts: PromptInput["parts"] = [
|
||||
{
|
||||
@@ -196,20 +201,6 @@ export namespace SessionPrompt {
|
||||
return parts
|
||||
}
|
||||
|
||||
export const prompt = fn(PromptInput, async (input) => {
|
||||
const session = await Session.get(input.sessionID)
|
||||
await SessionRevert.cleanup(session)
|
||||
|
||||
const message = await createUserMessage(input)
|
||||
await Session.touch(input.sessionID)
|
||||
|
||||
if (input.noReply === true) {
|
||||
return message
|
||||
}
|
||||
|
||||
return loop(input.sessionID)
|
||||
})
|
||||
|
||||
function start(sessionID: string) {
|
||||
const s = state()
|
||||
if (s[sessionID]) return
|
||||
@@ -291,7 +282,6 @@ export namespace SessionPrompt {
|
||||
})
|
||||
|
||||
const model = await Provider.getModel(lastUser.model.providerID, lastUser.model.modelID)
|
||||
const language = await Provider.getLanguage(model)
|
||||
const task = tasks.pop()
|
||||
|
||||
// pending subtask
|
||||
@@ -304,6 +294,7 @@ export namespace SessionPrompt {
|
||||
parentID: lastUser.id,
|
||||
sessionID,
|
||||
mode: task.agent,
|
||||
agent: task.agent,
|
||||
path: {
|
||||
cwd: Instance.directory,
|
||||
root: Instance.worktree,
|
||||
@@ -414,11 +405,6 @@ export namespace SessionPrompt {
|
||||
messages: msgs,
|
||||
parentID: lastUser.id,
|
||||
abort,
|
||||
agent: lastUser.agent,
|
||||
model: {
|
||||
providerID: model.providerID,
|
||||
modelID: model.id,
|
||||
},
|
||||
sessionID,
|
||||
auto: task.auto,
|
||||
})
|
||||
@@ -442,7 +428,6 @@ export namespace SessionPrompt {
|
||||
}
|
||||
|
||||
// normal processing
|
||||
const cfg = await Config.get()
|
||||
const agent = await Agent.get(lastUser.agent)
|
||||
const maxSteps = agent.maxSteps ?? Infinity
|
||||
const isLastStep = step >= maxSteps
|
||||
@@ -450,12 +435,14 @@ export namespace SessionPrompt {
|
||||
messages: msgs,
|
||||
agent,
|
||||
})
|
||||
|
||||
const processor = SessionProcessor.create({
|
||||
assistantMessage: (await Session.updateMessage({
|
||||
id: Identifier.ascending("message"),
|
||||
parentID: lastUser.id,
|
||||
role: "assistant",
|
||||
mode: agent.name,
|
||||
agent: agent.name,
|
||||
path: {
|
||||
cwd: Instance.directory,
|
||||
root: Instance.worktree,
|
||||
@@ -478,12 +465,6 @@ export namespace SessionPrompt {
|
||||
model,
|
||||
abort,
|
||||
})
|
||||
const system = await resolveSystemPrompt({
|
||||
model,
|
||||
agent,
|
||||
system: lastUser.system,
|
||||
isLastStep,
|
||||
})
|
||||
const tools = await resolveTools({
|
||||
agent,
|
||||
sessionID,
|
||||
@@ -491,30 +472,6 @@ export namespace SessionPrompt {
|
||||
tools: lastUser.tools,
|
||||
processor,
|
||||
})
|
||||
const provider = await Provider.getProvider(model.providerID)
|
||||
const params = await Plugin.trigger(
|
||||
"chat.params",
|
||||
{
|
||||
sessionID: sessionID,
|
||||
agent: lastUser.agent,
|
||||
model: model,
|
||||
provider,
|
||||
message: lastUser,
|
||||
},
|
||||
{
|
||||
temperature: model.capabilities.temperature
|
||||
? (agent.temperature ?? ProviderTransform.temperature(model))
|
||||
: undefined,
|
||||
topP: agent.topP ?? ProviderTransform.topP(model),
|
||||
topK: ProviderTransform.topK(model),
|
||||
options: pipe(
|
||||
{},
|
||||
mergeDeep(ProviderTransform.options(model, sessionID, provider?.options)),
|
||||
mergeDeep(model.options),
|
||||
mergeDeep(agent.options),
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
if (step === 1) {
|
||||
SessionSummary.summarize({
|
||||
@@ -523,135 +480,25 @@ export namespace SessionPrompt {
|
||||
})
|
||||
}
|
||||
|
||||
// Deep copy message history so that modifications made by plugins do not
|
||||
// affect the original messages
|
||||
const sessionMessages = clone(
|
||||
msgs.filter((m) => {
|
||||
if (m.info.role !== "assistant" || m.info.error === undefined) {
|
||||
return true
|
||||
}
|
||||
if (
|
||||
MessageV2.AbortedError.isInstance(m.info.error) &&
|
||||
m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning")
|
||||
) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}),
|
||||
)
|
||||
|
||||
await Plugin.trigger("experimental.chat.messages.transform", {}, { messages: sessionMessages })
|
||||
|
||||
const messages: ModelMessage[] = [
|
||||
...system.map(
|
||||
(x): ModelMessage => ({
|
||||
role: "system",
|
||||
content: x,
|
||||
}),
|
||||
),
|
||||
...MessageV2.toModelMessage(sessionMessages),
|
||||
...(isLastStep
|
||||
? [
|
||||
{
|
||||
role: "assistant" as const,
|
||||
content: MAX_STEPS,
|
||||
},
|
||||
]
|
||||
: []),
|
||||
]
|
||||
|
||||
const result = await processor.process({
|
||||
onError(error) {
|
||||
log.error("stream error", {
|
||||
error,
|
||||
})
|
||||
},
|
||||
async experimental_repairToolCall(input) {
|
||||
const lower = input.toolCall.toolName.toLowerCase()
|
||||
if (lower !== input.toolCall.toolName && tools[lower]) {
|
||||
log.info("repairing tool call", {
|
||||
tool: input.toolCall.toolName,
|
||||
repaired: lower,
|
||||
})
|
||||
return {
|
||||
...input.toolCall,
|
||||
toolName: lower,
|
||||
}
|
||||
}
|
||||
return {
|
||||
...input.toolCall,
|
||||
input: JSON.stringify({
|
||||
tool: input.toolCall.toolName,
|
||||
error: input.error.message,
|
||||
}),
|
||||
toolName: "invalid",
|
||||
}
|
||||
},
|
||||
headers: {
|
||||
...(model.providerID.startsWith("opencode")
|
||||
? {
|
||||
"x-opencode-project": Instance.project.id,
|
||||
"x-opencode-session": sessionID,
|
||||
"x-opencode-request": lastUser.id,
|
||||
"x-opencode-client": Flag.OPENCODE_CLIENT,
|
||||
}
|
||||
: undefined),
|
||||
...model.headers,
|
||||
},
|
||||
// set to 0, we handle loop
|
||||
maxRetries: 0,
|
||||
activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
|
||||
maxOutputTokens: ProviderTransform.maxOutputTokens(
|
||||
model.api.npm,
|
||||
params.options,
|
||||
model.limit.output,
|
||||
OUTPUT_TOKEN_MAX,
|
||||
),
|
||||
abortSignal: abort,
|
||||
providerOptions: ProviderTransform.providerOptions(model, params.options),
|
||||
stopWhen: stepCountIs(1),
|
||||
temperature: params.temperature,
|
||||
topP: params.topP,
|
||||
topK: params.topK,
|
||||
toolChoice: isLastStep ? "none" : undefined,
|
||||
messages,
|
||||
tools: model.capabilities.toolcall === false ? undefined : tools,
|
||||
model: wrapLanguageModel({
|
||||
model: language,
|
||||
middleware: [
|
||||
{
|
||||
async transformParams(args) {
|
||||
if (args.type === "stream") {
|
||||
// @ts-expect-error - prompt types are compatible at runtime
|
||||
args.params.prompt = ProviderTransform.message(args.params.prompt, model)
|
||||
}
|
||||
// Transform tool schemas for provider compatibility
|
||||
if (args.params.tools && Array.isArray(args.params.tools)) {
|
||||
args.params.tools = args.params.tools.map((tool: any) => {
|
||||
// Tools at middleware level have inputSchema, not parameters
|
||||
if (tool.inputSchema && typeof tool.inputSchema === "object") {
|
||||
// Transform the inputSchema for provider compatibility
|
||||
return {
|
||||
...tool,
|
||||
inputSchema: ProviderTransform.schema(model, tool.inputSchema),
|
||||
}
|
||||
}
|
||||
// If no inputSchema, return tool unchanged
|
||||
return tool
|
||||
})
|
||||
}
|
||||
return args.params
|
||||
},
|
||||
},
|
||||
],
|
||||
}),
|
||||
experimental_telemetry: {
|
||||
isEnabled: cfg.experimental?.openTelemetry,
|
||||
metadata: {
|
||||
userId: cfg.username ?? "unknown",
|
||||
sessionId: sessionID,
|
||||
},
|
||||
},
|
||||
user: lastUser,
|
||||
agent,
|
||||
abort,
|
||||
sessionID,
|
||||
system: [...(await SystemPrompt.environment()), ...(await SystemPrompt.custom())],
|
||||
messages: [
|
||||
...MessageV2.toModelMessage(msgs),
|
||||
...(isLastStep
|
||||
? [
|
||||
{
|
||||
role: "assistant" as const,
|
||||
content: MAX_STEPS,
|
||||
},
|
||||
]
|
||||
: []),
|
||||
],
|
||||
tools,
|
||||
model,
|
||||
})
|
||||
if (result === "stop") break
|
||||
continue
|
||||
@@ -675,33 +522,6 @@ export namespace SessionPrompt {
|
||||
return Provider.defaultModel()
|
||||
}
|
||||
|
||||
async function resolveSystemPrompt(input: {
|
||||
system?: string
|
||||
agent: Agent.Info
|
||||
model: Provider.Model
|
||||
isLastStep?: boolean
|
||||
}) {
|
||||
let system = SystemPrompt.header(input.model.providerID)
|
||||
system.push(
|
||||
...(() => {
|
||||
if (input.system) return [input.system]
|
||||
if (input.agent.prompt) return [input.agent.prompt]
|
||||
return SystemPrompt.provider(input.model)
|
||||
})(),
|
||||
)
|
||||
system.push(...(await SystemPrompt.environment()))
|
||||
system.push(...(await SystemPrompt.custom()))
|
||||
|
||||
if (input.isLastStep) {
|
||||
system.push(MAX_STEPS)
|
||||
}
|
||||
|
||||
// max 2 system prompt messages for caching purposes
|
||||
const [first, ...rest] = system
|
||||
system = [first, rest.join("\n")]
|
||||
return system
|
||||
}
|
||||
|
||||
async function resolveTools(input: {
|
||||
agent: Agent.Info
|
||||
model: Provider.Model
|
||||
@@ -709,6 +529,7 @@ export namespace SessionPrompt {
|
||||
tools?: Record<string, boolean>
|
||||
processor: SessionProcessor.Info
|
||||
}) {
|
||||
using _ = log.time("resolveTools")
|
||||
const tools: Record<string, AITool> = {}
|
||||
const enabledTools = pipe(
|
||||
input.agent.tools,
|
||||
@@ -778,7 +599,6 @@ export namespace SessionPrompt {
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
for (const [key, item] of Object.entries(await MCP.tools())) {
|
||||
if (Wildcard.all(key, enabledTools) === false) continue
|
||||
const execute = item.execute
|
||||
@@ -857,7 +677,6 @@ export namespace SessionPrompt {
|
||||
created: Date.now(),
|
||||
},
|
||||
tools: input.tools,
|
||||
system: input.system,
|
||||
agent: agent.name,
|
||||
model: input.model ?? agent.model ?? (await lastModel(input.sessionID)),
|
||||
}
|
||||
@@ -1148,7 +967,7 @@ export namespace SessionPrompt {
|
||||
synthetic: true,
|
||||
})
|
||||
}
|
||||
const wasPlan = input.messages.some((msg) => msg.info.role === "assistant" && msg.info.mode === "plan")
|
||||
const wasPlan = input.messages.some((msg) => msg.info.role === "assistant" && msg.info.agent === "plan")
|
||||
if (wasPlan && input.agent.name === "build") {
|
||||
userMessage.parts.push({
|
||||
id: Identifier.ascending("part"),
|
||||
@@ -1216,6 +1035,7 @@ export namespace SessionPrompt {
|
||||
sessionID: input.sessionID,
|
||||
parentID: userMsg.id,
|
||||
mode: input.agent,
|
||||
agent: input.agent,
|
||||
cost: 0,
|
||||
path: {
|
||||
cwd: Instance.directory,
|
||||
@@ -1510,28 +1330,24 @@ export namespace SessionPrompt {
|
||||
input.history.filter((m) => m.info.role === "user" && !m.parts.every((p) => "synthetic" in p && p.synthetic))
|
||||
.length === 1
|
||||
if (!isFirst) return
|
||||
const cfg = await Config.get()
|
||||
const small =
|
||||
(await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID))
|
||||
const language = await Provider.getLanguage(small)
|
||||
const provider = await Provider.getProvider(small.providerID)
|
||||
const options = pipe(
|
||||
{},
|
||||
mergeDeep(ProviderTransform.options(small, input.session.id, provider?.options)),
|
||||
mergeDeep(ProviderTransform.smallOptions(small)),
|
||||
mergeDeep(small.options),
|
||||
)
|
||||
await generateText({
|
||||
// use higher # for reasoning models since reasoning tokens eat up a lot of the budget
|
||||
maxOutputTokens: small.capabilities.reasoning ? 3000 : 20,
|
||||
providerOptions: ProviderTransform.providerOptions(small, options),
|
||||
const agent = await Agent.get("title")
|
||||
if (!agent) return
|
||||
const result = await LLM.stream({
|
||||
agent,
|
||||
user: input.message.info as MessageV2.User,
|
||||
system: [],
|
||||
small: true,
|
||||
tools: {},
|
||||
model: await iife(async () => {
|
||||
if (agent.model) return await Provider.getModel(agent.model.providerID, agent.model.modelID)
|
||||
return (
|
||||
(await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID))
|
||||
)
|
||||
}),
|
||||
abort: new AbortController().signal,
|
||||
sessionID: input.session.id,
|
||||
retries: 2,
|
||||
messages: [
|
||||
...SystemPrompt.title(small.providerID).map(
|
||||
(x): ModelMessage => ({
|
||||
role: "system",
|
||||
content: x,
|
||||
}),
|
||||
),
|
||||
{
|
||||
role: "user",
|
||||
content: "Generate a title for this conversation:\n",
|
||||
@@ -1555,32 +1371,19 @@ export namespace SessionPrompt {
|
||||
},
|
||||
]),
|
||||
],
|
||||
headers: small.headers,
|
||||
model: language,
|
||||
experimental_telemetry: {
|
||||
isEnabled: cfg.experimental?.openTelemetry,
|
||||
metadata: {
|
||||
userId: cfg.username ?? "unknown",
|
||||
sessionId: input.session.id,
|
||||
},
|
||||
},
|
||||
})
|
||||
.then((result) => {
|
||||
if (result.text)
|
||||
return Session.update(input.session.id, (draft) => {
|
||||
const cleaned = result.text
|
||||
.replace(/<think>[\s\S]*?<\/think>\s*/g, "")
|
||||
.split("\n")
|
||||
.map((line) => line.trim())
|
||||
.find((line) => line.length > 0)
|
||||
if (!cleaned) return
|
||||
const text = await result.text.catch((err) => log.error("failed to generate title", { error: err }))
|
||||
if (text)
|
||||
return Session.update(input.session.id, (draft) => {
|
||||
const cleaned = text
|
||||
.replace(/<think>[\s\S]*?<\/think>\s*/g, "")
|
||||
.split("\n")
|
||||
.map((line) => line.trim())
|
||||
.find((line) => line.length > 0)
|
||||
if (!cleaned) return
|
||||
|
||||
const title = cleaned.length > 100 ? cleaned.substring(0, 97) + "..." : cleaned
|
||||
draft.title = title
|
||||
})
|
||||
})
|
||||
.catch((error) => {
|
||||
log.error("failed to generate title", { error, model: small.id })
|
||||
const title = cleaned.length > 100 ? cleaned.substring(0, 97) + "..." : cleaned
|
||||
draft.title = title
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +1,21 @@
|
||||
import { Provider } from "@/provider/provider"
|
||||
import { Config } from "@/config/config"
|
||||
|
||||
import { fn } from "@/util/fn"
|
||||
import z from "zod"
|
||||
import { Session } from "."
|
||||
import { generateText, type ModelMessage } from "ai"
|
||||
|
||||
import { MessageV2 } from "./message-v2"
|
||||
import { Identifier } from "@/id/id"
|
||||
import { Snapshot } from "@/snapshot"
|
||||
import { ProviderTransform } from "@/provider/transform"
|
||||
import { SystemPrompt } from "./system"
|
||||
|
||||
import { Log } from "@/util/log"
|
||||
import path from "path"
|
||||
import { Instance } from "@/project/instance"
|
||||
import { Storage } from "@/storage/storage"
|
||||
import { Bus } from "@/bus"
|
||||
import { mergeDeep, pipe } from "remeda"
|
||||
|
||||
import { LLM } from "./llm"
|
||||
import { Agent } from "@/agent/agent"
|
||||
|
||||
export namespace SessionSummary {
|
||||
const log = Log.create({ service: "session.summary" })
|
||||
@@ -61,7 +62,6 @@ export namespace SessionSummary {
|
||||
}
|
||||
|
||||
async function summarizeMessage(input: { messageID: string; messages: MessageV2.WithParts[] }) {
|
||||
const cfg = await Config.get()
|
||||
const messages = input.messages.filter(
|
||||
(m) => m.info.id === input.messageID || (m.info.role === "assistant" && m.info.parentID === input.messageID),
|
||||
)
|
||||
@@ -78,27 +78,17 @@ export namespace SessionSummary {
|
||||
const small =
|
||||
(await Provider.getSmallModel(assistantMsg.providerID)) ??
|
||||
(await Provider.getModel(assistantMsg.providerID, assistantMsg.modelID))
|
||||
const language = await Provider.getLanguage(small)
|
||||
|
||||
const options = pipe(
|
||||
{},
|
||||
mergeDeep(ProviderTransform.options(small, assistantMsg.sessionID)),
|
||||
mergeDeep(ProviderTransform.smallOptions(small)),
|
||||
mergeDeep(small.options),
|
||||
)
|
||||
|
||||
const textPart = msgWithParts.parts.find((p) => p.type === "text" && !p.synthetic) as MessageV2.TextPart
|
||||
if (textPart && !userMsg.summary?.title) {
|
||||
const result = await generateText({
|
||||
maxOutputTokens: small.capabilities.reasoning ? 1500 : 20,
|
||||
providerOptions: ProviderTransform.providerOptions(small, options),
|
||||
const agent = await Agent.get("title")
|
||||
const stream = await LLM.stream({
|
||||
agent,
|
||||
user: userMsg,
|
||||
tools: {},
|
||||
model: agent.model ? await Provider.getModel(agent.model.providerID, agent.model.modelID) : small,
|
||||
small: true,
|
||||
messages: [
|
||||
...SystemPrompt.title(small.providerID).map(
|
||||
(x): ModelMessage => ({
|
||||
role: "system",
|
||||
content: x,
|
||||
}),
|
||||
),
|
||||
{
|
||||
role: "user" as const,
|
||||
content: `
|
||||
@@ -109,18 +99,14 @@ export namespace SessionSummary {
|
||||
`,
|
||||
},
|
||||
],
|
||||
headers: small.headers,
|
||||
model: language,
|
||||
experimental_telemetry: {
|
||||
isEnabled: cfg.experimental?.openTelemetry,
|
||||
metadata: {
|
||||
userId: cfg.username ?? "unknown",
|
||||
sessionId: assistantMsg.sessionID,
|
||||
},
|
||||
},
|
||||
abort: new AbortController().signal,
|
||||
sessionID: userMsg.sessionID,
|
||||
system: [],
|
||||
retries: 3,
|
||||
})
|
||||
log.info("title", { title: result.text })
|
||||
userMsg.summary.title = result.text
|
||||
const result = await stream.text
|
||||
log.info("title", { title: result })
|
||||
userMsg.summary.title = result
|
||||
await Session.updateMessage(userMsg)
|
||||
}
|
||||
|
||||
@@ -138,34 +124,30 @@ export namespace SessionSummary {
|
||||
}
|
||||
}
|
||||
}
|
||||
const result = await generateText({
|
||||
model: language,
|
||||
maxOutputTokens: 100,
|
||||
providerOptions: ProviderTransform.providerOptions(small, options),
|
||||
const summaryAgent = await Agent.get("summary")
|
||||
const stream = await LLM.stream({
|
||||
agent: summaryAgent,
|
||||
user: userMsg,
|
||||
tools: {},
|
||||
model: summaryAgent.model
|
||||
? await Provider.getModel(summaryAgent.model.providerID, summaryAgent.model.modelID)
|
||||
: small,
|
||||
small: true,
|
||||
messages: [
|
||||
...SystemPrompt.summarize(small.providerID).map(
|
||||
(x): ModelMessage => ({
|
||||
role: "system",
|
||||
content: x,
|
||||
}),
|
||||
),
|
||||
...MessageV2.toModelMessage(messages),
|
||||
{
|
||||
role: "user",
|
||||
role: "user" as const,
|
||||
content: `Summarize the above conversation according to your system prompts.`,
|
||||
},
|
||||
],
|
||||
headers: small.headers,
|
||||
experimental_telemetry: {
|
||||
isEnabled: cfg.experimental?.openTelemetry,
|
||||
metadata: {
|
||||
userId: cfg.username ?? "unknown",
|
||||
sessionId: assistantMsg.sessionID,
|
||||
},
|
||||
},
|
||||
}).catch(() => {})
|
||||
abort: new AbortController().signal,
|
||||
sessionID: userMsg.sessionID,
|
||||
system: [],
|
||||
retries: 3,
|
||||
})
|
||||
const result = await stream.text
|
||||
if (result) {
|
||||
userMsg.summary.body = result.text
|
||||
userMsg.summary.body = result
|
||||
}
|
||||
}
|
||||
await Session.updateMessage(userMsg)
|
||||
|
||||
@@ -14,8 +14,7 @@ import PROMPT_BEAST from "./prompt/beast.txt"
import PROMPT_GEMINI from "./prompt/gemini.txt"
import PROMPT_ANTHROPIC_SPOOF from "./prompt/anthropic_spoof.txt"
import PROMPT_COMPACTION from "./prompt/compaction.txt"
import PROMPT_SUMMARIZE from "./prompt/summarize.txt"
import PROMPT_TITLE from "./prompt/title.txt"

import PROMPT_CODEX from "./prompt/codex.txt"
import type { Provider } from "@/provider/provider"

@@ -118,31 +117,4 @@ export namespace SystemPrompt {
)
return Promise.all(found).then((result) => result.filter(Boolean))
}

export function compaction(providerID: string) {
switch (providerID) {
case "anthropic":
return [PROMPT_ANTHROPIC_SPOOF.trim(), PROMPT_COMPACTION]
default:
return [PROMPT_COMPACTION]
}
}

export function summarize(providerID: string) {
switch (providerID) {
case "anthropic":
return [PROMPT_ANTHROPIC_SPOOF.trim(), PROMPT_SUMMARIZE]
default:
return [PROMPT_SUMMARIZE]
}
}

export function title(providerID: string) {
switch (providerID) {
case "anthropic":
return [PROMPT_ANTHROPIC_SPOOF.trim(), PROMPT_TITLE]
default:
return [PROMPT_TITLE]
}
}
}

@@ -50,7 +50,6 @@ const parser = lazy(async () => {
})

// TODO: we may wanna rename this tool so it works better on other shells

export const BashTool = Tool.define("bash", async () => {
const shell = Shell.acceptable()
log.info("bash tool using shell", { shell })

@@ -21,8 +21,11 @@ import { Plugin } from "../plugin"
import { WebSearchTool } from "./websearch"
import { CodeSearchTool } from "./codesearch"
import { Flag } from "@/flag/flag"
import { Log } from "@/util/log"

export namespace ToolRegistry {
const log = Log.create({ service: "tool.registry" })

export const state = Instance.state(async () => {
const custom = [] as Tool.Info[]
const glob = new Bun.Glob("tool/*.{js,ts}")
@@ -119,10 +122,13 @@ export namespace ToolRegistry {
}
return true
})
.map(async (t) => ({
id: t.id,
...(await t.init()),
})),
.map(async (t) => {
using _ = log.time(t.id)
return {
id: t.id,
...(await t.init()),
}
}),
)
return result
}

@@ -1203,10 +1203,10 @@ export class Session extends HeyApiClient {
|
||||
}
|
||||
agent?: string
|
||||
noReply?: boolean
|
||||
system?: string
|
||||
tools?: {
|
||||
[key: string]: boolean
|
||||
}
|
||||
system?: string
|
||||
parts?: Array<TextPartInput | FilePartInput | AgentPartInput | SubtaskPartInput>
|
||||
},
|
||||
options?: Options<never, ThrowOnError>,
|
||||
@@ -1222,8 +1222,8 @@ export class Session extends HeyApiClient {
|
||||
{ in: "body", key: "model" },
|
||||
{ in: "body", key: "agent" },
|
||||
{ in: "body", key: "noReply" },
|
||||
{ in: "body", key: "system" },
|
||||
{ in: "body", key: "tools" },
|
||||
{ in: "body", key: "system" },
|
||||
{ in: "body", key: "parts" },
|
||||
],
|
||||
},
|
||||
@@ -1289,10 +1289,10 @@ export class Session extends HeyApiClient {
|
||||
}
|
||||
agent?: string
|
||||
noReply?: boolean
|
||||
system?: string
|
||||
tools?: {
|
||||
[key: string]: boolean
|
||||
}
|
||||
system?: string
|
||||
parts?: Array<TextPartInput | FilePartInput | AgentPartInput | SubtaskPartInput>
|
||||
},
|
||||
options?: Options<never, ThrowOnError>,
|
||||
@@ -1308,8 +1308,8 @@ export class Session extends HeyApiClient {
|
||||
{ in: "body", key: "model" },
|
||||
{ in: "body", key: "agent" },
|
||||
{ in: "body", key: "noReply" },
|
||||
{ in: "body", key: "system" },
|
||||
{ in: "body", key: "tools" },
|
||||
{ in: "body", key: "system" },
|
||||
{ in: "body", key: "parts" },
|
||||
],
|
||||
},
|
||||
|
||||
@@ -147,6 +147,7 @@ export type AssistantMessage = {
|
||||
modelID: string
|
||||
providerID: string
|
||||
mode: string
|
||||
agent: string
|
||||
path: {
|
||||
cwd: string
|
||||
root: string
|
||||
@@ -475,6 +476,40 @@ export type EventPermissionReplied = {
|
||||
}
|
||||
}
|
||||
|
||||
export type EventFileEdited = {
|
||||
type: "file.edited"
|
||||
properties: {
|
||||
file: string
|
||||
}
|
||||
}
|
||||
|
||||
export type Todo = {
|
||||
/**
|
||||
* Brief description of the task
|
||||
*/
|
||||
content: string
|
||||
/**
|
||||
* Current status of the task: pending, in_progress, completed, cancelled
|
||||
*/
|
||||
status: string
|
||||
/**
|
||||
* Priority level of the task: high, medium, low
|
||||
*/
|
||||
priority: string
|
||||
/**
|
||||
* Unique identifier for the todo item
|
||||
*/
|
||||
id: string
|
||||
}
|
||||
|
||||
export type EventTodoUpdated = {
|
||||
type: "todo.updated"
|
||||
properties: {
|
||||
sessionID: string
|
||||
todos: Array<Todo>
|
||||
}
|
||||
}
|
||||
|
||||
export type SessionStatus =
|
||||
| {
|
||||
type: "idle"
|
||||
@@ -511,40 +546,6 @@ export type EventSessionCompacted = {
|
||||
}
|
||||
}
|
||||
|
||||
export type EventFileEdited = {
|
||||
type: "file.edited"
|
||||
properties: {
|
||||
file: string
|
||||
}
|
||||
}
|
||||
|
||||
export type Todo = {
|
||||
/**
|
||||
* Brief description of the task
|
||||
*/
|
||||
content: string
|
||||
/**
|
||||
* Current status of the task: pending, in_progress, completed, cancelled
|
||||
*/
|
||||
status: string
|
||||
/**
|
||||
* Priority level of the task: high, medium, low
|
||||
*/
|
||||
priority: string
|
||||
/**
|
||||
* Unique identifier for the todo item
|
||||
*/
|
||||
id: string
|
||||
}
|
||||
|
||||
export type EventTodoUpdated = {
|
||||
type: "todo.updated"
|
||||
properties: {
|
||||
sessionID: string
|
||||
todos: Array<Todo>
|
||||
}
|
||||
}
|
||||
|
||||
export type EventCommandExecuted = {
|
||||
type: "command.executed"
|
||||
properties: {
|
||||
@@ -745,11 +746,11 @@ export type Event =
|
||||
| EventMessagePartRemoved
|
||||
| EventPermissionUpdated
|
||||
| EventPermissionReplied
|
||||
| EventFileEdited
|
||||
| EventTodoUpdated
|
||||
| EventSessionStatus
|
||||
| EventSessionIdle
|
||||
| EventSessionCompacted
|
||||
| EventFileEdited
|
||||
| EventTodoUpdated
|
||||
| EventCommandExecuted
|
||||
| EventSessionCreated
|
||||
| EventSessionUpdated
|
||||
@@ -1738,7 +1739,8 @@ export type Agent = {
|
||||
name: string
|
||||
description?: string
|
||||
mode: "subagent" | "primary" | "all"
|
||||
builtIn: boolean
|
||||
native?: boolean
|
||||
hidden?: boolean
|
||||
topP?: number
|
||||
temperature?: number
|
||||
color?: string
|
||||
@@ -2801,10 +2803,10 @@ export type SessionPromptData = {
|
||||
}
|
||||
agent?: string
|
||||
noReply?: boolean
|
||||
system?: string
|
||||
tools?: {
|
||||
[key: string]: boolean
|
||||
}
|
||||
system?: string
|
||||
parts: Array<TextPartInput | FilePartInput | AgentPartInput | SubtaskPartInput>
|
||||
}
|
||||
path: {
|
||||
@@ -2896,10 +2898,10 @@ export type SessionPromptAsyncData = {
|
||||
}
|
||||
agent?: string
|
||||
noReply?: boolean
|
||||
system?: string
|
||||
tools?: {
|
||||
[key: string]: boolean
|
||||
}
|
||||
system?: string
|
||||
parts: Array<TextPartInput | FilePartInput | AgentPartInput | SubtaskPartInput>
|
||||
}
|
||||
path: {
|
||||
|
||||
@@ -1997,9 +1997,6 @@
|
||||
"noReply": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"system": {
|
||||
"type": "string"
|
||||
},
|
||||
"tools": {
|
||||
"type": "object",
|
||||
"propertyNames": {
|
||||
@@ -2009,6 +2006,9 @@
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"system": {
|
||||
"type": "string"
|
||||
},
|
||||
"parts": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -2202,9 +2202,6 @@
|
||||
"noReply": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"system": {
|
||||
"type": "string"
|
||||
},
|
||||
"tools": {
|
||||
"type": "object",
|
||||
"propertyNames": {
|
||||
@@ -2214,6 +2211,9 @@
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"system": {
|
||||
"type": "string"
|
||||
},
|
||||
"parts": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -5193,6 +5193,9 @@
|
||||
"mode": {
|
||||
"type": "string"
|
||||
},
|
||||
"agent": {
|
||||
"type": "string"
|
||||
},
|
||||
"path": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -5251,6 +5254,7 @@
|
||||
"modelID",
|
||||
"providerID",
|
||||
"mode",
|
||||
"agent",
|
||||
"path",
|
||||
"cost",
|
||||
"tokens"
|
||||
@@ -6152,6 +6156,72 @@
|
||||
},
|
||||
"required": ["type", "properties"]
|
||||
},
|
||||
"Event.file.edited": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"const": "file.edited"
|
||||
},
|
||||
"properties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"file": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": ["file"]
|
||||
}
|
||||
},
|
||||
"required": ["type", "properties"]
|
||||
},
|
||||
"Todo": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"content": {
|
||||
"description": "Brief description of the task",
|
||||
"type": "string"
|
||||
},
|
||||
"status": {
|
||||
"description": "Current status of the task: pending, in_progress, completed, cancelled",
|
||||
"type": "string"
|
||||
},
|
||||
"priority": {
|
||||
"description": "Priority level of the task: high, medium, low",
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"description": "Unique identifier for the todo item",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": ["content", "status", "priority", "id"]
|
||||
},
|
||||
"Event.todo.updated": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"const": "todo.updated"
|
||||
},
|
||||
"properties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"sessionID": {
|
||||
"type": "string"
|
||||
},
|
||||
"todos": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/Todo"
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["sessionID", "todos"]
|
||||
}
|
||||
},
|
||||
"required": ["type", "properties"]
|
||||
},
|
||||
"SessionStatus": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -6255,72 +6325,6 @@
|
||||
},
|
||||
"required": ["type", "properties"]
|
||||
},
|
||||
"Event.file.edited": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"const": "file.edited"
|
||||
},
|
||||
"properties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"file": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": ["file"]
|
||||
}
|
||||
},
|
||||
"required": ["type", "properties"]
|
||||
},
|
||||
"Todo": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"content": {
|
||||
"description": "Brief description of the task",
|
||||
"type": "string"
|
||||
},
|
||||
"status": {
|
||||
"description": "Current status of the task: pending, in_progress, completed, cancelled",
|
||||
"type": "string"
|
||||
},
|
||||
"priority": {
|
||||
"description": "Priority level of the task: high, medium, low",
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"description": "Unique identifier for the todo item",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": ["content", "status", "priority", "id"]
|
||||
},
|
||||
"Event.todo.updated": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"const": "todo.updated"
|
||||
},
|
||||
"properties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"sessionID": {
|
||||
"type": "string"
|
||||
},
|
||||
"todos": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/Todo"
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["sessionID", "todos"]
|
||||
}
|
||||
},
|
||||
"required": ["type", "properties"]
|
||||
},
|
||||
"Event.command.executed": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -6886,6 +6890,12 @@
|
||||
{
|
||||
"$ref": "#/components/schemas/Event.permission.replied"
|
||||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/Event.file.edited"
|
||||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/Event.todo.updated"
|
||||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/Event.session.status"
|
||||
},
|
||||
@@ -6895,12 +6905,6 @@
|
||||
{
|
||||
"$ref": "#/components/schemas/Event.session.compacted"
|
||||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/Event.file.edited"
|
||||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/Event.todo.updated"
|
||||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/Event.command.executed"
|
||||
},
|
||||
@@ -8920,7 +8924,10 @@
|
||||
"type": "string",
|
||||
"enum": ["subagent", "primary", "all"]
|
||||
},
|
||||
"builtIn": {
|
||||
"native": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"hidden": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"topP": {
|
||||
@@ -9001,7 +9008,7 @@
|
||||
"maximum": 9007199254740991
|
||||
}
|
||||
},
|
||||
"required": ["name", "mode", "builtIn", "permission", "tools", "options"]
|
||||
"required": ["name", "mode", "permission", "tools", "options"]
|
||||
},
|
||||
"MCPStatusConnected": {
|
||||
"type": "object",
|
||||
|
||||