diff --git a/packages/llm/AGENTS.md b/packages/llm/AGENTS.md
index 8d51b5cfef..16a58fd866 100644
--- a/packages/llm/AGENTS.md
+++ b/packages/llm/AGENTS.md
@@ -8,6 +8,23 @@
 - In `Effect.gen`, yield yieldable errors directly (`return yield* new MyError(...)`) instead of `Effect.fail(new MyError(...))`.
 - Use `Effect.void` instead of `Effect.succeed(undefined)` when the successful value is intentionally void.
 
+## Conventions
+
+Per-type constructors live on the type's namespace, not as top-level re-exports. Use `Message.user(...)`, `Message.assistant(...)`, `Message.tool(...)`, `ToolDefinition.make(...)`, `ToolCallPart.make(...)`, `ToolResultPart.make(...)`, `ToolChoice.make(...)`, `ToolChoice.named(...)`, `SystemPart.make(...)`, and `GenerationOptions.make(...)` directly. The top-level `LLM` namespace is reserved for the request-shaped call API: `LLM.request`, `LLM.generate`, `LLM.stream`, `LLM.model`, `LLM.updateRequest`, `LLM.generateObject`. Two ways to construct the same thing is one too many.
+
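+For example, a request assembled from the per-type constructors (model values borrowed from the test fixtures, purely illustrative):
+
+```ts
+const model = LLM.model({ id: "fake-model", provider: "fake", route: "openai-chat", baseURL: "https://fake.local" })
+
+const request = LLM.request({
+  model,
+  messages: [Message.user("What is the weather?")],
+  tools: [ToolDefinition.make({ name: "lookup", description: "Lookup data", inputSchema: { type: "object" } })],
+  toolChoice: ToolChoice.make("auto"),
+})
+```
+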
 ## Tests
 
 - Use `testEffect(...)` from `test/lib/effect.ts` for tests requiring Effect layers.
@@ -166,12 +170,12 @@ If you find yourself copying a 3-to-5-line snippet between two protocols, lift i
 Tool loops are represented in common messages and events:
 
 ```ts
-const call = LLM.toolCall({ id: "call_1", name: "lookup", input: { query: "weather" } })
-const result = LLM.toolMessage({ id: "call_1", name: "lookup", result: { forecast: "sunny" } })
+const call = ToolCallPart.make({ id: "call_1", name: "lookup", input: { query: "weather" } })
+const result = Message.tool({ id: "call_1", name: "lookup", result: { forecast: "sunny" } })
 
 const followUp = LLM.request({
   model,
-  messages: [LLM.user("Weather?"), LLM.assistant([call]), result],
+  messages: [Message.user("Weather?"), Message.assistant([call]), result],
 })
 ```
diff --git a/packages/llm/src/llm.ts b/packages/llm/src/llm.ts
index bca78c888a..6f6728216b 100644
--- a/packages/llm/src/llm.ts
+++ b/packages/llm/src/llm.ts
@@ -44,32 +44,8 @@ export type RequestInput = Omit<
 
 export const limits = modelLimits
 
-export const text = Message.text
-
-export const system = SystemPart.make
-
-export const message = Message.make
-
-export const user = Message.user
-
-export const assistant = Message.assistant
-
 export const model = modelRef
 
-export const toolDefinition = ToolDefinition.make
-
-export const toolCall = ToolCallPart.make
-
-export const toolResult = ToolResultPart.make
-
-export const toolMessage = Message.tool
-
-export const toolChoiceName = ToolChoice.named
-
-export const toolChoice = ToolChoice.make
-
-export const generation = GenerationOptions.make
-
 export const generate = LLMClient.generate
 
 export const stream = LLMClient.stream
@@ -95,10 +71,10 @@ export const request = (input: RequestInput) => {
   return new LLMRequest({
     ...rest,
     system: SystemPart.content(requestSystem),
-    messages: [...(messages?.map(message) ?? []), ...(prompt === undefined ? [] : [user(prompt)])],
-    tools: tools?.map(toolDefinition) ?? [],
-    toolChoice: requestToolChoice ? toolChoice(requestToolChoice) : undefined,
-    generation: requestGeneration === undefined ? undefined : generation(requestGeneration),
+    messages: [...(messages?.map(Message.make) ?? []), ...(prompt === undefined ? [] : [Message.user(prompt)])],
+    tools: tools?.map(ToolDefinition.make) ?? [],
+    toolChoice: requestToolChoice ? ToolChoice.make(requestToolChoice) : undefined,
+    generation: requestGeneration === undefined ? undefined : GenerationOptions.make(requestGeneration),
     providerOptions: requestProviderOptions,
     http: requestHttp === undefined ? undefined : HttpOptions.make(requestHttp),
   })
diff --git a/packages/llm/test/cache-policy.test.ts b/packages/llm/test/cache-policy.test.ts
index e742ca5e69..bb65a56360 100644
--- a/packages/llm/test/cache-policy.test.ts
+++ b/packages/llm/test/cache-policy.test.ts
@@ -1,6 +1,6 @@
 import { describe, expect, test } from "bun:test"
 import { Effect } from "effect"
-import { CacheHint, LLM } from "../src"
+import { CacheHint, LLM, Message } from "../src"
 import { LLMClient } from "../src/route"
 import * as AnthropicMessages from "../src/protocols/anthropic-messages"
 import * as BedrockConverse from "../src/protocols/bedrock-converse"
@@ -59,7 +59,7 @@ describe("applyCachePolicy", () => {
          model: anthropicModel,
          system: "Sys A",
          tools: [{ name: "t1", description: "t1", inputSchema: { type: "object", properties: {} } }],
-          messages: [LLM.user("first user"), LLM.assistant("assistant reply"), LLM.user("latest user message")],
+          messages: [Message.user("first user"), Message.assistant("assistant reply"), Message.user("latest user message")],
          cache: "auto",
        }),
      )
@@ -122,7 +122,7 @@ describe("applyCachePolicy", () => {
          model: bedrockModel,
          system: "Sys",
          tools: [{ name: "t1", description: "t1", inputSchema: { type: "object", properties: {} } }],
-          messages: [LLM.user("first user"), LLM.assistant("reply"), LLM.user("latest user")],
+          messages: [Message.user("first user"), Message.assistant("reply"), Message.user("latest user")],
          cache: "auto",
        }),
      )
@@ -221,7 +221,7 @@ describe("applyCachePolicy", () => {
      const prepared = yield* LLMClient.prepare(
        LLM.request({
          model: anthropicModel,
-          messages: [LLM.user("u1"), LLM.assistant("a1"), LLM.user("u2"), LLM.assistant("a2")],
+          messages: [Message.user("u1"), Message.assistant("a1"), Message.user("u2"), Message.assistant("a2")],
          cache: { messages: { tail: 2 } },
        }),
      )
@@ -239,7 +239,7 @@ describe("applyCachePolicy", () => {
      const prepared = yield* LLMClient.prepare(
        LLM.request({
          model: anthropicModel,
-          messages: [LLM.user("u1"), LLM.assistant("a1"), LLM.user("u2")],
+          messages: [Message.user("u1"), Message.assistant("a1"), Message.user("u2")],
          cache: { messages: "latest-assistant" },
        }),
      )
diff --git a/packages/llm/test/llm.test.ts b/packages/llm/test/llm.test.ts
index e9ef58afa8..c01fe33b29 100644
--- a/packages/llm/test/llm.test.ts
+++ b/packages/llm/test/llm.test.ts
@@ -1,6 +1,6 @@
 import { describe, expect, test } from "bun:test"
 import { LLM, LLMResponse } from "../src"
-import { LLMRequest, Message, ModelRef, ToolChoice, ToolDefinition } from "../src/schema"
+import { LLMRequest, Message, ModelRef, ToolCallPart, ToolChoice, ToolDefinition, ToolResultPart } from "../src/schema"
 
 describe("llm constructors", () => {
   test("builds canonical schema classes from ergonomic input", () => {
@@ -28,7 +28,7 @@
    })
    const updated = LLM.updateRequest(base, {
      generation: { maxTokens: 20 },
-      messages: [...base.messages, LLM.assistant("Hi.")],
+      messages: [...base.messages, Message.assistant("Hi.")],
    })
 
    expect(updated).toBeInstanceOf(LLMRequest)
@@ -70,7 +70,7 @@
      model: LLM.model({ id: "fake-model", provider: "fake", route: "openai-chat", baseURL: "https://fake.local" }),
      prompt: "Say hello.",
    })
-    const updated = LLMRequest.update(base, { messages: [...base.messages, LLM.assistant("Hi.")] })
+    const updated = LLMRequest.update(base, { messages: [...base.messages, Message.assistant("Hi.")] })
Message.assistant("Hi.")] }) expect(updated).toBeInstanceOf(LLMRequest) expect(updated.id).toBe("req_1") @@ -91,18 +91,18 @@ describe("llm constructors", () => { }) test("builds tool choices from names and tools", () => { - const tool = LLM.toolDefinition({ name: "lookup", description: "Lookup data", inputSchema: { type: "object" } }) + const tool = ToolDefinition.make({ name: "lookup", description: "Lookup data", inputSchema: { type: "object" } }) expect(tool).toBeInstanceOf(ToolDefinition) - expect(LLM.toolChoice("lookup")).toEqual(new ToolChoice({ type: "tool", name: "lookup" })) - expect(LLM.toolChoiceName("required")).toEqual(new ToolChoice({ type: "tool", name: "required" })) - expect(LLM.toolChoice(tool)).toEqual(new ToolChoice({ type: "tool", name: "lookup" })) + expect(ToolChoice.make("lookup")).toEqual(new ToolChoice({ type: "tool", name: "lookup" })) + expect(ToolChoice.named("required")).toEqual(new ToolChoice({ type: "tool", name: "required" })) + expect(ToolChoice.make(tool)).toEqual(new ToolChoice({ type: "tool", name: "lookup" })) }) test("builds tool choice modes from reserved strings", () => { - expect(LLM.toolChoice("auto")).toEqual(new ToolChoice({ type: "auto" })) - expect(LLM.toolChoice("none")).toEqual(new ToolChoice({ type: "none" })) - expect(LLM.toolChoice("required")).toEqual(new ToolChoice({ type: "required" })) + expect(ToolChoice.make("auto")).toEqual(new ToolChoice({ type: "auto" })) + expect(ToolChoice.make("none")).toEqual(new ToolChoice({ type: "none" })) + expect(ToolChoice.make("required")).toEqual(new ToolChoice({ type: "required" })) expect( LLM.request({ model: LLM.model({ id: "fake-model", provider: "fake", route: "openai-chat", baseURL: "https://fake.local" }), @@ -113,11 +113,11 @@ describe("llm constructors", () => { }) test("builds assistant tool calls and tool result messages", () => { - const call = LLM.toolCall({ id: "call_1", name: "lookup", input: { query: "weather" } }) - const result = LLM.toolResult({ id: "call_1", name: "lookup", result: { temperature: 72 } }) + const call = ToolCallPart.make({ id: "call_1", name: "lookup", input: { query: "weather" } }) + const result = ToolResultPart.make({ id: "call_1", name: "lookup", result: { temperature: 72 } }) - expect(LLM.assistant([call]).content).toEqual([call]) - expect(LLM.toolMessage(result).content).toEqual([ + expect(Message.assistant([call]).content).toEqual([call]) + expect(Message.tool(result).content).toEqual([ { type: "tool-result", id: "call_1", name: "lookup", result: { type: "json", value: { temperature: 72 } } }, ]) }) diff --git a/packages/llm/test/provider/anthropic-messages.recorded.test.ts b/packages/llm/test/provider/anthropic-messages.recorded.test.ts index aa5b258d3d..5fefae51d4 100644 --- a/packages/llm/test/provider/anthropic-messages.recorded.test.ts +++ b/packages/llm/test/provider/anthropic-messages.recorded.test.ts @@ -1,7 +1,7 @@ import { Redactor } from "@opencode-ai/http-recorder" import { describe, expect } from "bun:test" import { Effect } from "effect" -import { LLM, LLMError } from "../../src" +import { LLM, LLMError, Message, ToolCallPart } from "../../src" import { LLMClient } from "../../src/route" import * as AnthropicMessages from "../../src/protocols/anthropic-messages" import { weatherToolName } from "../recorded-scenarios" @@ -16,12 +16,12 @@ const malformedToolOrderRequest = LLM.request({ id: "recorded_anthropic_malformed_tool_order", model, messages: [ - LLM.assistant([ - LLM.toolCall({ id: "call_1", name: weatherToolName, input: { city: "Paris" } 
+    Message.assistant([
+      ToolCallPart.make({ id: "call_1", name: weatherToolName, input: { city: "Paris" } }),
      { type: "text", text: "I will check the weather." },
    ]),
-    LLM.toolMessage({ id: "call_1", name: weatherToolName, result: { temperature: "72F" } }),
-    LLM.user("Use that result to answer briefly."),
+    Message.tool({ id: "call_1", name: weatherToolName, result: { temperature: "72F" } }),
+    Message.user("Use that result to answer briefly."),
  ],
  tools: [{ name: weatherToolName, description: "Get weather", inputSchema: { type: "object", properties: {} } }],
 })
diff --git a/packages/llm/test/provider/anthropic-messages.test.ts b/packages/llm/test/provider/anthropic-messages.test.ts
index a867d16591..0df3541d58 100644
--- a/packages/llm/test/provider/anthropic-messages.test.ts
+++ b/packages/llm/test/provider/anthropic-messages.test.ts
@@ -1,6 +1,6 @@
 import { describe, expect } from "bun:test"
 import { Effect } from "effect"
-import { CacheHint, LLM, LLMError, Usage } from "../../src"
+import { CacheHint, LLM, LLMError, Message, ToolCallPart, Usage } from "../../src"
 import { LLMClient } from "../../src/route"
 import * as AnthropicMessages from "../../src/protocols/anthropic-messages"
 import { it } from "../lib/effect"
@@ -47,9 +47,9 @@ describe("Anthropic Messages route", () => {
        id: "req_tool_result",
        model,
        messages: [
-          LLM.user("What is the weather?"),
-          LLM.assistant([LLM.toolCall({ id: "call_1", name: "lookup", input: { query: "weather" } })]),
-          LLM.toolMessage({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }),
+          Message.user("What is the weather?"),
+          Message.assistant([ToolCallPart.make({ id: "call_1", name: "lookup", input: { query: "weather" } })]),
+          Message.tool({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }),
        ],
        cache: "none",
      }),
@@ -77,7 +77,7 @@
      LLM.request({
        model,
        messages: [
-          LLM.assistant([
+          Message.assistant([
            { type: "reasoning", text: "thinking", providerMetadata: { anthropic: { signature: "sig_1" } } },
          ]),
        ],
@@ -304,8 +304,8 @@
        id: "req_round_trip",
        model,
        messages: [
-          LLM.user("Search for something."),
-          LLM.assistant([
+          Message.user("Search for something."),
+          Message.assistant([
            {
              type: "tool-call",
              id: "srvtoolu_abc",
              name: "web_search",
              input: { query: "anthropic" },
              providerExecuted: true,
            },
            {
              type: "tool-result",
              id: "srvtoolu_abc",
              name: "web_search",
              result: { type: "json", value: [] },
              providerExecuted: true,
            },
            { type: "text", text: "Found it." },
          ]),
-          LLM.user("Thanks."),
+          Message.user("Thanks."),
        ],
      }),
    )
@@ -355,7 +355,7 @@
        id: "req_unknown_server_tool",
        model,
        messages: [
-          LLM.assistant([
+          Message.assistant([
            {
              type: "tool-result",
              id: "srvtoolu_abc",
@@ -378,7 +378,7 @@
      LLM.request({
        id: "req_media",
        model,
-        messages: [LLM.user({ type: "media", mediaType: "image/png", data: "AAECAw==" })],
+        messages: [Message.user({ type: "media", mediaType: "image/png", data: "AAECAw==" })],
      }),
    ).pipe(Effect.flip)
 
@@ -416,9 +416,9 @@
          },
        ],
        messages: [
-          LLM.user("What's the weather?"),
-          LLM.assistant([LLM.toolCall({ id: "call_1", name: "lookup", input: {} })]),
-          LLM.toolMessage({
+          Message.user("What's the weather?"),
+          Message.assistant([ToolCallPart.make({ id: "call_1", name: "lookup", input: {} })]),
+          Message.tool({
            id: "call_1",
            name: "lookup",
            result: { temp: 72 },
@@ -501,7 +501,7 @@
          },
        ],
        system: [{ type: "text", text: "system-tail", cache: hint }],
-        messages: [LLM.user([{ type: "text", text: "message-tail", cache: hint }])],
+        messages: [Message.user([{ type: "text", text: "message-tail", cache: hint }])],
      }),
    )
diff --git a/packages/llm/test/provider/bedrock-converse.test.ts b/packages/llm/test/provider/bedrock-converse.test.ts
index 208b565272..7d1ad3f309 100644
--- a/packages/llm/test/provider/bedrock-converse.test.ts
+++ b/packages/llm/test/provider/bedrock-converse.test.ts
@@ -2,7 +2,7 @@ import { EventStreamCodec } from "@smithy/eventstream-codec"
 import { fromUtf8, toUtf8 } from "@smithy/util-utf8"
 import { describe, expect } from "bun:test"
 import { Effect } from "effect"
-import { CacheHint, LLM } from "../../src"
+import { CacheHint, LLM, Message, ToolCallPart, ToolChoice } from "../../src"
 import { LLMClient } from "../../src/route"
 import * as BedrockConverse from "../../src/protocols/bedrock-converse"
 import { it } from "../lib/effect"
@@ -94,7 +94,7 @@ describe("Bedrock Converse route", () => {
            inputSchema: { type: "object", properties: { query: { type: "string" } }, required: ["query"] },
          },
        ],
-        toolChoice: LLM.toolChoice({ type: "required" }),
+        toolChoice: ToolChoice.make({ type: "required" }),
      }),
    )
@@ -124,9 +124,9 @@
        id: "req_history",
        model,
        messages: [
-          LLM.user("What is the weather?"),
-          LLM.assistant([LLM.toolCall({ id: "tool_1", name: "lookup", input: { query: "weather" } })]),
-          LLM.toolMessage({ id: "tool_1", name: "lookup", result: { forecast: "sunny" } }),
+          Message.user("What is the weather?"),
+          Message.assistant([ToolCallPart.make({ id: "tool_1", name: "lookup", input: { query: "weather" } })]),
+          Message.tool({ id: "tool_1", name: "lookup", result: { forecast: "sunny" } }),
        ],
        cache: "none",
      }),
@@ -294,8 +294,8 @@
        model,
        system: [{ type: "text", text: "System prefix.", cache }],
        messages: [
-          LLM.user([{ type: "text", text: "User prefix.", cache }]),
-          LLM.assistant([{ type: "text", text: "Assistant prefix.", cache }]),
+          Message.user([{ type: "text", text: "User prefix.", cache }]),
+          Message.assistant([{ type: "text", text: "Assistant prefix.", cache }]),
        ],
        generation: { maxTokens: 16, temperature: 0 },
      }),
@@ -335,7 +335,7 @@
        id: "req_image",
        model,
        messages: [
-          LLM.user([
+          Message.user([
            { type: "text", text: "What is in this image?" },
            { type: "media", mediaType: "image/png", data: "AAAA" },
            { type: "media", mediaType: "image/jpeg", data: "BBBB" },
@@ -371,7 +371,7 @@
      LLM.request({
        id: "req_image_bytes",
        model,
-        messages: [LLM.user([{ type: "media", mediaType: "image/png", data: new Uint8Array([1, 2, 3, 4, 5]) }])],
+        messages: [Message.user([{ type: "media", mediaType: "image/png", data: new Uint8Array([1, 2, 3, 4, 5]) }])],
      }),
    )
@@ -394,7 +394,7 @@
        id: "req_doc",
        model,
        messages: [
-          LLM.user([
+          Message.user([
            { type: "media", mediaType: "application/pdf", data: "PDFDATA", filename: "report.pdf" },
            { type: "media", mediaType: "text/csv", data: "CSVDATA" },
          ]),
@@ -424,7 +424,7 @@
      LLM.request({
        id: "req_bad_image",
        model,
-        messages: [LLM.user([{ type: "media", mediaType: "image/svg+xml", data: "x" }])],
+        messages: [Message.user([{ type: "media", mediaType: "image/svg+xml", data: "x" }])],
      }),
    ).pipe(Effect.flip)
@@ -438,7 +438,7 @@
      LLM.request({
        id: "req_bad_doc",
        model,
-        messages: [LLM.user([{ type: "media", mediaType: "application/x-tar", data: "x", filename: "a.tar" }])],
+        messages: [Message.user([{ type: "media", mediaType: "application/x-tar", data: "x", filename: "a.tar" }])],
      }),
    ).pipe(Effect.flip)
@@ -471,9 +471,9 @@
        model,
        tools: [{ name: "lookup", description: "lookup", inputSchema: { type: "object", properties: {} }, cache }],
        messages: [
-          LLM.user("What's the weather?"),
-          LLM.assistant([LLM.toolCall({ id: "call_1", name: "lookup", input: {} })]),
-          LLM.toolMessage({ id: "call_1", name: "lookup", result: { temp: 72 }, cache }),
+          Message.user("What's the weather?"),
+          Message.assistant([ToolCallPart.make({ id: "call_1", name: "lookup", input: {} })]),
+          Message.tool({ id: "call_1", name: "lookup", result: { temp: 72 }, cache }),
        ],
        cache: "none",
      }),
@@ -583,7 +583,7 @@ describe("Bedrock Converse recorded", () => {
        system: "Call tools exactly as requested.",
        prompt: "Call get_weather with city exactly Paris.",
        tools: [weatherTool],
-        toolChoice: LLM.toolChoice(weatherTool),
+        toolChoice: ToolChoice.make(weatherTool),
        cache: "none",
        generation: { maxTokens: 80, temperature: 0 },
      }),
diff --git a/packages/llm/test/provider/gemini.test.ts b/packages/llm/test/provider/gemini.test.ts
index e0b3864a26..ea4eadc498 100644
--- a/packages/llm/test/provider/gemini.test.ts
+++ b/packages/llm/test/provider/gemini.test.ts
@@ -1,6 +1,6 @@
 import { describe, expect } from "bun:test"
 import { Effect } from "effect"
-import { LLM, LLMError, Usage } from "../../src"
+import { LLM, LLMError, Message, ToolCallPart, Usage } from "../../src"
 import { LLMClient } from "../../src/route"
 import * as Gemini from "../../src/protocols/gemini"
 import { it } from "../lib/effect"
@@ -49,12 +49,12 @@ describe("Gemini route", () => {
        ],
        toolChoice: { type: "tool", name: "lookup" },
        messages: [
-          LLM.user([
+          Message.user([
            { type: "text", text: "What is in this image?" },
            { type: "media", mediaType: "image/png", data: "AAECAw==" },
          ]),
-          LLM.assistant([LLM.toolCall({ id: "call_1", name: "lookup", input: { query: "weather" } })]),
-          LLM.toolMessage({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }),
+          Message.assistant([ToolCallPart.make({ id: "call_1", name: "lookup", input: { query: "weather" } })]),
+          Message.tool({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }),
        ],
      }),
    )
@@ -353,7 +353,7 @@
      LLM.request({
        id: "req_media",
        model,
-        messages: [LLM.assistant({ type: "media", mediaType: "image/png", data: "AAECAw==" })],
+        messages: [Message.assistant({ type: "media", mediaType: "image/png", data: "AAECAw==" })],
      }),
    ).pipe(Effect.flip)
diff --git a/packages/llm/test/provider/openai-chat.test.ts b/packages/llm/test/provider/openai-chat.test.ts
index 2c692dcd7d..9c81422639 100644
--- a/packages/llm/test/provider/openai-chat.test.ts
+++ b/packages/llm/test/provider/openai-chat.test.ts
@@ -1,7 +1,7 @@
 import { describe, expect } from "bun:test"
 import { Effect, Schema, Stream } from "effect"
 import { HttpClientRequest } from "effect/unstable/http"
-import { LLM, LLMError, Usage } from "../../src"
+import { LLM, LLMError, Message, ToolCallPart, Usage } from "../../src"
 import * as Azure from "../../src/providers/azure"
 import * as OpenAI from "../../src/providers/openai"
 import * as OpenAIChat from "../../src/protocols/openai-chat"
@@ -149,9 +149,9 @@ describe("OpenAI Chat route", () => {
        id: "req_tool_result",
        model,
        messages: [
-          LLM.user("What is the weather?"),
-          LLM.assistant([LLM.toolCall({ id: "call_1", name: "lookup", input: { query: "weather" } })]),
-          LLM.toolMessage({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }),
+          Message.user("What is the weather?"),
+          Message.assistant([ToolCallPart.make({ id: "call_1", name: "lookup", input: { query: "weather" } })]),
+          Message.tool({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }),
        ],
      }),
    )
@@ -185,7 +185,7 @@
      LLM.request({
        id: "req_media",
        model,
-        messages: [LLM.user({ type: "media", mediaType: "image/png", data: "AAECAw==" })],
+        messages: [Message.user({ type: "media", mediaType: "image/png", data: "AAECAw==" })],
      }),
    ).pipe(Effect.flip)
@@ -199,7 +199,7 @@
      LLM.request({
        id: "req_reasoning",
        model,
-        messages: [LLM.assistant({ type: "reasoning", text: "hidden" })],
+        messages: [Message.assistant({ type: "reasoning", text: "hidden" })],
      }),
    ).pipe(Effect.flip)
diff --git a/packages/llm/test/provider/openai-compatible-chat.test.ts b/packages/llm/test/provider/openai-compatible-chat.test.ts
index 627e6ef4a0..7759ff7202 100644
--- a/packages/llm/test/provider/openai-compatible-chat.test.ts
+++ b/packages/llm/test/provider/openai-compatible-chat.test.ts
@@ -1,7 +1,7 @@
 import { describe, expect } from "bun:test"
 import { Effect, Schema } from "effect"
 import { HttpClientRequest } from "effect/unstable/http"
-import { LLM } from "../../src"
+import { LLM, Message, ToolCallPart } from "../../src"
 import { LLMClient } from "../../src/route"
 import * as OpenAICompatible from "../../src/providers/openai-compatible"
 import * as OpenAICompatibleChat from "../../src/protocols/openai-compatible-chat"
@@ -157,9 +157,9 @@ describe("OpenAI-compatible Chat route", () => {
        ],
        toolChoice: "lookup",
        messages: [
-          LLM.user("What is the weather?"),
-          LLM.assistant([LLM.toolCall({ id: "call_1", name: "lookup", input: { query: "weather" } })]),
LLM.toolMessage({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }), + Message.user("What is the weather?"), + Message.assistant([ToolCallPart.make({ id: "call_1", name: "lookup", input: { query: "weather" } })]), + Message.tool({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }), ], }), ) diff --git a/packages/llm/test/provider/openai-responses.test.ts b/packages/llm/test/provider/openai-responses.test.ts index 2319857ed1..da9dbd82c2 100644 --- a/packages/llm/test/provider/openai-responses.test.ts +++ b/packages/llm/test/provider/openai-responses.test.ts @@ -1,7 +1,7 @@ import { describe, expect } from "bun:test" import { ConfigProvider, Effect, Layer, Stream } from "effect" import { Headers, HttpClientRequest } from "effect/unstable/http" -import { LLM, LLMError, Usage } from "../../src" +import { LLM, LLMError, Message, ToolCallPart, Usage } from "../../src" import { Auth, LLMClient, RequestExecutor, WebSocketExecutor } from "../../src/route" import * as Azure from "../../src/providers/azure" import * as OpenAI from "../../src/providers/openai" @@ -251,9 +251,9 @@ describe("OpenAI Responses route", () => { id: "req_tool_result", model, messages: [ - LLM.user("What is the weather?"), - LLM.assistant([LLM.toolCall({ id: "call_1", name: "lookup", input: { query: "weather" } })]), - LLM.toolMessage({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }), + Message.user("What is the weather?"), + Message.assistant([ToolCallPart.make({ id: "call_1", name: "lookup", input: { query: "weather" } })]), + Message.tool({ id: "call_1", name: "lookup", result: { forecast: "sunny" } }), ], }), ) @@ -508,7 +508,7 @@ describe("OpenAI Responses route", () => { LLM.request({ id: "req_media", model, - messages: [LLM.user({ type: "media", mediaType: "image/png", data: "AAECAw==" })], + messages: [Message.user({ type: "media", mediaType: "image/png", data: "AAECAw==" })], }), ).pipe(Effect.flip) diff --git a/packages/llm/test/recorded-scenarios.ts b/packages/llm/test/recorded-scenarios.ts index 127a444a16..bdba8580fd 100644 --- a/packages/llm/test/recorded-scenarios.ts +++ b/packages/llm/test/recorded-scenarios.ts @@ -1,6 +1,6 @@ import { expect } from "bun:test" import { Effect, Schema, Stream } from "effect" -import { LLM, LLMEvent, LLMResponse, type LLMRequest, type ModelRef } from "../src" +import { LLM, LLMEvent, LLMResponse, ToolChoice, ToolDefinition, type LLMRequest, type ModelRef } from "../src" import { LLMClient } from "../src/route" import { tool } from "../src/tool" @@ -18,7 +18,7 @@ export const LARGE_CACHEABLE_SYSTEM = (() => { return sentence.repeat(250) })() -export const weatherTool = LLM.toolDefinition({ +export const weatherTool = ToolDefinition.make({ name: weatherToolName, description: "Get current weather for a city.", inputSchema: { @@ -70,7 +70,7 @@ export const weatherToolRequest = (input: { system: "Call tools exactly as requested.", prompt: "Call get_weather with city exactly Paris.", tools: [weatherTool], - toolChoice: LLM.toolChoice(weatherTool), + toolChoice: ToolChoice.make(weatherTool), cache: "none", generation: input.temperature === false diff --git a/packages/llm/test/tool-runtime.test.ts b/packages/llm/test/tool-runtime.test.ts index 7251dee8af..8f4221784d 100644 --- a/packages/llm/test/tool-runtime.test.ts +++ b/packages/llm/test/tool-runtime.test.ts @@ -1,6 +1,6 @@ import { describe, expect } from "bun:test" import { Effect, Schema, Stream } from "effect" -import { LLM, LLMEvent, LLMRequest, LLMResponse } from "../src" +import { 
 import { LLMClient } from "../src/route"
 import * as AnthropicMessages from "../src/protocols/anthropic-messages"
 import * as OpenAIChat from "../src/protocols/openai-chat"
@@ -78,8 +78,8 @@ describe("LLMClient tools", () => {
      yield* TestToolRuntime.runTools({
        request: LLMRequest.update(baseRequest, {
-          generation: LLM.generation({ maxTokens: 50 }),
-          toolChoice: LLM.toolChoice("auto"),
+          generation: GenerationOptions.make({ maxTokens: 50 }),
+          toolChoice: ToolChoice.make("auto"),
        }),
        tools: { get_weather },
      }).pipe(Stream.runCollect, Effect.provide(layer))