diff --git a/packages/opencode/test/session/llm.test.ts b/packages/opencode/test/session/llm.test.ts
index 8bf710a9c3..1f7e17e1bd 100644
--- a/packages/opencode/test/session/llm.test.ts
+++ b/packages/opencode/test/session/llm.test.ts
@@ -197,12 +197,6 @@ async function loadFixture(providerID: string, modelID: string) {
   return { provider, model }
 }
 
-async function writeModels(models: Record) {
-  const modelsPath = path.join(Global.Path.cache, "models.json")
-  await Bun.write(modelsPath, JSON.stringify(models))
-  ModelsDev.Data.reset()
-}
-
 function createEventStream(chunks: unknown[], includeDone = false) {
   const lines = chunks.map((chunk) => `data: ${typeof chunk === "string" ? chunk : JSON.stringify(chunk)}`)
   if (includeDone) {
@@ -246,8 +240,6 @@ describe("session.llm.stream", () => {
       }),
     )
 
-    await writeModels({ [providerID]: provider })
-
     await using tmp = await tmpdir({
       init: async (dir) => {
         await Bun.write(
@@ -342,7 +334,7 @@ describe("session.llm.stream", () => {
       throw new Error("Server not initialized")
     }
 
-    const source = await loadFixture("github-copilot", "gpt-5.1")
+    const source = await loadFixture("openai", "gpt-5.2")
     const model = source.model
 
     const responseChunks = [
@@ -377,8 +369,6 @@ describe("session.llm.stream", () => {
     ]
     const request = waitRequest("/responses", createEventResponse(responseChunks, true))
 
-    await writeModels({})
-
     await using tmp = await tmpdir({
       init: async (dir) => {
         await Bun.write(
@@ -513,8 +503,6 @@ describe("session.llm.stream", () => {
     ]
     const request = waitRequest("/messages", createEventResponse(chunks))
 
-    await writeModels({ [providerID]: provider })
-
     await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
@@ -623,8 +611,6 @@ describe("session.llm.stream", () => {
     ]
     const request = waitRequest(pathSuffix, createEventResponse(chunks))
 
-    await writeModels({ [providerID]: provider })
-
     await using tmp = await tmpdir({
       init: async (dir) => {
         await Bun.write(