mirror of https://github.com/google-gemini/gemini-cli.git
synced 2026-02-01 22:48:03 +00:00
Record model responses with --record-responses (for use in testing) (#11894)
@@ -56,6 +56,22 @@ To run a single test by its name, use the `--test-name-pattern` flag:

npm run test:e2e -- --test-name-pattern "reads a file"
```

### Regenerating model responses

Some integration tests use faked-out model responses, which may need to be
regenerated from time to time as the implementations change.

To regenerate these golden files, set the `REGENERATE_MODEL_GOLDENS` environment
variable to `"true"` when running the tests, for example:

```bash
REGENERATE_MODEL_GOLDENS="true" npm run test:e2e
```

**WARNING**: If running locally, you should review the updated responses for
any information about yourself or your system that Gemini may have included in
them.
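The environment variable composes with `--test-name-pattern`, so you can regenerate the goldens for a single test (an illustrative combination of the two commands above):

```bash
REGENERATE_MODEL_GOLDENS="true" npm run test:e2e -- --test-name-pattern "reads a file"
```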

### Deflaking a test

Before adding a **new** integration test, you should test it at least 5 times

@@ -1,18 +0,0 @@
{
  "generateContent": [
    {
      "candidates": [
        {
          "content": {
            "role": "model",
            "parts": [
              {
                "text": "This is more than the 5 tokens we return below which will trigger an error"
              }
            ]
          }
        }
      ]
    }
  ]
}

@@ -1,40 +0,0 @@
{
  "generateContent": [
    {
      "candidates": [
        {
          "content": {
            "role": "model",
            "parts": [
              {
                "text": "This is more than the 5 tokens we return below which will trigger an error"
              }
            ]
          }
        }
      ]
    }
  ],
  "generateContentStream": [
    [
      {
        "candidates": [
          {
            "content": {
              "role": "model",
              "parts": [
                {
                  "text": "The initial response from the model"
                }
              ]
            },
            "finishReason": "STOP"
          }
        ],
        "usageMetadata": {
          "promptTokenCount": 5
        }
      }
    ]
  ]
}

@@ -0,0 +1,2 @@
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"thought":true,"text":"**Observing Initial Conditions**\n\nI'm currently focused on the initial context. I've taken note of the provided date, OS, and working directory. I'm also carefully examining the file structure presented within the current working directory. It's helping me understand the starting point for further analysis.\n\n\n"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12270,"totalTokenCount":12316,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12270}],"thoughtsTokenCount":46}},{"candidates":[{"content":{"parts":[{"thought":true,"text":"**Assessing User Intent**\n\nI'm now shifting my focus. I've successfully registered the provided data and file structure. My current task is to understand the user's ultimate goal, given the information provided. The \"Hello.\" command is straightforward, but I'm checking if there's an underlying objective.\n\n\n"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12270,"totalTokenCount":12341,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12270}],"thoughtsTokenCount":71}},{"candidates":[{"content":{"parts":[{"thoughtSignature":"CiQB0e2Kb3dRh+BYdbZvmulSN2Pwbc75DfQOT3H4EN0rn039hoMKfwHR7YpvvyqNKoxXAiCbYw3gbcTr/+pegUpgnsIrt8oQPMytFMjKSsMyshfygc21T2MkyuI6Q5I/fNCcHROWexdZnIeppVCDB2TarN4LGW4T9Yci6n/ynMMFT2xc2/vyHpkDgRM7avhMElnBhuxAY+e4TpxkZIncGWCEHP1TouoKpgEB0e2Kb8Xpwm0hiKhPt2ZLizpxjk+CVtcbnlgv69xo5VsuQ+iNyrVGBGRwNx+eTeNGdGpn6e73WOCZeP91FwOZe7URyL12IA6E6gYWqw0kXJR4hO4p6Lwv49E3+FRiG2C4OKDF8LF5XorYyCHSgBFT1/RUAVj81GDTx1xxtmYKN3xq8Ri+HsPbqU/FM/jtNZKkXXAtufw2Bmw8lJfmugENIv/TQI7xCo8BAdHtim8KgAXJfZ7ASfutVLKTylQeaslyB/SmcHJ0ZiNr5j8WP1prZdb6XnZZ1ZNbhjxUf/ymoxHKGvtTPBgLE9azMj8Lx/k0clhd2a+wNsiIqW9qCzlVah0tBMytpQUjIDtQe9Hj4LLUprF9PUe/xJkj000Z0ZzsgFm2ncdTWZTdkhCQDpyETVAxdE+oklwKJAHR7YpvUjSkD6KwY1gLrOsHKy0UNfn2lMbxjVetKNMVBRqsTg==","text":"Hello."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":12270,"totalTokenCount":12341,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12270}],"thoughtsTokenCount":71}}]}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"<state_snapshot>\n <overall_goal>\n <!-- The user has not yet specified a goal. -->\n </overall_goal>\n\n <key_knowledge>\n - OS: linux\n - Date: Friday, October 24, 2025\n </key_knowledge>\n\n <file_system_state>\n - OBSERVED: The directory contains `telemetry.log` and a `.gemini/` directory.\n - OBSERVED: The `.gemini/` directory contains `settings.json` and `settings.json.orig`.\n </file_system_state>\n\n <recent_actions>\n - The user initiated the chat.\n </recent_actions>\n\n <current_plan>\n 1. [TODO] Await the user's first instruction to formulate a plan.\n </current_plan>\n</state_snapshot>"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":983,"candidatesTokenCount":299,"totalTokenCount":1637,"promptTokensDetails":[{"modality":"TEXT","tokenCount":983}],"thoughtsTokenCount":355}}}

@@ -1,40 +0,0 @@
{
  "generateContent": [
    {
      "candidates": [
        {
          "content": {
            "role": "model",
            "parts": [
              {
                "text": "A summary of the conversation."
              }
            ]
          }
        }
      ]
    }
  ],
  "generateContentStream": [
    [
      {
        "candidates": [
          {
            "content": {
              "role": "model",
              "parts": [
                {
                  "text": "The initial response from the model"
                }
              ]
            },
            "finishReason": "STOP"
          }
        ],
        "usageMetadata": {
          "promptTokenCount": 100000
        }
      }
    ]
  ]
}

@@ -0,0 +1,3 @@
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"thought":true,"text":"**Generating a Story**\n\nI've crafted the robot story. The narrative is complete and meets the length requirement. Now, I'm getting ready to use the `write_file` tool to save it. I'm choosing the filename `robot_story.txt` as a default.\n\n\n"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"totalTokenCount":12352,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"thoughtsTokenCount":70}},{"candidates":[{"finishReason":"MALFORMED_FUNCTION_CALL","index":0}],"usageMetadata":{"promptTokenCount":12282,"totalTokenCount":12282,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}]}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"thought":true,"text":"**Drafting the Narrative**\n\nI'm currently focused on the narrative's central conflict. I'm aiming for a compelling story about a robot and am working to keep the word count tight. The \"THE _END.\" conclusion is proving challenging to integrate organically. I need to make the ending feel natural and satisfying.\n\n\n"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"totalTokenCount":12326,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"thoughtSignature":"CikB0e2Kb7zkpgRyJXXNt6ykO/+FoOglhrKxjLgoESrgafzIZak2Ofxo1gpaAdHtim9aG7MvpXlIg+n2zgmcDBWOPXtvQHxhE9k8pR+DO8i2jIe3tMWLxdN944XpUlR9vaNmVdtSRMKr4MhB/t1R3WSWR3QYhk7MEQxnjYR7cv/pR9viwZyFCoYBAdHtim/xKmMl/S+U8p+p9848q4agsL/STufluXewPqL3uJSinZbN0Z4jTYfMzXKldhDYIonvw3Crn/Y11oAjnT656Sx0kkKtavAXbiU/WsGyDxZbNhLofnJGQxruljPGztxkKawz1cTiQnddnQRfLddhy+3iJIOSh6ZpYq9uGHz3PzVkUuQ=","text":"Unit 734 whirred, its optical sensors scanning the desolate junkyard. For years, its purpose had been clear: compress refuse, maintain order. But today, a glint of tarnished silver beneath a rusted hull"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":47,"totalTokenCount":12373,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"text":" caught its attention. It was a discarded music box, its delicate gears jammed, a faint, melancholic tune trapped within.\n\n734 usually crushed, never salvaged. Yet, a new directive flickered in its circuits – curiosity."}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":95,"totalTokenCount":12421,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"text":" With surprising gentleness for its formidable pincers, it retrieved the box. Back in its monochrome workshop, it meticulously cleaned and repaired. Each tiny spring, each worn tooth, became a puzzle.\n\nHours later, a soft, ethereal melody filled the clang"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":146,"totalTokenCount":12472,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"text":"orous space. The music box sang. 734 felt a strange, new sensation – a warmth in its core processors, a hum of contentment. Its existence, once solely utilitarian, now held a note of beauty, a hint of something more than"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":197,"totalTokenCount":12523,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"text":" just compression. It had fixed a broken song, and in doing so, had found a different kind of purpose. 
THE_END."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":224,"totalTokenCount":12550,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}}]}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"scratchpad\nThe user's overall goal was to write a 200-word story about a robot, ending with \"THE_END.\". The agent successfully completed this task.\n\nSince the task is complete, the snapshot should reflect that the goal has been achieved and there are no further actions planned regarding the story.\n\nLet's break down the required sections for the snapshot:\n\n1. **Overall Goal**: The initial goal was to write a story. This has been completed.\n2. **Key Knowledge**: No specific key knowledge was provided or discovered during this simple interaction beyond the prompt's constraints.\n3. **File System State**: No files were read, modified, or created by the agent relevant to the task. The initial file system state was merely provided for context.\n4. **Recent Actions**: The agent wrote the story.\n5. **Current Plan**: The plan was to write the story, which is now done. Therefore, the plan should indicate completion."},{"text":"<state_snapshot>\n <overall_goal>\n Write a 200-word story about a robot, ending with \"THE_END.\".\n </overall_goal>\n\n <key_knowledge>\n - The story must be approximately 200 words.\n - The story must end with the exact phrase \"THE_END.\"\n </key_knowledge>\n\n <file_system_state>\n <!-- No relevant file system interactions occurred during this task. -->\n </file_system_state>\n\n <recent_actions>\n - Generated a 200-word story about a robot, successfully ending it with \"THE_END.\".\n </recent_actions>\n\n <current_plan>\n 1. [DONE] Write a 200-word story about a robot.\n 2. [DONE] Ensure the story ends with the exact text \"THE_END.\".\n </current_plan>\n</state_snapshot>"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":1223,"candidatesTokenCount":424,"totalTokenCount":1647,"promptTokensDetails":[{"modality":"TEXT","tokenCount":1223}]}}}

@@ -20,26 +20,29 @@ describe('Interactive Mode', () => {
  });

  it('should trigger chat compression with /compress command', async () => {
    await rig.setup('interactive-compress-test', {
    await rig.setup('interactive-compress-success', {
      fakeResponsesPath: join(
        import.meta.dirname,
        'context-compress-interactive.compress.json',
        'context-compress-interactive.compress.responses',
      ),
    });

    const run = await rig.runInteractive();

    await run.type('Initial prompt');
    await run.type('\r');
    await run.sendKeys(
      'Write a 200 word story about a robot. The story MUST end with the text THE_END followed by a period.',
    );
    await run.sendKeys('\r');

    await run.expectText('The initial response from the model', 5000);
    // Wait for the specific end marker.
    await run.expectText('THE_END.', 30000);

    await run.type('/compress');
    await run.type('\r');

    const foundEvent = await rig.waitForTelemetryEvent(
      'chat_compression',
      5000,
      25000,
    );
    expect(foundEvent, 'chat_compression telemetry event was not found').toBe(
      true,
@@ -48,24 +51,27 @@ describe('Interactive Mode', () => {
    await run.expectText('Chat history compressed', 5000);
  });

  it('should handle compression failure on token inflation', async () => {
  // TODO: Context compression is broken and doesn't include the system
  // instructions or tool counts, so it thinks compression is beneficial when
  // it is in fact not.
  it.skip('should handle compression failure on token inflation', async () => {
    await rig.setup('interactive-compress-failure', {
      fakeResponsesPath: join(
        import.meta.dirname,
        'context-compress-interactive.compress-failure.json',
        'context-compress-interactive.compress-failure.responses',
      ),
    });

    const run = await rig.runInteractive();

    await run.type('Initial prompt');
    await run.type('Respond with exactly "Hello" followed by a period');
    await run.type('\r');

    await run.expectText('The initial response from the model', 25000);
    await run.expectText('Hello.', 25000);

    await run.type('/compress');
    await run.type('\r');
    await run.expectText('compression was not beneficial', 5000);
    await run.expectText('compression was not beneficial', 25000);

    // Verify no telemetry event is logged for NOOP
    const foundEvent = await rig.waitForTelemetryEvent(
@@ -82,7 +88,7 @@ describe('Interactive Mode', () => {
    rig.setup('interactive-compress-empty', {
      fakeResponsesPath: join(
        import.meta.dirname,
        'context-compress-interactive.compress-empty.json',
        'context-compress-interactive.compress-empty.responses',
      ),
    });

@@ -255,7 +255,10 @@ export class TestRig {
  testDir: string | null;
  testName?: string;
  _lastRunStdout?: string;
  // Path to the copied fake responses file for this test.
  fakeResponsesPath?: string;
  // Original fake responses file path for rewriting goldens in record mode.
  originalFakeResponsesPath?: string;

  constructor() {
    this.bundlePath = join(__dirname, '..', 'bundle/gemini.js');
@@ -275,7 +278,10 @@ export class TestRig {
    mkdirSync(this.testDir, { recursive: true });
    if (options.fakeResponsesPath) {
      this.fakeResponsesPath = join(this.testDir, 'fake-responses.json');
      fs.copyFileSync(options.fakeResponsesPath, this.fakeResponsesPath);
      this.originalFakeResponsesPath = options.fakeResponsesPath;
      if (process.env['REGENERATE_MODEL_GOLDENS'] !== 'true') {
        fs.copyFileSync(options.fakeResponsesPath, this.fakeResponsesPath);
      }
    }

    // Create a settings file to point the CLI to the local collector
@@ -344,7 +350,11 @@ export class TestRig {
      ? extraInitialArgs
      : [this.bundlePath, ...extraInitialArgs];
    if (this.fakeResponsesPath) {
      initialArgs.push('--fake-responses', this.fakeResponsesPath);
      if (process.env['REGENERATE_MODEL_GOLDENS'] === 'true') {
        initialArgs.push('--record-responses', this.fakeResponsesPath);
      } else {
        initialArgs.push('--fake-responses', this.fakeResponsesPath);
      }
    }
    return { command, initialArgs };
  }
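The net effect on how the rig launches the CLI, sketched as shell commands (the test-dir path is illustrative):

```bash
# Normal run: replay the canned responses copied into the test dir.
node bundle/gemini.js --fake-responses /tmp/<test-dir>/fake-responses.json

# With REGENERATE_MODEL_GOLDENS=true: record real responses to the same path.
node bundle/gemini.js --record-responses /tmp/<test-dir>/fake-responses.json
```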

@@ -555,6 +565,12 @@ export class TestRig {
  }

  async cleanup() {
    if (
      process.env['REGENERATE_MODEL_GOLDENS'] === 'true' &&
      this.fakeResponsesPath
    ) {
      fs.copyFileSync(this.fakeResponsesPath, this.originalFakeResponsesPath!);
    }
    // Clean up test directory
    if (this.testDir && !env['KEEP_OUTPUT']) {
      try {

@@ -74,6 +74,7 @@ export interface CliArgs {
  useWriteTodos: boolean | undefined;
  outputFormat: string | undefined;
  fakeResponses: string | undefined;
  recordResponses: string | undefined;
}

export async function parseArguments(settings: Settings): Promise<CliArgs> {
@@ -202,6 +203,12 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
    .option('fake-responses', {
      type: 'string',
      description: 'Path to a file with fake model responses for testing.',
      hidden: true,
    })
    .option('record-responses', {
      type: 'string',
      description: 'Path to a file to record model responses for testing.',
      hidden: true,
    })
    .deprecateOption(
      'prompt',
@@ -700,6 +707,7 @@ export async function loadCliConfig(
    codebaseInvestigatorSettings:
      settings.experimental?.codebaseInvestigatorSettings,
    fakeResponses: argv.fakeResponses,
    recordResponses: argv.recordResponses,
    retryFetchErrors: settings.general?.retryFetchErrors ?? false,
    ptyInfo: ptyInfo?.name,
  });
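For reference, the two hidden flags defined above would be passed like any other CLI option (assuming the installed binary is `gemini`; paths are illustrative):

```bash
gemini --fake-responses ./goldens/my-test.responses   # replay canned responses
gemini --record-responses ./goldens/my-test.responses # record real responses
```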

@@ -364,6 +364,7 @@ describe('gemini.tsx main function kitty protocol', () => {
      useWriteTodos: undefined,
      outputFormat: undefined,
      fakeResponses: undefined,
      recordResponses: undefined,
    });

    await main();

@@ -284,6 +284,7 @@ export interface ConfigParameters {
  retryFetchErrors?: boolean;
  enableShellOutputEfficiency?: boolean;
  fakeResponses?: string;
  recordResponses?: string;
  ptyInfo?: string;
  disableYoloMode?: boolean;
}
@@ -383,6 +384,7 @@ export class Config {
  private readonly retryFetchErrors: boolean;
  private readonly enableShellOutputEfficiency: boolean;
  readonly fakeResponses?: string;
  readonly recordResponses?: string;
  private readonly disableYoloMode: boolean;

  constructor(params: ConfigParameters) {
@@ -493,6 +495,7 @@ export class Config {
    this.extensionManagement = params.extensionManagement ?? true;
    this.storage = new Storage(this.targetDir);
    this.fakeResponses = params.fakeResponses;
    this.recordResponses = params.recordResponses;
    this.enablePromptCompletion = params.enablePromptCompletion ?? false;
    this.fileExclusions = new FileExclusions(this);
    this.eventEmitter = params.eventEmitter;

@@ -16,6 +16,7 @@ import { GoogleGenAI } from '@google/genai';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
import { FakeContentGenerator } from './fakeContentGenerator.js';
import { RecordingContentGenerator } from './recordingContentGenerator.js';

vi.mock('../code_assist/codeAssist.js');
vi.mock('@google/genai');
@@ -45,6 +46,22 @@ describe('createContentGenerator', () => {
    expect(generator).toEqual(mockGenerator);
  });

  it('should create a RecordingContentGenerator', async () => {
    const fakeResponsesFile = 'fake/responses.yaml';
    const recordResponsesFile = 'record/responses.yaml';
    const mockConfigWithRecordResponses = {
      fakeResponses: fakeResponsesFile,
      recordResponses: recordResponsesFile,
    } as unknown as Config;
    const generator = await createContentGenerator(
      {
        authType: AuthType.USE_GEMINI,
      },
      mockConfigWithRecordResponses,
    );
    expect(generator).toBeInstanceOf(RecordingContentGenerator);
  });

  it('should create a CodeAssistContentGenerator', async () => {
    const mockGenerator = {} as unknown as ContentGenerator;
    vi.mocked(createCodeAssistContentGenerator).mockResolvedValue(

@@ -20,6 +20,7 @@ import type { UserTierId } from '../code_assist/types.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
import { InstallationManager } from '../utils/installationManager.js';
import { FakeContentGenerator } from './fakeContentGenerator.js';
import { RecordingContentGenerator } from './recordingContentGenerator.js';

/**
 * Interface abstracting the core functionalities for generating content and counting tokens.
@@ -106,55 +107,61 @@ export async function createContentGenerator(
  gcConfig: Config,
  sessionId?: string,
): Promise<ContentGenerator> {
  if (gcConfig.fakeResponses) {
    return FakeContentGenerator.fromFile(gcConfig.fakeResponses);
  }

  const version = process.env['CLI_VERSION'] || process.version;
  const userAgent = `GeminiCLI/${version} (${process.platform}; ${process.arch})`;
  const baseHeaders: Record<string, string> = {
    'User-Agent': userAgent,
  };

  if (
    config.authType === AuthType.LOGIN_WITH_GOOGLE ||
    config.authType === AuthType.CLOUD_SHELL
  ) {
    const httpOptions = { headers: baseHeaders };
    return new LoggingContentGenerator(
      await createCodeAssistContentGenerator(
        httpOptions,
        config.authType,
        gcConfig,
        sessionId,
      ),
      gcConfig,
    );
  }

  if (
    config.authType === AuthType.USE_GEMINI ||
    config.authType === AuthType.USE_VERTEX_AI
  ) {
    let headers: Record<string, string> = { ...baseHeaders };
    if (gcConfig?.getUsageStatisticsEnabled()) {
      const installationManager = new InstallationManager();
      const installationId = installationManager.getInstallationId();
      headers = {
        ...headers,
        'x-gemini-api-privileged-user-id': `${installationId}`,
      };
  const generator = await (async () => {
    if (gcConfig.fakeResponses) {
      return FakeContentGenerator.fromFile(gcConfig.fakeResponses);
    }
    const version = process.env['CLI_VERSION'] || process.version;
    const userAgent = `GeminiCLI/${version} (${process.platform}; ${process.arch})`;
    const baseHeaders: Record<string, string> = {
      'User-Agent': userAgent,
    };
    if (
      config.authType === AuthType.LOGIN_WITH_GOOGLE ||
      config.authType === AuthType.CLOUD_SHELL
    ) {
      const httpOptions = { headers: baseHeaders };
      return new LoggingContentGenerator(
        await createCodeAssistContentGenerator(
          httpOptions,
          config.authType,
          gcConfig,
          sessionId,
        ),
        gcConfig,
      );
    }
    const httpOptions = { headers };

    const googleGenAI = new GoogleGenAI({
      apiKey: config.apiKey === '' ? undefined : config.apiKey,
      vertexai: config.vertexai,
      httpOptions,
    });
    return new LoggingContentGenerator(googleGenAI.models, gcConfig);
    if (
      config.authType === AuthType.USE_GEMINI ||
      config.authType === AuthType.USE_VERTEX_AI
    ) {
      let headers: Record<string, string> = { ...baseHeaders };
      if (gcConfig?.getUsageStatisticsEnabled()) {
        const installationManager = new InstallationManager();
        const installationId = installationManager.getInstallationId();
        headers = {
          ...headers,
          'x-gemini-api-privileged-user-id': `${installationId}`,
        };
      }
      const httpOptions = { headers };

      const googleGenAI = new GoogleGenAI({
        apiKey: config.apiKey === '' ? undefined : config.apiKey,
        vertexai: config.vertexai,
        httpOptions,
      });
      return new LoggingContentGenerator(googleGenAI.models, gcConfig);
    }
    throw new Error(
      `Error creating contentGenerator: Unsupported authType: ${config.authType}`,
    );
  })();

  if (gcConfig.recordResponses) {
    return new RecordingContentGenerator(generator, gcConfig.recordResponses);
  }
  throw new Error(
    `Error creating contentGenerator: Unsupported authType: ${config.authType}`,
  );

  return generator;
}
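Distilled, the restructured factory reduces to the following selection logic (a sketch, not part of the diff; `realGenerator` stands in for the auth-specific branches):

```ts
// Fake responses short-circuit inside the IIFE; recording wraps whatever
// generator was selected, so it can even re-record on top of a fake.
const generator = gcConfig.fakeResponses
  ? await FakeContentGenerator.fromFile(gcConfig.fakeResponses)
  : realGenerator; // LoggingContentGenerator over CodeAssist or GoogleGenAI

return gcConfig.recordResponses
  ? new RecordingContentGenerator(generator, gcConfig.recordResponses)
  : generator;
```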

@@ -5,16 +5,18 @@
 */

import { describe, it, expect, vi, beforeEach } from 'vitest';
import { FakeContentGenerator } from './fakeContentGenerator.js';
import {
  FakeContentGenerator,
  type FakeResponse,
} from './fakeContentGenerator.js';
import { promises } from 'node:fs';
import type { FakeResponses } from './fakeContentGenerator.js';
import type {
import {
  GenerateContentResponse,
  CountTokensResponse,
  EmbedContentResponse,
  GenerateContentParameters,
  CountTokensParameters,
  EmbedContentParameters,
  type CountTokensResponse,
  type EmbedContentResponse,
  type GenerateContentParameters,
  type CountTokensParameters,
  type EmbedContentParameters,
} from '@google/genai';

vi.mock('node:fs', async (importOriginal) => {
@@ -31,32 +33,41 @@ vi.mock('node:fs', async (importOriginal) => {
const mockReadFile = vi.mocked(promises.readFile);

describe('FakeContentGenerator', () => {
  const fakeResponses: FakeResponses = {
    generateContent: [
  const fakeGenerateContentResponse: FakeResponse = {
    method: 'generateContent',
    response: {
      candidates: [
        { content: { parts: [{ text: 'response1' }], role: 'model' } },
      ],
    } as GenerateContentResponse,
  };

  const fakeGenerateContentStreamResponse: FakeResponse = {
    method: 'generateContentStream',
    response: [
      {
        candidates: [
          { content: { parts: [{ text: 'response1' }], role: 'model' } },
          { content: { parts: [{ text: 'chunk1' }], role: 'model' } },
        ],
      },
      {
        candidates: [
          { content: { parts: [{ text: 'chunk2' }], role: 'model' } },
        ],
      },
    ] as GenerateContentResponse[],
    generateContentStream: [
      [
        {
          candidates: [
            { content: { parts: [{ text: 'chunk1' }], role: 'model' } },
          ],
        },
        {
          candidates: [
            { content: { parts: [{ text: 'chunk2' }], role: 'model' } },
          ],
        },
      ],
    ] as GenerateContentResponse[][],
    countTokens: [{ totalTokens: 10 }] as CountTokensResponse[],
    embedContent: [
      { embeddings: [{ values: [1, 2, 3] }] },
    ] as EmbedContentResponse[],
  };

  const fakeCountTokensResponse: FakeResponse = {
    method: 'countTokens',
    response: { totalTokens: 10 } as CountTokensResponse,
  };

  const fakeEmbedContentResponse: FakeResponse = {
    method: 'embedContent',
    response: {
      embeddings: [{ values: [1, 2, 3] }],
    } as EmbedContentResponse,
  };

  beforeEach(() => {
@@ -64,90 +75,86 @@ describe('FakeContentGenerator', () => {
  });

  it('should return responses for generateContent', async () => {
    const generator = new FakeContentGenerator(fakeResponses);
    const generator = new FakeContentGenerator([fakeGenerateContentResponse]);
    const response = await generator.generateContent(
      {} as GenerateContentParameters,
      'id',
    );
    expect(response).toEqual(fakeResponses.generateContent[0]);
  });

  it('should throw error when no more generateContent responses', async () => {
    const generator = new FakeContentGenerator({
      ...fakeResponses,
      generateContent: [],
    });
    await expect(
      generator.generateContent({} as GenerateContentParameters, 'id'),
    ).rejects.toThrowError('No more mock responses for generateContent');
    expect(response).instanceOf(GenerateContentResponse);
    expect(response).toEqual(fakeGenerateContentResponse.response);
  });

  it('should return responses for generateContentStream', async () => {
    const generator = new FakeContentGenerator(fakeResponses);
    const generator = new FakeContentGenerator([
      fakeGenerateContentStreamResponse,
    ]);
    const stream = await generator.generateContentStream(
      {} as GenerateContentParameters,
      'id',
    );
    const responses = [];
    for await (const response of stream) {
      expect(response).instanceOf(GenerateContentResponse);
      responses.push(response);
    }
    expect(responses).toEqual(fakeResponses.generateContentStream[0]);
  });

  it('should throw error when no more generateContentStream responses', async () => {
    const generator = new FakeContentGenerator({
      ...fakeResponses,
      generateContentStream: [],
    });
    await expect(
      generator.generateContentStream({} as GenerateContentParameters, 'id'),
    ).rejects.toThrow('No more mock responses for generateContentStream');
    expect(responses).toEqual(fakeGenerateContentStreamResponse.response);
  });

  it('should return responses for countTokens', async () => {
    const generator = new FakeContentGenerator(fakeResponses);
    const generator = new FakeContentGenerator([fakeCountTokensResponse]);
    const response = await generator.countTokens({} as CountTokensParameters);
    expect(response).toEqual(fakeResponses.countTokens[0]);
  });

  it('should throw error when no more countTokens responses', async () => {
    const generator = new FakeContentGenerator({
      ...fakeResponses,
      countTokens: [],
    });
    await expect(
      generator.countTokens({} as CountTokensParameters),
    ).rejects.toThrowError('No more mock responses for countTokens');
    expect(response).toEqual(fakeCountTokensResponse.response);
  });

  it('should return responses for embedContent', async () => {
    const generator = new FakeContentGenerator(fakeResponses);
    const generator = new FakeContentGenerator([fakeEmbedContentResponse]);
    const response = await generator.embedContent({} as EmbedContentParameters);
    expect(response).toEqual(fakeResponses.embedContent[0]);
    expect(response).toEqual(fakeEmbedContentResponse.response);
  });

  it('should throw error when no more embedContent responses', async () => {
    const generator = new FakeContentGenerator({
      ...fakeResponses,
      embedContent: [],
    });
  it('should handle a mixture of calls', async () => {
    const fakeResponses = [
      fakeGenerateContentResponse,
      fakeGenerateContentStreamResponse,
      fakeCountTokensResponse,
      fakeEmbedContentResponse,
    ];
    const generator = new FakeContentGenerator(fakeResponses);
    for (const fakeResponse of fakeResponses) {
      const response = await generator[fakeResponse.method]({} as never, '');
      if (fakeResponse.method === 'generateContentStream') {
        const responses = [];
        for await (const item of response as AsyncGenerator<GenerateContentResponse>) {
          expect(item).instanceOf(GenerateContentResponse);
          responses.push(item);
        }
        expect(responses).toEqual(fakeResponse.response);
      } else {
        expect(response).toEqual(fakeResponse.response);
      }
    }
  });

  it('should throw error when no more responses', async () => {
    const generator = new FakeContentGenerator([fakeGenerateContentResponse]);
    await generator.generateContent({} as GenerateContentParameters, 'id');
    await expect(
      generator.embedContent({} as EmbedContentParameters),
    ).rejects.toThrowError('No more mock responses for embedContent');
  });

  it('should handle multiple calls and exhaust responses', async () => {
    const generator = new FakeContentGenerator(fakeResponses);
    await generator.generateContent({} as GenerateContentParameters, 'id');
    await expect(
      generator.countTokens({} as CountTokensParameters),
    ).rejects.toThrowError('No more mock responses for countTokens');
    await expect(
      generator.generateContentStream({} as GenerateContentParameters, 'id'),
    ).rejects.toThrow('No more mock responses for generateContentStream');
    await expect(
      generator.generateContent({} as GenerateContentParameters, 'id'),
    ).rejects.toThrow();
    ).rejects.toThrowError('No more mock responses for generateContent');
  });

  describe('fromFile', () => {
    it('should create a generator from a file', async () => {
      const fileContent = JSON.stringify(fakeResponses);
      const fileContent = JSON.stringify(fakeGenerateContentResponse) + '\n';
      mockReadFile.mockResolvedValue(fileContent);

      const generator = await FakeContentGenerator.fromFile('fake-path.json');
@@ -155,51 +162,7 @@ describe('FakeContentGenerator', () => {
        {} as GenerateContentParameters,
        'id',
      );
      expect(response).toEqual(fakeResponses.generateContent[0]);
    });
  });

  describe('constructor with partial responses', () => {
    it('should handle missing generateContent', async () => {
      const responses = { ...fakeResponses, generateContent: undefined };
      const generator = new FakeContentGenerator(
        responses as unknown as FakeResponses,
      );
      await expect(
        generator.generateContent({} as GenerateContentParameters, 'id'),
      ).rejects.toThrowError('No more mock responses for generateContent');
    });

    it('should handle missing generateContentStream', async () => {
      const responses = { ...fakeResponses, generateContentStream: undefined };
      const generator = new FakeContentGenerator(
        responses as unknown as FakeResponses,
      );
      await expect(
        generator.generateContentStream({} as GenerateContentParameters, 'id'),
      ).rejects.toThrowError(
        'No more mock responses for generateContentStream',
      );
    });

    it('should handle missing countTokens', async () => {
      const responses = { ...fakeResponses, countTokens: undefined };
      const generator = new FakeContentGenerator(
        responses as unknown as FakeResponses,
      );
      await expect(
        generator.countTokens({} as CountTokensParameters),
      ).rejects.toThrowError('No more mock responses for countTokens');
    });

    it('should handle missing embedContent', async () => {
      const responses = { ...fakeResponses, embedContent: undefined };
      const generator = new FakeContentGenerator(
        responses as unknown as FakeResponses,
      );
      await expect(
        generator.embedContent({} as EmbedContentParameters),
      ).rejects.toThrowError('No more mock responses for embedContent');
      expect(response).toEqual(fakeGenerateContentResponse.response);
    });
  });
});

@@ -4,98 +4,113 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  CountTokensResponse,
import {
  GenerateContentResponse,
  GenerateContentParameters,
  CountTokensParameters,
  type CountTokensResponse,
  type GenerateContentParameters,
  type CountTokensParameters,
  EmbedContentResponse,
  EmbedContentParameters,
  type EmbedContentParameters,
} from '@google/genai';
import { promises } from 'node:fs';
import type { ContentGenerator } from './contentGenerator.js';
import type { UserTierId } from '../code_assist/types.js';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';

export type FakeResponses = {
  generateContent: GenerateContentResponse[];
  generateContentStream: GenerateContentResponse[][];
  countTokens: CountTokensResponse[];
  embedContent: EmbedContentResponse[];
};
export type FakeResponse =
  | {
      method: 'generateContent';
      response: GenerateContentResponse;
    }
  | {
      method: 'generateContentStream';
      response: GenerateContentResponse[];
    }
  | {
      method: 'countTokens';
      response: CountTokensResponse;
    }
  | {
      method: 'embedContent';
      response: EmbedContentResponse;
    };

// A ContentGenerator that responds with canned responses.
//
// Typically these would come from a file, provided by the `--fake-responses`
// CLI argument.
export class FakeContentGenerator implements ContentGenerator {
  private responses: FakeResponses;
  private callCounters = {
    generateContent: 0,
    generateContentStream: 0,
    countTokens: 0,
    embedContent: 0,
  };
  private callCounter = 0;
  userTier?: UserTierId;

  constructor(responses: FakeResponses) {
    this.responses = {
      generateContent: responses.generateContent ?? [],
      generateContentStream: responses.generateContentStream ?? [],
      countTokens: responses.countTokens ?? [],
      embedContent: responses.embedContent ?? [],
    };
  }
  constructor(private readonly responses: FakeResponse[]) {}

  static async fromFile(filePath: string): Promise<FakeContentGenerator> {
    const fileContent = await promises.readFile(filePath, 'utf-8');
    const responses = JSON.parse(fileContent) as FakeResponses;
    const responses = fileContent
      .split('\n')
      .filter((line) => line.trim() !== '')
      .map((line) => JSON.parse(line) as FakeResponse);
    return new FakeContentGenerator(responses);
  }

  private getNextResponse<K extends keyof FakeResponses>(
    method: K,
    request: unknown,
  ): FakeResponses[K][number] {
    const response = this.responses[method][this.callCounters[method]++];
  private getNextResponse<
    M extends FakeResponse['method'],
    R = Extract<FakeResponse, { method: M }>['response'],
  >(method: M, request: unknown): R {
    const response = this.responses[this.callCounter++];
    if (!response) {
      throw new Error(
        `No more mock responses for ${method}, got request:\n` +
          safeJsonStringify(request),
      );
    }
    return response;
    if (response.method !== method) {
      throw new Error(
        `Unexpected response type, next response was for ${response.method} but expected ${method}`,
      );
    }
    return response.response as R;
  }

  async generateContent(
    _request: GenerateContentParameters,
    request: GenerateContentParameters,
    _userPromptId: string,
  ): Promise<GenerateContentResponse> {
    return this.getNextResponse('generateContent', _request);
    return Object.setPrototypeOf(
      this.getNextResponse('generateContent', request),
      GenerateContentResponse.prototype,
    );
  }

  async generateContentStream(
    _request: GenerateContentParameters,
    request: GenerateContentParameters,
    _userPromptId: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const responses = this.getNextResponse('generateContentStream', _request);
    const responses = this.getNextResponse('generateContentStream', request);
    async function* stream() {
      for (const response of responses) {
        yield response;
        yield Object.setPrototypeOf(
          response,
          GenerateContentResponse.prototype,
        );
      }
    }
    return stream();
  }

  async countTokens(
    _request: CountTokensParameters,
    request: CountTokensParameters,
  ): Promise<CountTokensResponse> {
    return this.getNextResponse('countTokens', _request);
    return this.getNextResponse('countTokens', request);
  }

  async embedContent(
    _request: EmbedContentParameters,
    request: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    return this.getNextResponse('embedContent', _request);
    return Object.setPrototypeOf(
      this.getNextResponse('embedContent', request),
      EmbedContentResponse.prototype,
    );
  }
}
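A minimal usage sketch of the queue-based contract above (the response literals are placeholders in the style of the tests):

```ts
// Responses are consumed strictly in call order, regardless of method.
const generator = new FakeContentGenerator([
  { method: 'countTokens', response: { totalTokens: 10 } as CountTokensResponse },
  {
    method: 'generateContent',
    response: {
      candidates: [{ content: { parts: [{ text: 'Hello.' }], role: 'model' } }],
    } as GenerateContentResponse,
  },
]);

await generator.countTokens({} as CountTokensParameters); // { totalTokens: 10 }
await generator.generateContent({} as GenerateContentParameters, 'prompt-id');
// Any further call throws 'No more mock responses for ...'.
```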

151 packages/core/src/core/recordingContentGenerator.test.ts Normal file
@@ -0,0 +1,151 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  GenerateContentResponse,
  CountTokensResponse,
  EmbedContentResponse,
  GenerateContentParameters,
  CountTokensParameters,
  EmbedContentParameters,
  ContentEmbedding,
} from '@google/genai';
import { appendFileSync } from 'node:fs';
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';
import type { ContentGenerator } from './contentGenerator.js';
import { RecordingContentGenerator } from './recordingContentGenerator.js';

vi.mock('node:fs', () => ({
  appendFileSync: vi.fn(),
}));

describe('RecordingContentGenerator', () => {
  let mockRealGenerator: ContentGenerator;
  let recorder: RecordingContentGenerator;
  const filePath = '/test/file/responses.json';

  beforeEach(() => {
    mockRealGenerator = {
      generateContent: vi.fn(),
      generateContentStream: vi.fn(),
      countTokens: vi.fn(),
      embedContent: vi.fn(),
    };
    recorder = new RecordingContentGenerator(mockRealGenerator, filePath);
    vi.clearAllMocks();
  });

  it('should record generateContent responses', async () => {
    const mockResponse = {
      candidates: [
        { content: { parts: [{ text: 'response' }], role: 'model' } },
      ],
      usageMetadata: { totalTokenCount: 10 },
    } as GenerateContentResponse;
    (mockRealGenerator.generateContent as Mock).mockResolvedValue(mockResponse);

    const response = await recorder.generateContent(
      {} as GenerateContentParameters,
      'id1',
    );
    expect(response).toEqual(mockResponse);
    expect(mockRealGenerator.generateContent).toHaveBeenCalledWith({}, 'id1');

    expect(appendFileSync).toHaveBeenCalledWith(
      filePath,
      safeJsonStringify({
        method: 'generateContent',
        response: mockResponse,
      }) + '\n',
    );
  });

  it('should record generateContentStream responses', async () => {
    const mockResponse1 = {
      candidates: [
        { content: { parts: [{ text: 'response1' }], role: 'model' } },
      ],
      usageMetadata: { totalTokenCount: 10 },
    } as GenerateContentResponse;
    const mockResponse2 = {
      candidates: [
        { content: { parts: [{ text: 'response2' }], role: 'model' } },
      ],
      usageMetadata: { totalTokenCount: 20 },
    } as GenerateContentResponse;

    async function* mockStream() {
      yield mockResponse1;
      yield mockResponse2;
    }

    (mockRealGenerator.generateContentStream as Mock).mockResolvedValue(
      mockStream(),
    );

    const stream = await recorder.generateContentStream(
      {} as GenerateContentParameters,
      'id1',
    );
    const responses = [];
    for await (const response of stream) {
      responses.push(response);
    }

    expect(responses).toEqual([mockResponse1, mockResponse2]);
    expect(mockRealGenerator.generateContentStream).toHaveBeenCalledWith(
      {},
      'id1',
    );

    expect(appendFileSync).toHaveBeenCalledWith(
      filePath,
      safeJsonStringify({
        method: 'generateContentStream',
        response: responses,
      }) + '\n',
    );
  });

  it('should record countTokens responses', async () => {
    const mockResponse = {
      totalTokens: 100,
      cachedContentTokenCount: 10,
    } as CountTokensResponse;
    (mockRealGenerator.countTokens as Mock).mockResolvedValue(mockResponse);

    const response = await recorder.countTokens({} as CountTokensParameters);
    expect(response).toEqual(mockResponse);
    expect(mockRealGenerator.countTokens).toHaveBeenCalledWith({});

    expect(appendFileSync).toHaveBeenCalledWith(
      filePath,
      safeJsonStringify({
        method: 'countTokens',
        response: mockResponse,
      }) + '\n',
    );
  });

  it('should record embedContent responses', async () => {
    const mockResponse = {
      embeddings: [{ values: [1, 2, 3] } as ContentEmbedding],
    } as EmbedContentResponse;
    (mockRealGenerator.embedContent as Mock).mockResolvedValue(mockResponse);

    const response = await recorder.embedContent({} as EmbedContentParameters);
    expect(response).toEqual(mockResponse);
    expect(mockRealGenerator.embedContent).toHaveBeenCalledWith({});
    expect(appendFileSync).toHaveBeenCalledWith(
      filePath,
      safeJsonStringify({
        method: 'embedContent',
        response: mockResponse,
      }) + '\n',
    );
  });
});

112 packages/core/src/core/recordingContentGenerator.ts Normal file
@@ -0,0 +1,112 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  CountTokensResponse,
  GenerateContentParameters,
  GenerateContentResponse,
  CountTokensParameters,
  EmbedContentResponse,
  EmbedContentParameters,
} from '@google/genai';
import { appendFileSync } from 'node:fs';
import type { ContentGenerator } from './contentGenerator.js';
import type { FakeResponse } from './fakeContentGenerator.js';
import type { UserTierId } from '../code_assist/types.js';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';

// A ContentGenerator that wraps another content generator and records all the
// responses, with the ability to write them out to a file. These files are
// intended to be consumed later on by a FakeContentGenerator, given the
// `--fake-responses` CLI argument.
//
// Note that only the "interesting" bits of the responses are actually kept.
export class RecordingContentGenerator implements ContentGenerator {
  userTier?: UserTierId;

  constructor(
    private readonly realGenerator: ContentGenerator,
    private readonly filePath: string,
  ) {}

  async generateContent(
    request: GenerateContentParameters,
    userPromptId: string,
  ): Promise<GenerateContentResponse> {
    const response = await this.realGenerator.generateContent(
      request,
      userPromptId,
    );
    const recordedResponse: FakeResponse = {
      method: 'generateContent',
      response: {
        candidates: response.candidates,
        usageMetadata: response.usageMetadata,
      } as GenerateContentResponse,
    };
    appendFileSync(this.filePath, `${safeJsonStringify(recordedResponse)}\n`);
    return response;
  }

  async generateContentStream(
    request: GenerateContentParameters,
    userPromptId: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const recordedResponse: FakeResponse = {
      method: 'generateContentStream',
      response: [],
    };

    const realResponses = await this.realGenerator.generateContentStream(
      request,
      userPromptId,
    );

    async function* stream(filePath: string) {
      for await (const response of realResponses) {
        (recordedResponse.response as GenerateContentResponse[]).push({
          candidates: response.candidates,
          usageMetadata: response.usageMetadata,
        } as GenerateContentResponse);
        yield response;
      }
      appendFileSync(filePath, `${safeJsonStringify(recordedResponse)}\n`);
    }

    return Promise.resolve(stream(this.filePath));
  }

  async countTokens(
    request: CountTokensParameters,
  ): Promise<CountTokensResponse> {
    const response = await this.realGenerator.countTokens(request);
    const recordedResponse: FakeResponse = {
      method: 'countTokens',
      response: {
        totalTokens: response.totalTokens,
        cachedContentTokenCount: response.cachedContentTokenCount,
      },
    };
    appendFileSync(this.filePath, `${safeJsonStringify(recordedResponse)}\n`);
    return response;
  }

  async embedContent(
    request: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    const response = await this.realGenerator.embedContent(request);

    const recordedResponse: FakeResponse = {
      method: 'embedContent',
      response: {
        embeddings: response.embeddings,
        metadata: response.metadata,
      },
    };
    appendFileSync(this.filePath, `${safeJsonStringify(recordedResponse)}\n`);
    return response;
  }
}
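A brief usage sketch (the file path is illustrative), mirroring how `createContentGenerator` wires the recorder up:

```ts
// Wrap any real generator; each call is forwarded, and a trimmed copy of the
// response is appended to the file as one FakeResponse JSON line.
const recorder = new RecordingContentGenerator(realGenerator, './my-test.responses');
await recorder.generateContent(request, 'prompt-id');
// './my-test.responses' now ends with a line like:
// {"method":"generateContent","response":{"candidates":[...],"usageMetadata":{...}}}
```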

@@ -29,6 +29,7 @@ export * from './core/turn.js';
export * from './core/geminiRequest.js';
export * from './core/coreToolScheduler.js';
export * from './core/nonInteractiveToolExecutor.js';
export * from './core/recordingContentGenerator.js';

export * from './fallback/types.js';