refactor: Pass a dedicated timeout signal through streaming content generation, handle it on stream errors, and update the test mock to return an AsyncGenerator.

This commit is contained in:
kevin-ramdass
2026-01-31 18:23:15 -08:00
parent d03b9b95b3
commit b8ad178bee
2 changed files with 118 additions and 87 deletions

View File

@@ -1022,18 +1022,27 @@ describe('GeminiChat', () => {
// 2. Mock generateContentStream to hang UNTIL aborted
vi.mocked(mockContentGenerator.generateContentStream).mockImplementation(
(request) => new Promise((resolve, reject) => {
const config = request?.config;
if (config?.abortSignal) {
if (config.abortSignal.aborted) {
async (request) => {
const signal = request.config?.abortSignal;
return {
async *[Symbol.asyncIterator]() {
if (signal) {
await new Promise((resolve, reject) => {
if (signal.aborted) {
reject(new Error('Aborted'));
return;
}
config.abortSignal.addEventListener('abort', () => {
signal.addEventListener('abort', () => {
reject(new Error('Aborted'));
});
});
} else {
await new Promise(() => {}); // Hang indefinitely
}
}),
yield {} as GenerateContentResponse; // Dummy yield to satisfy require-yield lint rule
},
} as AsyncGenerator<GenerateContentResponse>;
},
);
// 3. Start the request

View File

@@ -592,7 +592,9 @@ export class GeminiChat {
lastContentsToUse = contentsToUse;
try {
return await this.config.getContentGenerator().generateContentStream(
const stream = await this.config
.getContentGenerator()
.generateContentStream(
{
model: modelToUse,
contents: contentsToUse,
@@ -600,6 +602,10 @@ export class GeminiChat {
},
prompt_id,
);
return {
stream,
timeoutSignal,
};
} catch (error) {
if (timeoutSignal.aborted) {
const timeoutError = new Error(
@@ -632,7 +638,9 @@ export class GeminiChat {
);
};
const streamResponse = await retryWithBackoff(apiCall, {
const { stream: streamResponse, timeoutSignal } = await retryWithBackoff(
apiCall,
{
onPersistent429: onPersistent429Callback,
onValidationRequired: onValidationRequiredCallback,
authType: this.config.getContentGeneratorConfig()?.authType,
@@ -649,7 +657,8 @@ export class GeminiChat {
model: lastModelToUse,
});
},
});
},
);
// Store the original request for AfterModel hooks
const originalRequest: GenerateContentParameters = {
@@ -662,6 +671,7 @@ export class GeminiChat {
lastModelToUse,
streamResponse,
originalRequest,
timeoutSignal,
);
}
@@ -820,12 +830,14 @@ export class GeminiChat {
model: string,
streamResponse: AsyncGenerator<GenerateContentResponse>,
originalRequest: GenerateContentParameters,
timeoutSignal: AbortSignal,
): AsyncGenerator<GenerateContentResponse> {
const modelResponseParts: Part[] = [];
let hasToolCall = false;
let finishReason: FinishReason | undefined;
try {
for await (const chunk of streamResponse) {
const candidateWithReason = chunk?.candidates?.find(
(candidate) => candidate.finishReason,
@@ -884,6 +896,16 @@ export class GeminiChat {
yield chunk;
}
}
} catch (error) {
if (timeoutSignal.aborted) {
const timeoutError = new Error(
`Request timed out after ${TIMEOUT_MS}ms`,
);
(timeoutError as unknown as { code: string }).code = 'ETIMEDOUT';
throw timeoutError;
}
throw error;
}
// String thoughts and consolidate text parts.
const consolidatedParts: Part[] = [];