fix(core): Compression was broken when routing enabled (#9183)

Abhi
2025-09-23 00:44:07 -04:00
committed by GitHub
parent 5218323831
commit c93eed6384
2 changed files with 1 addition and 51 deletions


@@ -733,55 +733,6 @@ describe('Gemini Client (client.ts)', () => {
       // Assert that the chat was reset
       expect(newChat).not.toBe(initialChat);
     });
-    it('should use current model from config for token counting after sendMessage', async () => {
-      const initialModel = mockConfig.getModel();
-      // mock the model has been changed between calls of `countTokens`
-      const firstCurrentModel = initialModel + '-changed-1';
-      const secondCurrentModel = initialModel + '-changed-2';
-      vi.mocked(mockConfig.getModel)
-        .mockReturnValueOnce(firstCurrentModel)
-        .mockReturnValueOnce(secondCurrentModel);
-      vi.mocked(mockContentGenerator.countTokens)
-        .mockResolvedValueOnce({ totalTokens: 100000 })
-        .mockResolvedValueOnce({ totalTokens: 5000 });
-      const mockSendMessage = vi.fn().mockResolvedValue({ text: 'Summary' });
-      const mockChatHistory = [
-        { role: 'user', parts: [{ text: 'Long conversation' }] },
-        { role: 'model', parts: [{ text: 'Long response' }] },
-      ];
-      const mockChat = {
-        getHistory: vi.fn().mockImplementation(() => [...mockChatHistory]),
-        setHistory: vi.fn(),
-        sendMessage: mockSendMessage,
-      } as unknown as GeminiChat;
-      client['chat'] = mockChat;
-      client['startChat'] = vi.fn().mockResolvedValue(mockChat);
-      const result = await client.tryCompressChat('prompt-id-4', false);
-      expect(mockContentGenerator.countTokens).toHaveBeenCalledTimes(2);
-      expect(mockContentGenerator.countTokens).toHaveBeenNthCalledWith(1, {
-        model: firstCurrentModel,
-        contents: [...mockChatHistory],
-      });
-      expect(mockContentGenerator.countTokens).toHaveBeenNthCalledWith(2, {
-        model: secondCurrentModel,
-        contents: expect.any(Array),
-      });
-      expect(result).toEqual({
-        compressionStatus: CompressionStatus.COMPRESSED,
-        originalTokenCount: 100000,
-        newTokenCount: 5000,
-      });
-    });
   });

   describe('sendMessageStream', () => {


@@ -758,8 +758,7 @@ export class GeminiClient {
     const { totalTokens: newTokenCount } =
       await this.getContentGeneratorOrFail().countTokens({
-        // model might change after calling `sendMessage`, so we get the newest value from config
-        model: this.config.getModel(),
+        model,
        contents: chat.getHistory(),
       });
     if (newTokenCount === undefined) {
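
For context on why reverting to the captured `model` fixes compression when routing is enabled, here is a minimal TypeScript sketch of the token-counting flow. The interface shapes and the function name `compressAndRecount` are illustrative assumptions, not the real gemini-cli types; only the captured `model` and the two `countTokens` calls mirror the actual diff.

```typescript
// Illustrative shapes only; not the real gemini-cli types.
interface TokenCounter {
  countTokens(req: {
    model: string;
    contents: unknown[];
  }): Promise<{ totalTokens?: number }>;
}

interface ChatLike {
  getHistory(): unknown[];
}

async function compressAndRecount(
  config: { getModel(): string },
  generator: TokenCounter,
  chat: ChatLike,
): Promise<{ originalTokenCount?: number; newTokenCount?: number }> {
  // Capture the model once. With model routing enabled, config.getModel()
  // can return a different model after the summarization request below.
  const model = config.getModel();

  const { totalTokens: originalTokenCount } = await generator.countTokens({
    model,
    contents: chat.getHistory(),
  });

  // ...send the summarization request here; the router may switch the
  // active model in config as a side effect...

  const { totalTokens: newTokenCount } = await generator.countTokens({
    model, // the fix: reuse the captured model, not config.getModel()
    contents: chat.getHistory(),
  });

  // Counting the before/after history against two different models makes
  // the compression comparison meaningless, which is what broke
  // compression when routing was enabled.
  return { originalTokenCount, newTokenCount };
}
```

The test deleted in the first hunk asserted the old behavior (reading the model fresh from config for each `countTokens` call), which is why it is removed along with the revert.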