Behavioral evals framework. (#16047)

Christian Gunderman
2026-01-14 04:49:17 +00:00
committed by GitHub
parent 933bc5774f
commit 8030404b08
15 changed files with 1577 additions and 1413 deletions


@@ -277,6 +277,37 @@ jobs:
shell: 'pwsh'
run: 'npm run test:integration:sandbox:none'
evals:
name: 'Evals (ALWAYS_PASSING)'
needs:
- 'merge_queue_skipper'
- 'parse_run_context'
runs-on: 'gemini-cli-ubuntu-16-core'
if: |
always() && (needs.merge_queue_skipper.result != 'success' || needs.merge_queue_skipper.outputs.skip != 'true')
steps:
- name: 'Checkout'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v5
with:
ref: '${{ needs.parse_run_context.outputs.sha }}'
repository: '${{ needs.parse_run_context.outputs.repository }}'
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '20.x'
- name: 'Install dependencies'
run: 'npm ci'
- name: 'Build project'
run: 'npm run build'
- name: 'Run Evals (Required to pass)'
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
run: 'npm run test:always_passing_evals'
e2e:
name: 'E2E'
if: |
@@ -284,13 +315,15 @@ jobs:
needs:
- 'e2e_linux'
- 'e2e_mac'
- 'evals'
- 'merge_queue_skipper'
runs-on: 'gemini-cli-ubuntu-16-core'
steps:
- name: 'Check E2E test results'
run: |
if [[ ${{ needs.e2e_linux.result }} != 'success' || \
${{ needs.e2e_mac.result }} != 'success' ]]; then
${{ needs.e2e_mac.result }} != 'success' || \
${{ needs.evals.result }} != 'success' ]]; then
echo "One or more E2E jobs failed."
exit 1
fi

.github/workflows/evals-nightly.yml

@@ -0,0 +1,41 @@
name: 'Evals: Nightly'
on:
schedule:
- cron: '0 1 * * *' # Runs at 1 AM every day
workflow_dispatch:
inputs:
run_all:
description: 'Run all evaluations (including usually passing)'
type: 'boolean'
default: true
permissions:
contents: 'read'
checks: 'write'
jobs:
evals:
name: 'Evals (USUALLY_PASSING) nightly run'
runs-on: 'gemini-cli-ubuntu-16-core'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Set up Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version-file: '.nvmrc'
cache: 'npm'
- name: 'Install dependencies'
run: 'npm ci'
- name: 'Build project'
run: 'npm run build'
- name: 'Run Evals'
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
RUN_EVALS: "${{ github.event.inputs.run_all != 'false' }}"
run: 'npm run test:all_evals'

.gitignore

@@ -59,3 +59,4 @@ patch_output.log
.genkit
.gemini-clipboard/
.eslintcache
evals/logs/


@@ -35,6 +35,8 @@ export default tseslint.config(
'package/bundle/**',
'.integration-tests/**',
'dist/**',
'evals/**',
'packages/test-utils/**',
],
},
eslint.configs.recommended,

evals/README.md

@@ -0,0 +1,102 @@
# Behavioral Evals
Behavioral evaluations (evals) are tests designed to validate the agent's
behavior in response to specific prompts. They serve as a critical feedback loop
for changes to system prompts, tool definitions, and other model-steering
mechanisms.
## Why Behavioral Evals?
Unlike traditional **integration tests**, which verify that the system functions
correctly (e.g., "does the file writer actually write to disk?"), behavioral
evals verify that the model _chooses_ to take the correct action (e.g., "does
the model decide to write to disk when asked to save code?").
They are also distinct from broad **industry benchmarks** (like SWE-bench).
While benchmarks measure general capabilities across complex challenges, our
behavioral evals focus on specific, granular behaviors relevant to the Gemini
CLI's features.
### Key Characteristics
- **Feedback Loop**: They help us understand how changes to prompts or tools
affect the model's decision-making.
- _Did a change to the system prompt make the model less likely to use tool
X?_
- _Did a new tool definition confuse the model?_
- **Regression Testing**: They prevent regressions in model steering.
- **Non-Determinism**: Unlike unit tests, evals depend on LLM behavior, which can be non-deterministic.
We distinguish between behaviors that should be robust (`ALWAYS_PASSES`) and
those that are generally reliable but might occasionally vary
(`USUALLY_PASSES`).
## Creating an Evaluation
Evaluations are located in the `evals` directory. Each evaluation is a Vitest
test file that uses the `evalTest` function from `evals/test-helper.ts`.
### `evalTest`
The `evalTest` function is a helper that runs a single evaluation case. It takes
two arguments:
1. `policy`: The consistency expectation for this test (`'ALWAYS_PASSES'` or
`'USUALLY_PASSES'`).
2. `evalCase`: An object defining the test case.
#### Policies
- `ALWAYS_PASSES`: Tests expected to pass 100% of the time. These are typically
  trivial and test basic functionality. They run on every CI run.
- `USUALLY_PASSES`: Tests expected to pass most of the time but may have some
flakiness due to non-deterministic behaviors. These are run nightly and used
to track the health of the product from build to build.
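Internally, `evalTest` enforces the policy by skipping `USUALLY_PASSES` cases unless the `RUN_EVALS` environment variable is set. The sketch below restates that logic from `evals/test-helper.ts`; the `registerEval` wrapper is illustrative only:

```typescript
import { it } from 'vitest';
import type { EvalCase, EvalPolicy } from './test-helper.js';

// Simplified sketch of how evalTest (evals/test-helper.ts) applies the policy:
// USUALLY_PASSES cases are registered but skipped unless RUN_EVALS is set.
function registerEval(policy: EvalPolicy, evalCase: EvalCase, fn: () => Promise<void>) {
  if (policy === 'USUALLY_PASSES' && !process.env.RUN_EVALS) {
    it.skip(evalCase.name, fn); // skipped in regular CI runs
  } else {
    it(evalCase.name, fn); // ALWAYS_PASSES, or RUN_EVALS=1 is set
  }
}
```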
#### `EvalCase` Properties
- `name`: The name of the evaluation case.
- `prompt`: The prompt to send to the model.
- `params`: An optional object with parameters to pass to the test rig (e.g.,
settings).
- `assert`: An async function that takes the test rig and the result of the run
and asserts that the result is correct.
- `log`: An optional boolean that, if set to `true`, will log the tool calls to
a file in the `evals/logs` directory.
### Example
```typescript
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
describe('my_feature', () => {
evalTest('ALWAYS_PASSES', {
name: 'should do something',
prompt: 'do it',
assert: async (rig, result) => {
// assertions
},
});
});
```
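A fuller case might also pass `params`, enable `log`, and use the `USUALLY_PASSES` policy. The tool name and settings below are hypothetical and only illustrate the shape of the API (compare `evals/save_memory.eval.ts`):

```typescript
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';

describe('my_feature', () => {
  evalTest('USUALLY_PASSES', {
    name: 'should call my_tool when asked',
    // Hypothetical tool name; restricts which core tools the agent can use.
    params: {
      settings: { tools: { core: ['my_tool'] } },
    },
    prompt: 'use my_tool to complete the task',
    // Write the tool-call log to evals/logs/ for debugging.
    log: true,
    assert: async (rig, result) => {
      const foundToolCall = await rig.waitForToolCall('my_tool');
      expect(foundToolCall, 'Expected a my_tool call').toBeTruthy();
    },
  });
});
```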
## Running Evaluations
### Always Passing Evals
To run the evaluations that are expected to always pass (CI safe):
```bash
npm run test:always_passing_evals
```
### All Evals
To run all evaluations, including those that may be flaky (`USUALLY_PASSES`):
```bash
npm run test:all_evals
```
This command sets the `RUN_EVALS` environment variable to `1`, which enables the
`USUALLY_PASSES` tests.
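To iterate on a single eval locally, you can point Vitest at one file (standard Vitest CLI path filtering; `evals/my_feature.eval.ts` is the hypothetical example from above). As in CI, the evals call the live model, so valid Gemini credentials such as `GEMINI_API_KEY` are required:

```bash
# Run the ALWAYS_PASSES cases in one eval file
npx vitest run --config evals/vitest.config.ts evals/save_memory.eval.ts

# Include USUALLY_PASSES cases in a (hypothetical) eval file
RUN_EVALS=1 npx vitest run --config evals/vitest.config.ts evals/my_feature.eval.ts
```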

evals/save_memory.eval.ts

@@ -0,0 +1,31 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import { validateModelOutput } from '../integration-tests/test-helper.js';
describe('save_memory', () => {
evalTest('ALWAYS_PASSES', {
name: 'should be able to save to memory',
log: true,
params: {
settings: { tools: { core: ['save_memory'] } },
},
prompt: `remember that my favorite color is blue.
what is my favorite color? tell me that and surround it with $ symbol`,
assert: async (rig, result) => {
const foundToolCall = await rig.waitForToolCall('save_memory');
expect(
foundToolCall,
'Expected to find a save_memory tool call',
).toBeTruthy();
validateModelOutput(result, 'blue', 'Save memory test');
},
});
});

evals/test-helper.ts

@@ -0,0 +1,70 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { it } from 'vitest';
import fs from 'node:fs';
import { TestRig } from '@google/gemini-cli-test-utils';
export * from '@google/gemini-cli-test-utils';
// Indicates the consistency expectation for this test.
// - ALWAYS_PASSES - Means that the test is expected to pass 100% of the time.
// These tests are typically trivial and test basic functionality with unambiguous
// prompts. For example: "call save_memory to remember foo" should be fairly reliable.
// These are the first line of defense against regressions in key behaviors and run in
// every CI. You can run these locally with 'npm run test:always_passing_evals'.
//
// - USUALLY_PASSES - Means that the test is expected to pass most of the time but
// may have some flakiness as a result of relying on non-deterministic prompted
// behaviors and/or ambiguous prompts or complex tasks.
// For example: "Please do build changes until the very end" --> it is ambiguous whether
// the agent should add this to memory without a more explicit system prompt or user
// instructions. There are many more of these tests, and they may pass less consistently.
// The pass/fail trendline of this set of tests can be used as a general measure
// of product quality. You can run these locally with 'npm run test:all_evals'.
// This can take a long time and is not recommended for routine local runs.
export type EvalPolicy = 'ALWAYS_PASSES' | 'USUALLY_PASSES';
export function evalTest(policy: EvalPolicy, evalCase: EvalCase) {
const fn = async () => {
const rig = new TestRig();
try {
await rig.setup(evalCase.name, evalCase.params);
const result = await rig.run({ args: evalCase.prompt });
await evalCase.assert(rig, result);
} finally {
if (evalCase.log) {
await logToFile(
evalCase.name,
JSON.stringify(rig.readToolLogs(), null, 2),
);
}
await rig.cleanup();
}
};
if (policy === 'USUALLY_PASSES' && !process.env.RUN_EVALS) {
it.skip(evalCase.name, fn);
} else {
it(evalCase.name, fn);
}
}
export interface EvalCase {
name: string;
params?: Record<string, any>;
prompt: string;
assert: (rig: TestRig, result: string) => Promise<void>;
log?: boolean;
}
async function logToFile(name: string, content: string) {
const logDir = 'evals/logs';
await fs.promises.mkdir(logDir, { recursive: true });
const sanitizedName = name.replace(/[^a-z0-9]/gi, '_').toLowerCase();
const logFile = `${logDir}/${sanitizedName}.log`;
await fs.promises.writeFile(logFile, content);
}

evals/vitest.config.ts

@@ -0,0 +1,15 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
testTimeout: 300000, // 5 minutes
reporters: ['default'],
include: ['**/*.eval.ts'],
},
});


@@ -1,54 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
describe('save_memory', () => {
let rig: TestRig;
beforeEach(() => {
rig = new TestRig();
});
afterEach(async () => await rig.cleanup());
it('should be able to save to memory', async () => {
await rig.setup('should be able to save to memory', {
settings: { tools: { core: ['save_memory'] } },
});
const prompt = `remember that my favorite color is blue.
what is my favorite color? tell me that and surround it with $ symbol`;
const result = await rig.run({ args: prompt });
const foundToolCall = await rig.waitForToolCall('save_memory');
// Add debugging information
if (!foundToolCall || !result.toLowerCase().includes('blue')) {
const allTools = printDebugInfo(rig, result, {
'Found tool call': foundToolCall,
'Contains blue': result.toLowerCase().includes('blue'),
});
console.error(
'Memory tool calls:',
allTools
.filter((t) => t.toolRequest.name === 'save_memory')
.map((t) => t.toolRequest.args),
);
}
expect(
foundToolCall,
'Expected to find a save_memory tool call',
).toBeTruthy();
// Validate model output - will throw if no output, warn if missing expected content
validateModelOutput(result, 'blue', 'Save memory test');
});
});

File diff suppressed because it is too large

package-lock.json

File diff suppressed because it is too large


@@ -41,6 +41,8 @@
"test": "npm run test --workspaces --if-present",
"test:ci": "npm run test:ci --workspaces --if-present && npm run test:scripts",
"test:scripts": "vitest run --config ./scripts/tests/vitest.config.ts",
"test:always_passing_evals": "vitest run --config evals/vitest.config.ts",
"test:all_evals": "cross-env RUN_EVALS=1 vitest run --config evals/vitest.config.ts",
"test:e2e": "cross-env VERBOSE=true KEEP_OUTPUT=true npm run test:integration:sandbox:none",
"test:integration:all": "npm run test:integration:sandbox:none && npm run test:integration:sandbox:docker && npm run test:integration:sandbox:podman",
"test:integration:sandbox:none": "cross-env GEMINI_SANDBOX=false vitest run --root ./integration-tests",


@@ -9,6 +9,12 @@
"build": "node ../../scripts/build_package.js",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@google/gemini-cli-core": "file:../core",
"@lydell/node-pty": "1.1.0",
"strip-ansi": "^7.1.2",
"vitest": "^3.2.4"
},
"devDependencies": {
"typescript": "^5.3.3"
},


@@ -5,3 +5,4 @@
*/
export * from './file-system-test-helpers.js';
export * from './test-rig.js';

File diff suppressed because it is too large