feat: added basic dev otel trace instrumentation (#11690)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Author: Pavel Jbanov
Date: 2025-10-27 19:16:44 -04:00
Committed by: GitHub
parent 2dfb813c90
commit a9cb8f4958
7 changed files with 656 additions and 323 deletions


@@ -306,6 +306,8 @@ gemini
corporate environment.
- [**Telemetry & Monitoring**](./docs/cli/telemetry.md) - Usage tracking.
- [**Tools API Development**](./docs/core/tools-api.md) - Create custom tools.
- [**Local development**](./docs/local-development.md) - Local development
tooling.
### Troubleshooting & Support

docs/local-development.md (new file)

@@ -0,0 +1,92 @@
# Local Development Guide

This guide provides instructions for setting up and using local development
features, such as development tracing.

## Development Tracing

Development traces (dev traces) are OpenTelemetry (OTel) traces that help you
debug your code by instrumenting key events such as model calls, tool
scheduling, and tool calls.

Dev traces are verbose and are meant specifically for understanding agent
behavior and debugging issues. They are disabled by default.

To enable dev traces, set the `GEMINI_DEV_TRACING=true` environment variable
when running Gemini CLI.
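For example, to start an interactive session with dev tracing enabled:

```bash
GEMINI_DEV_TRACING=true gemini
```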
### Viewing Dev Traces

You can view dev traces in the Jaeger UI. To get started, follow these steps:

1. **Start the telemetry collector:**

   Run the following command in your terminal to download and start Jaeger and
   an OTEL collector:

   ```bash
   npm run telemetry -- --target=local
   ```

   This command also configures your workspace for local telemetry and provides
   a link to the Jaeger UI (usually `http://localhost:16686`).

2. **Run Gemini CLI with dev tracing:**

   In a separate terminal, run your Gemini CLI command with the
   `GEMINI_DEV_TRACING` environment variable:

   ```bash
   GEMINI_DEV_TRACING=true gemini [your-command]
   ```

3. **View the traces:**

   After running your command, open the Jaeger UI link in your browser to view
   the traces.

For more detailed information on telemetry, see the
[telemetry documentation](./cli/telemetry.md).
### Instrumenting Code with Dev Traces

You can add dev traces to your own code for more detailed instrumentation. This
is useful for debugging and understanding the flow of execution.

Use the `runInDevTraceSpan` function to wrap any section of code in a trace
span.

Here is a basic example:

```typescript
import { runInDevTraceSpan } from '@google/gemini-cli-core';

await runInDevTraceSpan({ name: 'my-custom-span' }, async ({ metadata }) => {
  // The `metadata` object allows you to record the input and output of the
  // operation as well as other attributes.
  metadata.input = { key: 'value' };

  // Set custom attributes.
  metadata.attributes['gen_ai.request.model'] = 'gemini-4.0-mega';

  // Your code to be traced goes here.
  try {
    const output = await somethingRisky();
    metadata.output = output;
    return output;
  } catch (e) {
    metadata.error = e;
    throw e;
  }
});
```

In this example:

- `name`: The name of the span, which will be displayed in the trace.
- `metadata.input`: (Optional) An object containing the input data for the
  traced operation.
- `metadata.output`: (Optional) An object containing the output data from the
  traced operation.
- `metadata.attributes`: (Optional) A record of custom attributes to add to the
  span.
- `metadata.error`: (Optional) An error object to record if the operation
  fails.
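For operations whose work continues after the wrapped function returns, such as
consuming an async generator, the span can be kept open by passing
`noAutoEnd: true` and ended manually with the `endSpan` callback provided
alongside `metadata`. Below is a minimal sketch of this pattern;
`streamChunks` is a hypothetical generator used only for illustration:

```typescript
import { runInDevTraceSpan } from '@google/gemini-cli-core';

// Hypothetical async generator, used only for illustration.
declare function streamChunks(): AsyncGenerator<string>;

const stream = await runInDevTraceSpan(
  { name: 'my-streaming-span', noAutoEnd: true },
  async ({ metadata, endSpan }) => {
    metadata.input = { source: 'streamChunks' };
    const source = streamChunks();

    // Wrap the stream so the span is ended only after it has been fully
    // consumed (or fails).
    async function* wrapped(): AsyncGenerator<string> {
      try {
        for await (const chunk of source) {
          yield chunk;
        }
        metadata.output = { done: true };
      } catch (e) {
        metadata.error = e;
        throw e;
      } finally {
        endSpan();
      }
    }

    return wrapped();
  },
);

for await (const chunk of stream) {
  console.log(chunk);
}
```

The core uses this same pattern to keep the `generateContentStream` span open
until the response stream has been fully consumed.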


@@ -35,6 +35,7 @@ import {
promptIdContext,
WRITE_FILE_TOOL_NAME,
tokenLimit,
runInDevTraceSpan,
} from '@google/gemini-cli-core';
import { type Part, type PartListUnion, FinishReason } from '@google/genai';
import type {
@@ -805,152 +806,160 @@ export const useGeminiStream = (
query: PartListUnion,
options?: { isContinuation: boolean },
prompt_id?: string,
) => {
const queryId = `${Date.now()}-${Math.random()}`;
activeQueryIdRef.current = queryId;
if (
(streamingState === StreamingState.Responding ||
streamingState === StreamingState.WaitingForConfirmation) &&
!options?.isContinuation
)
return;
const userMessageTimestamp = Date.now();
// Reset quota error flag when starting a new query (not a continuation)
if (!options?.isContinuation) {
setModelSwitchedFromQuotaError(false);
config.setQuotaErrorOccurred(false);
}
abortControllerRef.current = new AbortController();
const abortSignal = abortControllerRef.current.signal;
turnCancelledRef.current = false;
if (!prompt_id) {
prompt_id = config.getSessionId() + '########' + getPromptCount();
}
return promptIdContext.run(prompt_id, async () => {
const { queryToSend, shouldProceed } = await prepareQueryForGemini(
query,
userMessageTimestamp,
abortSignal,
prompt_id,
);
if (!shouldProceed || queryToSend === null) {
return;
}
if (!options?.isContinuation) {
if (typeof queryToSend === 'string') {
// logging the text prompts only for now
const promptText = queryToSend;
logUserPrompt(
config,
new UserPromptEvent(
promptText.length,
prompt_id,
config.getContentGeneratorConfig()?.authType,
promptText,
),
);
}
startNewPrompt();
setThought(null); // Reset thought when starting a new prompt
}
setIsResponding(true);
setInitError(null);
// Store query and prompt_id for potential retry on loop detection
lastQueryRef.current = queryToSend;
lastPromptIdRef.current = prompt_id;
try {
const stream = geminiClient.sendMessageStream(
queryToSend,
abortSignal,
prompt_id,
);
const processingStatus = await processGeminiStreamEvents(
stream,
userMessageTimestamp,
abortSignal,
);
if (processingStatus === StreamProcessingStatus.UserCancelled) {
) =>
runInDevTraceSpan(
{ name: 'submitQuery' },
async ({ metadata: spanMetadata }) => {
spanMetadata.input = query;
const queryId = `${Date.now()}-${Math.random()}`;
activeQueryIdRef.current = queryId;
if (
(streamingState === StreamingState.Responding ||
streamingState === StreamingState.WaitingForConfirmation) &&
!options?.isContinuation
)
return;
const userMessageTimestamp = Date.now();
// Reset quota error flag when starting a new query (not a continuation)
if (!options?.isContinuation) {
setModelSwitchedFromQuotaError(false);
config.setQuotaErrorOccurred(false);
}
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
setPendingHistoryItem(null);
}
if (loopDetectedRef.current) {
loopDetectedRef.current = false;
// Show the confirmation dialog to choose whether to disable loop detection
setLoopDetectionConfirmationRequest({
onComplete: (result: { userSelection: 'disable' | 'keep' }) => {
setLoopDetectionConfirmationRequest(null);
abortControllerRef.current = new AbortController();
const abortSignal = abortControllerRef.current.signal;
turnCancelledRef.current = false;
if (result.userSelection === 'disable') {
config
.getGeminiClient()
.getLoopDetectionService()
.disableForSession();
addItem(
{
type: 'info',
text: `Loop detection has been disabled for this session. Retrying request...`,
},
Date.now(),
);
if (lastQueryRef.current && lastPromptIdRef.current) {
submitQuery(
lastQueryRef.current,
{ isContinuation: true },
lastPromptIdRef.current,
);
}
} else {
addItem(
{
type: 'info',
text: `A potential loop was detected. This can happen due to repetitive tool calls or other model behavior. The request has been halted.`,
},
Date.now(),
);
}
},
});
if (!prompt_id) {
prompt_id = config.getSessionId() + '########' + getPromptCount();
}
} catch (error: unknown) {
if (error instanceof UnauthorizedError) {
onAuthError('Session expired or is unauthorized.');
} else if (!isNodeError(error) || error.name !== 'AbortError') {
addItem(
{
type: MessageType.ERROR,
text: parseAndFormatApiError(
getErrorMessage(error) || 'Unknown error',
config.getContentGeneratorConfig()?.authType,
undefined,
config.getModel(),
DEFAULT_GEMINI_FLASH_MODEL,
),
},
return promptIdContext.run(prompt_id, async () => {
const { queryToSend, shouldProceed } = await prepareQueryForGemini(
query,
userMessageTimestamp,
abortSignal,
prompt_id!,
);
}
} finally {
if (activeQueryIdRef.current === queryId) {
setIsResponding(false);
}
}
});
},
if (!shouldProceed || queryToSend === null) {
return;
}
if (!options?.isContinuation) {
if (typeof queryToSend === 'string') {
// logging the text prompts only for now
const promptText = queryToSend;
logUserPrompt(
config,
new UserPromptEvent(
promptText.length,
prompt_id!,
config.getContentGeneratorConfig()?.authType,
promptText,
),
);
}
startNewPrompt();
setThought(null); // Reset thought when starting a new prompt
}
setIsResponding(true);
setInitError(null);
// Store query and prompt_id for potential retry on loop detection
lastQueryRef.current = queryToSend;
lastPromptIdRef.current = prompt_id!;
try {
const stream = geminiClient.sendMessageStream(
queryToSend,
abortSignal,
prompt_id!,
);
const processingStatus = await processGeminiStreamEvents(
stream,
userMessageTimestamp,
abortSignal,
);
if (processingStatus === StreamProcessingStatus.UserCancelled) {
return;
}
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
setPendingHistoryItem(null);
}
if (loopDetectedRef.current) {
loopDetectedRef.current = false;
// Show the confirmation dialog to choose whether to disable loop detection
setLoopDetectionConfirmationRequest({
onComplete: (result: {
userSelection: 'disable' | 'keep';
}) => {
setLoopDetectionConfirmationRequest(null);
if (result.userSelection === 'disable') {
config
.getGeminiClient()
.getLoopDetectionService()
.disableForSession();
addItem(
{
type: 'info',
text: `Loop detection has been disabled for this session. Retrying request...`,
},
Date.now(),
);
if (lastQueryRef.current && lastPromptIdRef.current) {
submitQuery(
lastQueryRef.current,
{ isContinuation: true },
lastPromptIdRef.current,
);
}
} else {
addItem(
{
type: 'info',
text: `A potential loop was detected. This can happen due to repetitive tool calls or other model behavior. The request has been halted.`,
},
Date.now(),
);
}
},
});
}
} catch (error: unknown) {
spanMetadata.error = error;
if (error instanceof UnauthorizedError) {
onAuthError('Session expired or is unauthorized.');
} else if (!isNodeError(error) || error.name !== 'AbortError') {
addItem(
{
type: MessageType.ERROR,
text: parseAndFormatApiError(
getErrorMessage(error) || 'Unknown error',
config.getContentGeneratorConfig()?.authType,
undefined,
config.getModel(),
DEFAULT_GEMINI_FLASH_MODEL,
),
},
userMessageTimestamp,
);
}
} finally {
if (activeQueryIdRef.current === queryId) {
setIsResponding(false);
}
}
});
},
),
[
streamingState,
setModelSwitchedFromQuotaError,


@@ -25,6 +25,7 @@ import {
ToolCallEvent,
logToolOutputTruncated,
ToolOutputTruncatedEvent,
runInDevTraceSpan,
} from '../index.js';
import { READ_FILE_TOOL_NAME, SHELL_TOOL_NAME } from '../tools/tool-names.js';
import type { Part, PartListUnion } from '@google/genai';
@@ -668,36 +669,42 @@ export class CoreToolScheduler {
request: ToolCallRequestInfo | ToolCallRequestInfo[],
signal: AbortSignal,
): Promise<void> {
if (this.isRunning() || this.isScheduling) {
return new Promise((resolve, reject) => {
const abortHandler = () => {
// Find and remove the request from the queue
const index = this.requestQueue.findIndex(
(item) => item.request === request,
);
if (index > -1) {
this.requestQueue.splice(index, 1);
reject(new Error('Tool call cancelled while in queue.'));
}
};
return runInDevTraceSpan(
{ name: 'schedule' },
async ({ metadata: spanMetadata }) => {
spanMetadata.input = request;
if (this.isRunning() || this.isScheduling) {
return new Promise((resolve, reject) => {
const abortHandler = () => {
// Find and remove the request from the queue
const index = this.requestQueue.findIndex(
(item) => item.request === request,
);
if (index > -1) {
this.requestQueue.splice(index, 1);
reject(new Error('Tool call cancelled while in queue.'));
}
};
signal.addEventListener('abort', abortHandler, { once: true });
signal.addEventListener('abort', abortHandler, { once: true });
this.requestQueue.push({
request,
signal,
resolve: () => {
signal.removeEventListener('abort', abortHandler);
resolve();
},
reject: (reason?: Error) => {
signal.removeEventListener('abort', abortHandler);
reject(reason);
},
});
});
}
return this._schedule(request, signal);
this.requestQueue.push({
request,
signal,
resolve: () => {
signal.removeEventListener('abort', abortHandler);
resolve();
},
reject: (reason?: Error) => {
signal.removeEventListener('abort', abortHandler);
reject(reason);
},
});
});
}
return this._schedule(request, signal);
},
);
}
cancelAll(signal: AbortSignal): void {
@@ -1091,134 +1098,153 @@ export class CoreToolScheduler {
const shellExecutionConfig = this.config.getShellExecutionConfig();
// TODO: Refactor to remove special casing for ShellToolInvocation.
// Introduce a generic callbacks object for the execute method to handle
// things like `onPid` and `onLiveOutput`. This will make the scheduler
// agnostic to the invocation type.
let promise: Promise<ToolResult>;
if (invocation instanceof ShellToolInvocation) {
const setPidCallback = (pid: number) => {
this.toolCalls = this.toolCalls.map((tc) =>
tc.request.callId === callId && tc.status === 'executing'
? { ...tc, pid }
: tc,
);
this.notifyToolCallsUpdate();
};
promise = invocation.execute(
signal,
liveOutputCallback,
shellExecutionConfig,
setPidCallback,
);
} else {
promise = invocation.execute(
signal,
liveOutputCallback,
shellExecutionConfig,
);
}
try {
const toolResult: ToolResult = await promise;
if (signal.aborted) {
this.setStatusInternal(
callId,
'cancelled',
signal,
'User cancelled tool execution.',
);
} else if (toolResult.error === undefined) {
let content = toolResult.llmContent;
let outputFile: string | undefined = undefined;
const contentLength =
typeof content === 'string' ? content.length : undefined;
if (
typeof content === 'string' &&
toolName === SHELL_TOOL_NAME &&
this.config.getEnableToolOutputTruncation() &&
this.config.getTruncateToolOutputThreshold() > 0 &&
this.config.getTruncateToolOutputLines() > 0
) {
const originalContentLength = content.length;
const threshold = this.config.getTruncateToolOutputThreshold();
const lines = this.config.getTruncateToolOutputLines();
const truncatedResult = await truncateAndSaveToFile(
content,
callId,
this.config.storage.getProjectTempDir(),
threshold,
lines,
await runInDevTraceSpan(
{
name: toolCall.tool.name,
attributes: { type: 'tool-call' },
},
async ({ metadata: spanMetadata }) => {
spanMetadata.input = {
request: toolCall.request,
};
// TODO: Refactor to remove special casing for ShellToolInvocation.
// Introduce a generic callbacks object for the execute method to handle
// things like `onPid` and `onLiveOutput`. This will make the scheduler
// agnostic to the invocation type.
let promise: Promise<ToolResult>;
if (invocation instanceof ShellToolInvocation) {
const setPidCallback = (pid: number) => {
this.toolCalls = this.toolCalls.map((tc) =>
tc.request.callId === callId && tc.status === 'executing'
? { ...tc, pid }
: tc,
);
this.notifyToolCallsUpdate();
};
promise = invocation.execute(
signal,
liveOutputCallback,
shellExecutionConfig,
setPidCallback,
);
content = truncatedResult.content;
outputFile = truncatedResult.outputFile;
} else {
promise = invocation.execute(
signal,
liveOutputCallback,
shellExecutionConfig,
);
}
if (outputFile) {
logToolOutputTruncated(
this.config,
new ToolOutputTruncatedEvent(
scheduledCall.request.prompt_id,
{
toolName,
originalContentLength,
truncatedContentLength: content.length,
threshold,
lines,
},
try {
const toolResult: ToolResult = await promise;
spanMetadata.output = toolResult;
if (signal.aborted) {
this.setStatusInternal(
callId,
'cancelled',
signal,
'User cancelled tool execution.',
);
} else if (toolResult.error === undefined) {
let content = toolResult.llmContent;
let outputFile: string | undefined = undefined;
const contentLength =
typeof content === 'string' ? content.length : undefined;
if (
typeof content === 'string' &&
toolName === SHELL_TOOL_NAME &&
this.config.getEnableToolOutputTruncation() &&
this.config.getTruncateToolOutputThreshold() > 0 &&
this.config.getTruncateToolOutputLines() > 0
) {
const originalContentLength = content.length;
const threshold =
this.config.getTruncateToolOutputThreshold();
const lines = this.config.getTruncateToolOutputLines();
const truncatedResult = await truncateAndSaveToFile(
content,
callId,
this.config.storage.getProjectTempDir(),
threshold,
lines,
);
content = truncatedResult.content;
outputFile = truncatedResult.outputFile;
if (outputFile) {
logToolOutputTruncated(
this.config,
new ToolOutputTruncatedEvent(
scheduledCall.request.prompt_id,
{
toolName,
originalContentLength,
truncatedContentLength: content.length,
threshold,
lines,
},
),
);
}
}
const response = convertToFunctionResponse(
toolName,
callId,
content,
);
const successResponse: ToolCallResponseInfo = {
callId,
responseParts: response,
resultDisplay: toolResult.returnDisplay,
error: undefined,
errorType: undefined,
outputFile,
contentLength,
};
this.setStatusInternal(
callId,
'success',
signal,
successResponse,
);
} else {
// It is a failure
const error = new Error(toolResult.error.message);
const errorResponse = createErrorResponse(
scheduledCall.request,
error,
toolResult.error.type,
);
this.setStatusInternal(callId, 'error', signal, errorResponse);
}
} catch (executionError: unknown) {
spanMetadata.error = executionError;
if (signal.aborted) {
this.setStatusInternal(
callId,
'cancelled',
signal,
'User cancelled tool execution.',
);
} else {
this.setStatusInternal(
callId,
'error',
signal,
createErrorResponse(
scheduledCall.request,
executionError instanceof Error
? executionError
: new Error(String(executionError)),
ToolErrorType.UNHANDLED_EXCEPTION,
),
);
}
}
const response = convertToFunctionResponse(
toolName,
callId,
content,
);
const successResponse: ToolCallResponseInfo = {
callId,
responseParts: response,
resultDisplay: toolResult.returnDisplay,
error: undefined,
errorType: undefined,
outputFile,
contentLength,
};
this.setStatusInternal(callId, 'success', signal, successResponse);
} else {
// It is a failure
const error = new Error(toolResult.error.message);
const errorResponse = createErrorResponse(
scheduledCall.request,
error,
toolResult.error.type,
);
this.setStatusInternal(callId, 'error', signal, errorResponse);
}
} catch (executionError: unknown) {
if (signal.aborted) {
this.setStatusInternal(
callId,
'cancelled',
signal,
'User cancelled tool execution.',
);
} else {
this.setStatusInternal(
callId,
'error',
signal,
createErrorResponse(
scheduledCall.request,
executionError instanceof Error
? executionError
: new Error(String(executionError)),
ToolErrorType.UNHANDLED_EXCEPTION,
),
);
}
}
await this.checkAndNotifyCompletion(signal);
await this.checkAndNotifyCompletion(signal);
},
);
}
}
}


@@ -28,6 +28,7 @@ import {
import type { ContentGenerator } from './contentGenerator.js';
import { toContents } from '../code_assist/converter.js';
import { isStructuredError } from '../utils/quotaErrorDetection.js';
import { runInDevTraceSpan, type SpanMetadata } from '../telemetry/trace.js';
interface StructuredError {
status: number;
@@ -107,47 +108,74 @@ export class LoggingContentGenerator implements ContentGenerator {
req: GenerateContentParameters,
userPromptId: string,
): Promise<GenerateContentResponse> {
const startTime = Date.now();
this.logApiRequest(toContents(req.contents), req.model, userPromptId);
try {
const response = await this.wrapped.generateContent(req, userPromptId);
const durationMs = Date.now() - startTime;
this._logApiResponse(
durationMs,
response.modelVersion || req.model,
userPromptId,
response.usageMetadata,
JSON.stringify(response),
);
return response;
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(durationMs, error, req.model, userPromptId);
throw error;
}
return runInDevTraceSpan(
{
name: 'generateContent',
},
async ({ metadata: spanMetadata }) => {
spanMetadata.input = { request: req, userPromptId, model: req.model };
const startTime = Date.now();
this.logApiRequest(toContents(req.contents), req.model, userPromptId);
try {
const response = await this.wrapped.generateContent(
req,
userPromptId,
);
spanMetadata.output = {
response,
usageMetadata: response.usageMetadata,
};
const durationMs = Date.now() - startTime;
this._logApiResponse(
durationMs,
response.modelVersion || req.model,
userPromptId,
response.usageMetadata,
JSON.stringify(response),
);
return response;
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(durationMs, error, req.model, userPromptId);
throw error;
}
},
);
}
async generateContentStream(
req: GenerateContentParameters,
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const startTime = Date.now();
this.logApiRequest(toContents(req.contents), req.model, userPromptId);
return runInDevTraceSpan(
{
name: 'generateContentStream',
noAutoEnd: true,
},
async ({ metadata: spanMetadata, endSpan }) => {
spanMetadata.input = { request: req, userPromptId, model: req.model };
const startTime = Date.now();
this.logApiRequest(toContents(req.contents), req.model, userPromptId);
let stream: AsyncGenerator<GenerateContentResponse>;
try {
stream = await this.wrapped.generateContentStream(req, userPromptId);
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(durationMs, error, req.model, userPromptId);
throw error;
}
let stream: AsyncGenerator<GenerateContentResponse>;
try {
stream = await this.wrapped.generateContentStream(req, userPromptId);
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(durationMs, error, req.model, userPromptId);
throw error;
}
return this.loggingStreamWrapper(
stream,
startTime,
userPromptId,
req.model,
return this.loggingStreamWrapper(
stream,
startTime,
userPromptId,
req.model,
spanMetadata,
endSpan,
);
},
);
}
@@ -156,6 +184,8 @@ export class LoggingContentGenerator implements ContentGenerator {
startTime: number,
userPromptId: string,
model: string,
spanMetadata: SpanMetadata,
endSpan: () => void,
): AsyncGenerator<GenerateContentResponse> {
const responses: GenerateContentResponse[] = [];
@@ -177,7 +207,15 @@ export class LoggingContentGenerator implements ContentGenerator {
lastUsageMetadata,
JSON.stringify(responses),
);
spanMetadata.output = {
streamChunks: responses.map((r) => ({
content: r.candidates?.[0]?.content ?? null,
})),
usageMetadata: lastUsageMetadata,
durationMs,
};
} catch (error) {
spanMetadata.error = error;
const durationMs = Date.now() - startTime;
this._logApiError(
durationMs,
@@ -186,6 +224,8 @@ export class LoggingContentGenerator implements ContentGenerator {
userPromptId,
);
throw error;
} finally {
endSpan();
}
}
@@ -196,6 +236,16 @@ export class LoggingContentGenerator implements ContentGenerator {
async embedContent(
req: EmbedContentParameters,
): Promise<EmbedContentResponse> {
return this.wrapped.embedContent(req);
return runInDevTraceSpan(
{
name: 'embedContent',
},
async ({ metadata: spanMetadata }) => {
spanMetadata.input = { request: req };
const output = await this.wrapped.embedContent(req);
spanMetadata.output = output;
return output;
},
);
}
}


@@ -137,3 +137,4 @@ export {
GenAiProviderName,
GenAiTokenType,
} from './metrics.js';
export { runInDevTraceSpan, type SpanMetadata } from './trace.js';


@@ -0,0 +1,153 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
diag,
SpanStatusCode,
trace,
type AttributeValue,
type SpanOptions,
} from '@opentelemetry/api';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';
const TRACER_NAME = 'gemini-cli';
const TRACER_VERSION = 'v1';
/**
* Metadata for a span.
*/
export interface SpanMetadata {
/** The name of the span. */
name: string;
/** The input to the span. */
input?: unknown;
/** The output of the span. */
output?: unknown;
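/** An error produced by the traced operation, if any. */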
error?: unknown;
/** Additional attributes for the span. */
attributes: Record<string, AttributeValue>;
}
/**
* Runs a function in a new OpenTelemetry span.
*
* The `metadata` object passed to the callback is automatically used to set the span's status and attributes upon completion.
*
* @example
* ```typescript
* runInDevTraceSpan({ name: 'my-operation' }, ({ metadata }) => {
* metadata.input = { foo: 'bar' };
* // ... do work ...
* metadata.output = { result: 'baz' };
* metadata.attributes['my.custom.attribute'] = 'some-value';
* });
* ```
*
* @param opts The options for the span.
* @param fn The function to run in the span.
* @returns The result of the function.
*/
export async function runInDevTraceSpan<R>(
opts: SpanOptions & { name: string; noAutoEnd?: boolean },
fn: ({
metadata,
}: {
metadata: SpanMetadata;
endSpan: () => void;
}) => Promise<R>,
): Promise<R> {
const { name: spanName, noAutoEnd, ...restOfSpanOpts } = opts;
if (process.env['GEMINI_DEV_TRACING'] !== 'true') {
// If GEMINI_DEV_TRACING is not set to 'true', do not trace.
return await fn({
metadata: {
name: spanName,
attributes: {},
},
endSpan: () => {
// noop
},
});
}
const tracer = trace.getTracer(TRACER_NAME, TRACER_VERSION);
return await tracer.startActiveSpan(
opts.name,
restOfSpanOpts,
async (span) => {
const meta: SpanMetadata = {
name: spanName,
attributes: {},
};
const endSpan = () => {
try {
if (meta.input !== undefined) {
span.setAttribute('input-json', safeJsonStringify(meta.input));
}
if (meta.output !== undefined) {
span.setAttribute('output-json', safeJsonStringify(meta.output));
}
for (const [key, value] of Object.entries(meta.attributes)) {
span.setAttribute(key, value as AttributeValue);
}
if (meta.error) {
span.setStatus({
code: SpanStatusCode.ERROR,
message: getErrorMessage(meta.error),
});
if (meta.error instanceof Error) {
span.recordException(meta.error);
}
} else {
span.setStatus({ code: SpanStatusCode.OK });
}
} catch (e) {
// Log the error but don't rethrow, to ensure span.end() is called.
diag.error('Error setting span attributes in endSpan', e);
span.setStatus({
code: SpanStatusCode.ERROR,
message: `Error in endSpan: ${getErrorMessage(e)}`,
});
} finally {
span.end();
}
};
try {
return await fn({ metadata: meta, endSpan });
} catch (e) {
meta.error = e;
if (noAutoEnd) {
// For streaming operations, the delegated endSpan call will not be reached
// on an exception, so we must end the span here to prevent a leak.
endSpan();
}
throw e;
} finally {
if (!noAutoEnd) {
// For non-streaming operations, this ensures the span is always closed,
// and if an error occurred, it will be recorded correctly by endSpan.
endSpan();
}
}
},
);
}
/**
* Gets the error message from an error object.
*
* @param e The error object.
* @returns The error message.
*/
function getErrorMessage(e: unknown): string {
if (e instanceof Error) {
return e.message;
}
if (typeof e === 'string') {
return e;
}
return safeJsonStringify(e);
}