mirror of
https://github.com/openai/codex.git
synced 2026-04-24 22:54:54 +00:00
This Pull Request addresses an issue where the output of commands executed in the raw-exec utility was being truncated due to restrictive limits on the number of lines and bytes collected. The truncation caused the message [Output truncated: too many lines or bytes] to appear when processing large outputs, which could hinder the functionality of the CLI. Changes Made Increased the maximum output limits in the [createTruncatingCollector](https://github.com/openai/codex/pull/575) utility: Bytes: Increased from 10 KB to 100 KB. Lines: Increased from 256 lines to 1024 lines. Installed the @types/node package to resolve missing type definitions for [NodeJS](https://github.com/openai/codex/pull/575) and [Buffer](https://github.com/openai/codex/pull/575). Verified and fixed any related errors in the [createTruncatingCollector](https://github.com/openai/codex/pull/575) implementation. Issue Solved: This PR ensures that larger outputs can be processed without truncation, improving the usability of the CLI for commands that generate extensive output. https://github.com/openai/codex/issues/509 --------- Co-authored-by: Michael Bolin <bolinfest@gmail.com>
78 lines
2.1 KiB
TypeScript
78 lines
2.1 KiB
TypeScript
// Maximum output cap: either MAX_OUTPUT_LINES lines or MAX_OUTPUT_BYTES bytes,
// whichever limit is reached first.
import { DEFAULT_SHELL_MAX_BYTES, DEFAULT_SHELL_MAX_LINES } from "../../config";
|
|
|
|
/**
|
|
* Creates a collector that accumulates data Buffers from a stream up to
|
|
* specified byte and line limits. After either limit is exceeded, further
|
|
* data is ignored.
|
|
*/
|
|
export function createTruncatingCollector(
|
|
stream: NodeJS.ReadableStream,
|
|
byteLimit: number = DEFAULT_SHELL_MAX_BYTES,
|
|
lineLimit: number = DEFAULT_SHELL_MAX_LINES,
|
|
): {
|
|
getString: () => string;
|
|
hit: boolean;
|
|
} {
|
|
const chunks: Array<Buffer> = [];
|
|
let totalBytes = 0;
|
|
let totalLines = 0;
|
|
let hitLimit = false;
|
|
|
|
stream?.on("data", (data: Buffer) => {
|
|
if (hitLimit) {
|
|
return;
|
|
}
|
|
const dataLength = data.length;
|
|
let newlineCount = 0;
|
|
for (let i = 0; i < dataLength; i++) {
|
|
if (data[i] === 0x0a) {
|
|
newlineCount++;
|
|
}
|
|
}
|
|
// If entire chunk fits within byte and line limits, take it whole
|
|
if (
|
|
totalBytes + dataLength <= byteLimit &&
|
|
totalLines + newlineCount <= lineLimit
|
|
) {
|
|
chunks.push(data);
|
|
totalBytes += dataLength;
|
|
totalLines += newlineCount;
|
|
} else {
|
|
// Otherwise, take a partial slice up to the first limit breach
|
|
const allowedBytes = byteLimit - totalBytes;
|
|
const allowedLines = lineLimit - totalLines;
|
|
let bytesTaken = 0;
|
|
let linesSeen = 0;
|
|
for (let i = 0; i < dataLength; i++) {
|
|
// Stop if byte or line limit is reached
|
|
if (bytesTaken === allowedBytes || linesSeen === allowedLines) {
|
|
break;
|
|
}
|
|
const byte = data[i];
|
|
if (byte === 0x0a) {
|
|
linesSeen++;
|
|
}
|
|
bytesTaken++;
|
|
}
|
|
if (bytesTaken > 0) {
|
|
chunks.push(data.slice(0, bytesTaken));
|
|
totalBytes += bytesTaken;
|
|
totalLines += linesSeen;
|
|
}
|
|
hitLimit = true;
|
|
}
|
|
});
|
|
|
|
return {
|
|
getString() {
|
|
return Buffer.concat(chunks).toString("utf8");
|
|
},
|
|
/** True if either byte or line limit was exceeded */
|
|
get hit(): boolean {
|
|
return hitLimit;
|
|
},
|
|
};
|
|
}
|