Mirror of https://github.com/openai/codex.git (synced 2026-02-06 17:03:42 +00:00)

Compare commits: codex/impl ... sec (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | a34f6c9f08 | |

.vscode/launch.json (vendored, 18 lines)
@@ -1,18 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Cargo launch",
"cargo": {
"cwd": "${workspaceFolder}/codex-rs",
"args": [
"build",
"--bin=codex-tui"
]
},
"args": []
}
]
}
.vscode/settings.json (vendored, 10 lines)

@@ -1,10 +0,0 @@
{
"rust-analyzer.checkOnSave": true,
"rust-analyzer.check.command": "clippy",
"rust-analyzer.check.extraArgs": ["--all-features", "--tests"],
"rust-analyzer.rustfmt.extraArgs": ["--config", "imports_granularity=Item"],
"[rust]": {
"editor.defaultFormatter": "rust-lang.rust-analyzer",
"editor.formatOnSave": true,
}
}
@@ -3,7 +3,3 @@
In the codex-rs folder where the rust code lives:

- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR`. You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.

Before creating a pull request with changes to `codex-rs`, run `just fmt` (in `codex-rs` directory) to format the code and `just fix` (in `codex-rs` directory) to fix any linter issues in the code, ensure the test suite passes by running `cargo test --all-features` in the `codex-rs` directory.

When making individual changes prefer running tests on individual files or projects first.
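As an illustration of the early-exit pattern the guidance above refers to, here is a minimal sketch of a Rust test that skips itself when the Codex sandbox disables networking. The constant path matches its use later in this diff; the test name and body are hypothetical.

```rust
// Minimal sketch: skip a network-dependent test inside the Codex sandbox.
// The constant is re-exported from codex_core::exec (as used elsewhere in
// this diff); the test body is a placeholder.
use codex_core::exec::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;

#[tokio::test]
async fn talks_to_a_mock_server() {
    if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
        println!("Skipping test because networking is disabled in the Codex sandbox.");
        return;
    }
    // ... test body that needs network access ...
}
```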
@@ -15,6 +15,7 @@
* current platform / architecture, an error is thrown.
*/

import { spawnSync } from "child_process";
import fs from "fs";
import path from "path";
import { fileURLToPath, pathToFileURL } from "url";
@@ -34,7 +35,7 @@ const wantsNative = fs.existsSync(path.join(__dirname, "use-native")) ||
: false);

// Try native binary if requested.
if (wantsNative && process.platform !== 'win32') {
if (wantsNative) {
const { platform, arch } = process;

let targetTriple = null;
@@ -73,76 +74,22 @@ if (wantsNative && process.platform !== 'win32') {
}

const binaryPath = path.join(__dirname, "..", "bin", `codex-${targetTriple}`);

// Use an asynchronous spawn instead of spawnSync so that Node is able to
// respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is
// executing. This allows us to forward those signals to the child process
// and guarantees that when either the child terminates or the parent
// receives a fatal signal, both processes exit in a predictable manner.
const { spawn } = await import("child_process");

const child = spawn(binaryPath, process.argv.slice(2), {
const result = spawnSync(binaryPath, process.argv.slice(2), {
stdio: "inherit",
});

child.on("error", (err) => {
// Typically triggered when the binary is missing or not executable.
// Re-throwing here will terminate the parent with a non-zero exit code
// while still printing a helpful stack trace.
// eslint-disable-next-line no-console
console.error(err);
process.exit(1);
});
const exitCode = typeof result.status === "number" ? result.status : 1;
process.exit(exitCode);
}

// Forward common termination signals to the child so that it shuts down
// gracefully. In the handler we temporarily disable the default behavior of
// exiting immediately; once the child has been signaled we simply wait for
// its exit event which will in turn terminate the parent (see below).
const forwardSignal = (signal) => {
if (child.killed) {
return;
}
try {
child.kill(signal);
} catch {
/* ignore */
}
};
// Fallback: execute the original JavaScript CLI.

["SIGINT", "SIGTERM", "SIGHUP"].forEach((sig) => {
process.on(sig, () => forwardSignal(sig));
});
// Resolve the path to the compiled CLI bundle
const cliPath = path.resolve(__dirname, "../dist/cli.js");
const cliUrl = pathToFileURL(cliPath).href;

// When the child exits, mirror its termination reason in the parent so that
// shell scripts and other tooling observe the correct exit status.
// Wrap the lifetime of the child process in a Promise so that we can await
// its termination in a structured way. The Promise resolves with an object
// describing how the child exited: either via exit code or due to a signal.
const childResult = await new Promise((resolve) => {
child.on("exit", (code, signal) => {
if (signal) {
resolve({ type: "signal", signal });
} else {
resolve({ type: "code", exitCode: code ?? 1 });
}
});
});

if (childResult.type === "signal") {
// Re-emit the same signal so that the parent terminates with the expected
// semantics (this also sets the correct exit code of 128 + n).
process.kill(process.pid, childResult.signal);
} else {
process.exit(childResult.exitCode);
}
} else {
// Fallback: execute the original JavaScript CLI.

// Resolve the path to the compiled CLI bundle
const cliPath = path.resolve(__dirname, "../dist/cli.js");
const cliUrl = pathToFileURL(cliPath).href;

// Load and execute the CLI
// Load and execute the CLI
(async () => {
try {
await import(cliUrl);
} catch (err) {
@@ -150,4 +97,4 @@ if (wantsNative && process.platform !== 'win32') {
console.error(err);
process.exit(1);
}
}
})();
@@ -10,19 +10,25 @@ import type {
import MultilineTextEditor from "./multiline-editor";
import { TerminalChatCommandReview } from "./terminal-chat-command-review.js";
import TextCompletions from "./terminal-chat-completions.js";
import { loadConfig } from "../../utils/config.js";
import { loadConfig, type AppConfig } from "../../utils/config.js";
import { getFileSystemSuggestions } from "../../utils/file-system-suggestions.js";
import { expandFileTags } from "../../utils/file-tag-utils";
import { createInputItem } from "../../utils/input-utils.js";
import { log } from "../../utils/logger/log.js";
import { setSessionId } from "../../utils/session.js";
import { SLASH_COMMANDS, type SlashCommand } from "../../utils/slash-commands";
import {
runSecurityReview,
SecurityReviewError,
} from "../../utils/security-review.js";
import type { SecurityReviewMode } from "../../utils/security-review.js";
import {
loadCommandHistory,
addToHistory,
} from "../../utils/storage/command-history.js";
import { clearTerminal, onExit } from "../../utils/terminal.js";
import { Box, Text, useApp, useInput, useStdin } from "ink";
import path from "node:path";
import { fileURLToPath } from "node:url";
import React, {
useCallback,
@@ -39,6 +45,130 @@ const suggestions = [
"are there any bugs in my code?",
];

const SEC_REVIEW_COMMAND = "/secreview";

type SecReviewCommandOptions = {
mode: SecurityReviewMode;
includePaths: Array<string>;
outputPath?: string;
repoPath?: string;
modelName?: string;
};

function tokenizeCommand(input: string): Array<string> {
const tokens: Array<string> = [];
const regex = /"([^"]*)"|'([^']*)'|(\S+)/g;
let match: RegExpExecArray | null;
while ((match = regex.exec(input)) !== null) {
if (match[1] != null) {
tokens.push(match[1]);
} else if (match[2] != null) {
tokens.push(match[2]);
} else if (match[3] != null) {
tokens.push(match[3]);
}
}
return tokens;
}

function parseSecReviewCommand(input: string): SecReviewCommandOptions {
const tokens = tokenizeCommand(input).slice(1); // drop the command itself
let mode: SecurityReviewMode = "full";
const includePaths: Array<string> = [];
let outputPath: string | undefined;
let repoPath: string | undefined;
let modelName: string | undefined;

const parseMode = (value: string, option: string) => {
if (value === "bugs") {
mode = "bugs";
} else if (value === "full") {
mode = "full";
} else {
throw new Error(`Unknown ${option} value "${value}". Use "full" or "bugs".`);
}
};

for (let i = 0; i < tokens.length; i += 1) {
const token = tokens[i];

const expectValue = (label: string): string => {
if (i + 1 >= tokens.length) {
throw new Error(`Expected value after ${label}`);
}
i += 1;
return tokens[i];
};

if (token === "--") {
break;
} else if (token === "bugs" || token === "--bugs" || token === "--mode=bugs") {
mode = "bugs";
} else if (token === "full" || token === "--full" || token === "--mode=full") {
mode = "full";
} else if (token === "--mode") {
parseMode(expectValue("--mode"), "--mode");
} else if (token.startsWith("--mode=")) {
parseMode(token.slice("--mode=".length), "--mode");
} else if (token === "--path" || token === "-p") {
includePaths.push(expectValue(token));
} else if (token.startsWith("--path=")) {
includePaths.push(token.slice("--path=".length));
} else if (token.startsWith("-p=")) {
includePaths.push(token.slice("-p=".length));
} else if (
token === "--output" ||
token === "-o" ||
token === "--output-location"
) {
outputPath = expectValue(token);
} else if (token.startsWith("--output=")) {
outputPath = token.slice("--output=".length);
} else if (token.startsWith("-o=")) {
outputPath = token.slice("-o=".length);
} else if (
token === "--repo" ||
token === "--repo-location" ||
token === "--repository"
) {
repoPath = expectValue(token);
} else if (token.startsWith("--repo=")) {
repoPath = token.slice("--repo=".length);
} else if (token.startsWith("--repo-location=")) {
repoPath = token.slice("--repo-location=".length);
} else if (token === "--model" || token === "--model-name") {
modelName = expectValue(token);
} else if (token.startsWith("--model=")) {
modelName = token.slice("--model=".length);
} else if (token.startsWith("--model-name=")) {
modelName = token.slice("--model-name=".length);
} else if (token.length > 0) {
includePaths.push(token);
}
}

return {
mode,
includePaths: includePaths.filter((p) => p.length > 0),
outputPath,
repoPath,
modelName,
};
}

function trimLogOutput(logText: string, maxLines: number = 40): string {
const normalised = logText.replace(/\r\n/g, "\n").trimEnd();
if (normalised === "") {
return "(empty)";
}
const lines = normalised.split("\n");
if (lines.length <= maxLines) {
return normalised;
}
const tail = lines.slice(-maxLines);
return ["… (showing last " + maxLines + " lines)", ...tail].join("\n");
}

export default function TerminalChatInput({
isNew,
loading,
@@ -60,6 +190,7 @@ export default function TerminalChatInput({
active,
thinkingSeconds,
items = [],
config,
}: {
isNew: boolean;
loading: boolean;
@@ -85,6 +216,7 @@ export default function TerminalChatInput({
thinkingSeconds: number;
// New: current conversation items so we can include them in bug reports
items?: Array<ResponseItem>;
config: AppConfig;
}): React.ReactElement {
// Slash command suggestion index
const [selectedSlashSuggestion, setSelectedSlashSuggestion] =
@@ -512,6 +644,230 @@ export default function TerminalChatInput({
} else if (inputValue.startsWith("/approval")) {
setInput("");
openApprovalOverlay();
return;
} else if (inputValue.startsWith(SEC_REVIEW_COMMAND)) {
setInput("");
const commandId = `secreview-${Date.now()}`;

let parsed: SecReviewCommandOptions;
try {
parsed = parseSecReviewCommand(inputValue);
} catch (error) {
const message =
error instanceof Error ? error.message : String(error);
setItems((prev) => [
...prev,
{
id: `${commandId}-parse-error`,
type: "message",
role: "system",
content: [
{
type: "input_text",
text: `⚠️ Unable to parse ${SEC_REVIEW_COMMAND} arguments: ${message}`,
},
],
},
]);
return;
}

const repoPath = parsed.repoPath
? path.isAbsolute(parsed.repoPath)
? parsed.repoPath
: path.resolve(process.cwd(), parsed.repoPath)
: process.cwd();

const resolvedOutputPath =
parsed.outputPath != null
? path.isAbsolute(parsed.outputPath)
? parsed.outputPath
: path.resolve(repoPath, parsed.outputPath)
: undefined;

const scopeDescription =
parsed.includePaths.length > 0
? parsed.includePaths.join(", ")
: "entire repository";

const introLines = [
`🔐 Running AppSec security review (mode: ${parsed.mode}).`,
`Repository: ${repoPath}`,
`Scope: ${scopeDescription}`,
];

if (resolvedOutputPath) {
introLines.push(`Output: ${resolvedOutputPath}`);
}

if (parsed.modelName) {
introLines.push(`Model override: ${parsed.modelName}`);
}

setItems((prev) => [
...prev,
{
id: `${commandId}-start`,
type: "message",
role: "system",
content: [
{
type: "input_text",
text: introLines.join("\n"),
},
],
},
]);

try {
const result = await runSecurityReview({
repoPath,
includePaths: parsed.includePaths,
outputPath: resolvedOutputPath,
modelName: parsed.modelName,
mode: parsed.mode,
config,
});

const summaryLines = [
"✅ AppSec review complete.",
`Artifacts: ${result.outputRoot}`,
];
if (!result.reportContent) {
summaryLines.push("ℹ️ report.md not found in output.");
}
if (!result.bugsContent) {
summaryLines.push("ℹ️ context/bugs.md not found in output.");
}

setItems((prev) => [
...prev,
{
id: `${commandId}-complete`,
type: "message",
role: "system",
content: [
{
type: "input_text",
text: summaryLines.join("\n"),
},
],
},
]);

if (parsed.mode === "full" && result.reportContent) {
setItems((prev) => [
...prev,
{
id: `${commandId}-report`,
type: "message",
role: "assistant",
content: [
{
type: "output_text",
text: `# AppSec Security Review Report\n\n${result.reportContent}`,
},
],
},
]);
}

if (result.bugsContent) {
const heading =
parsed.mode === "full"
? "## Bugs Summary"
: "# AppSec Bugs Summary";
setItems((prev) => [
...prev,
{
id: `${commandId}-bugs`,
type: "message",
role: "assistant",
content: [
{
type: "output_text",
text: `${heading}\n\n${result.bugsContent}`,
},
],
},
]);
} else {
setItems((prev) => [
...prev,
{
id: `${commandId}-no-bugs`,
type: "message",
role: "system",
content: [
{
type: "input_text",
text:
"No bug summary produced. Check the output directory for details.",
},
],
},
]);
}

if (parsed.mode === "bugs" && result.reportContent) {
setItems((prev) => [
...prev,
{
id: `${commandId}-report-location`,
type: "message",
role: "system",
content: [
{
type: "input_text",
text: `Full report available at ${result.reportPath}`,
},
],
},
]);
}
if (result.stdout.trim()) {
setItems((prev) => [
...prev,
{
id: `${commandId}-logs`,
type: "message",
role: "system",
content: [
{
type: "input_text",
text: `Logs:\n${trimLogOutput(result.stdout)}`,
},
],
},
]);
}
} catch (error) {
const message =
error instanceof Error ? error.message : String(error);
const stderr =
error instanceof SecurityReviewError && error.stderr
? `\n\nstderr last lines:\n${trimLogOutput(error.stderr)}`
: "";
const stdout =
error instanceof SecurityReviewError && error.stdout
? `\n\nstdout last lines:\n${trimLogOutput(error.stdout)}`
: "";
setItems((prev) => [
...prev,
{
id: `${commandId}-error`,
type: "message",
role: "system",
content: [
{
type: "input_text",
text: `❌ AppSec review failed: ${message}${stderr}${stdout}`,
},
],
},
]);
}

return;
} else if (["exit", "q", ":q"].includes(inputValue)) {
setInput("");
@@ -707,13 +1063,13 @@ export default function TerminalChatInput({
submitInput([inputItem]);

// Get config for history persistence.
const config = loadConfig();
const historyConfig = loadConfig();

// Add to history and update state.
const updatedHistory = await addToHistory(value, history, {
maxSize: config.history?.maxSize ?? 1000,
saveHistory: config.history?.saveHistory ?? true,
sensitivePatterns: config.history?.sensitivePatterns ?? [],
maxSize: historyConfig.history?.maxSize ?? 1000,
saveHistory: historyConfig.history?.saveHistory ?? true,
sensitivePatterns: historyConfig.history?.sensitivePatterns ?? [],
});

setHistory(updatedHistory);
@@ -742,6 +1098,7 @@ export default function TerminalChatInput({
onCompact,
skipNextSubmit,
items,
config,
],
);
@@ -580,6 +580,7 @@ export default function TerminalChat({
}}
items={items}
thinkingSeconds={thinkingSeconds}
config={config}
/>
)}
{overlayMode === "history" && (
@@ -56,6 +56,10 @@ export default function HelpOverlay({
<Text color="cyan">/bug</Text> – generate a prefilled GitHub issue URL
with session log
</Text>
<Text>
<Text color="cyan">/secreview</Text> – run AppSec security review and
show the results
</Text>
<Text>
<Text color="cyan">/diff</Text> – view working tree git diff
</Text>
@@ -24,6 +24,10 @@ export const SLASH_COMMANDS: Array<SlashCommand> = [
{ command: "/help", description: "Show list of commands" },
{ command: "/model", description: "Open model selection panel" },
{ command: "/approval", description: "Open approval mode selection panel" },
{
command: "/secreview",
description: "Run AppSec security review and display the generated reports",
},
{
command: "/bug",
description: "Generate a prefilled GitHub issue URL with session log",
@@ -61,6 +61,11 @@ describe("/clear command", () => {
active: true,
thinkingSeconds: 0,
items: existingItems,
config: {
model: "codex-mini-latest",
instructions: "",
provider: "openai",
},
};

const { stdin, flush, cleanup } = renderTui(
@@ -66,17 +66,27 @@ function stubProps(): any {
loading: false,
submitInput: vi.fn(),
confirmationPrompt: null,
explanation: undefined,
submitConfirmation: vi.fn(),
setLastResponseId: vi.fn(),
// Cast to any to satisfy the generic React.Dispatch signature without
// pulling the ResponseItem type into the test bundle.
setItems: (() => {}) as any,
setItems: vi.fn(),
contextLeftPercent: 100,
openOverlay: vi.fn(),
openModelOverlay: vi.fn(),
openHelpOverlay: vi.fn(),
openApprovalOverlay: vi.fn(),
openSessionsOverlay: vi.fn(),
openDiffOverlay: vi.fn(),
onCompact: vi.fn(),
interruptAgent: vi.fn(),
active: true,
thinkingSeconds: 0,
items: [],
config: {
model: "codex-mini-latest",
instructions: "",
provider: "openai",
},
};
}
@@ -10,6 +10,7 @@ test("SLASH_COMMANDS includes expected commands", () => {
expect(commands).toContain("/help");
expect(commands).toContain("/model");
expect(commands).toContain("/approval");
expect(commands).toContain("/secreview");
expect(commands).toContain("/clearhistory");
expect(commands).toContain("/diff");
});
@@ -26,6 +26,11 @@ describe("TerminalChatInput compact command", () => {
interruptAgent: () => {},
active: true,
thinkingSeconds: 0,
config: {
model: "codex-mini-latest",
instructions: "",
provider: "openai",
},
};
const { lastFrameStripped } = renderTui(<TerminalChatInput {...props} />);
const frame = lastFrameStripped();

@@ -81,6 +81,11 @@ describe("TerminalChatInput file tag suggestions", () => {
interruptAgent: vi.fn(),
active: true,
thinkingSeconds: 0,
config: {
model: "codex-mini-latest",
instructions: "",
provider: "openai",
},
};

beforeEach(() => {

@@ -47,6 +47,11 @@ describe("TerminalChatInput multiline functionality", () => {
interruptAgent: () => {},
active: true,
thinkingSeconds: 0,
config: {
model: "codex-mini-latest",
instructions: "",
provider: "openai",
},
};

const { stdin, lastFrameStripped, flush, cleanup } = renderTui(
@@ -99,6 +104,11 @@ describe("TerminalChatInput multiline functionality", () => {
interruptAgent: () => {},
active: true,
thinkingSeconds: 0,
config: {
model: "codex-mini-latest",
instructions: "",
provider: "openai",
},
};

const { stdin, lastFrameStripped, flush, cleanup } = renderTui(
codex-rs/Cargo.lock (generated, 69 lines)
@@ -399,15 +399,6 @@ version = "2.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6099cdc01846bc367c4e7dd630dc5966dccf36b652fae7a74e17b640411a91b2"
|
||||
|
||||
[[package]]
|
||||
name = "block-buffer"
|
||||
version = "0.10.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
|
||||
dependencies = [
|
||||
"generic-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bstr"
|
||||
version = "1.12.0"
|
||||
@@ -680,7 +671,6 @@ dependencies = [
|
||||
"seccompiler",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha1",
|
||||
"strum_macros 0.27.1",
|
||||
"tempfile",
|
||||
"thiserror 2.0.12",
|
||||
@@ -693,7 +683,6 @@ dependencies = [
|
||||
"tree-sitter",
|
||||
"tree-sitter-bash",
|
||||
"uuid",
|
||||
"walkdir",
|
||||
"wildmatch",
|
||||
"wiremock",
|
||||
]
|
||||
@@ -829,10 +818,12 @@ dependencies = [
|
||||
"ratatui",
|
||||
"ratatui-image",
|
||||
"regex-lite",
|
||||
"reqwest",
|
||||
"serde_json",
|
||||
"shlex",
|
||||
"strum 0.27.1",
|
||||
"strum_macros 0.27.1",
|
||||
"time",
|
||||
"tokio",
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
@@ -943,15 +934,6 @@ version = "0.8.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
|
||||
|
||||
[[package]]
|
||||
name = "cpufeatures"
|
||||
version = "0.2.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crc32fast"
|
||||
version = "1.4.2"
|
||||
@@ -1026,16 +1008,6 @@ version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"
|
||||
|
||||
[[package]]
|
||||
name = "crypto-common"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
|
||||
dependencies = [
|
||||
"generic-array",
|
||||
"typenum",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ctor"
|
||||
version = "0.1.26"
|
||||
@@ -1186,16 +1158,6 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8"
|
||||
|
||||
[[package]]
|
||||
name = "digest"
|
||||
version = "0.10.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
|
||||
dependencies = [
|
||||
"block-buffer",
|
||||
"crypto-common",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dirs"
|
||||
version = "6.0.0"
|
||||
@@ -1685,16 +1647,6 @@ dependencies = [
|
||||
"byteorder",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "generic-array"
|
||||
version = "0.14.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
|
||||
dependencies = [
|
||||
"typenum",
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getopts"
|
||||
version = "0.2.23"
|
||||
@@ -3994,17 +3946,6 @@ dependencies = [
|
||||
"syn 2.0.104",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sha1"
|
||||
version = "0.10.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sharded-slab"
|
||||
version = "0.1.7"
|
||||
@@ -4912,12 +4853,6 @@ dependencies = [
|
||||
"unicode-width 0.2.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "typenum"
|
||||
version = "1.18.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
|
||||
|
||||
[[package]]
|
||||
name = "unicase"
|
||||
version = "2.8.1"
|
||||
|
||||
@@ -28,7 +28,6 @@ rand = "0.9"
reqwest = { version = "0.12", features = ["json", "stream"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
sha1 = "0.10.6"
strum_macros = "0.27.1"
thiserror = "2.0.12"
time = { version = "0.3", features = ["formatting", "local-offset", "macros"] }
@@ -66,5 +65,4 @@ predicates = "3"
pretty_assertions = "1.4.1"
tempfile = "3"
tokio-test = "0.4"
walkdir = "2.5.0"
wiremock = "0.6"
@@ -134,7 +134,7 @@ pub(crate) async fn stream_chat_completions(

match res {
Ok(resp) if resp.status().is_success() => {
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600);
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(16);
let stream = resp.bytes_stream().map_err(CodexErr::Reqwest);
tokio::spawn(process_chat_sse(stream, tx_event));
return Ok(ResponseStream { rx_event });
@@ -426,12 +426,6 @@ where
// will never appear in a Chat Completions stream.
continue;
}
Poll::Ready(Some(Ok(ResponseEvent::OutputTextDelta(_))))
| Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryDelta(_)))) => {
// Deltas are ignored here since aggregation waits for the
// final OutputItemDone.
continue;
}
}
}
}
@@ -15,7 +15,6 @@ use tokio_util::io::ReaderStream;
|
||||
use tracing::debug;
|
||||
use tracing::trace;
|
||||
use tracing::warn;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::chat_completions::AggregateStreamExt;
|
||||
use crate::chat_completions::stream_chat_completions;
|
||||
@@ -45,7 +44,6 @@ pub struct ModelClient {
|
||||
config: Arc<Config>,
|
||||
client: reqwest::Client,
|
||||
provider: ModelProviderInfo,
|
||||
session_id: Uuid,
|
||||
effort: ReasoningEffortConfig,
|
||||
summary: ReasoningSummaryConfig,
|
||||
}
|
||||
@@ -56,13 +54,11 @@ impl ModelClient {
|
||||
provider: ModelProviderInfo,
|
||||
effort: ReasoningEffortConfig,
|
||||
summary: ReasoningSummaryConfig,
|
||||
session_id: Uuid,
|
||||
) -> Self {
|
||||
Self {
|
||||
config,
|
||||
client: reqwest::Client::new(),
|
||||
provider,
|
||||
session_id,
|
||||
effort,
|
||||
summary,
|
||||
}
|
||||
@@ -129,7 +125,6 @@ impl ModelClient {
|
||||
reasoning,
|
||||
previous_response_id: prompt.prev_id.clone(),
|
||||
store: prompt.store,
|
||||
// TODO: make this configurable
|
||||
stream: true,
|
||||
};
|
||||
|
||||
@@ -147,14 +142,13 @@ impl ModelClient {
|
||||
.provider
|
||||
.create_request_builder(&self.client)?
|
||||
.header("OpenAI-Beta", "responses=experimental")
|
||||
.header("session_id", self.session_id.to_string())
|
||||
.header(reqwest::header::ACCEPT, "text/event-stream")
|
||||
.json(&payload);
|
||||
|
||||
let res = req_builder.send().await;
|
||||
match res {
|
||||
Ok(resp) if resp.status().is_success() => {
|
||||
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600);
|
||||
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(16);
|
||||
|
||||
// spawn task to process SSE
|
||||
let stream = resp.bytes_stream().map_err(CodexErr::Reqwest);
|
||||
@@ -211,7 +205,6 @@ struct SseEvent {
|
||||
kind: String,
|
||||
response: Option<Value>,
|
||||
item: Option<Value>,
|
||||
delta: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
@@ -344,22 +337,6 @@ where
|
||||
return;
|
||||
}
|
||||
}
|
||||
"response.output_text.delta" => {
|
||||
if let Some(delta) = event.delta {
|
||||
let event = ResponseEvent::OutputTextDelta(delta);
|
||||
if tx_event.send(Ok(event)).await.is_err() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
"response.reasoning_summary_text.delta" => {
|
||||
if let Some(delta) = event.delta {
|
||||
let event = ResponseEvent::ReasoningSummaryDelta(delta);
|
||||
if tx_event.send(Ok(event)).await.is_err() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
"response.created" => {
|
||||
if event.response.is_some() {
|
||||
let _ = tx_event.send(Ok(ResponseEvent::Created {})).await;
|
||||
@@ -383,8 +360,10 @@ where
|
||||
| "response.function_call_arguments.delta"
|
||||
| "response.in_progress"
|
||||
| "response.output_item.added"
|
||||
| "response.output_text.delta"
|
||||
| "response.output_text.done"
|
||||
| "response.reasoning_summary_part.added"
|
||||
| "response.reasoning_summary_text.delta"
|
||||
| "response.reasoning_summary_text.done" => {
|
||||
// Currently, we ignore these events, but we handle them
|
||||
// separately to skip the logging message in the `other` case.
|
||||
@@ -396,7 +375,7 @@ where
|
||||
|
||||
/// used in tests to stream from a text SSE file
|
||||
async fn stream_from_fixture(path: impl AsRef<Path>) -> Result<ResponseStream> {
|
||||
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600);
|
||||
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(16);
|
||||
let f = std::fs::File::open(path.as_ref())?;
|
||||
let lines = std::io::BufReader::new(f).lines();
|
||||
|
||||
|
||||
@@ -57,8 +57,6 @@ pub enum ResponseEvent {
|
||||
response_id: String,
|
||||
token_usage: Option<TokenUsage>,
|
||||
},
|
||||
OutputTextDelta(String),
|
||||
ReasoningSummaryDelta(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
|
||||
@@ -51,6 +51,7 @@ use crate::exec::process_exec_tool_call;
use crate::exec_env::create_env;
use crate::flags::OPENAI_STREAM_MAX_RETRIES;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::mcp_connection_manager::try_parse_fully_qualified_tool_name;
use crate::mcp_tool_call::handle_mcp_tool_call;
use crate::models::ContentItem;
use crate::models::FunctionCallOutputPayload;
@@ -60,9 +61,7 @@ use crate::models::ResponseInputItem;
use crate::models::ResponseItem;
use crate::models::ShellToolCallParams;
use crate::project_doc::get_user_instructions;
use crate::protocol::AgentMessageDeltaEvent;
use crate::protocol::AgentMessageEvent;
use crate::protocol::AgentReasoningDeltaEvent;
use crate::protocol::AgentReasoningEvent;
use crate::protocol::ApplyPatchApprovalRequestEvent;
use crate::protocol::AskForApproval;
@@ -104,7 +103,7 @@ impl Codex {
/// submitted to start the session.
pub async fn spawn(config: Config, ctrl_c: Arc<Notify>) -> CodexResult<(Codex, String)> {
let (tx_sub, rx_sub) = async_channel::bounded(64);
let (tx_event, rx_event) = async_channel::bounded(1600);
let (tx_event, rx_event) = async_channel::bounded(64);

let instructions = get_user_instructions(&config).await;
let configure_session = Op::ConfigureSession {
@@ -591,7 +590,6 @@ async fn submission_loop(
provider.clone(),
model_reasoning_effort,
model_reasoning_summary,
session_id,
);

// abort any current running session and clone its state
@@ -1123,8 +1121,15 @@ async fn try_run_turn(

let mut stream = sess.client.clone().stream(&prompt).await?;

// Buffer all the incoming messages from the stream first, then execute them.
// If we execute a function call in the middle of handling the stream, it can time out.
let mut input = Vec::new();
while let Some(event) = stream.next().await {
input.push(event?);
}

let mut output = Vec::new();
while let Some(Ok(event)) = stream.next().await {
for event in input {
match event {
ResponseEvent::Created => {
let mut state = sess.state.lock().unwrap();
@@ -1167,20 +1172,6 @@
state.previous_response_id = Some(response_id);
break;
}
ResponseEvent::OutputTextDelta(delta) => {
let event = Event {
id: sub_id.to_string(),
msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }),
};
sess.tx_event.send(event).await.ok();
}
ResponseEvent::ReasoningSummaryDelta(delta) => {
let event = Event {
id: sub_id.to_string(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }),
};
sess.tx_event.send(event).await.ok();
}
}
}
Ok(output)
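The hunk above changes `try_run_turn` to drain the response stream into a `Vec` before handling any events, so a slow function call cannot stall the SSE read. Below is a minimal sketch of that buffer-then-process shape; the generic signature and the no-op handler are illustrative, not the crate's actual API.

```rust
// Sketch of the buffer-then-process pattern used above: collect every event
// from the stream first, then handle them, so handlers cannot delay the read.
use futures::StreamExt;

async fn run_turn<S, E>(mut stream: S) -> anyhow::Result<Vec<E>>
where
    S: futures::Stream<Item = anyhow::Result<E>> + Unpin,
{
    // 1. Drain the stream completely.
    let mut input = Vec::new();
    while let Some(event) = stream.next().await {
        input.push(event?);
    }

    // 2. Only now do the potentially slow per-event work.
    let mut output = Vec::new();
    for event in input {
        output.push(event); // placeholder for real handling
    }
    Ok(output)
}
```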
@@ -1292,7 +1283,7 @@ async fn handle_function_call(
handle_container_exec_with_params(params, sess, sub_id, call_id).await
}
_ => {
match sess.mcp_connection_manager.parse_tool_name(&name) {
match try_parse_fully_qualified_tool_name(&name) {
Some((server, tool_name)) => {
// TODO(mbolin): Determine appropriate timeout for tool call.
let timeout = None;

@@ -722,6 +722,7 @@ disable_response_storage = true
query_params: None,
http_headers: None,
env_http_headers: None,
supports_temperature: true,
};
let model_provider_map = {
let mut model_provider_map = built_in_model_providers();
@@ -7,7 +7,6 @@
//! `"<server><MCP_TOOL_NAME_DELIMITER><tool>"` as the key.

use std::collections::HashMap;
use std::collections::HashSet;
use std::time::Duration;

use anyhow::Context;
@@ -17,12 +16,8 @@ use codex_mcp_client::McpClient;
use mcp_types::ClientCapabilities;
use mcp_types::Implementation;
use mcp_types::Tool;

use sha1::Digest;
use sha1::Sha1;
use tokio::task::JoinSet;
use tracing::info;
use tracing::warn;

use crate::config_types::McpServerConfig;

@@ -31,8 +26,7 @@ use crate::config_types::McpServerConfig;
///
/// OpenAI requires tool names to conform to `^[a-zA-Z0-9_-]+$`, so we must
/// choose a delimiter from this character set.
const MCP_TOOL_NAME_DELIMITER: &str = "__";
const MAX_TOOL_NAME_LENGTH: usize = 64;
const MCP_TOOL_NAME_DELIMITER: &str = "__OAI_CODEX_MCP__";

/// Timeout for the `tools/list` request.
const LIST_TOOLS_TIMEOUT: Duration = Duration::from_secs(10);
@@ -41,42 +35,16 @@ const LIST_TOOLS_TIMEOUT: Duration = Duration::from_secs(10);
/// spawned successfully.
pub type ClientStartErrors = HashMap<String, anyhow::Error>;

fn qualify_tools(tools: Vec<ToolInfo>) -> HashMap<String, ToolInfo> {
let mut used_names = HashSet::new();
let mut qualified_tools = HashMap::new();
for tool in tools {
let mut qualified_name = format!(
"{}{}{}",
tool.server_name, MCP_TOOL_NAME_DELIMITER, tool.tool_name
);
if qualified_name.len() > MAX_TOOL_NAME_LENGTH {
let mut hasher = Sha1::new();
hasher.update(qualified_name.as_bytes());
let sha1 = hasher.finalize();
let sha1_str = format!("{sha1:x}");

// Truncate to make room for the hash suffix
let prefix_len = MAX_TOOL_NAME_LENGTH - sha1_str.len();

qualified_name = format!("{}{}", &qualified_name[..prefix_len], sha1_str);
}

if used_names.contains(&qualified_name) {
warn!("skipping duplicated tool {}", qualified_name);
continue;
}

used_names.insert(qualified_name.clone());
qualified_tools.insert(qualified_name, tool);
}

qualified_tools
fn fully_qualified_tool_name(server: &str, tool: &str) -> String {
format!("{server}{MCP_TOOL_NAME_DELIMITER}{tool}")
}

struct ToolInfo {
server_name: String,
tool_name: String,
tool: Tool,
pub(crate) fn try_parse_fully_qualified_tool_name(fq_name: &str) -> Option<(String, String)> {
let (server, tool) = fq_name.split_once(MCP_TOOL_NAME_DELIMITER)?;
if server.is_empty() || tool.is_empty() {
return None;
}
Some((server.to_string(), tool.to_string()))
}
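The longer delimiter above makes fully-qualified names trivially reversible. Here is a self-contained sketch of the round trip, restating the two helpers from this hunk together with an example input and output; the server and tool names are just placeholders.

```rust
// Round trip between (server, tool) and the fully-qualified tool name,
// mirroring the helpers introduced in this hunk.
const MCP_TOOL_NAME_DELIMITER: &str = "__OAI_CODEX_MCP__";

fn fully_qualified_tool_name(server: &str, tool: &str) -> String {
    format!("{server}{MCP_TOOL_NAME_DELIMITER}{tool}")
}

fn try_parse_fully_qualified_tool_name(fq_name: &str) -> Option<(String, String)> {
    let (server, tool) = fq_name.split_once(MCP_TOOL_NAME_DELIMITER)?;
    if server.is_empty() || tool.is_empty() {
        return None;
    }
    Some((server.to_string(), tool.to_string()))
}

fn main() {
    let fq = fully_qualified_tool_name("server1", "tool1");
    assert_eq!(fq, "server1__OAI_CODEX_MCP__tool1");
    assert_eq!(
        try_parse_fully_qualified_tool_name(&fq),
        Some(("server1".to_string(), "tool1".to_string()))
    );
}
```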
/// A thin wrapper around a set of running [`McpClient`] instances.
|
||||
@@ -89,7 +57,7 @@ pub(crate) struct McpConnectionManager {
|
||||
clients: HashMap<String, std::sync::Arc<McpClient>>,
|
||||
|
||||
/// Fully qualified tool name -> tool instance.
|
||||
tools: HashMap<String, ToolInfo>,
|
||||
tools: HashMap<String, Tool>,
|
||||
}
|
||||
|
||||
impl McpConnectionManager {
|
||||
@@ -111,19 +79,9 @@ impl McpConnectionManager {
|
||||
|
||||
// Launch all configured servers concurrently.
|
||||
let mut join_set = JoinSet::new();
|
||||
let mut errors = ClientStartErrors::new();
|
||||
|
||||
for (server_name, cfg) in mcp_servers {
|
||||
// Validate server name before spawning
|
||||
if !is_valid_mcp_server_name(&server_name) {
|
||||
let error = anyhow::anyhow!(
|
||||
"invalid server name '{}': must match pattern ^[a-zA-Z0-9_-]+$",
|
||||
server_name
|
||||
);
|
||||
errors.insert(server_name, error);
|
||||
continue;
|
||||
}
|
||||
|
||||
// TODO: Verify server name: require `^[a-zA-Z0-9_-]+$`?
|
||||
join_set.spawn(async move {
|
||||
let McpServerConfig { command, args, env } = cfg;
|
||||
let client_res = McpClient::new_stdio_client(command, args, env).await;
|
||||
@@ -159,6 +117,7 @@ impl McpConnectionManager {
|
||||
|
||||
let mut clients: HashMap<String, std::sync::Arc<McpClient>> =
|
||||
HashMap::with_capacity(join_set.len());
|
||||
let mut errors = ClientStartErrors::new();
|
||||
|
||||
while let Some(res) = join_set.join_next().await {
|
||||
let (server_name, client_res) = res?; // JoinError propagation
|
||||
@@ -173,9 +132,7 @@ impl McpConnectionManager {
|
||||
}
|
||||
}
|
||||
|
||||
let all_tools = list_all_tools(&clients).await?;
|
||||
|
||||
let tools = qualify_tools(all_tools);
|
||||
let tools = list_all_tools(&clients).await?;
|
||||
|
||||
Ok((Self { clients, tools }, errors))
|
||||
}
|
||||
@@ -183,10 +140,7 @@ impl McpConnectionManager {
|
||||
/// Returns a single map that contains **all** tools. Each key is the
|
||||
/// fully-qualified name for the tool.
|
||||
pub fn list_all_tools(&self) -> HashMap<String, Tool> {
|
||||
self.tools
|
||||
.iter()
|
||||
.map(|(name, tool)| (name.clone(), tool.tool.clone()))
|
||||
.collect()
|
||||
self.tools.clone()
|
||||
}
|
||||
|
||||
/// Invoke the tool indicated by the (server, tool) pair.
|
||||
@@ -208,19 +162,13 @@ impl McpConnectionManager {
|
||||
.await
|
||||
.with_context(|| format!("tool call failed for `{server}/{tool}`"))
|
||||
}
|
||||
|
||||
pub fn parse_tool_name(&self, tool_name: &str) -> Option<(String, String)> {
|
||||
self.tools
|
||||
.get(tool_name)
|
||||
.map(|tool| (tool.server_name.clone(), tool.tool_name.clone()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Query every server for its available tools and return a single map that
|
||||
/// contains **all** tools. Each key is the fully-qualified name for the tool.
|
||||
async fn list_all_tools(
|
||||
pub async fn list_all_tools(
|
||||
clients: &HashMap<String, std::sync::Arc<McpClient>>,
|
||||
) -> Result<Vec<ToolInfo>> {
|
||||
) -> Result<HashMap<String, Tool>> {
|
||||
let mut join_set = JoinSet::new();
|
||||
|
||||
// Spawn one task per server so we can query them concurrently. This
|
||||
@@ -237,19 +185,18 @@ async fn list_all_tools(
|
||||
});
|
||||
}
|
||||
|
||||
let mut aggregated: Vec<ToolInfo> = Vec::with_capacity(join_set.len());
|
||||
let mut aggregated: HashMap<String, Tool> = HashMap::with_capacity(join_set.len());
|
||||
|
||||
while let Some(join_res) = join_set.join_next().await {
|
||||
let (server_name, list_result) = join_res?;
|
||||
let list_result = list_result?;
|
||||
|
||||
for tool in list_result.tools {
|
||||
let tool_info = ToolInfo {
|
||||
server_name: server_name.clone(),
|
||||
tool_name: tool.name.clone(),
|
||||
tool,
|
||||
};
|
||||
aggregated.push(tool_info);
|
||||
// TODO(mbolin): escape tool names that contain invalid characters.
|
||||
let fq_name = fully_qualified_tool_name(&server_name, &tool.name);
|
||||
if aggregated.insert(fq_name.clone(), tool).is_some() {
|
||||
panic!("tool name collision for '{fq_name}': suspicious");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -261,97 +208,3 @@ async fn list_all_tools(
|
||||
|
||||
Ok(aggregated)
|
||||
}
|
||||
|
||||
fn is_valid_mcp_server_name(server_name: &str) -> bool {
|
||||
!server_name.is_empty()
|
||||
&& server_name
|
||||
.chars()
|
||||
.all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-')
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use mcp_types::ToolInputSchema;
|
||||
|
||||
fn create_test_tool(server_name: &str, tool_name: &str) -> ToolInfo {
|
||||
ToolInfo {
|
||||
server_name: server_name.to_string(),
|
||||
tool_name: tool_name.to_string(),
|
||||
tool: Tool {
|
||||
annotations: None,
|
||||
description: Some(format!("Test tool: {tool_name}")),
|
||||
input_schema: ToolInputSchema {
|
||||
properties: None,
|
||||
required: None,
|
||||
r#type: "object".to_string(),
|
||||
},
|
||||
name: tool_name.to_string(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_qualify_tools_short_non_duplicated_names() {
|
||||
let tools = vec![
|
||||
create_test_tool("server1", "tool1"),
|
||||
create_test_tool("server1", "tool2"),
|
||||
];
|
||||
|
||||
let qualified_tools = qualify_tools(tools);
|
||||
|
||||
assert_eq!(qualified_tools.len(), 2);
|
||||
assert!(qualified_tools.contains_key("server1__tool1"));
|
||||
assert!(qualified_tools.contains_key("server1__tool2"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_qualify_tools_duplicated_names_skipped() {
|
||||
let tools = vec![
|
||||
create_test_tool("server1", "duplicate_tool"),
|
||||
create_test_tool("server1", "duplicate_tool"),
|
||||
];
|
||||
|
||||
let qualified_tools = qualify_tools(tools);
|
||||
|
||||
// Only the first tool should remain, the second is skipped
|
||||
assert_eq!(qualified_tools.len(), 1);
|
||||
assert!(qualified_tools.contains_key("server1__duplicate_tool"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_qualify_tools_long_names_same_server() {
|
||||
let server_name = "my_server";
|
||||
|
||||
let tools = vec![
|
||||
create_test_tool(
|
||||
server_name,
|
||||
"extremely_lengthy_function_name_that_absolutely_surpasses_all_reasonable_limits",
|
||||
),
|
||||
create_test_tool(
|
||||
server_name,
|
||||
"yet_another_extremely_lengthy_function_name_that_absolutely_surpasses_all_reasonable_limits",
|
||||
),
|
||||
];
|
||||
|
||||
let qualified_tools = qualify_tools(tools);
|
||||
|
||||
assert_eq!(qualified_tools.len(), 2);
|
||||
|
||||
let mut keys: Vec<_> = qualified_tools.keys().cloned().collect();
|
||||
keys.sort();
|
||||
|
||||
assert_eq!(keys[0].len(), 64);
|
||||
assert_eq!(
|
||||
keys[0],
|
||||
"my_server__extremely_lena02e507efc5a9de88637e436690364fd4219e4ef"
|
||||
);
|
||||
|
||||
assert_eq!(keys[1].len(), 64);
|
||||
assert_eq!(
|
||||
keys[1],
|
||||
"my_server__yet_another_e1c3987bd9c50b826cbe1687966f79f0c602d19ca"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -64,6 +64,14 @@ pub struct ModelProviderInfo {
/// value should be used. If the environment variable is not set, or the
/// value is empty, the header will not be included in the request.
pub env_http_headers: Option<HashMap<String, String>>,

/// Whether the provider accepts an explicit `temperature` parameter.
#[serde(default = "default_supports_temperature")]
pub supports_temperature: bool,
}

const fn default_supports_temperature() -> bool {
true
}

impl ModelProviderInfo {
@@ -205,6 +213,7 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
.into_iter()
.collect(),
),
supports_temperature: false,
},
),
]
@@ -234,6 +243,7 @@ base_url = "http://localhost:11434/v1"
query_params: None,
http_headers: None,
env_http_headers: None,
supports_temperature: true,
};

let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -259,6 +269,7 @@ query_params = { api-version = "2025-04-01-preview" }
}),
http_headers: None,
env_http_headers: None,
supports_temperature: true,
};

let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -287,6 +298,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
env_http_headers: Some(maplit::hashmap! {
"X-Example-Env-Header".to_string() => "EXAMPLE_ENV_VAR".to_string(),
}),
supports_temperature: true,
};

let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
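The `#[serde(default = "default_supports_temperature")]` attribute added above means a provider entry that omits the field deserializes with `supports_temperature = true`. A small sketch of that behavior on a toy struct; the struct and TOML snippet are illustrative, not the real config schema.

```rust
// Sketch: omitting `supports_temperature` in TOML falls back to `true`
// via the serde default shown in the hunk above. Toy struct for illustration.
use serde::Deserialize;

const fn default_supports_temperature() -> bool {
    true
}

#[derive(Debug, Deserialize)]
struct Provider {
    base_url: String,
    #[serde(default = "default_supports_temperature")]
    supports_temperature: bool,
}

fn main() {
    let provider: Provider =
        toml::from_str(r#"base_url = "http://localhost:11434/v1""#).unwrap();
    assert!(provider.supports_temperature); // defaults to true when omitted
}
```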
@@ -282,15 +282,9 @@ pub enum EventMsg {
/// Agent text output message
AgentMessage(AgentMessageEvent),

/// Agent text output delta message
AgentMessageDelta(AgentMessageDeltaEvent),

/// Reasoning event from agent.
AgentReasoning(AgentReasoningEvent),

/// Agent reasoning delta event from agent.
AgentReasoningDelta(AgentReasoningDeltaEvent),

/// Ack the client's configure message.
SessionConfigured(SessionConfiguredEvent),

@@ -346,21 +340,11 @@ pub struct AgentMessageEvent {
pub message: String,
}

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentMessageDeltaEvent {
pub delta: String,
}

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentReasoningEvent {
pub text: String,
}

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentReasoningDeltaEvent {
pub delta: String,
}

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct McpToolCallBeginEvent {
/// Identifier so this can be paired with the McpToolCallEnd event.
@@ -153,16 +153,14 @@ struct LogFileInfo {
}

fn create_log_file(config: &Config, session_id: Uuid) -> std::io::Result<LogFileInfo> {
// Resolve ~/.codex/sessions/YYYY/MM/DD and create it if missing.
let timestamp = OffsetDateTime::now_local()
.map_err(|e| IoError::other(format!("failed to get local time: {e}")))?;
// Resolve ~/.codex/sessions and create it if missing.
let mut dir = config.codex_home.clone();
dir.push(SESSIONS_SUBDIR);
dir.push(timestamp.year().to_string());
dir.push(format!("{:02}", u8::from(timestamp.month())));
dir.push(format!("{:02}", timestamp.day()));
fs::create_dir_all(&dir)?;

let timestamp = OffsetDateTime::now_local()
.map_err(|e| IoError::other(format!("failed to get local time: {e}")))?;

// Custom format for YYYY-MM-DDThh-mm-ss. Use `-` instead of `:` for
// compatibility with filesystems that do not allow colons in filenames.
let format: &[FormatItem] =
@@ -2,12 +2,7 @@
|
||||
|
||||
use assert_cmd::Command as AssertCommand;
|
||||
use codex_core::exec::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
|
||||
use serde_json::Value;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use tempfile::TempDir;
|
||||
use uuid::Uuid;
|
||||
use walkdir::WalkDir;
|
||||
use wiremock::Mock;
|
||||
use wiremock::MockServer;
|
||||
use wiremock::ResponseTemplate;
|
||||
@@ -76,8 +71,8 @@ async fn chat_mode_stream_cli() {
|
||||
println!("Stderr:\n{}", String::from_utf8_lossy(&output.stderr));
|
||||
assert!(output.status.success());
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let hi_lines = stdout.lines().filter(|line| line.trim() == "hi").count();
|
||||
assert_eq!(hi_lines, 1, "Expected exactly one line with 'hi'");
|
||||
assert!(stdout.contains("hi"));
|
||||
assert_eq!(stdout.matches("hi").count(), 1);
|
||||
|
||||
server.verify().await;
|
||||
}
|
||||
@@ -122,154 +117,3 @@ async fn responses_api_stream_cli() {
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
assert!(stdout.contains("fixture hello"));
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn integration_creates_and_checks_session_file() {
|
||||
// Honor sandbox network restrictions for CI parity with the other tests.
|
||||
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||
println!(
|
||||
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// 1. Temp home so we read/write isolated session files.
|
||||
let home = TempDir::new().unwrap();
|
||||
|
||||
// 2. Unique marker we'll look for in the session log.
|
||||
let marker = format!("integration-test-{}", Uuid::new_v4());
|
||||
let prompt = format!("echo {marker}");

// 3. Use the same offline SSE fixture as responses_api_stream_cli so the test is hermetic.
let fixture =
std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/cli_responses_fixture.sse");

// 4. Run the codex CLI through cargo (ensures the right bin is built) and invoke `exec`,
// which is what records a session.
let mut cmd = AssertCommand::new("cargo");
cmd.arg("run")
.arg("-p")
.arg("codex-cli")
.arg("--quiet")
.arg("--")
.arg("exec")
.arg("--skip-git-repo-check")
.arg("-C")
.arg(env!("CARGO_MANIFEST_DIR"))
.arg(&prompt);
cmd.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("CODEX_RS_SSE_FIXTURE", &fixture)
// Required for CLI arg parsing even though fixture short-circuits network usage.
.env("OPENAI_BASE_URL", "http://unused.local");

let output = cmd.output().unwrap();
assert!(
output.status.success(),
"codex-cli exec failed: {}",
String::from_utf8_lossy(&output.stderr)
);

// 5. Sessions are written asynchronously; wait briefly for the directory to appear.
let sessions_dir = home.path().join("sessions");
let start = Instant::now();
while !sessions_dir.exists() && start.elapsed() < Duration::from_secs(2) {
std::thread::sleep(Duration::from_millis(50));
}

// 6. Scan all session files and find the one that contains our marker.
let mut matching_files = vec![];
for entry in WalkDir::new(&sessions_dir) {
let entry = entry.unwrap();
if entry.file_type().is_file() && entry.file_name().to_string_lossy().ends_with(".jsonl") {
let path = entry.path();
let content = std::fs::read_to_string(path).unwrap();
let mut lines = content.lines();
// Skip SessionMeta (first line)
let _ = lines.next();
for line in lines {
let item: Value = serde_json::from_str(line).unwrap();
if let Some("message") = item.get("type").and_then(|t| t.as_str()) {
if let Some(content) = item.get("content") {
if content.to_string().contains(&marker) {
matching_files.push(path.to_owned());
break;
}
}
}
}
}
}
assert_eq!(
matching_files.len(),
1,
"Expected exactly one session file containing the marker, found {}",
matching_files.len()
);
let path = &matching_files[0];

// 7. Verify directory structure: sessions/YYYY/MM/DD/filename.jsonl
let rel = match path.strip_prefix(&sessions_dir) {
Ok(r) => r,
Err(_) => panic!("session file should live under sessions/"),
};
let comps: Vec<String> = rel
.components()
.map(|c| c.as_os_str().to_string_lossy().into_owned())
.collect();
assert_eq!(
comps.len(),
4,
"Expected sessions/YYYY/MM/DD/<file>, got {rel:?}"
);
let year = &comps[0];
let month = &comps[1];
let day = &comps[2];
assert!(
year.len() == 4 && year.chars().all(|c| c.is_ascii_digit()),
"Year dir not 4-digit numeric: {year}"
);
assert!(
month.len() == 2 && month.chars().all(|c| c.is_ascii_digit()),
"Month dir not zero-padded 2-digit numeric: {month}"
);
assert!(
day.len() == 2 && day.chars().all(|c| c.is_ascii_digit()),
"Day dir not zero-padded 2-digit numeric: {day}"
);
// Range checks (best-effort; won't fail on leading zeros)
if let Ok(m) = month.parse::<u8>() {
assert!((1..=12).contains(&m), "Month out of range: {m}");
}
if let Ok(d) = day.parse::<u8>() {
assert!((1..=31).contains(&d), "Day out of range: {d}");
}

// 8. Parse SessionMeta line and basic sanity checks.
let content = std::fs::read_to_string(path).unwrap();
let mut lines = content.lines();
let meta: Value = serde_json::from_str(lines.next().unwrap()).unwrap();
assert!(meta.get("id").is_some(), "SessionMeta missing id");
assert!(
meta.get("timestamp").is_some(),
"SessionMeta missing timestamp"
);

// 9. Confirm at least one message contains the marker.
let mut found_message = false;
for line in lines {
let item: Value = serde_json::from_str(line).unwrap();
if item.get("type").map(|t| t == "message").unwrap_or(false) {
if let Some(content) = item.get("content") {
if content.to_string().contains(&marker) {
found_message = true;
break;
}
}
}
}
assert!(
found_message,
"No message found in session file containing the marker"
);
}

@@ -1,117 +0,0 @@
use std::time::Duration;

use codex_core::Codex;
use codex_core::ModelProviderInfo;
use codex_core::exec::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SessionConfiguredEvent;
mod test_support;
use tempfile::TempDir;
use test_support::load_default_config_for_test;
use test_support::load_sse_fixture_with_id;
use tokio::time::timeout;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

/// Build minimal SSE stream with completed marker using the JSON fixture.
fn sse_completed(id: &str) -> String {
load_sse_fixture_with_id("tests/fixtures/completed_template.json", id)
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_session_id_and_model_headers_in_request() {
#![allow(clippy::unwrap_used)]

if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}

// Mock server
let server = MockServer::start().await;

// First request – must NOT include `previous_response_id`.
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");

Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;

// Environment
// Update environment – `set_var` is `unsafe` starting with the 2024
// edition so we group the calls into a single `unsafe { … }` block.
unsafe {
std::env::set_var("OPENAI_REQUEST_MAX_RETRIES", "0");
std::env::set_var("OPENAI_STREAM_MAX_RETRIES", "0");
}
let model_provider = ModelProviderInfo {
name: "openai".into(),
base_url: format!("{}/v1", server.uri()),
// Environment variable that should exist in the test environment.
// ModelClient will return an error if the environment variable for the
// provider is not set.
env_key: Some("PATH".into()),
env_key_instructions: None,
wire_api: codex_core::WireApi::Responses,
query_params: None,
http_headers: Some(
[("originator".to_string(), "codex_cli_rs".to_string())]
.into_iter()
.collect(),
),
env_http_headers: None,
};

// Init session
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let (codex, _init_id) = Codex::spawn(config, ctrl_c.clone()).await.unwrap();

codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();

let mut current_session_id = None;
// Wait for TaskComplete
loop {
let ev = timeout(Duration::from_secs(1), codex.next_event())
.await
.unwrap()
.unwrap();

if let EventMsg::SessionConfigured(SessionConfiguredEvent { session_id, .. }) = ev.msg {
current_session_id = Some(session_id.to_string());
}
if matches!(ev.msg, EventMsg::TaskComplete(_)) {
break;
}
}

// get request from the server
let request = &server.received_requests().await.unwrap()[0];
let request_body = request.headers.get("session_id").unwrap();
let originator = request.headers.get("originator").unwrap();

assert!(current_session_id.is_some());
assert_eq!(request_body.to_str().unwrap(), &current_session_id.unwrap());
assert_eq!(originator.to_str().unwrap(), "codex_cli_rs");
}
@@ -107,6 +107,7 @@ async fn keeps_previous_response_id_between_tasks() {
query_params: None,
http_headers: None,
env_http_headers: None,
supports_temperature: true,
};

// Init session

@@ -32,8 +32,6 @@ fn sse_completed(id: &str) -> String {
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// this test is flaky (has race conditions), so we ignore it for now
#[ignore]
async fn retries_on_early_close() {
#![allow(clippy::unwrap_used)]

@@ -98,6 +96,7 @@ async fn retries_on_early_close() {
query_params: None,
http_headers: None,
env_http_headers: None,
supports_temperature: true,
};

let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());

@@ -51,10 +51,6 @@ pub struct Cli {
#[arg(long = "color", value_enum, default_value_t = Color::Auto)]
pub color: Color,

/// Print events to stdout as JSONL.
#[arg(long = "json", default_value_t = false)]
pub json: bool,

/// Specifies file where the last message from the agent should be written.
#[arg(long = "output-last-message")]
pub last_message_file: Option<PathBuf>,

@@ -1,37 +1,492 @@
|
||||
use codex_common::elapsed::format_elapsed;
|
||||
use codex_common::summarize_sandbox_policy;
|
||||
use codex_core::WireApi;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::model_supports_reasoning_summaries;
|
||||
use codex_core::protocol::AgentMessageEvent;
|
||||
use codex_core::protocol::BackgroundEventEvent;
|
||||
use codex_core::protocol::ErrorEvent;
|
||||
use codex_core::protocol::Event;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::ExecCommandBeginEvent;
|
||||
use codex_core::protocol::ExecCommandEndEvent;
|
||||
use codex_core::protocol::FileChange;
|
||||
use codex_core::protocol::McpToolCallBeginEvent;
|
||||
use codex_core::protocol::McpToolCallEndEvent;
|
||||
use codex_core::protocol::PatchApplyBeginEvent;
|
||||
use codex_core::protocol::PatchApplyEndEvent;
|
||||
use codex_core::protocol::SessionConfiguredEvent;
|
||||
use codex_core::protocol::TokenUsage;
|
||||
use owo_colors::OwoColorize;
|
||||
use owo_colors::Style;
|
||||
use shlex::try_join;
|
||||
use std::collections::HashMap;
|
||||
use std::time::Instant;
|
||||
|
||||
pub(crate) trait EventProcessor {
|
||||
/// Print summary of effective configuration and user prompt.
|
||||
fn print_config_summary(&mut self, config: &Config, prompt: &str);
|
||||
/// This should be configurable. When used in CI, users may not want to impose
|
||||
/// a limit so they can see the full transcript.
|
||||
const MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL: usize = 20;
|
||||
|
||||
/// Handle a single event emitted by the agent.
|
||||
fn process_event(&mut self, event: Event);
|
||||
pub(crate) struct EventProcessor {
|
||||
call_id_to_command: HashMap<String, ExecCommandBegin>,
|
||||
call_id_to_patch: HashMap<String, PatchApplyBegin>,
|
||||
|
||||
/// Tracks in-flight MCP tool calls so we can calculate duration and print
|
||||
/// a concise summary when the corresponding `McpToolCallEnd` event is
|
||||
/// received.
|
||||
call_id_to_tool_call: HashMap<String, McpToolCallBegin>,
|
||||
|
||||
// To ensure that --color=never is respected, ANSI escapes _must_ be added
|
||||
// using .style() with one of these fields. If you need a new style, add a
|
||||
// new field here.
|
||||
bold: Style,
|
||||
italic: Style,
|
||||
dimmed: Style,
|
||||
|
||||
magenta: Style,
|
||||
red: Style,
|
||||
green: Style,
|
||||
cyan: Style,
|
||||
|
||||
/// Whether to include `AgentReasoning` events in the output.
|
||||
show_agent_reasoning: bool,
|
||||
}
|
||||
|
||||
pub(crate) fn create_config_summary_entries(config: &Config) -> Vec<(&'static str, String)> {
|
||||
let mut entries = vec![
|
||||
("workdir", config.cwd.display().to_string()),
|
||||
("model", config.model.clone()),
|
||||
("provider", config.model_provider_id.clone()),
|
||||
("approval", format!("{:?}", config.approval_policy)),
|
||||
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
|
||||
];
|
||||
if config.model_provider.wire_api == WireApi::Responses
|
||||
&& model_supports_reasoning_summaries(config)
|
||||
{
|
||||
entries.push((
|
||||
"reasoning effort",
|
||||
config.model_reasoning_effort.to_string(),
|
||||
));
|
||||
entries.push((
|
||||
"reasoning summaries",
|
||||
config.model_reasoning_summary.to_string(),
|
||||
));
|
||||
impl EventProcessor {
|
||||
pub(crate) fn create_with_ansi(with_ansi: bool, show_agent_reasoning: bool) -> Self {
|
||||
let call_id_to_command = HashMap::new();
|
||||
let call_id_to_patch = HashMap::new();
|
||||
let call_id_to_tool_call = HashMap::new();
|
||||
|
||||
if with_ansi {
|
||||
Self {
|
||||
call_id_to_command,
|
||||
call_id_to_patch,
|
||||
bold: Style::new().bold(),
|
||||
italic: Style::new().italic(),
|
||||
dimmed: Style::new().dimmed(),
|
||||
magenta: Style::new().magenta(),
|
||||
red: Style::new().red(),
|
||||
green: Style::new().green(),
|
||||
cyan: Style::new().cyan(),
|
||||
call_id_to_tool_call,
|
||||
show_agent_reasoning,
|
||||
}
|
||||
} else {
|
||||
Self {
|
||||
call_id_to_command,
|
||||
call_id_to_patch,
|
||||
bold: Style::new(),
|
||||
italic: Style::new(),
|
||||
dimmed: Style::new(),
|
||||
magenta: Style::new(),
|
||||
red: Style::new(),
|
||||
green: Style::new(),
|
||||
cyan: Style::new(),
|
||||
call_id_to_tool_call,
|
||||
show_agent_reasoning,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ExecCommandBegin {
|
||||
command: Vec<String>,
|
||||
start_time: Instant,
|
||||
}
|
||||
|
||||
/// Metadata captured when an `McpToolCallBegin` event is received.
|
||||
struct McpToolCallBegin {
|
||||
/// Formatted invocation string, e.g. `server.tool({"city":"sf"})`.
|
||||
invocation: String,
|
||||
/// Timestamp when the call started so we can compute duration later.
|
||||
start_time: Instant,
|
||||
}
|
||||
|
||||
struct PatchApplyBegin {
|
||||
start_time: Instant,
|
||||
auto_approved: bool,
|
||||
}
|
||||
|
||||
// Timestamped println helper. The timestamp is styled with self.dimmed.
|
||||
#[macro_export]
|
||||
macro_rules! ts_println {
|
||||
($self:ident, $($arg:tt)*) => {{
|
||||
let now = chrono::Utc::now();
|
||||
let formatted = now.format("[%Y-%m-%dT%H:%M:%S]");
|
||||
print!("{} ", formatted.style($self.dimmed));
|
||||
println!($($arg)*);
|
||||
}};
|
||||
}
|
||||
|
||||
impl EventProcessor {
|
||||
/// Print a concise summary of the effective configuration that will be used
|
||||
/// for the session. This mirrors the information shown in the TUI welcome
|
||||
/// screen.
|
||||
pub(crate) fn print_config_summary(&mut self, config: &Config, prompt: &str) {
|
||||
const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
ts_println!(
|
||||
self,
|
||||
"OpenAI Codex v{} (research preview)\n--------",
|
||||
VERSION
|
||||
);
|
||||
|
||||
let mut entries = vec![
|
||||
("workdir", config.cwd.display().to_string()),
|
||||
("model", config.model.clone()),
|
||||
("provider", config.model_provider_id.clone()),
|
||||
("approval", format!("{:?}", config.approval_policy)),
|
||||
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
|
||||
];
|
||||
if config.model_provider.wire_api == WireApi::Responses
|
||||
&& model_supports_reasoning_summaries(config)
|
||||
{
|
||||
entries.push((
|
||||
"reasoning effort",
|
||||
config.model_reasoning_effort.to_string(),
|
||||
));
|
||||
entries.push((
|
||||
"reasoning summaries",
|
||||
config.model_reasoning_summary.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
for (key, value) in entries {
|
||||
println!("{} {}", format!("{key}:").style(self.bold), value);
|
||||
}
|
||||
|
||||
println!("--------");
|
||||
|
||||
// Echo the prompt that will be sent to the agent so it is visible in the
|
||||
// transcript/logs before any events come in. Note the prompt may have been
|
||||
// read from stdin, so it may not be visible in the terminal otherwise.
|
||||
ts_println!(
|
||||
self,
|
||||
"{}\n{}",
|
||||
"User instructions:".style(self.bold).style(self.cyan),
|
||||
prompt
|
||||
);
|
||||
}
|
||||
|
||||
entries
|
||||
pub(crate) fn process_event(&mut self, event: Event) {
|
||||
let Event { id: _, msg } = event;
|
||||
match msg {
|
||||
EventMsg::Error(ErrorEvent { message }) => {
|
||||
let prefix = "ERROR:".style(self.red);
|
||||
ts_println!(self, "{prefix} {message}");
|
||||
}
|
||||
EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
|
||||
ts_println!(self, "{}", message.style(self.dimmed));
|
||||
}
|
||||
EventMsg::TaskStarted | EventMsg::TaskComplete(_) => {
|
||||
// Ignore.
|
||||
}
|
||||
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
|
||||
ts_println!(self, "tokens used: {total_tokens}");
|
||||
}
|
||||
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
|
||||
ts_println!(
|
||||
self,
|
||||
"{}\n{message}",
|
||||
"codex".style(self.bold).style(self.magenta)
|
||||
);
|
||||
}
|
||||
EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
|
||||
call_id,
|
||||
command,
|
||||
cwd,
|
||||
}) => {
|
||||
self.call_id_to_command.insert(
|
||||
call_id.clone(),
|
||||
ExecCommandBegin {
|
||||
command: command.clone(),
|
||||
start_time: Instant::now(),
|
||||
},
|
||||
);
|
||||
ts_println!(
|
||||
self,
|
||||
"{} {} in {}",
|
||||
"exec".style(self.magenta),
|
||||
escape_command(&command).style(self.bold),
|
||||
cwd.to_string_lossy(),
|
||||
);
|
||||
}
|
||||
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
|
||||
call_id,
|
||||
stdout,
|
||||
stderr,
|
||||
exit_code,
|
||||
}) => {
|
||||
let exec_command = self.call_id_to_command.remove(&call_id);
|
||||
let (duration, call) = if let Some(ExecCommandBegin {
|
||||
command,
|
||||
start_time,
|
||||
}) = exec_command
|
||||
{
|
||||
(
|
||||
format!(" in {}", format_elapsed(start_time)),
|
||||
format!("{}", escape_command(&command).style(self.bold)),
|
||||
)
|
||||
} else {
|
||||
("".to_string(), format!("exec('{call_id}')"))
|
||||
};
|
||||
|
||||
let output = if exit_code == 0 { stdout } else { stderr };
|
||||
let truncated_output = output
|
||||
.lines()
|
||||
.take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL)
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
match exit_code {
|
||||
0 => {
|
||||
let title = format!("{call} succeeded{duration}:");
|
||||
ts_println!(self, "{}", title.style(self.green));
|
||||
}
|
||||
_ => {
|
||||
let title = format!("{call} exited {exit_code}{duration}:");
|
||||
ts_println!(self, "{}", title.style(self.red));
|
||||
}
|
||||
}
|
||||
println!("{}", truncated_output.style(self.dimmed));
|
||||
}
|
||||
EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
|
||||
call_id,
|
||||
server,
|
||||
tool,
|
||||
arguments,
|
||||
}) => {
|
||||
// Build fully-qualified tool name: server.tool
|
||||
let fq_tool_name = format!("{server}.{tool}");
|
||||
|
||||
// Format arguments as compact JSON so they fit on one line.
|
||||
let args_str = arguments
|
||||
.as_ref()
|
||||
.map(|v: &serde_json::Value| {
|
||||
serde_json::to_string(v).unwrap_or_else(|_| v.to_string())
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
let invocation = if args_str.is_empty() {
|
||||
format!("{fq_tool_name}()")
|
||||
} else {
|
||||
format!("{fq_tool_name}({args_str})")
|
||||
};
|
||||
|
||||
self.call_id_to_tool_call.insert(
|
||||
call_id.clone(),
|
||||
McpToolCallBegin {
|
||||
invocation: invocation.clone(),
|
||||
start_time: Instant::now(),
|
||||
},
|
||||
);
|
||||
|
||||
ts_println!(
|
||||
self,
|
||||
"{} {}",
|
||||
"tool".style(self.magenta),
|
||||
invocation.style(self.bold),
|
||||
);
|
||||
}
|
||||
EventMsg::McpToolCallEnd(tool_call_end_event) => {
|
||||
let is_success = tool_call_end_event.is_success();
|
||||
let McpToolCallEndEvent { call_id, result } = tool_call_end_event;
|
||||
// Retrieve start time and invocation for duration calculation and labeling.
|
||||
let info = self.call_id_to_tool_call.remove(&call_id);
|
||||
|
||||
let (duration, invocation) = if let Some(McpToolCallBegin {
|
||||
invocation,
|
||||
start_time,
|
||||
..
|
||||
}) = info
|
||||
{
|
||||
(format!(" in {}", format_elapsed(start_time)), invocation)
|
||||
} else {
|
||||
(String::new(), format!("tool('{call_id}')"))
|
||||
};
|
||||
|
||||
let status_str = if is_success { "success" } else { "failed" };
|
||||
let title_style = if is_success { self.green } else { self.red };
|
||||
let title = format!("{invocation} {status_str}{duration}:");
|
||||
|
||||
ts_println!(self, "{}", title.style(title_style));
|
||||
|
||||
if let Ok(res) = result {
|
||||
let val: serde_json::Value = res.into();
|
||||
let pretty =
|
||||
serde_json::to_string_pretty(&val).unwrap_or_else(|_| val.to_string());
|
||||
|
||||
for line in pretty.lines().take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL) {
|
||||
println!("{}", line.style(self.dimmed));
|
||||
}
|
||||
}
|
||||
}
|
||||
EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
|
||||
call_id,
|
||||
auto_approved,
|
||||
changes,
|
||||
}) => {
|
||||
// Store metadata so we can calculate duration later when we
|
||||
// receive the corresponding PatchApplyEnd event.
|
||||
self.call_id_to_patch.insert(
|
||||
call_id.clone(),
|
||||
PatchApplyBegin {
|
||||
start_time: Instant::now(),
|
||||
auto_approved,
|
||||
},
|
||||
);
|
||||
|
||||
ts_println!(
|
||||
self,
|
||||
"{} auto_approved={}:",
|
||||
"apply_patch".style(self.magenta),
|
||||
auto_approved,
|
||||
);
|
||||
|
||||
// Pretty-print the patch summary with colored diff markers so
|
||||
// it’s easy to scan in the terminal output.
|
||||
for (path, change) in changes.iter() {
|
||||
match change {
|
||||
FileChange::Add { content } => {
|
||||
let header = format!(
|
||||
"{} {}",
|
||||
format_file_change(change),
|
||||
path.to_string_lossy()
|
||||
);
|
||||
println!("{}", header.style(self.magenta));
|
||||
for line in content.lines() {
|
||||
println!("{}", line.style(self.green));
|
||||
}
|
||||
}
|
||||
FileChange::Delete => {
|
||||
let header = format!(
|
||||
"{} {}",
|
||||
format_file_change(change),
|
||||
path.to_string_lossy()
|
||||
);
|
||||
println!("{}", header.style(self.magenta));
|
||||
}
|
||||
FileChange::Update {
|
||||
unified_diff,
|
||||
move_path,
|
||||
} => {
|
||||
let header = if let Some(dest) = move_path {
|
||||
format!(
|
||||
"{} {} -> {}",
|
||||
format_file_change(change),
|
||||
path.to_string_lossy(),
|
||||
dest.to_string_lossy()
|
||||
)
|
||||
} else {
|
||||
format!("{} {}", format_file_change(change), path.to_string_lossy())
|
||||
};
|
||||
println!("{}", header.style(self.magenta));
|
||||
|
||||
// Colorize diff lines. We keep file header lines
|
||||
// (--- / +++) without extra coloring so they are
|
||||
// still readable.
|
||||
for diff_line in unified_diff.lines() {
|
||||
if diff_line.starts_with('+') && !diff_line.starts_with("+++") {
|
||||
println!("{}", diff_line.style(self.green));
|
||||
} else if diff_line.starts_with('-')
|
||||
&& !diff_line.starts_with("---")
|
||||
{
|
||||
println!("{}", diff_line.style(self.red));
|
||||
} else {
|
||||
println!("{diff_line}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
EventMsg::PatchApplyEnd(PatchApplyEndEvent {
|
||||
call_id,
|
||||
stdout,
|
||||
stderr,
|
||||
success,
|
||||
}) => {
|
||||
let patch_begin = self.call_id_to_patch.remove(&call_id);
|
||||
|
||||
// Compute duration and summary label similar to exec commands.
|
||||
let (duration, label) = if let Some(PatchApplyBegin {
|
||||
start_time,
|
||||
auto_approved,
|
||||
}) = patch_begin
|
||||
{
|
||||
(
|
||||
format!(" in {}", format_elapsed(start_time)),
|
||||
format!("apply_patch(auto_approved={auto_approved})"),
|
||||
)
|
||||
} else {
|
||||
(String::new(), format!("apply_patch('{call_id}')"))
|
||||
};
|
||||
|
||||
let (exit_code, output, title_style) = if success {
|
||||
(0, stdout, self.green)
|
||||
} else {
|
||||
(1, stderr, self.red)
|
||||
};
|
||||
|
||||
let title = format!("{label} exited {exit_code}{duration}:");
|
||||
ts_println!(self, "{}", title.style(title_style));
|
||||
for line in output.lines() {
|
||||
println!("{}", line.style(self.dimmed));
|
||||
}
|
||||
}
|
||||
EventMsg::ExecApprovalRequest(_) => {
|
||||
// Should we exit?
|
||||
}
|
||||
EventMsg::ApplyPatchApprovalRequest(_) => {
|
||||
// Should we exit?
|
||||
}
|
||||
EventMsg::AgentReasoning(agent_reasoning_event) => {
|
||||
if self.show_agent_reasoning {
|
||||
ts_println!(
|
||||
self,
|
||||
"{}\n{}",
|
||||
"thinking".style(self.italic).style(self.magenta),
|
||||
agent_reasoning_event.text
|
||||
);
|
||||
}
|
||||
}
|
||||
EventMsg::SessionConfigured(session_configured_event) => {
|
||||
let SessionConfiguredEvent {
|
||||
session_id,
|
||||
model,
|
||||
history_log_id: _,
|
||||
history_entry_count: _,
|
||||
} = session_configured_event;
|
||||
|
||||
ts_println!(
|
||||
self,
|
||||
"{} {}",
|
||||
"codex session".style(self.magenta).style(self.bold),
|
||||
session_id.to_string().style(self.dimmed)
|
||||
);
|
||||
|
||||
ts_println!(self, "model: {}", model);
|
||||
println!();
|
||||
}
|
||||
EventMsg::GetHistoryEntryResponse(_) => {
|
||||
// Currently ignored in exec output.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn escape_command(command: &[String]) -> String {
|
||||
try_join(command.iter().map(|s| s.as_str())).unwrap_or_else(|_| command.join(" "))
|
||||
}
|
||||
|
||||
fn format_file_change(change: &FileChange) -> &'static str {
|
||||
match change {
|
||||
FileChange::Add { .. } => "A",
|
||||
FileChange::Delete => "D",
|
||||
FileChange::Update {
|
||||
move_path: Some(_), ..
|
||||
} => "R",
|
||||
FileChange::Update {
|
||||
move_path: None, ..
|
||||
} => "M",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,520 +0,0 @@
|
||||
use codex_common::elapsed::format_elapsed;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::protocol::AgentMessageDeltaEvent;
|
||||
use codex_core::protocol::AgentMessageEvent;
|
||||
use codex_core::protocol::AgentReasoningDeltaEvent;
|
||||
use codex_core::protocol::BackgroundEventEvent;
|
||||
use codex_core::protocol::ErrorEvent;
|
||||
use codex_core::protocol::Event;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::ExecCommandBeginEvent;
|
||||
use codex_core::protocol::ExecCommandEndEvent;
|
||||
use codex_core::protocol::FileChange;
|
||||
use codex_core::protocol::McpToolCallBeginEvent;
|
||||
use codex_core::protocol::McpToolCallEndEvent;
|
||||
use codex_core::protocol::PatchApplyBeginEvent;
|
||||
use codex_core::protocol::PatchApplyEndEvent;
|
||||
use codex_core::protocol::SessionConfiguredEvent;
|
||||
use codex_core::protocol::TokenUsage;
|
||||
use owo_colors::OwoColorize;
|
||||
use owo_colors::Style;
|
||||
use shlex::try_join;
|
||||
use std::collections::HashMap;
|
||||
use std::io::Write;
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::event_processor::EventProcessor;
|
||||
use crate::event_processor::create_config_summary_entries;
|
||||
|
||||
/// This should be configurable. When used in CI, users may not want to impose
|
||||
/// a limit so they can see the full transcript.
|
||||
const MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL: usize = 20;
|
||||
pub(crate) struct EventProcessorWithHumanOutput {
|
||||
call_id_to_command: HashMap<String, ExecCommandBegin>,
|
||||
call_id_to_patch: HashMap<String, PatchApplyBegin>,
|
||||
|
||||
/// Tracks in-flight MCP tool calls so we can calculate duration and print
|
||||
/// a concise summary when the corresponding `McpToolCallEnd` event is
|
||||
/// received.
|
||||
call_id_to_tool_call: HashMap<String, McpToolCallBegin>,
|
||||
|
||||
// To ensure that --color=never is respected, ANSI escapes _must_ be added
|
||||
// using .style() with one of these fields. If you need a new style, add a
|
||||
// new field here.
|
||||
bold: Style,
|
||||
italic: Style,
|
||||
dimmed: Style,
|
||||
|
||||
magenta: Style,
|
||||
red: Style,
|
||||
green: Style,
|
||||
cyan: Style,
|
||||
|
||||
/// Whether to include `AgentReasoning` events in the output.
|
||||
show_agent_reasoning: bool,
|
||||
answer_started: bool,
|
||||
reasoning_started: bool,
|
||||
}
|
||||
|
||||
impl EventProcessorWithHumanOutput {
|
||||
pub(crate) fn create_with_ansi(with_ansi: bool, config: &Config) -> Self {
|
||||
let call_id_to_command = HashMap::new();
|
||||
let call_id_to_patch = HashMap::new();
|
||||
let call_id_to_tool_call = HashMap::new();
|
||||
|
||||
if with_ansi {
|
||||
Self {
|
||||
call_id_to_command,
|
||||
call_id_to_patch,
|
||||
bold: Style::new().bold(),
|
||||
italic: Style::new().italic(),
|
||||
dimmed: Style::new().dimmed(),
|
||||
magenta: Style::new().magenta(),
|
||||
red: Style::new().red(),
|
||||
green: Style::new().green(),
|
||||
cyan: Style::new().cyan(),
|
||||
call_id_to_tool_call,
|
||||
show_agent_reasoning: !config.hide_agent_reasoning,
|
||||
answer_started: false,
|
||||
reasoning_started: false,
|
||||
}
|
||||
} else {
|
||||
Self {
|
||||
call_id_to_command,
|
||||
call_id_to_patch,
|
||||
bold: Style::new(),
|
||||
italic: Style::new(),
|
||||
dimmed: Style::new(),
|
||||
magenta: Style::new(),
|
||||
red: Style::new(),
|
||||
green: Style::new(),
|
||||
cyan: Style::new(),
|
||||
call_id_to_tool_call,
|
||||
show_agent_reasoning: !config.hide_agent_reasoning,
|
||||
answer_started: false,
|
||||
reasoning_started: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ExecCommandBegin {
|
||||
command: Vec<String>,
|
||||
start_time: Instant,
|
||||
}
|
||||
|
||||
/// Metadata captured when an `McpToolCallBegin` event is received.
|
||||
struct McpToolCallBegin {
|
||||
/// Formatted invocation string, e.g. `server.tool({"city":"sf"})`.
|
||||
invocation: String,
|
||||
/// Timestamp when the call started so we can compute duration later.
|
||||
start_time: Instant,
|
||||
}
|
||||
|
||||
struct PatchApplyBegin {
|
||||
start_time: Instant,
|
||||
auto_approved: bool,
|
||||
}
|
||||
|
||||
// Timestamped println helper. The timestamp is styled with self.dimmed.
|
||||
#[macro_export]
|
||||
macro_rules! ts_println {
|
||||
($self:ident, $($arg:tt)*) => {{
|
||||
let now = chrono::Utc::now();
|
||||
let formatted = now.format("[%Y-%m-%dT%H:%M:%S]");
|
||||
print!("{} ", formatted.style($self.dimmed));
|
||||
println!($($arg)*);
|
||||
}};
|
||||
}
|
||||
|
||||
impl EventProcessor for EventProcessorWithHumanOutput {
|
||||
/// Print a concise summary of the effective configuration that will be used
|
||||
/// for the session. This mirrors the information shown in the TUI welcome
|
||||
/// screen.
|
||||
fn print_config_summary(&mut self, config: &Config, prompt: &str) {
|
||||
const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
ts_println!(
|
||||
self,
|
||||
"OpenAI Codex v{} (research preview)\n--------",
|
||||
VERSION
|
||||
);
|
||||
|
||||
let entries = create_config_summary_entries(config);
|
||||
|
||||
for (key, value) in entries {
|
||||
println!("{} {}", format!("{key}:").style(self.bold), value);
|
||||
}
|
||||
|
||||
println!("--------");
|
||||
|
||||
// Echo the prompt that will be sent to the agent so it is visible in the
|
||||
// transcript/logs before any events come in. Note the prompt may have been
|
||||
// read from stdin, so it may not be visible in the terminal otherwise.
|
||||
ts_println!(
|
||||
self,
|
||||
"{}\n{}",
|
||||
"User instructions:".style(self.bold).style(self.cyan),
|
||||
prompt
|
||||
);
|
||||
}
|
||||
|
||||
fn process_event(&mut self, event: Event) {
|
||||
let Event { id: _, msg } = event;
|
||||
match msg {
|
||||
EventMsg::Error(ErrorEvent { message }) => {
|
||||
let prefix = "ERROR:".style(self.red);
|
||||
ts_println!(self, "{prefix} {message}");
|
||||
}
|
||||
EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
|
||||
ts_println!(self, "{}", message.style(self.dimmed));
|
||||
}
|
||||
EventMsg::TaskStarted | EventMsg::TaskComplete(_) => {
|
||||
// Ignore.
|
||||
}
|
||||
EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
|
||||
ts_println!(self, "tokens used: {total_tokens}");
|
||||
}
|
||||
EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
|
||||
if !self.answer_started {
|
||||
ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta));
|
||||
self.answer_started = true;
|
||||
}
|
||||
print!("{delta}");
|
||||
#[allow(clippy::expect_used)]
|
||||
std::io::stdout().flush().expect("could not flush stdout");
|
||||
}
|
||||
EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
|
||||
if !self.show_agent_reasoning {
|
||||
return;
|
||||
}
|
||||
if !self.reasoning_started {
|
||||
ts_println!(
|
||||
self,
|
||||
"{}\n",
|
||||
"thinking".style(self.italic).style(self.magenta),
|
||||
);
|
||||
self.reasoning_started = true;
|
||||
}
|
||||
print!("{delta}");
|
||||
#[allow(clippy::expect_used)]
|
||||
std::io::stdout().flush().expect("could not flush stdout");
|
||||
}
|
||||
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
|
||||
// if answer_started is false, this means we haven't received any
|
||||
// delta. Thus, we need to print the message as a new answer.
|
||||
if !self.answer_started {
|
||||
ts_println!(
|
||||
self,
|
||||
"{}\n{}",
|
||||
"codex".style(self.italic).style(self.magenta),
|
||||
message,
|
||||
);
|
||||
} else {
|
||||
println!();
|
||||
self.answer_started = false;
|
||||
}
|
||||
}
|
||||
EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
|
||||
call_id,
|
||||
command,
|
||||
cwd,
|
||||
}) => {
|
||||
self.call_id_to_command.insert(
|
||||
call_id.clone(),
|
||||
ExecCommandBegin {
|
||||
command: command.clone(),
|
||||
start_time: Instant::now(),
|
||||
},
|
||||
);
|
||||
ts_println!(
|
||||
self,
|
||||
"{} {} in {}",
|
||||
"exec".style(self.magenta),
|
||||
escape_command(&command).style(self.bold),
|
||||
cwd.to_string_lossy(),
|
||||
);
|
||||
}
|
||||
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
|
||||
call_id,
|
||||
stdout,
|
||||
stderr,
|
||||
exit_code,
|
||||
}) => {
|
||||
let exec_command = self.call_id_to_command.remove(&call_id);
|
||||
let (duration, call) = if let Some(ExecCommandBegin {
|
||||
command,
|
||||
start_time,
|
||||
}) = exec_command
|
||||
{
|
||||
(
|
||||
format!(" in {}", format_elapsed(start_time)),
|
||||
format!("{}", escape_command(&command).style(self.bold)),
|
||||
)
|
||||
} else {
|
||||
("".to_string(), format!("exec('{call_id}')"))
|
||||
};
|
||||
|
||||
let output = if exit_code == 0 { stdout } else { stderr };
|
||||
let truncated_output = output
|
||||
.lines()
|
||||
.take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL)
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
match exit_code {
|
||||
0 => {
|
||||
let title = format!("{call} succeeded{duration}:");
|
||||
ts_println!(self, "{}", title.style(self.green));
|
||||
}
|
||||
_ => {
|
||||
let title = format!("{call} exited {exit_code}{duration}:");
|
||||
ts_println!(self, "{}", title.style(self.red));
|
||||
}
|
||||
}
|
||||
println!("{}", truncated_output.style(self.dimmed));
|
||||
}
|
||||
EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
|
||||
call_id,
|
||||
server,
|
||||
tool,
|
||||
arguments,
|
||||
}) => {
|
||||
// Build fully-qualified tool name: server.tool
|
||||
let fq_tool_name = format!("{server}.{tool}");
|
||||
|
||||
// Format arguments as compact JSON so they fit on one line.
|
||||
let args_str = arguments
|
||||
.as_ref()
|
||||
.map(|v: &serde_json::Value| {
|
||||
serde_json::to_string(v).unwrap_or_else(|_| v.to_string())
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
let invocation = if args_str.is_empty() {
|
||||
format!("{fq_tool_name}()")
|
||||
} else {
|
||||
format!("{fq_tool_name}({args_str})")
|
||||
};
|
||||
|
||||
self.call_id_to_tool_call.insert(
|
||||
call_id.clone(),
|
||||
McpToolCallBegin {
|
||||
invocation: invocation.clone(),
|
||||
start_time: Instant::now(),
|
||||
},
|
||||
);
|
||||
|
||||
ts_println!(
|
||||
self,
|
||||
"{} {}",
|
||||
"tool".style(self.magenta),
|
||||
invocation.style(self.bold),
|
||||
);
|
||||
}
|
||||
EventMsg::McpToolCallEnd(tool_call_end_event) => {
|
||||
let is_success = tool_call_end_event.is_success();
|
||||
let McpToolCallEndEvent { call_id, result } = tool_call_end_event;
|
||||
// Retrieve start time and invocation for duration calculation and labeling.
|
||||
let info = self.call_id_to_tool_call.remove(&call_id);
|
||||
|
||||
let (duration, invocation) = if let Some(McpToolCallBegin {
|
||||
invocation,
|
||||
start_time,
|
||||
..
|
||||
}) = info
|
||||
{
|
||||
(format!(" in {}", format_elapsed(start_time)), invocation)
|
||||
} else {
|
||||
(String::new(), format!("tool('{call_id}')"))
|
||||
};
|
||||
|
||||
let status_str = if is_success { "success" } else { "failed" };
|
||||
let title_style = if is_success { self.green } else { self.red };
|
||||
let title = format!("{invocation} {status_str}{duration}:");
|
||||
|
||||
ts_println!(self, "{}", title.style(title_style));
|
||||
|
||||
if let Ok(res) = result {
|
||||
let val: serde_json::Value = res.into();
|
||||
let pretty =
|
||||
serde_json::to_string_pretty(&val).unwrap_or_else(|_| val.to_string());
|
||||
|
||||
for line in pretty.lines().take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL) {
|
||||
println!("{}", line.style(self.dimmed));
|
||||
}
|
||||
}
|
||||
}
|
||||
EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
|
||||
call_id,
|
||||
auto_approved,
|
||||
changes,
|
||||
}) => {
|
||||
// Store metadata so we can calculate duration later when we
|
||||
// receive the corresponding PatchApplyEnd event.
|
||||
self.call_id_to_patch.insert(
|
||||
call_id.clone(),
|
||||
PatchApplyBegin {
|
||||
start_time: Instant::now(),
|
||||
auto_approved,
|
||||
},
|
||||
);
|
||||
|
||||
ts_println!(
|
||||
self,
|
||||
"{} auto_approved={}:",
|
||||
"apply_patch".style(self.magenta),
|
||||
auto_approved,
|
||||
);
|
||||
|
||||
// Pretty-print the patch summary with colored diff markers so
|
||||
// it's easy to scan in the terminal output.
|
||||
for (path, change) in changes.iter() {
|
||||
match change {
|
||||
FileChange::Add { content } => {
|
||||
let header = format!(
|
||||
"{} {}",
|
||||
format_file_change(change),
|
||||
path.to_string_lossy()
|
||||
);
|
||||
println!("{}", header.style(self.magenta));
|
||||
for line in content.lines() {
|
||||
println!("{}", line.style(self.green));
|
||||
}
|
||||
}
|
||||
FileChange::Delete => {
|
||||
let header = format!(
|
||||
"{} {}",
|
||||
format_file_change(change),
|
||||
path.to_string_lossy()
|
||||
);
|
||||
println!("{}", header.style(self.magenta));
|
||||
}
|
||||
FileChange::Update {
|
||||
unified_diff,
|
||||
move_path,
|
||||
} => {
|
||||
let header = if let Some(dest) = move_path {
|
||||
format!(
|
||||
"{} {} -> {}",
|
||||
format_file_change(change),
|
||||
path.to_string_lossy(),
|
||||
dest.to_string_lossy()
|
||||
)
|
||||
} else {
|
||||
format!("{} {}", format_file_change(change), path.to_string_lossy())
|
||||
};
|
||||
println!("{}", header.style(self.magenta));
|
||||
|
||||
// Colorize diff lines. We keep file header lines
|
||||
// (--- / +++) without extra coloring so they are
|
||||
// still readable.
|
||||
for diff_line in unified_diff.lines() {
|
||||
if diff_line.starts_with('+') && !diff_line.starts_with("+++") {
|
||||
println!("{}", diff_line.style(self.green));
|
||||
} else if diff_line.starts_with('-')
|
||||
&& !diff_line.starts_with("---")
|
||||
{
|
||||
println!("{}", diff_line.style(self.red));
|
||||
} else {
|
||||
println!("{diff_line}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
EventMsg::PatchApplyEnd(PatchApplyEndEvent {
|
||||
call_id,
|
||||
stdout,
|
||||
stderr,
|
||||
success,
|
||||
}) => {
|
||||
let patch_begin = self.call_id_to_patch.remove(&call_id);
|
||||
|
||||
// Compute duration and summary label similar to exec commands.
|
||||
let (duration, label) = if let Some(PatchApplyBegin {
|
||||
start_time,
|
||||
auto_approved,
|
||||
}) = patch_begin
|
||||
{
|
||||
(
|
||||
format!(" in {}", format_elapsed(start_time)),
|
||||
format!("apply_patch(auto_approved={auto_approved})"),
|
||||
)
|
||||
} else {
|
||||
(String::new(), format!("apply_patch('{call_id}')"))
|
||||
};
|
||||
|
||||
let (exit_code, output, title_style) = if success {
|
||||
(0, stdout, self.green)
|
||||
} else {
|
||||
(1, stderr, self.red)
|
||||
};
|
||||
|
||||
let title = format!("{label} exited {exit_code}{duration}:");
|
||||
ts_println!(self, "{}", title.style(title_style));
|
||||
for line in output.lines() {
|
||||
println!("{}", line.style(self.dimmed));
|
||||
}
|
||||
}
|
||||
EventMsg::ExecApprovalRequest(_) => {
|
||||
// Should we exit?
|
||||
}
|
||||
EventMsg::ApplyPatchApprovalRequest(_) => {
|
||||
// Should we exit?
|
||||
}
|
||||
EventMsg::AgentReasoning(agent_reasoning_event) => {
|
||||
if self.show_agent_reasoning {
|
||||
if !self.reasoning_started {
|
||||
ts_println!(
|
||||
self,
|
||||
"{}\n{}",
|
||||
"codex".style(self.italic).style(self.magenta),
|
||||
agent_reasoning_event.text,
|
||||
);
|
||||
} else {
|
||||
println!();
|
||||
self.reasoning_started = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
EventMsg::SessionConfigured(session_configured_event) => {
|
||||
let SessionConfiguredEvent {
|
||||
session_id,
|
||||
model,
|
||||
history_log_id: _,
|
||||
history_entry_count: _,
|
||||
} = session_configured_event;
|
||||
|
||||
ts_println!(
|
||||
self,
|
||||
"{} {}",
|
||||
"codex session".style(self.magenta).style(self.bold),
|
||||
session_id.to_string().style(self.dimmed)
|
||||
);
|
||||
|
||||
ts_println!(self, "model: {}", model);
|
||||
println!();
|
||||
}
|
||||
EventMsg::GetHistoryEntryResponse(_) => {
|
||||
// Currently ignored in exec output.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn escape_command(command: &[String]) -> String {
|
||||
try_join(command.iter().map(|s| s.as_str())).unwrap_or_else(|_| command.join(" "))
|
||||
}
|
||||
|
||||
fn format_file_change(change: &FileChange) -> &'static str {
|
||||
match change {
|
||||
FileChange::Add { .. } => "A",
|
||||
FileChange::Delete => "D",
|
||||
FileChange::Update {
|
||||
move_path: Some(_), ..
|
||||
} => "R",
|
||||
FileChange::Update {
|
||||
move_path: None, ..
|
||||
} => "M",
|
||||
}
|
||||
}
|
||||
@@ -1,48 +0,0 @@
use std::collections::HashMap;

use codex_core::config::Config;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use serde_json::json;

use crate::event_processor::EventProcessor;
use crate::event_processor::create_config_summary_entries;

pub(crate) struct EventProcessorWithJsonOutput;

impl EventProcessorWithJsonOutput {
pub fn new() -> Self {
Self {}
}
}

impl EventProcessor for EventProcessorWithJsonOutput {
fn print_config_summary(&mut self, config: &Config, prompt: &str) {
let entries = create_config_summary_entries(config)
.into_iter()
.map(|(key, value)| (key.to_string(), value))
.collect::<HashMap<String, String>>();
#[allow(clippy::expect_used)]
let config_json =
serde_json::to_string(&entries).expect("Failed to serialize config summary to JSON");
println!("{config_json}");

let prompt_json = json!({
"prompt": prompt,
});
println!("{prompt_json}");
}

fn process_event(&mut self, event: Event) {
match event.msg {
EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_) => {
// Suppress streaming events in JSON mode.
}
_ => {
if let Ok(line) = serde_json::to_string(&event) {
println!("{line}");
}
}
}
}
}
@@ -1,7 +1,5 @@
mod cli;
mod event_processor;
mod event_processor_with_human_output;
mod event_processor_with_json_output;

use std::io::IsTerminal;
use std::io::Read;
@@ -21,15 +19,12 @@ use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::util::is_inside_git_repo;
use event_processor_with_human_output::EventProcessorWithHumanOutput;
use event_processor_with_json_output::EventProcessorWithJsonOutput;
use event_processor::EventProcessor;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing_subscriber::EnvFilter;

use crate::event_processor::EventProcessor;

pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> {
let Cli {
images,
@@ -41,7 +36,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
skip_git_repo_check,
color,
last_message_file,
json: json_mode,
sandbox_mode: sandbox_mode_cli_arg,
prompt,
config_overrides,
@@ -121,15 +115,8 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
};

let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides)?;
let mut event_processor: Box<dyn EventProcessor> = if json_mode {
Box::new(EventProcessorWithJsonOutput::new())
} else {
Box::new(EventProcessorWithHumanOutput::create_with_ansi(
stdout_with_ansi,
&config,
))
};

let mut event_processor =
EventProcessor::create_with_ansi(stdout_with_ansi, !config.hide_agent_reasoning);
// Print the effective configuration and prompt so users can see what Codex
// is using.
event_processor.print_config_summary(&config, &prompt);

@@ -23,10 +23,3 @@ file-search *args:
# format code
fmt:
cargo fmt -- --config imports_granularity=Item

fix:
cargo clippy --fix --all-features --tests --allow-dirty

install:
rustup show active-toolchain
cargo fetch

@@ -5,31 +5,22 @@
|
||||
use codex_core::codex_wrapper::init_codex;
|
||||
use codex_core::config::Config as CodexConfig;
|
||||
use codex_core::protocol::AgentMessageEvent;
|
||||
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
|
||||
use codex_core::protocol::Event;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::ExecApprovalRequestEvent;
|
||||
use codex_core::protocol::InputItem;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol::ReviewDecision;
|
||||
use codex_core::protocol::Submission;
|
||||
use codex_core::protocol::TaskCompleteEvent;
|
||||
use mcp_types::CallToolResult;
|
||||
use mcp_types::CallToolResultContent;
|
||||
use mcp_types::JSONRPC_VERSION;
|
||||
use mcp_types::JSONRPCMessage;
|
||||
use mcp_types::JSONRPCNotification as McpNotification;
|
||||
use mcp_types::JSONRPCResponse;
|
||||
use mcp_types::RequestId;
|
||||
use mcp_types::TextContent;
|
||||
use tokio::sync::mpsc::Receiver;
|
||||
use tokio::sync::mpsc::Sender;
|
||||
|
||||
/// Convert a Codex [`Event`] to an MCP notification.
|
||||
///
|
||||
/// NOTE: This helper is kept local because we only ever emit notifications
|
||||
/// from within this worker. The implementation is intentionally infallible –
|
||||
/// serialization failures are treated as bugs.
|
||||
fn codex_event_to_notification(event: &Event) -> JSONRPCMessage {
|
||||
#[expect(clippy::expect_used)]
|
||||
JSONRPCMessage::Notification(mcp_types::JSONRPCNotification {
|
||||
@@ -48,7 +39,6 @@ pub async fn run_codex_tool_session(
|
||||
initial_prompt: String,
|
||||
config: CodexConfig,
|
||||
outgoing: Sender<JSONRPCMessage>,
|
||||
mut approval_rx: Receiver<ReviewDecision>,
|
||||
) {
|
||||
let (codex, first_event, _ctrl_c) = match init_codex(config).await {
|
||||
Ok(res) => res,
|
||||
@@ -86,7 +76,7 @@ pub async fn run_codex_tool_session(
|
||||
};
|
||||
|
||||
let submission = Submission {
|
||||
id: sub_id.clone(),
|
||||
id: sub_id,
|
||||
op: Op::UserInput {
|
||||
items: vec![InputItem::Text {
|
||||
text: initial_prompt.clone(),
|
||||
@@ -100,10 +90,8 @@ pub async fn run_codex_tool_session(
|
||||
|
||||
let mut last_agent_message: Option<String> = None;
|
||||
|
||||
// Stream events until the Codex task completes. When Codex asks for
|
||||
// approval we pause, wait for a decision from the MCP client (delivered
|
||||
// over `approval_rx` via `codex/approval`), forward the decision, and
|
||||
// continue the session.
|
||||
// Stream events until the task needs to pause for user interaction or
|
||||
// completes.
|
||||
loop {
|
||||
match codex.next_event().await {
|
||||
Ok(event) => {
|
||||
@@ -113,77 +101,45 @@ pub async fn run_codex_tool_session(
|
||||
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
|
||||
last_agent_message = Some(message.clone());
|
||||
}
|
||||
EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
|
||||
command,
|
||||
cwd,
|
||||
reason,
|
||||
}) => {
|
||||
// Dispatch an informational notification so the client can surface a UI.
|
||||
// We intentionally send a *notification* rather than a *request* because most
|
||||
// generic MCP clients (including the Inspector) do not implement a handler for
|
||||
// custom server->client requests and will otherwise respond with -32601.
|
||||
let params = serde_json::json!({
|
||||
"id": sub_id,
|
||||
"kind": "exec",
|
||||
"command": command,
|
||||
"cwd": cwd,
|
||||
"reason": reason,
|
||||
});
|
||||
EventMsg::ExecApprovalRequest(_) => {
|
||||
let result = CallToolResult {
|
||||
content: vec![CallToolResultContent::TextContent(TextContent {
|
||||
r#type: "text".to_string(),
|
||||
text: "EXEC_APPROVAL_REQUIRED".to_string(),
|
||||
annotations: None,
|
||||
})],
|
||||
is_error: None,
|
||||
};
|
||||
let _ = outgoing
|
||||
.send(JSONRPCMessage::Notification(McpNotification {
|
||||
.send(JSONRPCMessage::Response(JSONRPCResponse {
|
||||
jsonrpc: JSONRPC_VERSION.into(),
|
||||
method: "codex/approval".into(),
|
||||
params: Some(params),
|
||||
id: id.clone(),
|
||||
result: result.into(),
|
||||
}))
|
||||
.await;
|
||||
|
||||
// Wait for the MCP client to respond with an approval decision.
|
||||
let decision = approval_rx.recv().await.unwrap_or_default();
|
||||
// Forward to Codex.
|
||||
if let Err(e) = codex
|
||||
.submit(Op::ExecApproval {
|
||||
id: event.id.clone(),
|
||||
decision,
|
||||
})
|
||||
.await
|
||||
{
|
||||
tracing::error!("failed to submit ExecApproval op: {e}");
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
|
||||
reason,
|
||||
grant_root,
|
||||
..
|
||||
}) => {
|
||||
let params = serde_json::json!({
|
||||
"id": sub_id,
|
||||
"kind": "patch",
|
||||
"reason": reason,
|
||||
"grant_root": grant_root,
|
||||
});
|
||||
EventMsg::ApplyPatchApprovalRequest(_) => {
|
||||
let result = CallToolResult {
|
||||
content: vec![CallToolResultContent::TextContent(TextContent {
|
||||
r#type: "text".to_string(),
|
||||
text: "PATCH_APPROVAL_REQUIRED".to_string(),
|
||||
annotations: None,
|
||||
})],
|
||||
is_error: None,
|
||||
};
|
||||
let _ = outgoing
|
||||
.send(JSONRPCMessage::Notification(McpNotification {
|
||||
.send(JSONRPCMessage::Response(JSONRPCResponse {
|
||||
jsonrpc: JSONRPC_VERSION.into(),
|
||||
method: "codex/approval".into(),
|
||||
params: Some(params),
|
||||
id: id.clone(),
|
||||
result: result.into(),
|
||||
}))
|
||||
.await;
|
||||
|
||||
let decision = approval_rx.recv().await.unwrap_or_default();
|
||||
if let Err(e) = codex
|
||||
.submit(Op::PatchApproval {
|
||||
id: event.id.clone(),
|
||||
decision,
|
||||
})
|
||||
.await
|
||||
{
|
||||
tracing::error!("failed to submit PatchApproval op: {e}");
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
EventMsg::TaskComplete(TaskCompleteEvent { .. }) => {
|
||||
// Session finished – send the final MCP response.
|
||||
EventMsg::TaskComplete(TaskCompleteEvent {
|
||||
last_agent_message: _,
|
||||
}) => {
|
||||
let result = if let Some(msg) = last_agent_message {
|
||||
CallToolResult {
|
||||
content: vec![CallToolResultContent::TextContent(TextContent {
|
||||
@@ -213,11 +169,9 @@ pub async fn run_codex_tool_session(
|
||||
break;
|
||||
}
|
||||
EventMsg::SessionConfigured(_) => {
|
||||
// Already surfaced above; ignore duplicates.
|
||||
tracing::error!("unexpected SessionConfigured event");
|
||||
}
|
||||
EventMsg::AgentMessageDelta(_)
|
||||
| EventMsg::AgentReasoningDelta(_)
|
||||
| EventMsg::Error(_)
|
||||
EventMsg::Error(_)
|
||||
| EventMsg::TaskStarted
|
||||
| EventMsg::TokenCount(_)
|
||||
| EventMsg::AgentReasoning(_)
|
||||
@@ -229,7 +183,12 @@ pub async fn run_codex_tool_session(
|
||||
| EventMsg::PatchApplyBegin(_)
|
||||
| EventMsg::PatchApplyEnd(_)
|
||||
| EventMsg::GetHistoryEntryResponse(_) => {
|
||||
// No special handling.
|
||||
// For now, we do not do anything extra for these
|
||||
// events. Note that
|
||||
// send(codex_event_to_notification(&event)) above has
|
||||
// already dispatched these events as notifications,
|
||||
// though we may want to do give different treatment to
|
||||
// individual events in the future.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,14 +1,9 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::Mutex;
|
||||
|
||||
use crate::codex_tool_config::CodexToolCallParam;
|
||||
use crate::codex_tool_config::create_tool_for_codex_tool_call_param;
|
||||
use crate::codex_tool_runner::run_codex_tool_session;
|
||||
|
||||
use codex_core::config::Config as CodexConfig;
|
||||
use codex_core::protocol::ReviewDecision;
|
||||
use mcp_types::CallToolRequestParams;
|
||||
use mcp_types::CallToolResult;
|
||||
use mcp_types::CallToolResultContent;
|
||||
@@ -32,19 +27,10 @@ use serde_json::json;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::task;
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
struct ApprovalParams {
|
||||
id: String,
|
||||
decision: ReviewDecision,
|
||||
}
|
||||
|
||||
pub(crate) struct MessageProcessor {
|
||||
outgoing: mpsc::Sender<JSONRPCMessage>,
|
||||
initialized: bool,
|
||||
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
/// Map from Codex submission id (stringified MCP request id) -> channel used
|
||||
/// to forward approval decisions to the corresponding running Codex tool session.
|
||||
pending_approval_senders: Arc<Mutex<HashMap<String, mpsc::Sender<ReviewDecision>>>>,
|
||||
}
|
||||
|
||||
impl MessageProcessor {
|
||||
@@ -58,7 +44,6 @@ impl MessageProcessor {
|
||||
outgoing,
|
||||
initialized: false,
|
||||
codex_linux_sandbox_exe,
|
||||
pending_approval_senders: Arc::new(Mutex::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,20 +51,6 @@ impl MessageProcessor {
|
||||
// Hold on to the ID so we can respond.
|
||||
let request_id = request.id.clone();
|
||||
|
||||
if request.method == "codex/approval" {
|
||||
let params_json = request.params.unwrap_or(serde_json::Value::Null);
|
||||
let params: ApprovalParams = match serde_json::from_value(params_json) {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
tracing::warn!("Failed to parse approval params: {e}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
self.handle_codex_approval(request_id, params);
|
||||
return;
|
||||
}
|
||||
|
||||
let client_request = match ClientRequest::try_from(request) {
|
||||
Ok(client_request) => client_request,
|
||||
Err(e) => {
|
||||
@@ -421,31 +392,15 @@ impl MessageProcessor {
|
||||
}
|
||||
};
|
||||
|
||||
// Create a channel to receive approval decisions for this Codex session.
|
||||
// We stringify the MCP request id to use as the Codex submission id.
|
||||
let sub_id_str = match &id {
|
||||
RequestId::String(s) => s.clone(),
|
||||
RequestId::Integer(n) => n.to_string(),
|
||||
};
|
||||
let (approval_tx, approval_rx) = mpsc::channel::<ReviewDecision>(8);
|
||||
{
|
||||
let mut map = self.pending_approval_senders.lock().unwrap();
|
||||
map.insert(sub_id_str.clone(), approval_tx);
|
||||
}
|
||||
|
||||
// Clone outgoing sender to move into async task.
|
||||
let outgoing = self.outgoing.clone();
|
||||
let approval_map = self.pending_approval_senders.clone();
|
||||
|
||||
// Spawn an async task to handle the Codex session so that we do not
|
||||
// block the synchronous message-processing loop.
|
||||
task::spawn(async move {
|
||||
// Run the Codex session and stream events back to the client.
|
||||
run_codex_tool_session(id, initial_prompt, config, outgoing, approval_rx).await;
|
||||
|
||||
// Session finished; drop the sender entry so future approvals are ignored.
|
||||
let mut map = approval_map.lock().unwrap();
|
||||
map.remove(&sub_id_str);
|
||||
crate::codex_tool_runner::run_codex_tool_session(id, initial_prompt, config, outgoing)
|
||||
.await;
|
||||
});
|
||||
}
|
||||
|
||||
@@ -463,43 +418,6 @@ impl MessageProcessor {
|
||||
tracing::info!("completion/complete -> params: {:?}", params);
|
||||
}
|
||||
|
||||
fn handle_codex_approval(&self, id: RequestId, params: ApprovalParams) {
|
||||
tracing::info!("codex/approval -> params: {:?}", params);
|
||||
|
||||
// Forward the decision to the running Codex session (if any).
|
||||
let mut delivered = false;
|
||||
{
|
||||
let mut map = self.pending_approval_senders.lock().unwrap();
|
||||
if let Some(tx) = map.get_mut(¶ms.id) {
|
||||
match tx.try_send(params.decision) {
|
||||
Ok(()) => {
|
||||
delivered = true;
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("failed to forward approval to session {}: {e}", params.id);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tracing::warn!(
|
||||
"no pending Codex session found for approval id {}",
|
||||
params.id
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Ack the JSON-RPC request regardless of whether we were able to deliver the decision.
|
||||
// Include a boolean in the result so clients can detect delivery failures if desired.
|
||||
let response = JSONRPCMessage::Response(JSONRPCResponse {
|
||||
jsonrpc: JSONRPC_VERSION.into(),
|
||||
id,
|
||||
result: serde_json::json!({ "delivered": delivered }),
|
||||
});
|
||||
|
||||
if let Err(e) = self.outgoing.try_send(response) {
|
||||
tracing::error!("Failed to send approval response: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
// Notification handlers
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
@@ -1,3 +0,0 @@
[toolchain]
channel = "1.88.0"
components = [ "clippy", "rustfmt", "rust-src"]
@@ -40,6 +40,7 @@ ratatui = { version = "0.29.0", features = [
] }
ratatui-image = "8.0.0"
regex-lite = "0.1"
reqwest = { version = "0.12", features = ["json"] }
serde_json = { version = "1", features = ["preserve_order"] }
shlex = "1.3.0"
strum = "0.27.1"
@@ -59,6 +60,7 @@ tui-markdown = "0.3.3"
tui-textarea = "0.7.0"
unicode-segmentation = "1.12.0"
uuid = "1"
time = { version = "0.3", features = ["formatting"] }

[dev-dependencies]
insta = "1.43.1"

@@ -18,16 +18,8 @@ use crossterm::event::KeyEvent;
use crossterm::event::MouseEvent;
use crossterm::event::MouseEventKind;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver;
use std::sync::mpsc::channel;
use std::thread;
use std::time::Duration;

/// Time window for debouncing redraw requests.
const REDRAW_DEBOUNCE: Duration = Duration::from_millis(10);

/// Top-level application state: which full-screen view is currently active.
#[allow(clippy::large_enum_variant)]
@@ -54,9 +46,6 @@ pub(crate) struct App<'a> {

file_search: FileSearchManager,

/// True when a redraw has been scheduled but not yet executed.
pending_redraw: Arc<AtomicBool>,

/// Stored parameters needed to instantiate the ChatWidget later, e.g.,
/// after dismissing the Git-repo warning.
chat_args: Option<ChatWidgetArgs>,
@@ -71,7 +60,7 @@ struct ChatWidgetArgs {
initial_images: Vec<PathBuf>,
}

impl App<'_> {
impl<'a> App<'a> {
pub(crate) fn new(
config: Config,
initial_prompt: Option<String>,
@@ -81,7 +70,6 @@ impl App<'_> {
) -> Self {
let (app_event_tx, app_event_rx) = channel();
let app_event_tx = AppEventSender::new(app_event_tx);
let pending_redraw = Arc::new(AtomicBool::new(false));
let scroll_event_helper = ScrollEventHelper::new(app_event_tx.clone());

// Spawn a dedicated thread for reading the crossterm event loop and
@@ -95,7 +83,7 @@ impl App<'_> {
app_event_tx.send(AppEvent::KeyEvent(key_event));
}
crossterm::event::Event::Resize(_, _) => {
app_event_tx.send(AppEvent::RequestRedraw);
app_event_tx.send(AppEvent::Redraw);
}
crossterm::event::Event::Mouse(MouseEvent {
kind: MouseEventKind::ScrollUp,
@@ -164,7 +152,6 @@ impl App<'_> {
app_state,
config,
file_search,
pending_redraw,
chat_args,
}
}
@@ -175,28 +162,6 @@ impl App<'_> {
self.app_event_tx.clone()
}

/// Schedule a redraw if one is not already pending.
#[allow(clippy::unwrap_used)]
fn schedule_redraw(&self) {
// Attempt to set the flag to `true`. If it was already `true`, another
// redraw is already pending so we can return early.
if self
.pending_redraw
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
.is_err()
{
return;
}

let tx = self.app_event_tx.clone();
let pending_redraw = self.pending_redraw.clone();
thread::spawn(move || {
thread::sleep(REDRAW_DEBOUNCE);
tx.send(AppEvent::Redraw);
pending_redraw.store(false, Ordering::SeqCst);
});
}

pub(crate) fn run(
&mut self,
terminal: &mut tui::Tui,
@@ -204,13 +169,10 @@ impl App<'_> {
) -> Result<()> {
// Insert an event to trigger the first render.
let app_event_tx = self.app_event_tx.clone();
app_event_tx.send(AppEvent::RequestRedraw);
app_event_tx.send(AppEvent::Redraw);

while let Ok(event) = self.app_event_rx.recv() {
match event {
AppEvent::RequestRedraw => {
self.schedule_redraw();
}
AppEvent::Redraw => {
self.draw_next_frame(terminal)?;
}
@@ -237,21 +199,7 @@ impl App<'_> {
modifiers: crossterm::event::KeyModifiers::CONTROL,
..
} => {
match &mut self.app_state {
AppState::Chat { widget } => {
if widget.composer_is_empty() {
self.app_event_tx.send(AppEvent::ExitRequest);
} else {
// Treat Ctrl+D as a normal key event when the composer
// is not empty so that it doesn't quit the application
// prematurely.
self.dispatch_key_event(key_event);
}
}
AppState::Login { .. } | AppState::GitWarning { .. } => {
self.app_event_tx.send(AppEvent::ExitRequest);
}
}
self.app_event_tx.send(AppEvent::ExitRequest);
}
_ => {
self.dispatch_key_event(key_event);
@@ -287,7 +235,7 @@ impl App<'_> {
Vec::new(),
));
self.app_state = AppState::Chat { widget: new_widget };
self.app_event_tx.send(AppEvent::RequestRedraw);
self.app_event_tx.send(AppEvent::Redraw);
}
SlashCommand::ToggleMouseMode => {
if let Err(e) = mouse_capture.toggle() {
@@ -318,6 +266,11 @@ impl App<'_> {
widget.add_diff_output(text);
}
}
SlashCommand::SecurityReview => {
if let AppState::Chat { widget } = &mut self.app_state {
widget.start_security_review_with_defaults();
}
}
},
AppEvent::StartFileSearch(query) => {
self.file_search.on_user_query(query);
@@ -327,6 +280,11 @@ impl App<'_> {
widget.apply_file_search_result(query, matches);
}
}
AppEvent::SecurityReviewFinished { mode, outcome } => {
if let AppState::Chat { widget } = &mut self.app_state {
widget.handle_security_review_finished(mode, outcome);
}
}
}
}
terminal.clear()?;
@@ -335,8 +293,6 @@ impl App<'_> {
}

fn draw_next_frame(&mut self, terminal: &mut tui::Tui) -> Result<()> {
// TODO: add a throttle to avoid redrawing too often

match &mut self.app_state {
AppState::Chat { widget } => {
terminal.draw(|frame| frame.render_widget_ref(&**widget, frame.area()))?;
@@ -374,7 +330,7 @@ impl App<'_> {
args.initial_images,
));
self.app_state = AppState::Chat { widget };
self.app_event_tx.send(AppEvent::RequestRedraw);
self.app_event_tx.send(AppEvent::Redraw);
}
GitWarningOutcome::Quit => {
self.app_event_tx.send(AppEvent::ExitRequest);

@@ -2,16 +2,13 @@ use codex_core::protocol::Event;
use codex_file_search::FileMatch;
use crossterm::event::KeyEvent;

use crate::security_review::{SecurityReviewFailure, SecurityReviewMode, SecurityReviewResult};
use crate::slash_command::SlashCommand;

#[allow(clippy::large_enum_variant)]
pub(crate) enum AppEvent {
CodexEvent(Event),

/// Request a redraw which will be debounced by the [`App`].
RequestRedraw,

/// Actually draw the next frame.
Redraw,

KeyEvent(KeyEvent),
@@ -49,4 +46,10 @@ pub(crate) enum AppEvent {
query: String,
matches: Vec<FileMatch>,
},

/// Completion event for a `/secreview` invocation.
SecurityReviewFinished {
mode: SecurityReviewMode,
outcome: Result<SecurityReviewResult, SecurityReviewFailure>,
},
}

@@ -76,11 +76,6 @@ impl ChatComposer<'_> {
this
}

/// Returns true if the composer currently contains no user input.
pub(crate) fn is_empty(&self) -> bool {
self.textarea.is_empty()
}

/// Update the cached *context-left* percentage and refresh the placeholder
/// text. The UI relies on the placeholder to convey the remaining
/// context when the composer is empty.

@@ -72,7 +72,8 @@ impl ChatComposerHistory {
return false;
}

if textarea.is_empty() {
let lines = textarea.lines();
if lines.len() == 1 && lines[0].is_empty() {
return true;
}

@@ -84,7 +85,6 @@ impl ChatComposerHistory {
return false;
}

let lines = textarea.lines();
matches!(&self.last_history_text, Some(prev) if prev == &lines.join("\n"))
}

@@ -162,10 +162,6 @@ impl BottomPane<'_> {
}
}

pub(crate) fn composer_is_empty(&self) -> bool {
self.composer.is_empty()
}

pub(crate) fn is_task_running(&self) -> bool {
self.is_task_running
}
@@ -212,7 +208,7 @@ impl BottomPane<'_> {
}

pub(crate) fn request_redraw(&self) {
self.app_event_tx.send(AppEvent::RequestRedraw)
self.app_event_tx.send(AppEvent::Redraw)
}

/// Returns true when a popup inside the composer is visible.

@@ -24,7 +24,7 @@ impl StatusIndicatorView {
}
}

impl BottomPaneView<'_> for StatusIndicatorView {
impl<'a> BottomPaneView<'a> for StatusIndicatorView {
fn update_status_text(&mut self, text: String) -> ConditionalUpdate {
self.update_text(text);
ConditionalUpdate::NeedsRedraw

@@ -1,11 +1,9 @@
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use codex_core::codex_wrapper::init_codex;
use codex_core::config::Config;
use codex_core::protocol::AgentMessageDeltaEvent;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::AgentReasoningDeltaEvent;
use codex_core::protocol::AgentReasoningEvent;
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
use codex_core::protocol::ErrorEvent;
@@ -31,6 +29,7 @@ use ratatui::widgets::Widget;
use ratatui::widgets::WidgetRef;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::mpsc::unbounded_channel;
use tokio::task::JoinHandle;

use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
@@ -40,7 +39,10 @@ use crate::bottom_pane::InputResult;
use crate::conversation_history_widget::ConversationHistoryWidget;
use crate::history_cell::PatchEventType;
use crate::user_approval_widget::ApprovalRequest;
use crate::security_review::{run_security_review, SecurityReviewFailure, SecurityReviewMode, SecurityReviewRequest, SecurityReviewResult};
use codex_file_search::FileMatch;
use path_clean::PathClean;
use shlex;

pub(crate) struct ChatWidget<'a> {
app_event_tx: AppEventSender,
@@ -51,8 +53,8 @@ pub(crate) struct ChatWidget<'a> {
config: Config,
initial_user_message: Option<UserMessage>,
token_usage: TokenUsage,
reasoning_buffer: String,
answer_buffer: String,
security_review_handle: Option<JoinHandle<()>>,
active_security_review_mode: Option<SecurityReviewMode>,
}

#[derive(Clone, Copy, Eq, PartialEq)]
@@ -83,6 +85,15 @@ fn create_initial_user_message(text: String, image_paths: Vec<PathBuf>) -> Optio
}
}

#[derive(Debug, Default)]
struct ParsedSecReviewCommand {
mode: SecurityReviewMode,
include_paths: Vec<String>,
output_path: Option<String>,
repo_path: Option<String>,
model_name: Option<String>,
}

impl ChatWidget<'_> {
pub(crate) fn new(
config: Config,
@@ -139,8 +150,8 @@ impl ChatWidget<'_> {
initial_images,
),
token_usage: TokenUsage::default(),
reasoning_buffer: String::new(),
answer_buffer: String::new(),
security_review_handle: None,
active_security_review_mode: None,
}
}

@@ -188,6 +199,11 @@ impl ChatWidget<'_> {

fn submit_user_message(&mut self, user_message: UserMessage) {
let UserMessage { text, image_paths } = user_message;

if self.try_handle_slash_command(&text) {
return;
}

let mut items: Vec<InputItem> = Vec::new();

if !text.is_empty() {
@@ -224,6 +240,240 @@ impl ChatWidget<'_> {
self.conversation_history.scroll_to_bottom();
}

fn try_handle_slash_command(&mut self, text: &str) -> bool {
let trimmed = text.trim();
if trimmed.starts_with("/secreview") {
match parse_security_review_command(trimmed) {
Ok(command) => {
if let Err(err) = self.launch_security_review(command) {
self.report_security_review_error(err);
}
}
Err(err) => self.report_security_review_error(err),
}
return true;
}
false
}

pub(crate) fn start_security_review_with_defaults(&mut self) {
let command = ParsedSecReviewCommand::default();
if let Err(err) = self.launch_security_review(command) {
self.report_security_review_error(err);
}
}

fn launch_security_review(&mut self, command: ParsedSecReviewCommand) -> Result<(), String> {
let repo_candidate = if let Some(repo_override) = command.repo_path.as_ref() {
let candidate = Path::new(repo_override);
if candidate.is_absolute() {
candidate.to_path_buf()
} else {
self.config.cwd.join(candidate)
}
} else {
self.config.cwd.clone()
}
.clean();

let repo_path = match repo_candidate.canonicalize() {
Ok(path) => path,
Err(_) => repo_candidate.clone(),
};

if !repo_path.exists() {
return Err(format!(
"Repository path '{}' does not exist.",
repo_path.display()
));
}
if !repo_path.is_dir() {
return Err(format!(
"Repository path '{}' is not a directory.",
repo_path.display()
));
}

let mut resolved_paths: Vec<PathBuf> = Vec::new();
let mut display_paths: Vec<String> = Vec::new();

for include in &command.include_paths {
let candidate = resolve_path(&repo_path, include);
let canonical = match candidate.canonicalize() {
Ok(path) => path,
Err(_) => candidate.clone(),
};

if !canonical.exists() {
return Err(format!("Path '{}' does not exist.", canonical.display()));
}
if !canonical.starts_with(&repo_path) {
return Err(format!(
"Path '{}' is outside the repository root '{}'.",
canonical.display(),
repo_path.display()
));
}

let relative = canonical
.strip_prefix(&repo_path)
.unwrap_or(&canonical)
.display()
.to_string();
display_paths.push(relative);
resolved_paths.push(canonical);
}

let output_root = if let Some(output_override) = command.output_path.as_ref() {
let candidate = Path::new(output_override);
if candidate.is_absolute() {
candidate.to_path_buf()
} else {
repo_path.join(candidate)
}
} else {
repo_path.join("appsec_review")
}
.clean();

let model_name = command
.model_name
.clone()
.unwrap_or_else(|| self.config.model.clone());

if self.security_review_handle.is_some() {
return Err("A security review is already running. Please wait for it to finish or abort it before starting another.".to_string());
}

let scope_description = if resolved_paths.is_empty() {
"entire repository".to_string()
} else {
display_paths.join(", ")
};

let summary = format!(
"🔐 Running AppSec security review (mode: {}).\nRepository: {}\nScope: {}\nOutput: {}\nModel: {}",
command.mode.as_str(),
repo_path.display(),
scope_description,
output_root.display(),
model_name
);
self.conversation_history.add_background_event(summary);
self.conversation_history.scroll_to_bottom();
self.bottom_pane.set_task_running(true);
self.request_redraw();

let provider = self.config.model_provider.clone();
let request = SecurityReviewRequest {
repo_path: repo_path.clone(),
include_paths: resolved_paths,
output_root: output_root.clone(),
mode: command.mode,
model: model_name,
provider,
progress_sender: Some(self.app_event_tx.clone()),
};

let app_event_tx = self.app_event_tx.clone();
let mode = command.mode;
let handle = tokio::spawn(async move {
let outcome = run_security_review(request).await;
app_event_tx.send(AppEvent::SecurityReviewFinished { mode, outcome });
});
self.security_review_handle = Some(handle);
self.active_security_review_mode = Some(mode);

Ok(())
}

fn report_security_review_error(&mut self, message: String) {
self.security_review_handle = None;
self.active_security_review_mode = None;
self.bottom_pane.set_task_running(false);
self.conversation_history
.add_background_event(format!("❌ {message}"));
self.conversation_history.scroll_to_bottom();
self.request_redraw();
}

pub(crate) fn handle_security_review_finished(
&mut self,
mode: SecurityReviewMode,
outcome: Result<SecurityReviewResult, SecurityReviewFailure>,
) {
self.security_review_handle = None;
self.active_security_review_mode = None;
self.bottom_pane.set_task_running(false);
match outcome {
Ok(result) => {
let SecurityReviewResult {
bugs_markdown,
report_markdown,
bugs_path,
report_path,
logs,
} = result;

let mut summary = format!(
"✅ AppSec security review complete (mode: {}).\nBugs saved to {}.",
mode.as_str(),
bugs_path.display()
);
if let Some(report_path) = report_path.as_ref() {
summary.push_str(&format!(
"\nReport saved to {}.",
report_path.display()
));
}
self.conversation_history.add_background_event(summary);

if matches!(mode, SecurityReviewMode::Full) {
if let Some(markdown) = report_markdown.and_then(|m| {
if m.trim().is_empty() {
None
} else {
Some(m)
}
}) {
self.conversation_history.add_agent_message(
&self.config,
format!("# AppSec Security Review Report\n\n{markdown}"),
);
}
}

if !bugs_markdown.trim().is_empty() {
let heading = if matches!(mode, SecurityReviewMode::Full) {
"## Bugs Summary"
} else {
"# AppSec Bugs Summary"
};
self.conversation_history.add_agent_message(
&self.config,
format!("{heading}\n\n{bugs_markdown}"),
);
}

if let Some(log_text) = format_security_review_logs(&logs) {
self.conversation_history
.add_background_event(format!("Logs:\n{log_text}"));
}
}
Err(error) => {
let SecurityReviewFailure { message, logs } = error;
let mut summary = format!("❌ AppSec security review failed: {message}");
if let Some(log_text) = format_security_review_logs(&logs) {
summary.push_str(&format!("\n\nLogs:\n{log_text}"));
}
self.conversation_history.add_background_event(summary);
}
}

self.conversation_history.scroll_to_bottom();
self.request_redraw();
}

pub(crate) fn handle_codex_event(&mut self, event: Event) {
let Event { id, msg } = event;
match msg {
@@ -246,51 +496,16 @@ impl ChatWidget<'_> {
self.request_redraw();
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
// if the answer buffer is empty, this means we haven't received any
// delta. Thus, we need to print the message as a new answer.
if self.answer_buffer.is_empty() {
self.conversation_history
.add_agent_message(&self.config, message);
} else {
self.conversation_history
.replace_prev_agent_message(&self.config, message);
}
self.answer_buffer.clear();
self.request_redraw();
}
EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
if self.answer_buffer.is_empty() {
self.conversation_history
.add_agent_message(&self.config, "".to_string());
}
self.answer_buffer.push_str(&delta.clone());
self.conversation_history
.replace_prev_agent_message(&self.config, self.answer_buffer.clone());
self.request_redraw();
}
EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
if self.reasoning_buffer.is_empty() {
self.conversation_history
.add_agent_reasoning(&self.config, "".to_string());
}
self.reasoning_buffer.push_str(&delta.clone());
self.conversation_history
.replace_prev_agent_reasoning(&self.config, self.reasoning_buffer.clone());
.add_agent_message(&self.config, message);
self.request_redraw();
}
EventMsg::AgentReasoning(AgentReasoningEvent { text }) => {
// if the reasoning buffer is empty, this means we haven't received any
// delta. Thus, we need to print the message as a new reasoning.
if self.reasoning_buffer.is_empty() {
if !self.config.hide_agent_reasoning {
self.conversation_history
.add_agent_reasoning(&self.config, "".to_string());
} else {
// else, we rerender one last time.
self.conversation_history
.replace_prev_agent_reasoning(&self.config, text);
.add_agent_reasoning(&self.config, text);
self.request_redraw();
}
self.reasoning_buffer.clear();
self.request_redraw();
}
EventMsg::TaskStarted => {
self.bottom_pane.clear_ctrl_c_quit_hint();
@@ -431,7 +646,7 @@ impl ChatWidget<'_> {
}

fn request_redraw(&mut self) {
self.app_event_tx.send(AppEvent::RequestRedraw);
self.app_event_tx.send(AppEvent::Redraw);
}

pub(crate) fn add_diff_output(&mut self, diff_output: String) {
@@ -461,6 +676,20 @@ impl ChatWidget<'_> {
/// Returns true if the key press was handled, false if it was not.
/// If the key press was not handled, the caller should handle it (likely by exiting the process).
pub(crate) fn on_ctrl_c(&mut self) -> bool {
if let Some(handle) = self.security_review_handle.take() {
handle.abort();
let mode = self
.active_security_review_mode
.take()
.unwrap_or_else(SecurityReviewMode::default);
let failure = SecurityReviewFailure {
message: "AppSec security review aborted by user.".to_string(),
logs: vec!["AppSec security review aborted by user.".to_string()],
};
self.handle_security_review_finished(mode, Err(failure));
return true;
}

if self.bottom_pane.is_task_running() {
self.bottom_pane.clear_ctrl_c_quit_hint();
self.submit_op(Op::Interrupt);
@@ -473,10 +702,6 @@ impl ChatWidget<'_> {
}
}

pub(crate) fn composer_is_empty(&self) -> bool {
self.bottom_pane.composer_is_empty()
}

/// Forward an `Op` directly to codex.
pub(crate) fn submit_op(&self, op: Op) {
if let Err(e) = self.codex_op_tx.send(op) {
@@ -526,3 +751,137 @@ fn add_token_usage(current_usage: &TokenUsage, new_usage: &TokenUsage) -> TokenU
total_tokens: current_usage.total_tokens + new_usage.total_tokens,
}
}

fn parse_security_review_command(input: &str) -> Result<ParsedSecReviewCommand, String> {
let tokens = shlex::split(input).ok_or_else(|| "Unable to parse command arguments.".to_string())?;
if tokens.is_empty() {
return Err("Empty command.".to_string());
}

if tokens[0] != "/secreview" {
return Err("Unrecognized command.".to_string());
}

let mut command = ParsedSecReviewCommand::default();
let mut idx = 1;
while idx < tokens.len() {
let token = &tokens[idx];

if token == "--" {
for extra in tokens.iter().skip(idx + 1) {
if !extra.is_empty() {
command.include_paths.push(extra.to_string());
}
}
break;
} else if matches!(
token.as_str(),
"bugs" | "--bugs" | "--mode=bugs"
) {
command.mode = SecurityReviewMode::Bugs;
} else if matches!(
token.as_str(),
"full" | "--full" | "--mode=full"
) {
command.mode = SecurityReviewMode::Full;
} else if token == "--mode" {
idx += 1;
if idx >= tokens.len() {
return Err("Expected value after --mode.".to_string());
}
command.mode = parse_mode(&tokens[idx])?;
} else if let Some(value) = token.strip_prefix("--mode=") {
command.mode = parse_mode(value)?;
} else if token == "--path" || token == "-p" {
idx += 1;
if idx >= tokens.len() {
return Err(format!("Expected value after {token}."));
}
command.include_paths.push(tokens[idx].clone());
} else if let Some(value) = token.strip_prefix("--path=") {
command.include_paths.push(value.to_string());
} else if let Some(value) = token.strip_prefix("-p=") {
command.include_paths.push(value.to_string());
} else if matches!(
token.as_str(),
"--output" | "-o" | "--output-location"
) {
idx += 1;
if idx >= tokens.len() {
return Err(format!("Expected value after {token}."));
}
command.output_path = Some(tokens[idx].clone());
} else if let Some(value) = token.strip_prefix("--output=") {
command.output_path = Some(value.to_string());
} else if let Some(value) = token.strip_prefix("-o=") {
command.output_path = Some(value.to_string());
} else if matches!(
token.as_str(),
"--repo" | "--repo-location" | "--repository"
) {
idx += 1;
if idx >= tokens.len() {
return Err(format!("Expected value after {token}."));
}
command.repo_path = Some(tokens[idx].clone());
} else if let Some(value) = token.strip_prefix("--repo=") {
command.repo_path = Some(value.to_string());
} else if let Some(value) = token.strip_prefix("--repo-location=") {
command.repo_path = Some(value.to_string());
} else if token == "--model" || token == "--model-name" {
idx += 1;
if idx >= tokens.len() {
return Err(format!("Expected value after {token}."));
}
command.model_name = Some(tokens[idx].clone());
} else if let Some(value) = token.strip_prefix("--model=") {
command.model_name = Some(value.to_string());
} else if let Some(value) = token.strip_prefix("--model-name=") {
command.model_name = Some(value.to_string());
} else if !token.is_empty() {
command.include_paths.push(token.clone());
}

idx += 1;
}

Ok(command)
}

fn parse_mode(value: &str) -> Result<SecurityReviewMode, String> {
match value.to_ascii_lowercase().as_str() {
"full" => Ok(SecurityReviewMode::Full),
"bugs" | "bugs-only" | "bugsonly" => Ok(SecurityReviewMode::Bugs),
other => Err(format!("Unknown mode '{other}'. Use 'full' or 'bugs'.")),
}
}

fn resolve_path(base: &Path, candidate: &str) -> PathBuf {
let path = Path::new(candidate);
if path.is_absolute() {
path.to_path_buf()
} else {
base.join(path)
}
.clean()
}

fn format_security_review_logs(logs: &[String]) -> Option<String> {
if logs.is_empty() {
return None;
}

let joined = logs.join("\n");
if joined.trim().is_empty() {
return None;
}

let lines: Vec<&str> = joined.lines().collect();
const MAX_LINES: usize = 40;
if lines.len() <= MAX_LINES {
Some(joined)
} else {
let tail = lines[lines.len().saturating_sub(MAX_LINES)..].join("\n");
Some(format!("… (showing last {MAX_LINES} lines)\n{tail}"))
}
}

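As a quick sanity check (not part of the diff), a unit test along the following lines should exercise parse_security_review_command and ParsedSecReviewCommand as shown above; it assumes both stay private to this module, that SecurityReviewMode keeps its Full/Bugs variants, and the model string is purely illustrative:

#[cfg(test)]
mod secreview_command_parsing {
    use super::*;

    #[test]
    fn parses_mode_paths_and_overrides() {
        // Hypothetical invocation built only from flags the parser above matches.
        let cmd = parse_security_review_command(
            "/secreview bugs --path src/main.rs --output=appsec_review --repo .. --model some-model",
        )
        .expect("command should parse");
        assert!(matches!(cmd.mode, SecurityReviewMode::Bugs));
        assert_eq!(cmd.include_paths, vec!["src/main.rs".to_string()]);
        assert_eq!(cmd.output_path.as_deref(), Some("appsec_review"));
        assert_eq!(cmd.repo_path.as_deref(), Some(".."));
        assert_eq!(cmd.model_name.as_deref(), Some("some-model"));
    }
}
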
@@ -202,14 +202,6 @@ impl ConversationHistoryWidget {
self.add_to_history(HistoryCell::new_agent_reasoning(config, text));
}

pub fn replace_prev_agent_reasoning(&mut self, config: &Config, text: String) {
self.replace_last_agent_reasoning(config, text);
}

pub fn replace_prev_agent_message(&mut self, config: &Config, text: String) {
self.replace_last_agent_message(config, text);
}

pub fn add_background_event(&mut self, message: String) {
self.add_to_history(HistoryCell::new_background_event(message));
}
@@ -257,42 +249,6 @@ impl ConversationHistoryWidget {
});
}

pub fn replace_last_agent_reasoning(&mut self, config: &Config, text: String) {
if let Some(idx) = self
.entries
.iter()
.rposition(|entry| matches!(entry.cell, HistoryCell::AgentReasoning { .. }))
{
let width = self.cached_width.get();
let entry = &mut self.entries[idx];
entry.cell = HistoryCell::new_agent_reasoning(config, text);
let height = if width > 0 {
entry.cell.height(width)
} else {
0
};
entry.line_count.set(height);
}
}

pub fn replace_last_agent_message(&mut self, config: &Config, text: String) {
if let Some(idx) = self
.entries
.iter()
.rposition(|entry| matches!(entry.cell, HistoryCell::AgentMessage { .. }))
{
let width = self.cached_width.get();
let entry = &mut self.entries[idx];
entry.cell = HistoryCell::new_agent_message(config, text);
let height = if width > 0 {
entry.cell.height(width)
} else {
0
};
entry.line_count.set(height);
}
}

pub fn record_completed_exec_command(
&mut self,
call_id: String,
@@ -498,7 +454,7 @@ impl WidgetRef for ConversationHistoryWidget {

{
// Choose a thumb color that stands out only when this pane has focus so that the
// user's attention is naturally drawn to the active viewport. When unfocused we show
// user’s attention is naturally drawn to the active viewport. When unfocused we show
// a low-contrast thumb so the scrollbar fades into the background without becoming
// invisible.
let thumb_style = if self.has_input_focus {

@@ -20,6 +20,7 @@ use tracing_subscriber::EnvFilter;
use tracing_subscriber::prelude::*;

mod app;
mod security_review;
mod app_event;
mod app_event_sender;
mod bottom_pane;

@@ -14,6 +14,8 @@ pub enum SlashCommand {
// more frequently used commands should be listed first.
New,
Diff,
#[strum(serialize = "secreview")]
SecurityReview,
Quit,
ToggleMouseMode,
}
@@ -30,6 +32,9 @@ impl SlashCommand {
SlashCommand::Diff => {
"Show git diff of the working directory (including untracked files)"
}
SlashCommand::SecurityReview => {
"Run AppSec security review and display the generated reports"
}
}
}

@@ -65,7 +65,7 @@ impl StatusIndicatorWidget {
std::thread::sleep(Duration::from_millis(200));
counter = counter.wrapping_add(1);
frame_idx_clone.store(counter, Ordering::Relaxed);
app_event_tx_clone.send(AppEvent::RequestRedraw);
app_event_tx_clone.send(AppEvent::Redraw);
}
});
}