Compare commits

..

45 Commits

Author SHA1 Message Date
oai-ragona
5b6224dd1b Merge branch 'main' into codex-rs-session 2025-04-28 09:53:28 -07:00
Ryan Ragona
798364e86b remove unused platform method 2025-04-27 15:34:19 -07:00
Ryan Ragona
34e849a89a windows build cleanup 2025-04-27 14:56:16 -07:00
Ryan Ragona
c7596debb1 cleanup 2025-04-27 12:56:35 -07:00
Ryan Ragona
ffe7e2277f artial, broken 2025-04-27 12:42:27 -07:00
Ryan Ragona
b344757fb0 first pass at get 2025-04-27 09:00:37 -07:00
Ryan Ragona
0fbe5f2069 cleanup 2025-04-27 08:46:45 -07:00
Ryan Ragona
aef7f25302 cleanup 2025-04-27 08:43:44 -07:00
Ryan Ragona
66a2e970f7 humansize 2025-04-27 08:35:21 -07:00
Ryan Ragona
e96055be36 move to_args closer to args 2025-04-27 08:12:09 -07:00
Ryan Ragona
359a09cd8d fmt 2025-04-27 07:50:12 -07:00
Ryan Ragona
8c672d5442 cuter job names 2025-04-27 07:49:59 -07:00
Ryan Ragona
4d26c773b9 drop ascii art 2025-04-26 16:34:03 -07:00
Ryan Ragona
026990fcc0 fmt 2025-04-26 16:31:47 -07:00
Ryan Ragona
ee51ffc130 fmt 2025-04-26 16:25:17 -07:00
Ryan Ragona
a4197ec97a truncate 2025-04-26 16:25:02 -07:00
Ryan Ragona
56e609d481 cleanup on clap args 2025-04-26 16:19:15 -07:00
Ryan Ragona
2b55e5a8f2 remove overcomments 2025-04-26 16:03:43 -07:00
Ryan Ragona
2420a6a898 clippy 2025-04-26 15:57:49 -07:00
Ryan Ragona
8ed2704191 in progress cleanup 2025-04-26 15:45:09 -07:00
Ryan Ragona
07911ddc3e cleanup pass 2025-04-26 15:34:08 -07:00
Ryan Ragona
f1c6625bf2 shorten timestamp 2025-04-26 14:38:17 -07:00
Ryan Ragona
337164738a fmt 2025-04-26 14:25:35 -07:00
Ryan Ragona
96d8d2a37a save session metadata 2025-04-26 14:25:26 -07:00
Ryan Ragona
e782378176 gate on windows 2025-04-26 12:20:27 -07:00
Ryan Ragona
d3b69e98bd fmt 2025-04-26 12:13:24 -07:00
Ryan Ragona
3d9ce18299 fix for tail 2025-04-26 12:13:17 -07:00
Ryan Ragona
1e2983d612 cleanup on failure 2025-04-26 12:09:04 -07:00
Ryan Ragona
a7a8fa1753 session validation 2025-04-26 11:48:42 -07:00
Ryan Ragona
6f0e4a5733 tail 2025-04-26 11:42:50 -07:00
Ryan Ragona
a09be2144e stdout tailing 2025-04-26 11:36:48 -07:00
Ryan Ragona
d0e8aa5233 impl kill 2025-04-26 11:20:46 -07:00
Ryan Ragona
dab7b1734d fmt 2025-04-26 10:03:34 -07:00
Ryan Ragona
9f10ec53b6 remove tui socket stuff 2025-04-26 10:03:24 -07:00
Ryan Ragona
786c81d706 draft still broken 2025-04-26 09:44:50 -07:00
Ryan Ragona
1d0d725494 draft broken 2025-04-26 09:41:06 -07:00
Ryan Ragona
f2b7b14284 draft of tui sock 2025-04-26 09:13:57 -07:00
Ryan Ragona
63ec18989a display kind 2025-04-26 08:27:26 -07:00
Ryan Ragona
2aa7f42dc9 fmt 2025-04-26 08:11:04 -07:00
Ryan Ragona
8f8479fd80 add repl subcommand 2025-04-26 08:10:46 -07:00
Ryan Ragona
9aaa947828 numeric prefix 2025-04-26 07:32:26 -07:00
Ryan Ragona
342ac711ca use dot dir 2025-04-26 07:06:20 -07:00
Ryan Ragona
b41f26f484 metadata 2025-04-26 07:05:19 -07:00
Ryan Ragona
abf0198a49 progress 2025-04-26 06:53:03 -07:00
Ryan Ragona
314d2216cb codex draft 2025-04-26 06:40:39 -07:00
65 changed files with 2065 additions and 1978 deletions

View File

@@ -1,37 +0,0 @@
{
"outputs": {
"codex-repl": {
"platforms": {
"macos-aarch64": { "regex": "^codex-repl-aarch64-apple-darwin\\.zst$", "path": "codex-repl" },
"macos-x86_64": { "regex": "^codex-repl-x86_64-apple-darwin\\.zst$", "path": "codex-repl" },
"linux-x86_64": { "regex": "^codex-repl-x86_64-unknown-linux-musl\\.zst$", "path": "codex-repl" },
"linux-aarch64": { "regex": "^codex-repl-aarch64-unknown-linux-gnu\\.zst$", "path": "codex-repl" }
}
},
"codex-exec": {
"platforms": {
"macos-aarch64": { "regex": "^codex-exec-aarch64-apple-darwin\\.zst$", "path": "codex-exec" },
"macos-x86_64": { "regex": "^codex-exec-x86_64-apple-darwin\\.zst$", "path": "codex-exec" },
"linux-x86_64": { "regex": "^codex-exec-x86_64-unknown-linux-musl\\.zst$", "path": "codex-exec" },
"linux-aarch64": { "regex": "^codex-exec-aarch64-unknown-linux-gnu\\.zst$", "path": "codex-exec" }
}
},
"codex": {
"platforms": {
"macos-aarch64": { "regex": "^codex-aarch64-apple-darwin\\.zst$", "path": "codex" },
"macos-x86_64": { "regex": "^codex-x86_64-apple-darwin\\.zst$", "path": "codex" },
"linux-x86_64": { "regex": "^codex-x86_64-unknown-linux-musl\\.zst$", "path": "codex" },
"linux-aarch64": { "regex": "^codex-aarch64-unknown-linux-gnu\\.zst$", "path": "codex" }
}
},
"codex-linux-sandbox": {
"platforms": {
"linux-x86_64": { "regex": "^codex-linux-sandbox-x86_64-unknown-linux-musl\\.zst$", "path": "codex-linux-sandbox" },
"linux-aarch64": { "regex": "^codex-linux-sandbox-aarch64-unknown-linux-gnu\\.zst$", "path": "codex-linux-sandbox" }
}
}
}
}

View File

@@ -1,157 +0,0 @@
# Release workflow for codex-rs.
# To release, follow a workflow like:
# ```
# git tag -a rust-v0.1.0 -m "Release 0.1.0"
# git push origin rust-v0.1.0
# ```
name: rust-release
on:
push:
tags:
- "rust-v.*.*.*"
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: true
env:
TAG_REGEX: '^rust-v\.[0-9]+\.[0-9]+\.[0-9]+$'
jobs:
tag-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
set -euo pipefail
echo "::group::Tag validation"
# 1. Must be a tag and match the regex
[[ "${GITHUB_REF_TYPE}" == "tag" ]] \
|| { echo "❌ Not a tag push"; exit 1; }
[[ "${GITHUB_REF_NAME}" =~ ${TAG_REGEX} ]] \
|| { echo "❌ Tag '${GITHUB_REF_NAME}' != ${TAG_REGEX}"; exit 1; }
# 2. Extract versions
tag_ver="${GITHUB_REF_NAME#rust-v.}"
cargo_ver="$(grep -m1 '^version' codex-rs/Cargo.toml \
| sed -E 's/version *= *"([^"]+)".*/\1/')"
# 3. Compare
[[ "${tag_ver}" == "${cargo_ver}" ]] \
|| { echo "❌ Tag ${tag_ver} ≠ Cargo.toml ${cargo_ver}"; exit 1; }
echo "✅ Tag and Cargo.toml agree (${tag_ver})"
echo "::endgroup::"
build:
needs: tag-check
name: ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
defaults:
run:
working-directory: codex-rs
strategy:
fail-fast: false
matrix:
include:
- runner: macos-14
target: aarch64-apple-darwin
- runner: macos-14
target: x86_64-apple-darwin
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
targets: ${{ matrix.target }}
- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-release-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' }}
name: Install musl build tools
run: |
sudo apt install -y musl-tools pkg-config
- name: Cargo build
run: cargo build --target ${{ matrix.target }} --release --all-targets --all-features
- name: Stage artifacts
shell: bash
run: |
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
cp target/${{ matrix.target }}/release/codex-repl "$dest/codex-repl-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-exec "$dest/codex-exec-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'x86_64-unknown-linux-gnu' || matrix.target == 'aarch64-unknown-linux-gnu' }}
name: Stage Linux-only artifacts
shell: bash
run: |
dest="dist/${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-linux-sandbox "$dest/codex-linux-sandbox-${{ matrix.target }}"
- name: Compress artifacts
shell: bash
run: |
dest="dist/${{ matrix.target }}"
zstd -T0 -19 --rm "$dest"/*
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target }}
path: codex-rs/dist/${{ matrix.target }}/*
release:
needs: build
name: release
runs-on: ubuntu-24.04
env:
RELEASE_TAG: codex-rs-${{ github.sha }}-${{ github.run_attempt }}-${{ github.ref_name }}
steps:
- uses: actions/download-artifact@v4
with:
path: dist
- name: List
run: ls -R dist/
- uses: softprops/action-gh-release@v2
with:
tag_name: ${{ env.RELEASE_TAG }}
files: dist/**
# TODO(ragona): I'm going to leave these as prerelease/draft for now.
# It gives us 1) clarity that these are not yet a stable version, and
# 2) allows a human step to review the release before publishing the draft.
prerelease: true
draft: true
- uses: facebook/dotslash-publish-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag: ${{ env.RELEASE_TAG }}
config: .github/dotslash-config.json

View File

@@ -2,26 +2,6 @@
set -euo pipefail # Exit on error, undefined vars, and pipeline failures
IFS=$'\n\t' # Stricter word splitting
# Read allowed domains from file
ALLOWED_DOMAINS_FILE="/etc/codex/allowed_domains.txt"
if [ -f "$ALLOWED_DOMAINS_FILE" ]; then
ALLOWED_DOMAINS=()
while IFS= read -r domain; do
ALLOWED_DOMAINS+=("$domain")
done < "$ALLOWED_DOMAINS_FILE"
echo "Using domains from file: ${ALLOWED_DOMAINS[*]}"
else
# Fallback to default domains
ALLOWED_DOMAINS=("api.openai.com")
echo "Domains file not found, using default: ${ALLOWED_DOMAINS[*]}"
fi
# Ensure we have at least one domain
if [ ${#ALLOWED_DOMAINS[@]} -eq 0 ]; then
echo "ERROR: No allowed domains specified"
exit 1
fi
# Flush existing rules and delete existing ipsets
iptables -F
iptables -X
@@ -44,7 +24,8 @@ iptables -A OUTPUT -o lo -j ACCEPT
ipset create allowed-domains hash:net
# Resolve and add other allowed domains
for domain in "${ALLOWED_DOMAINS[@]}"; do
for domain in \
"api.openai.com"; do
echo "Resolving $domain..."
ips=$(dig +short A "$domain")
if [ -z "$ips" ]; then
@@ -106,7 +87,7 @@ else
echo "Firewall verification passed - unable to reach https://example.com as expected"
fi
# Always verify OpenAI API access is working
# Verify OpenAI API access
if ! curl --connect-timeout 5 https://api.openai.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - unable to reach https://api.openai.com"
exit 1

View File

@@ -10,8 +10,6 @@ set -e
# Default the work directory to WORKSPACE_ROOT_DIR if not provided.
WORK_DIR="${WORKSPACE_ROOT_DIR:-$(pwd)}"
# Default allowed domains - can be overridden with OPENAI_ALLOWED_DOMAINS env var
OPENAI_ALLOWED_DOMAINS="${OPENAI_ALLOWED_DOMAINS:-api.openai.com}"
# Parse optional flag.
if [ "$1" = "--work_dir" ]; then
@@ -47,12 +45,6 @@ if [ -z "$WORK_DIR" ]; then
exit 1
fi
# Verify that OPENAI_ALLOWED_DOMAINS is not empty
if [ -z "$OPENAI_ALLOWED_DOMAINS" ]; then
echo "Error: OPENAI_ALLOWED_DOMAINS is empty."
exit 1
fi
# Kill any existing container for the working directory using cleanup(), centralizing removal logic.
cleanup
@@ -65,25 +57,8 @@ docker run --name "$CONTAINER_NAME" -d \
codex \
sleep infinity
# Write the allowed domains to a file in the container
docker exec --user root "$CONTAINER_NAME" bash -c "mkdir -p /etc/codex"
for domain in $OPENAI_ALLOWED_DOMAINS; do
# Validate domain format to prevent injection
if [[ ! "$domain" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then
echo "Error: Invalid domain format: $domain"
exit 1
fi
echo "$domain" | docker exec --user root -i "$CONTAINER_NAME" bash -c "cat >> /etc/codex/allowed_domains.txt"
done
# Set proper permissions on the domains file
docker exec --user root "$CONTAINER_NAME" bash -c "chmod 444 /etc/codex/allowed_domains.txt && chown root:root /etc/codex/allowed_domains.txt"
# Initialize the firewall inside the container as root user
docker exec --user root "$CONTAINER_NAME" bash -c "/usr/local/bin/init_firewall.sh"
# Remove the firewall script after running it
docker exec --user root "$CONTAINER_NAME" bash -c "rm -f /usr/local/bin/init_firewall.sh"
# Initialize the firewall inside the container with root privileges.
docker exec --user root "$CONTAINER_NAME" /usr/local/bin/init_firewall.sh
# Execute the provided command in the container, ensuring it runs in the work directory.
# We use a parameterized bash command to safely handle the command and directory.

View File

@@ -10,7 +10,6 @@ import type { ApprovalPolicy } from "./approvals";
import type { CommandConfirmation } from "./utils/agent/agent-loop";
import type { AppConfig } from "./utils/config";
import type { ResponseItem } from "openai/resources/responses/responses";
import type { ReasoningEffort } from "openai/resources.mjs";
import App from "./app";
import { runSinglePass } from "./cli-singlepass";
@@ -161,12 +160,6 @@ const cli = meow(
"Disable truncation of command stdout/stderr messages (show everything)",
aliases: ["no-truncate"],
},
reasoning: {
type: "string",
description: "Set the reasoning effort level (low, medium, high)",
choices: ["low", "medium", "high"],
default: "high",
},
// Notification
notify: {
type: "boolean",
@@ -294,22 +287,17 @@ if (!apiKey && !NO_API_KEY_REQUIRED.has(provider.toLowerCase())) {
process.exit(1);
}
const flagPresent = Object.hasOwn(cli.flags, "disableResponseStorage");
const disableResponseStorage = flagPresent
? Boolean(cli.flags.disableResponseStorage) // value user actually passed
: (config.disableResponseStorage ?? false); // fall back to YAML, default to false
config = {
apiKey,
...config,
model: model ?? config.model,
notify: Boolean(cli.flags.notify),
reasoningEffort:
(cli.flags.reasoning as ReasoningEffort | undefined) ?? "high",
flexMode: Boolean(cli.flags.flexMode),
provider,
disableResponseStorage,
disableResponseStorage:
cli.flags.disableResponseStorage !== undefined
? Boolean(cli.flags.disableResponseStorage)
: config.disableResponseStorage,
};
// Check for updates after loading config. This is important because we write state file in

View File

@@ -34,7 +34,7 @@ import OpenAI, { APIConnectionTimeoutError } from "openai";
// Wait time before retrying after rate limit errors (ms).
const RATE_LIMIT_RETRY_WAIT_MS = parseInt(
process.env["OPENAI_RATE_LIMIT_RETRY_WAIT_MS"] || "500",
process.env["OPENAI_RATE_LIMIT_RETRY_WAIT_MS"] || "2500",
10,
);
@@ -671,12 +671,12 @@ export class AgentLoop {
let stream;
// Retry loop for transient errors. Up to MAX_RETRIES attempts.
const MAX_RETRIES = 8;
const MAX_RETRIES = 5;
for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
try {
let reasoning: Reasoning | undefined;
if (this.model.startsWith("o")) {
reasoning = { effort: this.config.reasoningEffort ?? "high" };
reasoning = { effort: "high" };
if (this.model === "o3" || this.model === "o4-mini") {
reasoning.summary = "auto";
}

View File

@@ -11,8 +11,8 @@ import { exec, execApplyPatch } from "./exec.js";
import { ReviewDecision } from "./review.js";
import { isLoggingEnabled, log } from "../logger/log.js";
import { SandboxType } from "./sandbox/interface.js";
import { PATH_TO_SEATBELT_EXECUTABLE } from "./sandbox/macos-seatbelt.js";
import fs from "fs/promises";
import { access } from "fs/promises";
import { execFile } from "node:child_process";
// ---------------------------------------------------------------------------
// Session-level cache of commands that the user has chosen to always approve.
@@ -218,7 +218,7 @@ async function execCommand(
let { workdir } = execInput;
if (workdir) {
try {
await fs.access(workdir);
await access(workdir);
} catch (e) {
log(`EXEC workdir=${workdir} not found, use process.cwd() instead`);
workdir = process.cwd();
@@ -271,19 +271,18 @@ async function execCommand(
};
}
/** Return `true` if the `/usr/bin/sandbox-exec` is present and executable. */
const isSandboxExecAvailable: Promise<boolean> = fs
.access(PATH_TO_SEATBELT_EXECUTABLE, fs.constants.X_OK)
.then(
() => true,
(err) => {
if (!["ENOENT", "ACCESS", "EPERM"].includes(err.code)) {
log(
`Unexpected error for \`stat ${PATH_TO_SEATBELT_EXECUTABLE}\`: ${err.message}`,
);
}
return false;
},
/**
* Return `true` if the `sandbox-exec` binary can be located. This intentionally does **not**
* spawn the binary — we only care about its presence.
*/
export const isSandboxExecAvailable = (): Promise<boolean> =>
new Promise((res) =>
execFile(
"command",
["-v", "sandbox-exec"],
{ signal: AbortSignal.timeout(200) },
(err) => res(!err), // exit 0 ⇒ found
),
);
async function getSandbox(runInSandbox: boolean): Promise<SandboxType> {
@@ -296,7 +295,7 @@ async function getSandbox(runInSandbox: boolean): Promise<SandboxType> {
// instance, inside certain CI images). Attempting to spawn a missing
// binary makes Node.js throw an *uncaught* `ENOENT` error further down
// the stack which crashes the whole CLI.
if (await isSandboxExecAvailable) {
if (await isSandboxExecAvailable()) {
return SandboxType.MACOS_SEATBELT;
} else {
throw new Error(

View File

@@ -12,14 +12,6 @@ function getCommonRoots() {
];
}
/**
* When working with `sandbox-exec`, only consider `sandbox-exec` in `/usr/bin`
* to defend against an attacker trying to inject a malicious version on the
* PATH. If /usr/bin/sandbox-exec has been tampered with, then the attacker
* already has root access.
*/
export const PATH_TO_SEATBELT_EXECUTABLE = "/usr/bin/sandbox-exec";
export function execWithSeatbelt(
cmd: Array<string>,
opts: SpawnOptions,
@@ -65,7 +57,7 @@ export function execWithSeatbelt(
);
const fullCommand = [
PATH_TO_SEATBELT_EXECUTABLE,
"sandbox-exec",
"-p",
fullPolicy,
...policyTemplateParams,

View File

@@ -7,7 +7,6 @@
// compiled `dist/` output used by the published CLI.
import type { FullAutoErrorMode } from "./auto-approval-mode.js";
import type { ReasoningEffort } from "openai/resources.mjs";
import { AutoApprovalMode } from "./auto-approval-mode.js";
import { log } from "./logger/log.js";
@@ -63,8 +62,6 @@ export const OPENAI_TIMEOUT_MS =
parseInt(process.env["OPENAI_TIMEOUT_MS"] || "0", 10) || undefined;
export const OPENAI_BASE_URL = process.env["OPENAI_BASE_URL"] || "";
export let OPENAI_API_KEY = process.env["OPENAI_API_KEY"] || "";
export const DEFAULT_REASONING_EFFORT = "high";
export const OPENAI_ORGANIZATION = process.env["OPENAI_ORGANIZATION"] || "";
export const OPENAI_PROJECT = process.env["OPENAI_PROJECT"] || "";
@@ -145,9 +142,6 @@ export type StoredConfig = {
saveHistory?: boolean;
sensitivePatterns?: Array<string>;
};
/** User-defined safe commands */
safeCommands?: Array<string>;
reasoningEffort?: ReasoningEffort;
};
// Minimal config written on first run. An *empty* model string ensures that
@@ -171,7 +165,6 @@ export type AppConfig = {
approvalMode?: AutoApprovalMode;
fullAutoErrorMode?: FullAutoErrorMode;
memory?: MemoryConfig;
reasoningEffort?: ReasoningEffort;
/** Whether to enable desktop notifications for responses */
notify?: boolean;
@@ -323,22 +316,6 @@ export const loadConfig = (
}
}
if (
storedConfig.disableResponseStorage !== undefined &&
typeof storedConfig.disableResponseStorage !== "boolean"
) {
if (storedConfig.disableResponseStorage === "true") {
storedConfig.disableResponseStorage = true;
} else if (storedConfig.disableResponseStorage === "false") {
storedConfig.disableResponseStorage = false;
} else {
log(
`[codex] Warning: 'disableResponseStorage' in config is not a boolean (got '${storedConfig.disableResponseStorage}'). Ignoring this value.`,
);
delete storedConfig.disableResponseStorage;
}
}
const instructionsFilePathResolved =
instructionsPath ?? INSTRUCTIONS_FILEPATH;
const userInstructions = existsSync(instructionsFilePathResolved)
@@ -388,8 +365,7 @@ export const loadConfig = (
instructions: combinedInstructions,
notify: storedConfig.notify === true,
approvalMode: storedConfig.approvalMode,
disableResponseStorage: storedConfig.disableResponseStorage === true,
reasoningEffort: storedConfig.reasoningEffort,
disableResponseStorage: storedConfig.disableResponseStorage ?? false,
};
// -----------------------------------------------------------------------
@@ -504,8 +480,6 @@ export const saveConfig = (
provider: config.provider,
providers: config.providers,
approvalMode: config.approvalMode,
disableResponseStorage: config.disableResponseStorage,
reasoningEffort: config.reasoningEffort,
};
// Add history settings if they exist

View File

@@ -98,8 +98,10 @@ describe("AgentLoop ratelimit handling", () => {
// is in progress.
const runPromise = agent.run(userMsg as any);
// Should be done in at most 180 seconds.
await vi.advanceTimersByTimeAsync(180_000);
// The agent waits 15,000 ms between retries (rate-limit backoff) and does
// this four times (after attempts 1–4). Fast-forward a bit more to cover
// any additional small `setTimeout` calls inside the implementation.
await vi.advanceTimersByTimeAsync(61_000); // 4 * 15s + 1s safety margin
// Ensure the promise settles without throwing.
await expect(runPromise).resolves.not.toThrow();
@@ -108,8 +110,8 @@ describe("AgentLoop ratelimit handling", () => {
await vi.advanceTimersByTimeAsync(20);
// The OpenAI client should have been called the maximum number of retry
// attempts (8).
expect(openAiState.createSpy).toHaveBeenCalledTimes(8);
// attempts (5).
expect(openAiState.createSpy).toHaveBeenCalledTimes(5);
// Finally, verify that the user sees a helpful system message.
const sysMsg = received.find(

View File

@@ -122,7 +122,7 @@ describe("AgentLoop automatic retry on 5xx errors", () => {
expect(assistant?.content?.[0]?.text).toBe("ok");
});
it("fails after a few attempts and surfaces system message", async () => {
it("fails after 3 attempts and surfaces system message", async () => {
openAiState.createSpy = vi.fn(async () => {
const err: any = new Error("Internal Server Error");
err.status = 502; // any 5xx
@@ -154,7 +154,7 @@ describe("AgentLoop automatic retry on 5xx errors", () => {
await new Promise((r) => setTimeout(r, 20));
expect(openAiState.createSpy).toHaveBeenCalledTimes(8);
expect(openAiState.createSpy).toHaveBeenCalledTimes(5);
const sysMsg = received.find(
(i) =>

View File

@@ -1,121 +0,0 @@
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import {
loadConfig,
DEFAULT_REASONING_EFFORT,
saveConfig,
} from "../src/utils/config";
import type { ReasoningEffort } from "openai/resources.mjs";
import * as fs from "fs";
// Mock the fs module
vi.mock("fs", () => ({
existsSync: vi.fn(),
readFileSync: vi.fn(),
writeFileSync: vi.fn(),
mkdirSync: vi.fn(),
}));
// Mock path.dirname
vi.mock("path", async () => {
const actual = await vi.importActual("path");
return {
...actual,
dirname: vi.fn().mockReturnValue("/mock/dir"),
};
});
describe("Reasoning Effort Configuration", () => {
beforeEach(() => {
vi.resetAllMocks();
});
afterEach(() => {
vi.clearAllMocks();
});
it('should have "high" as the default reasoning effort', () => {
expect(DEFAULT_REASONING_EFFORT).toBe("high");
});
it("should use default reasoning effort when not specified in config", () => {
// Mock fs.existsSync to return true for config file
vi.mocked(fs.existsSync).mockImplementation(() => true);
// Mock fs.readFileSync to return a JSON with no reasoningEffort
vi.mocked(fs.readFileSync).mockImplementation(() =>
JSON.stringify({ model: "test-model" }),
);
const config = loadConfig("/mock/config.json", "/mock/instructions.md");
// Config should not have reasoningEffort explicitly set
expect(config.reasoningEffort).toBeUndefined();
});
it("should load reasoningEffort from config file", () => {
// Mock fs.existsSync to return true for config file
vi.mocked(fs.existsSync).mockImplementation(() => true);
// Mock fs.readFileSync to return a JSON with reasoningEffort
vi.mocked(fs.readFileSync).mockImplementation(() =>
JSON.stringify({
model: "test-model",
reasoningEffort: "low" as ReasoningEffort,
}),
);
const config = loadConfig("/mock/config.json", "/mock/instructions.md");
// Config should have the reasoningEffort from the file
expect(config.reasoningEffort).toBe("low");
});
it("should support all valid reasoning effort values", () => {
// Valid values for ReasoningEffort
const validEfforts: Array<ReasoningEffort> = ["low", "medium", "high"];
for (const effort of validEfforts) {
// Mock fs.existsSync to return true for config file
vi.mocked(fs.existsSync).mockImplementation(() => true);
// Mock fs.readFileSync to return a JSON with reasoningEffort
vi.mocked(fs.readFileSync).mockImplementation(() =>
JSON.stringify({
model: "test-model",
reasoningEffort: effort,
}),
);
const config = loadConfig("/mock/config.json", "/mock/instructions.md");
// Config should have the correct reasoningEffort
expect(config.reasoningEffort).toBe(effort);
}
});
it("should preserve reasoningEffort when saving configuration", () => {
// Setup
vi.mocked(fs.existsSync).mockReturnValue(false);
// Create config with reasoningEffort
const configToSave = {
model: "test-model",
instructions: "",
reasoningEffort: "medium" as ReasoningEffort,
notify: false,
};
// Act
saveConfig(configToSave, "/mock/config.json", "/mock/instructions.md");
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
"/mock/config.json",
expect.stringContaining('"model"'),
"utf-8",
);
// Note: Current implementation of saveConfig doesn't save reasoningEffort,
// this test would need to be updated if that functionality is added
});
});

View File

@@ -1,93 +0,0 @@
/**
* codex-cli/tests/disableResponseStorage.agentLoop.test.ts
*
* Verifies AgentLoop's request-building logic for both values of
* disableResponseStorage.
*/
import { describe, it, expect, vi } from "vitest";
import { AgentLoop } from "../src/utils/agent/agent-loop";
import type { AppConfig } from "../src/utils/config";
import { ReviewDecision } from "../src/utils/agent/review";
/* ─────────── 1. Spy + module mock ─────────────────────────────── */
const createSpy = vi.fn().mockResolvedValue({
data: { id: "resp_123", status: "completed", output: [] },
});
vi.mock("openai", () => ({
default: class {
public responses = { create: createSpy };
},
APIConnectionTimeoutError: class extends Error {},
}));
/* ─────────── 2. Parametrised tests ─────────────────────────────── */
describe.each([
{ flag: true, title: "omits previous_response_id & sets store:false" },
{ flag: false, title: "sends previous_response_id & allows store:true" },
])("AgentLoop with disableResponseStorage=%s", ({ flag, title }) => {
/* build a fresh config for each case */
const cfg: AppConfig = {
model: "o4-mini",
provider: "openai",
instructions: "",
disableResponseStorage: flag,
notify: false,
};
it(title, async () => {
/* reset spy per iteration */
createSpy.mockClear();
const loop = new AgentLoop({
model: cfg.model,
provider: cfg.provider,
config: cfg,
instructions: "",
approvalPolicy: "suggest",
disableResponseStorage: flag,
additionalWritableRoots: [],
onItem() {},
onLoading() {},
getCommandConfirmation: async () => ({ review: ReviewDecision.YES }),
onLastResponseId() {},
});
await loop.run([
{
type: "message",
role: "user",
content: [{ type: "input_text", text: "hello" }],
},
]);
expect(createSpy).toHaveBeenCalledTimes(1);
const call = createSpy.mock.calls[0];
if (!call) {
throw new Error("Expected createSpy to have been called at least once");
}
const payload: any = call[0];
if (flag) {
/* behaviour when ZDR is *on* */
expect(payload).not.toHaveProperty("previous_response_id");
if (payload.input) {
payload.input.forEach((m: any) => {
expect(m.store === undefined ? false : m.store).toBe(false);
});
}
} else {
/* behaviour when ZDR is *off* */
expect(payload).toHaveProperty("previous_response_id");
if (payload.input) {
payload.input.forEach((m: any) => {
if ("store" in m) {
expect(m.store).not.toBe(false);
}
});
}
}
});
});

View File

@@ -1,43 +0,0 @@
/**
* codex/codex-cli/tests/disableResponseStorage.test.ts
*/
import { describe, it, expect, beforeAll, afterAll } from "vitest";
import { mkdtempSync, rmSync, writeFileSync, mkdirSync } from "node:fs";
import { join } from "node:path";
import { tmpdir } from "node:os";
import { loadConfig, saveConfig } from "../src/utils/config";
import type { AppConfig } from "../src/utils/config";
const sandboxHome: string = mkdtempSync(join(tmpdir(), "codex-home-"));
const codexDir: string = join(sandboxHome, ".codex");
const yamlPath: string = join(codexDir, "config.yaml");
describe("disableResponseStorage persistence", () => {
beforeAll((): void => {
// mkdir -p ~/.codex inside the sandbox
rmSync(codexDir, { recursive: true, force: true });
mkdirSync(codexDir, { recursive: true });
// seed YAML with ZDR enabled
writeFileSync(yamlPath, "model: o4-mini\ndisableResponseStorage: true\n");
});
afterAll((): void => {
rmSync(sandboxHome, { recursive: true, force: true });
});
it("keeps disableResponseStorage=true across load/save cycle", async (): Promise<void> => {
// 1️⃣ explicitly load the sandbox file
const cfg1: AppConfig = loadConfig(yamlPath);
expect(cfg1.disableResponseStorage).toBe(true);
// 2️⃣ save right back to the same file
await saveConfig(cfg1, yamlPath);
// 3️⃣ reload and re-assert
const cfg2: AppConfig = loadConfig(yamlPath);
expect(cfg2.disableResponseStorage).toBe(true);
});
});

301
codex-rs/Cargo.lock generated
View File

@@ -469,12 +469,13 @@ dependencies = [
[[package]]
name = "codex-cli"
version = "0.0.2504292236"
version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"codex-core",
"codex-exec",
"codex-interactive",
"codex-repl",
"codex-tui",
"serde_json",
@@ -504,9 +505,8 @@ dependencies = [
"mime_guess",
"openssl-sys",
"patch",
"path-absolutize",
"predicates",
"rand",
"rand 0.9.1",
"reqwest",
"seccompiler",
"serde",
@@ -524,14 +524,11 @@ dependencies = [
[[package]]
name = "codex-exec"
version = "0.0.2504292236"
version = "0.1.0"
dependencies = [
"anyhow",
"chrono",
"clap",
"codex-core",
"owo-colors 4.2.0",
"shlex",
"tokio",
"tracing",
"tracing-subscriber",
@@ -557,20 +554,60 @@ dependencies = [
"tempfile",
]
[[package]]
name = "codex-interactive"
version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"codex-core",
"tokio",
]
[[package]]
name = "codex-repl"
version = "0.0.2504292236"
version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"codex-core",
"owo-colors 4.2.0",
"rand",
"rand 0.9.1",
"tokio",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "codex-session"
version = "0.1.0"
dependencies = [
"anyhow",
"chrono",
"clap",
"codex-core",
"codex-exec",
"codex-repl",
"command-group",
"dirs",
"humansize",
"libc",
"names",
"nix 0.28.0",
"petname",
"rand 0.9.1",
"serde",
"serde_json",
"sysinfo",
"tabwriter",
"tempfile",
"tokio",
"tracing",
"tracing-subscriber",
"uuid",
"windows-sys 0.48.0",
]
[[package]]
name = "codex-tui"
version = "0.1.0"
@@ -624,6 +661,18 @@ version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
[[package]]
name = "command-group"
version = "5.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a68fa787550392a9d58f44c21a3022cfb3ea3e2458b7f85d3b399d0ceeccf409"
dependencies = [
"async-trait",
"nix 0.27.1",
"tokio",
"winapi",
]
[[package]]
name = "compact_str"
version = "0.8.1"
@@ -681,6 +730,25 @@ dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
@@ -1417,6 +1485,15 @@ version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "humansize"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6cb51c9a029ddc91b07a787f1d86b53ccfa49b0e86688c946ebe8d3555685dd7"
dependencies = [
"libm",
]
[[package]]
name = "hyper"
version = "1.6.0"
@@ -1851,6 +1928,12 @@ version = "0.2.172"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
[[package]]
name = "libm"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72"
[[package]]
name = "libredox"
version = "0.1.3"
@@ -2022,6 +2105,15 @@ dependencies = [
"serde",
]
[[package]]
name = "names"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bddcd3bf5144b6392de80e04c347cd7fab2508f6df16a85fc496ecd5cec39bc"
dependencies = [
"rand 0.8.5",
]
[[package]]
name = "native-tls"
version = "0.2.14"
@@ -2054,6 +2146,17 @@ dependencies = [
"smallvec",
]
[[package]]
name = "nix"
version = "0.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
dependencies = [
"bitflags 2.9.0",
"cfg-if",
"libc",
]
[[package]]
name = "nix"
version = "0.28.0"
@@ -2093,6 +2196,15 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
[[package]]
name = "ntapi"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4"
dependencies = [
"winapi",
]
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
@@ -2320,6 +2432,20 @@ dependencies = [
"indexmap 2.9.0",
]
[[package]]
name = "petname"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cd31dcfdbbd7431a807ef4df6edd6473228e94d5c805e8cf671227a21bad068"
dependencies = [
"anyhow",
"clap",
"itertools 0.13.0",
"proc-macro2",
"quote",
"rand 0.8.5",
]
[[package]]
name = "phf_shared"
version = "0.11.3"
@@ -2457,14 +2583,35 @@ dependencies = [
"nibble_vec",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha 0.3.1",
"rand_core 0.6.4",
]
[[package]]
name = "rand"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97"
dependencies = [
"rand_chacha",
"rand_core",
"rand_chacha 0.9.0",
"rand_core 0.9.3",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core 0.6.4",
]
[[package]]
@@ -2474,7 +2621,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
dependencies = [
"ppv-lite86",
"rand_core",
"rand_core 0.9.3",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.16",
]
[[package]]
@@ -2507,6 +2663,26 @@ dependencies = [
"unicode-width 0.2.0",
]
[[package]]
name = "rayon"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "redox_syscall"
version = "0.5.11"
@@ -2753,7 +2929,7 @@ dependencies = [
"libc",
"log",
"memchr",
"nix",
"nix 0.28.0",
"radix_trie",
"unicode-segmentation",
"unicode-width 0.1.14",
@@ -3241,6 +3417,21 @@ dependencies = [
"syn 2.0.100",
]
[[package]]
name = "sysinfo"
version = "0.29.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666"
dependencies = [
"cfg-if",
"core-foundation-sys",
"libc",
"ntapi",
"once_cell",
"rayon",
"winapi",
]
[[package]]
name = "system-configuration"
version = "0.6.1"
@@ -3262,6 +3453,15 @@ dependencies = [
"libc",
]
[[package]]
name = "tabwriter"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fce91f2f0ec87dff7e6bcbbeb267439aa1188703003c6055193c821487400432"
dependencies = [
"unicode-width 0.2.0",
]
[[package]]
name = "tempfile"
version = "3.19.1"
@@ -3757,6 +3957,15 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9"
dependencies = [
"getrandom 0.3.2",
]
[[package]]
name = "valuable"
version = "0.1.1"
@@ -4003,6 +4212,15 @@ dependencies = [
"windows-link",
]
[[package]]
name = "windows-sys"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
"windows-targets 0.48.5",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
@@ -4021,6 +4239,21 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
dependencies = [
"windows_aarch64_gnullvm 0.48.5",
"windows_aarch64_msvc 0.48.5",
"windows_i686_gnu 0.48.5",
"windows_i686_msvc 0.48.5",
"windows_x86_64_gnu 0.48.5",
"windows_x86_64_gnullvm 0.48.5",
"windows_x86_64_msvc 0.48.5",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
@@ -4053,6 +4286,12 @@ dependencies = [
"windows_x86_64_msvc 0.53.0",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
@@ -4065,6 +4304,12 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
@@ -4077,6 +4322,12 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
@@ -4101,6 +4352,12 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
@@ -4113,6 +4370,12 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
@@ -4125,6 +4388,12 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
@@ -4137,6 +4406,12 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"

View File

@@ -7,15 +7,8 @@ members = [
"core",
"exec",
"execpolicy",
"interactive",
"repl",
"session",
"tui",
]
[workspace.package]
version = "0.0.2504292236"
[profile.release]
lto = "fat"
# Because we bundle some of these executables with the TypeScript CLI, we
# remove everything to make the binary as small as possible.
strip = "symbols"

View File

@@ -17,6 +17,7 @@ Currently, the Rust implementation is materially behind the TypeScript implement
This folder is the root of a Cargo workspace. It contains quite a bit of experimental code, but here are the key crates:
- [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this to be a library crate that is generally useful for building other Rust/native applications that use Codex.
- [`interactive/`](./interactive) CLI with a UX comparable to the TypeScript Codex CLI.
- [`exec/`](./exec) "headless" CLI for use in automation.
- [`tui/`](./tui) CLI that launches a fullscreen TUI built with [Ratatui](https://ratatui.rs/).
- [`repl/`](./repl) CLI that launches a lightweight REPL similar to the Python or Node.js REPL.

View File

@@ -86,8 +86,6 @@ pub enum ApplyPatchFileChange {
Update {
unified_diff: String,
move_path: Option<PathBuf>,
/// new_content that will result after the unified_diff is applied.
new_content: String,
},
}
@@ -128,10 +126,7 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String]) -> MaybeApplyPatchVerif
move_path,
chunks,
} => {
let ApplyPatchFileUpdate {
unified_diff,
content: contents,
} = match unified_diff_from_chunks(&path, &chunks) {
let unified_diff = match unified_diff_from_chunks(&path, &chunks) {
Ok(diff) => diff,
Err(e) => {
return MaybeApplyPatchVerified::CorrectnessError(e);
@@ -142,7 +137,6 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String]) -> MaybeApplyPatchVerif
ApplyPatchFileChange::Update {
unified_diff,
move_path,
new_content: contents,
},
);
}
@@ -522,17 +516,10 @@ fn apply_replacements(
lines
}
/// Intended result of a file update for apply_patch.
#[derive(Debug, Eq, PartialEq)]
pub struct ApplyPatchFileUpdate {
unified_diff: String,
content: String,
}
pub fn unified_diff_from_chunks(
path: &Path,
chunks: &[UpdateFileChunk],
) -> std::result::Result<ApplyPatchFileUpdate, ApplyPatchError> {
) -> std::result::Result<String, ApplyPatchError> {
unified_diff_from_chunks_with_context(path, chunks, 1)
}
@@ -540,17 +527,13 @@ pub fn unified_diff_from_chunks_with_context(
path: &Path,
chunks: &[UpdateFileChunk],
context: usize,
) -> std::result::Result<ApplyPatchFileUpdate, ApplyPatchError> {
) -> std::result::Result<String, ApplyPatchError> {
let AppliedPatch {
original_contents,
new_contents,
} = derive_new_contents_from_chunks(path, chunks)?;
let text_diff = TextDiff::from_lines(&original_contents, &new_contents);
let unified_diff = text_diff.unified_diff().context_radius(context).to_string();
Ok(ApplyPatchFileUpdate {
unified_diff,
content: new_contents,
})
Ok(text_diff.unified_diff().context_radius(context).to_string())
}
/// Print the summary of changes in git-style format.
@@ -915,11 +898,7 @@ PATCH"#,
-qux
+QUX
"#;
let expected = ApplyPatchFileUpdate {
unified_diff: expected_diff.to_string(),
content: "foo\nBAR\nbaz\nQUX\n".to_string(),
};
assert_eq!(expected, diff);
assert_eq!(expected_diff, diff);
}
#[test]
@@ -951,11 +930,7 @@ PATCH"#,
+FOO
bar
"#;
let expected = ApplyPatchFileUpdate {
unified_diff: expected_diff.to_string(),
content: "FOO\nbar\nbaz\n".to_string(),
};
assert_eq!(expected, diff);
assert_eq!(expected_diff, diff);
}
#[test]
@@ -988,11 +963,7 @@ PATCH"#,
-baz
+BAZ
"#;
let expected = ApplyPatchFileUpdate {
unified_diff: expected_diff.to_string(),
content: "foo\nbar\nBAZ\n".to_string(),
};
assert_eq!(expected, diff);
assert_eq!(expected_diff, diff);
}
#[test]
@@ -1022,11 +993,7 @@ PATCH"#,
baz
+quux
"#;
let expected = ApplyPatchFileUpdate {
unified_diff: expected_diff.to_string(),
content: "foo\nbar\nbaz\nquux\n".to_string(),
};
assert_eq!(expected, diff);
assert_eq!(expected_diff, diff);
}
#[test]
@@ -1065,7 +1032,7 @@ PATCH"#,
let diff = unified_diff_from_chunks(&path, chunks).unwrap();
let expected_diff = r#"@@ -1,6 +1,7 @@
let expected = r#"@@ -1,6 +1,7 @@
a
-b
+B
@@ -1077,11 +1044,6 @@ PATCH"#,
+g
"#;
let expected = ApplyPatchFileUpdate {
unified_diff: expected_diff.to_string(),
content: "a\nB\nc\nd\nE\nf\ng\n".to_string(),
};
assert_eq!(expected, diff);
let mut stdout = Vec::new();

View File

@@ -1,25 +1,18 @@
[package]
name = "codex-cli"
version = { workspace = true }
version = "0.1.0"
edition = "2021"
[[bin]]
name = "codex"
path = "src/main.rs"
[[bin]]
name = "codex-linux-sandbox"
path = "src/linux-sandbox/main.rs"
[lib]
name = "codex_cli"
path = "src/lib.rs"
[dependencies]
anyhow = "1"
clap = { version = "4", features = ["derive"] }
codex-core = { path = "../core" }
codex-exec = { path = "../exec" }
codex-interactive = { path = "../interactive" }
codex-repl = { path = "../repl" }
codex-tui = { path = "../tui" }
serde_json = "1"

View File

@@ -1,37 +0,0 @@
//! `debug landlock` implementation for the Codex CLI.
//!
//! On Linux the command is executed inside a Landlock + seccomp sandbox by
//! calling the low-level `exec_linux` helper from `codex_core::linux`.
use codex_core::protocol::SandboxPolicy;
use std::os::unix::process::ExitStatusExt;
use std::process;
use std::process::Command;
use std::process::ExitStatus;
/// Execute `command` in a Linux sandbox (Landlock + seccomp) the way Codex
/// would.
pub fn run_landlock(command: Vec<String>, sandbox_policy: SandboxPolicy) -> anyhow::Result<()> {
if command.is_empty() {
anyhow::bail!("command args are empty");
}
// Spawn a new thread and apply the sandbox policies there.
let handle = std::thread::spawn(move || -> anyhow::Result<ExitStatus> {
codex_core::linux::apply_sandbox_policy_to_current_thread(sandbox_policy)?;
let status = Command::new(&command[0]).args(&command[1..]).status()?;
Ok(status)
});
let status = handle
.join()
.map_err(|e| anyhow::anyhow!("Failed to join thread: {e:?}"))??;
// Use ExitStatus to derive the exit code.
if let Some(code) = status.code() {
process::exit(code);
} else if let Some(signal) = status.signal() {
process::exit(128 + signal);
} else {
process::exit(1);
}
}

View File

@@ -1,47 +0,0 @@
#[cfg(target_os = "linux")]
pub mod landlock;
pub mod proto;
pub mod seatbelt;
use clap::Parser;
use codex_core::protocol::SandboxPolicy;
use codex_core::SandboxPermissionOption;
#[derive(Debug, Parser)]
pub struct SeatbeltCommand {
/// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR)
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
#[clap(flatten)]
pub sandbox: SandboxPermissionOption,
/// Full command args to run under seatbelt.
#[arg(trailing_var_arg = true)]
pub command: Vec<String>,
}
#[derive(Debug, Parser)]
pub struct LandlockCommand {
/// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR)
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
#[clap(flatten)]
pub sandbox: SandboxPermissionOption,
/// Full command args to run under landlock.
#[arg(trailing_var_arg = true)]
pub command: Vec<String>,
}
pub fn create_sandbox_policy(full_auto: bool, sandbox: SandboxPermissionOption) -> SandboxPolicy {
if full_auto {
SandboxPolicy::new_full_auto_policy()
} else {
match sandbox.permissions.map(Into::into) {
Some(sandbox_policy) => sandbox_policy,
None => SandboxPolicy::new_read_only_policy(),
}
}
}

View File

@@ -1,22 +0,0 @@
#[cfg(not(target_os = "linux"))]
fn main() -> anyhow::Result<()> {
eprintln!("codex-linux-sandbox is not supported on this platform.");
std::process::exit(1);
}
#[cfg(target_os = "linux")]
fn main() -> anyhow::Result<()> {
use clap::Parser;
use codex_cli::create_sandbox_policy;
use codex_cli::landlock;
use codex_cli::LandlockCommand;
let LandlockCommand {
full_auto,
sandbox,
command,
} = LandlockCommand::parse();
let sandbox_policy = create_sandbox_policy(full_auto, sandbox);
landlock::run_landlock(command, sandbox_policy)?;
Ok(())
}

View File

@@ -1,10 +1,13 @@
mod proto;
mod seatbelt;
use std::path::PathBuf;
use clap::ArgAction;
use clap::Parser;
use codex_cli::create_sandbox_policy;
use codex_cli::proto;
use codex_cli::seatbelt;
use codex_cli::LandlockCommand;
use codex_cli::SeatbeltCommand;
use codex_core::SandboxModeCliArg;
use codex_exec::Cli as ExecCli;
use codex_interactive::Cli as InteractiveCli;
use codex_repl::Cli as ReplCli;
use codex_tui::Cli as TuiCli;
@@ -22,7 +25,7 @@ use crate::proto::ProtoCli;
)]
struct MultitoolCli {
#[clap(flatten)]
interactive: TuiCli,
interactive: InteractiveCli,
#[clap(subcommand)]
subcommand: Option<Subcommand>,
@@ -34,6 +37,10 @@ enum Subcommand {
#[clap(visible_alias = "e")]
Exec(ExecCli),
/// Run the TUI.
#[clap(visible_alias = "t")]
Tui(TuiCli),
/// Run the REPL.
#[clap(visible_alias = "r")]
Repl(ReplCli),
@@ -56,9 +63,21 @@ struct DebugArgs {
enum DebugCommand {
/// Run a command under Seatbelt (macOS only).
Seatbelt(SeatbeltCommand),
}
/// Run a command under Landlock+seccomp (Linux only).
Landlock(LandlockCommand),
#[derive(Debug, Parser)]
struct SeatbeltCommand {
/// Writable folder for sandbox in full-auto mode (can be specified multiple times).
#[arg(long = "writable-root", short = 'w', value_name = "DIR", action = ArgAction::Append, use_value_delimiter = false)]
writable_roots: Vec<PathBuf>,
/// Configure the process restrictions for the command.
#[arg(long = "sandbox", short = 's')]
sandbox_policy: SandboxModeCliArg,
/// Full command args to run under seatbelt.
#[arg(trailing_var_arg = true)]
command: Vec<String>,
}
#[derive(Debug, Parser)]
@@ -70,11 +89,14 @@ async fn main() -> anyhow::Result<()> {
match cli.subcommand {
None => {
codex_tui::run_main(cli.interactive)?;
codex_interactive::run_main(cli.interactive).await?;
}
Some(Subcommand::Exec(exec_cli)) => {
codex_exec::run_main(exec_cli).await?;
}
Some(Subcommand::Tui(tui_cli)) => {
codex_tui::run_main(tui_cli)?;
}
Some(Subcommand::Repl(repl_cli)) => {
codex_repl::run_main(repl_cli).await?;
}
@@ -84,24 +106,10 @@ async fn main() -> anyhow::Result<()> {
Some(Subcommand::Debug(debug_args)) => match debug_args.cmd {
DebugCommand::Seatbelt(SeatbeltCommand {
command,
sandbox,
full_auto,
sandbox_policy,
writable_roots,
}) => {
let sandbox_policy = create_sandbox_policy(full_auto, sandbox);
seatbelt::run_seatbelt(command, sandbox_policy).await?;
}
#[cfg(target_os = "linux")]
DebugCommand::Landlock(LandlockCommand {
command,
sandbox,
full_auto,
}) => {
let sandbox_policy = create_sandbox_policy(full_auto, sandbox);
codex_cli::landlock::run_landlock(command, sandbox_policy)?;
}
#[cfg(not(target_os = "linux"))]
DebugCommand::Landlock(_) => {
anyhow::bail!("Landlock is only supported on Linux.");
seatbelt::run_seatbelt(command, sandbox_policy.into(), writable_roots).await?;
}
},
}

View File

@@ -1,11 +1,13 @@
use codex_core::exec::create_seatbelt_command;
use codex_core::protocol::SandboxPolicy;
use std::path::PathBuf;
pub async fn run_seatbelt(
pub(crate) async fn run_seatbelt(
command: Vec<String>,
sandbox_policy: SandboxPolicy,
writable_roots: Vec<PathBuf>,
) -> anyhow::Result<()> {
let seatbelt_command = create_seatbelt_command(command, &sandbox_policy);
let seatbelt_command = create_seatbelt_command(command, sandbox_policy, &writable_roots);
let status = tokio::process::Command::new(seatbelt_command[0].clone())
.args(&seatbelt_command[1..])
.spawn()

View File

@@ -21,7 +21,6 @@ fs-err = "3.1.0"
futures = "0.3"
mime_guess = "2.0"
patch = "0.7"
path-absolutize = "3.1.1"
rand = "0.9"
reqwest = { version = "0.12", features = ["json", "stream"] }
serde = { version = "1", features = ["derive"] }

View File

@@ -1,16 +1,14 @@
//! Standard type to use with the `--approval-mode` CLI option.
//! Available when the `cli` feature is enabled for the crate.
use std::path::PathBuf;
use clap::ArgAction;
use clap::Parser;
use clap::ValueEnum;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPermission;
use crate::protocol::SandboxPolicy;
use serde::Deserialize;
use serde::Serialize;
#[derive(Clone, Copy, Debug, ValueEnum)]
#[derive(Clone, Copy, Debug, ValueEnum, Serialize, Deserialize)]
#[value(rename_all = "kebab-case")]
pub enum ApprovalModeCliArg {
/// Run all commands without asking for user approval.
@@ -28,6 +26,19 @@ pub enum ApprovalModeCliArg {
Never,
}
#[derive(Clone, Copy, Debug, ValueEnum, Serialize, Deserialize)]
#[value(rename_all = "kebab-case")]
pub enum SandboxModeCliArg {
/// Network syscalls will be blocked
NetworkRestricted,
/// Filesystem writes will be restricted
FileWriteRestricted,
/// Network and filesystem writes will be restricted
NetworkAndFileWriteRestricted,
/// No restrictions; full "unsandboxed" mode
DangerousNoRestrictions,
}
impl From<ApprovalModeCliArg> for AskForApproval {
fn from(value: ApprovalModeCliArg) -> Self {
match value {
@@ -38,83 +49,15 @@ impl From<ApprovalModeCliArg> for AskForApproval {
}
}
#[derive(Parser, Debug)]
pub struct SandboxPermissionOption {
/// Specify this flag multiple times to specify the full set of permissions
/// to grant to Codex.
///
/// ```shell
/// codex -s disk-full-read-access \
/// -s disk-write-cwd \
/// -s disk-write-platform-user-temp-folder \
/// -s disk-write-platform-global-temp-folder
/// ```
///
/// Note disk-write-folder takes a value:
///
/// ```shell
/// -s disk-write-folder=$HOME/.pyenv/shims
/// ```
///
/// These permissions are quite broad and should be used with caution:
///
/// ```shell
/// -s disk-full-write-access
/// -s network-full-access
/// ```
#[arg(long = "sandbox-permission", short = 's', action = ArgAction::Append, value_parser = parse_sandbox_permission)]
pub permissions: Option<Vec<SandboxPermission>>,
}
/// Custom value-parser so we can keep the CLI surface small *and*
/// still handle the parameterised `disk-write-folder` case.
fn parse_sandbox_permission(raw: &str) -> std::io::Result<SandboxPermission> {
let base_path = std::env::current_dir()?;
parse_sandbox_permission_with_base_path(raw, base_path)
}
pub(crate) fn parse_sandbox_permission_with_base_path(
raw: &str,
base_path: PathBuf,
) -> std::io::Result<SandboxPermission> {
use SandboxPermission::*;
if let Some(path) = raw.strip_prefix("disk-write-folder=") {
return if path.is_empty() {
Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"--sandbox-permission disk-write-folder=<PATH> requires a non-empty PATH",
))
} else {
use path_absolutize::*;
let file = PathBuf::from(path);
let absolute_path = if file.is_relative() {
file.absolutize_from(base_path)
} else {
file.absolutize()
impl From<SandboxModeCliArg> for SandboxPolicy {
fn from(value: SandboxModeCliArg) -> Self {
match value {
SandboxModeCliArg::NetworkRestricted => SandboxPolicy::NetworkRestricted,
SandboxModeCliArg::FileWriteRestricted => SandboxPolicy::FileWriteRestricted,
SandboxModeCliArg::NetworkAndFileWriteRestricted => {
SandboxPolicy::NetworkAndFileWriteRestricted
}
.map(|path| path.into_owned())?;
Ok(DiskWriteFolder {
folder: absolute_path,
})
};
}
match raw {
"disk-full-read-access" => Ok(DiskFullReadAccess),
"disk-write-platform-user-temp-folder" => Ok(DiskWritePlatformUserTempFolder),
"disk-write-platform-global-temp-folder" => Ok(DiskWritePlatformGlobalTempFolder),
"disk-write-cwd" => Ok(DiskWriteCwd),
"disk-full-write-access" => Ok(DiskFullWriteAccess),
"network-full-access" => Ok(NetworkFullAccess),
_ => Err(
std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!(
"`{raw}` is not a recognised permission.\nRun with `--help` to see the accepted values."
),
)
),
SandboxModeCliArg::DangerousNoRestrictions => SandboxPolicy::DangerousNoRestrictions,
}
}
}

View File

@@ -3,6 +3,8 @@ use std::collections::HashSet;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::process::Stdio;
use std::sync::Arc;
use std::sync::Mutex;
@@ -861,7 +863,7 @@ async fn handle_function_call(
assess_command_safety(
&params.command,
sess.approval_policy,
&sess.sandbox_policy,
sess.sandbox_policy,
&state.approved_commands,
)
};
@@ -916,11 +918,14 @@ async fn handle_function_call(
)
.await;
let roots_snapshot = { sess.writable_roots.lock().unwrap().clone() };
let output_result = process_exec_tool_call(
params.clone(),
sandbox_type,
&roots_snapshot,
sess.ctrl_c.clone(),
&sess.sandbox_policy,
sess.sandbox_policy,
)
.await;
@@ -1003,13 +1008,16 @@ async fn handle_function_call(
)
.await;
let retry_roots = { sess.writable_roots.lock().unwrap().clone() };
// This is an escalated retry; the policy will not be
// examined and the sandbox has been set to `None`.
let retry_output_result = process_exec_tool_call(
params.clone(),
SandboxType::None,
&retry_roots,
sess.ctrl_c.clone(),
&sess.sandbox_policy,
sess.sandbox_policy,
)
.await;
@@ -1338,7 +1346,6 @@ fn convert_apply_patch_to_protocol(
ApplyPatchFileChange::Update {
unified_diff,
move_path,
new_content: _new_content,
} => FileChange::Update {
unified_diff: unified_diff.clone(),
move_path: move_path.clone(),
@@ -1393,10 +1400,28 @@ fn apply_changes_from_apply_patch(
deleted.push(path.clone());
}
ApplyPatchFileChange::Update {
unified_diff: _unified_diff,
unified_diff,
move_path,
new_content,
} => {
// TODO(mbolin): `patch` is not guaranteed to be available.
// Allegedly macOS provides it, but minimal Linux installs
// might omit it.
Command::new("patch")
.arg(path)
.arg("-p0")
.stdout(Stdio::null())
.stderr(Stdio::null())
.stdin(Stdio::piped())
.spawn()
.and_then(|mut child| {
let mut stdin = child.stdin.take().unwrap();
stdin.write_all(unified_diff.as_bytes())?;
stdin.flush()?;
// Drop stdin to send EOF.
drop(stdin);
child.wait()
})
.with_context(|| format!("Failed to apply patch to {}", path.display()))?;
if let Some(move_path) = move_path {
if let Some(parent) = move_path.parent() {
if !parent.as_os_str().is_empty() {
@@ -1408,14 +1433,11 @@ fn apply_changes_from_apply_patch(
})?;
}
}
std::fs::rename(path, move_path)
.with_context(|| format!("Failed to rename file {}", path.display()))?;
std::fs::write(move_path, new_content)?;
modified.push(move_path.clone());
deleted.push(path.clone());
} else {
std::fs::write(path, new_content)?;
modified.push(path.clone());
}
}

View File

@@ -15,7 +15,10 @@ use tokio::sync::Notify;
/// Returns the wrapped [`Codex`] **and** the `SessionInitialized` event that
/// is received as a response to the initial `ConfigureSession` submission so
/// that callers can surface the information to the UI.
pub async fn init_codex(config: Config) -> anyhow::Result<(CodexWrapper, Event, Arc<Notify>)> {
pub async fn init_codex(
config: Config,
disable_response_storage: bool,
) -> anyhow::Result<(CodexWrapper, Event, Arc<Notify>)> {
let ctrl_c = notify_on_sigint();
let codex = CodexWrapper::new(Codex::spawn(ctrl_c.clone())?);
let init_id = codex
@@ -24,7 +27,7 @@ pub async fn init_codex(config: Config) -> anyhow::Result<(CodexWrapper, Event,
instructions: config.instructions.clone(),
approval_policy: config.approval_policy,
sandbox_policy: config.sandbox_policy,
disable_response_storage: config.disable_response_storage,
disable_response_storage,
})
.await?;

View File

@@ -1,7 +1,5 @@
use crate::approval_mode_cli_arg::parse_sandbox_permission_with_base_path;
use crate::flags::OPENAI_DEFAULT_MODEL;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPermission;
use crate::protocol::SandboxPolicy;
use dirs::home_dir;
use serde::Deserialize;
@@ -13,53 +11,60 @@ use std::path::PathBuf;
const EMBEDDED_INSTRUCTIONS: &str = include_str!("../prompt.md");
/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone)]
#[derive(Deserialize, Debug, Clone)]
pub struct Config {
/// Optional override of model selection.
#[serde(default = "default_model")]
pub model: String,
/// Approval policy for executing commands.
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
/// Disable server-side response storage (sends the full conversation
/// context with every request). Currently necessary for OpenAI customers
/// who have opted into Zero Data Retention (ZDR).
pub disable_response_storage: bool,
/// System instructions.
pub instructions: Option<String>,
}
/// Base config deserialized from ~/.codex/config.toml.
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ConfigToml {
/// Optional override of model selection.
pub model: Option<String>,
/// Default approval policy for executing commands.
pub approval_policy: Option<AskForApproval>,
// The `default` attribute ensures that the field is treated as `None` when
// the key is omitted from the TOML. Without it, Serde treats the field as
// required because we supply a custom deserializer.
#[serde(default, deserialize_with = "deserialize_sandbox_permissions")]
pub sandbox_permissions: Option<Vec<SandboxPermission>>,
/// Disable server-side response storage (sends the full conversation
/// context with every request). Currently necessary for OpenAI customers
/// who have opted into Zero Data Retention (ZDR).
pub disable_response_storage: Option<bool>,
#[serde(default)]
pub approval_policy: AskForApproval,
#[serde(default)]
pub sandbox_policy: SandboxPolicy,
/// System instructions.
pub instructions: Option<String>,
}
impl ConfigToml {
/// Attempt to parse the file at `~/.codex/config.toml`. If it does not
/// exist, return a default config. Though if it exists and cannot be
/// parsed, report that to the user and force them to fix it.
/// Optional overrides for user configuration (e.g., from CLI flags).
#[derive(Default, Debug, Clone)]
pub struct ConfigOverrides {
pub model: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox_policy: Option<SandboxPolicy>,
}
impl Config {
/// Load configuration, optionally applying overrides (CLI flags). Merges
/// ~/.codex/config.toml, ~/.codex/instructions.md, embedded defaults, and
/// any values provided in `overrides` (highest precedence).
pub fn load_with_overrides(overrides: ConfigOverrides) -> std::io::Result<Self> {
let mut cfg: Config = Self::load_from_toml()?;
tracing::warn!("Config parsed from config.toml: {cfg:?}");
// Instructions: user-provided instructions.md > embedded default.
cfg.instructions =
Self::load_instructions().or_else(|| Some(EMBEDDED_INSTRUCTIONS.to_string()));
// Destructure ConfigOverrides fully to ensure all overrides are applied.
let ConfigOverrides {
model,
approval_policy,
sandbox_policy,
} = overrides;
if let Some(model) = model {
cfg.model = model;
}
if let Some(approval_policy) = approval_policy {
cfg.approval_policy = approval_policy;
}
if let Some(sandbox_policy) = sandbox_policy {
cfg.sandbox_policy = sandbox_policy;
}
Ok(cfg)
}
/// Attempt to parse the file at `~/.codex/config.toml` into a Config.
fn load_from_toml() -> std::io::Result<Self> {
let config_toml_path = codex_dir()?.join("config.toml");
match std::fs::read_to_string(&config_toml_path) {
@@ -69,7 +74,7 @@ impl ConfigToml {
}),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
tracing::info!("config.toml not found, using defaults");
Ok(Self::default())
Ok(Self::load_default_config())
}
Err(e) => {
tracing::error!("Failed to read config.toml: {e}");
@@ -77,92 +82,17 @@ impl ConfigToml {
}
}
}
}
fn deserialize_sandbox_permissions<'de, D>(
deserializer: D,
) -> Result<Option<Vec<SandboxPermission>>, D::Error>
where
D: serde::Deserializer<'de>,
{
let permissions: Option<Vec<String>> = Option::deserialize(deserializer)?;
match permissions {
Some(raw_permissions) => {
let base_path = codex_dir().map_err(serde::de::Error::custom)?;
let converted = raw_permissions
.into_iter()
.map(|raw| {
parse_sandbox_permission_with_base_path(&raw, base_path.clone())
.map_err(serde::de::Error::custom)
})
.collect::<Result<Vec<_>, D::Error>>()?;
Ok(Some(converted))
}
None => Ok(None),
}
}
/// Optional overrides for user configuration (e.g., from CLI flags).
#[derive(Default, Debug, Clone)]
pub struct ConfigOverrides {
pub model: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox_policy: Option<SandboxPolicy>,
pub disable_response_storage: Option<bool>,
}
impl Config {
/// Load configuration, optionally applying overrides (CLI flags). Merges
/// ~/.codex/config.toml, ~/.codex/instructions.md, embedded defaults, and
/// any values provided in `overrides` (highest precedence).
pub fn load_with_overrides(overrides: ConfigOverrides) -> std::io::Result<Self> {
let cfg: ConfigToml = ConfigToml::load_from_toml()?;
tracing::warn!("Config parsed from config.toml: {cfg:?}");
Ok(Self::load_from_base_config_with_overrides(cfg, overrides))
/// Meant to be used exclusively for tests: load_with_overrides() should be
/// used in all other cases.
pub fn load_default_config_for_test() -> Self {
Self::load_default_config()
}
fn load_from_base_config_with_overrides(cfg: ConfigToml, overrides: ConfigOverrides) -> Self {
// Instructions: user-provided instructions.md > embedded default.
let instructions =
Self::load_instructions().or_else(|| Some(EMBEDDED_INSTRUCTIONS.to_string()));
// Destructure ConfigOverrides fully to ensure all overrides are applied.
let ConfigOverrides {
model,
approval_policy,
sandbox_policy,
disable_response_storage,
} = overrides;
let sandbox_policy = match sandbox_policy {
Some(sandbox_policy) => sandbox_policy,
None => {
// Derive a SandboxPolicy from the permissions in the config.
match cfg.sandbox_permissions {
// Note this means the user can explicitly set permissions
// to the empty list in the config file, granting it no
// permissions whatsoever.
Some(permissions) => SandboxPolicy::from(permissions),
// Default to read only rather than completely locked down.
None => SandboxPolicy::new_read_only_policy(),
}
}
};
Self {
model: model.or(cfg.model).unwrap_or_else(default_model),
approval_policy: approval_policy
.or(cfg.approval_policy)
.unwrap_or_else(AskForApproval::default),
sandbox_policy,
disable_response_storage: disable_response_storage
.or(cfg.disable_response_storage)
.unwrap_or(false),
instructions,
}
fn load_default_config() -> Self {
// Load from an empty string to exercise #[serde(default)] to
// get the default values for each field.
toml::from_str::<Self>("").expect("empty string should parse as TOML")
}
fn load_instructions() -> Option<String> {
@@ -170,15 +100,6 @@ impl Config {
p.push("instructions.md");
std::fs::read_to_string(&p).ok()
}
/// Meant to be used exclusively for tests: `load_with_overrides()` should
/// be used in all other cases.
pub fn load_default_config_for_test() -> Self {
Self::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
)
}
}
fn default_model() -> String {
@@ -205,60 +126,3 @@ pub fn log_dir() -> std::io::Result<PathBuf> {
p.push("log");
Ok(p)
}
#[cfg(test)]
mod tests {
use super::*;
/// Verify that the `sandbox_permissions` field on `ConfigToml` correctly
/// differentiates between a value that is completely absent in the
/// provided TOML (i.e. `None`) and one that is explicitly specified as an
/// empty array (i.e. `Some(vec![])`). This ensures that downstream logic
/// that treats these two cases differently (default read-only policy vs a
/// fully locked-down sandbox) continues to function.
#[test]
fn test_sandbox_permissions_none_vs_empty_vec() {
// Case 1: `sandbox_permissions` key is *absent* from the TOML source.
let toml_source_without_key = "";
let cfg_without_key: ConfigToml = toml::from_str(toml_source_without_key)
.expect("TOML deserialization without key should succeed");
assert!(cfg_without_key.sandbox_permissions.is_none());
// Case 2: `sandbox_permissions` is present but set to an *empty array*.
let toml_source_with_empty = "sandbox_permissions = []";
let cfg_with_empty: ConfigToml = toml::from_str(toml_source_with_empty)
.expect("TOML deserialization with empty array should succeed");
assert_eq!(Some(vec![]), cfg_with_empty.sandbox_permissions);
// Case 3: `sandbox_permissions` contains a non-empty list of valid values.
let toml_source_with_values = r#"
sandbox_permissions = ["disk-full-read-access", "network-full-access"]
"#;
let cfg_with_values: ConfigToml = toml::from_str(toml_source_with_values)
.expect("TOML deserialization with valid permissions should succeed");
assert_eq!(
Some(vec![
SandboxPermission::DiskFullReadAccess,
SandboxPermission::NetworkFullAccess
]),
cfg_with_values.sandbox_permissions
);
}
/// Deserializing a TOML string containing an *invalid* permission should
/// fail with a helpful error rather than silently defaulting or
/// succeeding.
#[test]
fn test_sandbox_permissions_illegal_value() {
let toml_bad = r#"sandbox_permissions = ["not-a-real-permission"]"#;
let err = toml::from_str::<ConfigToml>(toml_bad)
.expect_err("Deserialization should fail for invalid permission");
// Make sure the error message contains the invalid value so users have
// useful feedback.
let msg = err.to_string();
assert!(msg.contains("not-a-real-permission"));
}
}

View File

@@ -1,6 +1,7 @@
use std::io;
#[cfg(target_family = "unix")]
use std::os::unix::process::ExitStatusExt;
use std::path::PathBuf;
use std::process::ExitStatus;
use std::process::Stdio;
use std::sync::Arc;
@@ -32,13 +33,7 @@ const DEFAULT_TIMEOUT_MS: u64 = 10_000;
const SIGKILL_CODE: i32 = 9;
const TIMEOUT_CODE: i32 = 64;
const MACOS_SEATBELT_BASE_POLICY: &str = include_str!("seatbelt_base_policy.sbpl");
/// When working with `sandbox-exec`, only consider `sandbox-exec` in `/usr/bin`
/// to defend against an attacker trying to inject a malicious version on the
/// PATH. If /usr/bin/sandbox-exec has been tampered with, then the attacker
/// already has root access.
const MACOS_PATH_TO_SEATBELT_EXECUTABLE: &str = "/usr/bin/sandbox-exec";
const MACOS_SEATBELT_READONLY_POLICY: &str = include_str!("seatbelt_readonly_policy.sbpl");
#[derive(Deserialize, Debug, Clone)]
pub struct ExecParams {
@@ -66,17 +61,19 @@ pub enum SandboxType {
#[cfg(target_os = "linux")]
async fn exec_linux(
params: ExecParams,
writable_roots: &[PathBuf],
ctrl_c: Arc<Notify>,
sandbox_policy: &SandboxPolicy,
sandbox_policy: SandboxPolicy,
) -> Result<RawExecToolCallOutput> {
crate::linux::exec_linux(params, ctrl_c, sandbox_policy).await
crate::linux::exec_linux(params, writable_roots, ctrl_c, sandbox_policy).await
}
#[cfg(not(target_os = "linux"))]
async fn exec_linux(
_params: ExecParams,
_writable_roots: &[PathBuf],
_ctrl_c: Arc<Notify>,
_sandbox_policy: &SandboxPolicy,
_sandbox_policy: SandboxPolicy,
) -> Result<RawExecToolCallOutput> {
Err(CodexErr::Io(io::Error::new(
io::ErrorKind::InvalidInput,
@@ -87,8 +84,9 @@ async fn exec_linux(
pub async fn process_exec_tool_call(
params: ExecParams,
sandbox_type: SandboxType,
writable_roots: &[PathBuf],
ctrl_c: Arc<Notify>,
sandbox_policy: &SandboxPolicy,
sandbox_policy: SandboxPolicy,
) -> Result<ExecToolCallOutput> {
let start = Instant::now();
@@ -100,7 +98,7 @@ pub async fn process_exec_tool_call(
workdir,
timeout_ms,
} = params;
let seatbelt_command = create_seatbelt_command(command, sandbox_policy);
let seatbelt_command = create_seatbelt_command(command, sandbox_policy, writable_roots);
exec(
ExecParams {
command: seatbelt_command,
@@ -111,7 +109,9 @@ pub async fn process_exec_tool_call(
)
.await
}
SandboxType::LinuxSeccomp => exec_linux(params, ctrl_c, sandbox_policy).await,
SandboxType::LinuxSeccomp => {
exec_linux(params, writable_roots, ctrl_c, sandbox_policy).await
}
};
let duration = start.elapsed();
match raw_output_result {
@@ -156,61 +156,41 @@ pub async fn process_exec_tool_call(
pub fn create_seatbelt_command(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
sandbox_policy: SandboxPolicy,
writable_roots: &[PathBuf],
) -> Vec<String> {
let (file_write_policy, extra_cli_args) = {
if sandbox_policy.has_full_disk_write_access() {
// Allegedly, this is more permissive than `(allow file-write*)`.
(
r#"(allow file-write* (regex #"^/"))"#.to_string(),
Vec::<String>::new(),
)
} else {
let writable_roots = sandbox_policy.get_writable_roots();
let (writable_folder_policies, cli_args): (Vec<String>, Vec<String>) = writable_roots
.iter()
.enumerate()
.map(|(index, root)| {
let param_name = format!("WRITABLE_ROOT_{index}");
let policy: String = format!("(subpath (param \"{param_name}\"))");
let cli_arg = format!("-D{param_name}={}", root.to_string_lossy());
(policy, cli_arg)
})
.unzip();
if writable_folder_policies.is_empty() {
("".to_string(), Vec::<String>::new())
} else {
let file_write_policy = format!(
"(allow file-write*\n{}\n)",
writable_folder_policies.join(" ")
);
(file_write_policy, cli_args)
}
}
};
let file_read_policy = if sandbox_policy.has_full_disk_read_access() {
"; allow read-only file operations\n(allow file-read*)"
} else {
""
};
let (policies, cli_args): (Vec<String>, Vec<String>) = writable_roots
.iter()
.enumerate()
.map(|(index, root)| {
let param_name = format!("WRITABLE_ROOT_{index}");
let policy: String = format!("(subpath (param \"{param_name}\"))");
let cli_arg = format!("-D{param_name}={}", root.to_string_lossy());
(policy, cli_arg)
})
.unzip();
// TODO(ragona): The seatbelt policy should reflect the SandboxPolicy that
// is passed, but everything is currently hardcoded to use
// MACOS_SEATBELT_READONLY_POLICY.
// TODO(mbolin): apply_patch calls must also honor the SandboxPolicy.
let network_policy = if sandbox_policy.has_full_network_access() {
"(allow network-outbound)\n(allow network-inbound)\n(allow system-socket)"
if !matches!(sandbox_policy, SandboxPolicy::NetworkRestricted) {
tracing::error!("specified sandbox policy {sandbox_policy:?} will not be honroed");
}
let full_policy = if policies.is_empty() {
MACOS_SEATBELT_READONLY_POLICY.to_string()
} else {
""
let scoped_write_policy = format!("(allow file-write*\n{}\n)", policies.join(" "));
format!("{MACOS_SEATBELT_READONLY_POLICY}\n{scoped_write_policy}")
};
let full_policy = format!(
"{MACOS_SEATBELT_BASE_POLICY}\n{file_read_policy}\n{file_write_policy}\n{network_policy}"
);
let mut seatbelt_command: Vec<String> = vec![
MACOS_PATH_TO_SEATBELT_EXECUTABLE.to_string(),
"sandbox-exec".to_string(),
"-p".to_string(),
full_policy,
full_policy.to_string(),
];
seatbelt_command.extend(extra_cli_args);
seatbelt_command.extend(cli_args);
seatbelt_command.push("--".to_string());
seatbelt_command.extend(command);
seatbelt_command

View File

@@ -14,7 +14,7 @@ pub mod exec;
mod flags;
mod is_safe_command;
#[cfg(target_os = "linux")]
pub mod linux;
mod linux;
mod models;
pub mod protocol;
mod safety;
@@ -28,4 +28,4 @@ mod approval_mode_cli_arg;
#[cfg(feature = "cli")]
pub use approval_mode_cli_arg::ApprovalModeCliArg;
#[cfg(feature = "cli")]
pub use approval_mode_cli_arg::SandboxPermissionOption;
pub use approval_mode_cli_arg::SandboxModeCliArg;

View File

@@ -32,13 +32,14 @@ use tokio::sync::Notify;
pub async fn exec_linux(
params: ExecParams,
writable_roots: &[PathBuf],
ctrl_c: Arc<Notify>,
sandbox_policy: &SandboxPolicy,
sandbox_policy: SandboxPolicy,
) -> Result<RawExecToolCallOutput> {
// Allow READ on /
// Allow WRITE on /dev/null
let ctrl_c_copy = ctrl_c.clone();
let sandbox_policy = sandbox_policy.clone();
let writable_roots_copy = writable_roots.to_vec();
// Isolate thread to run the sandbox from
let tool_call_output = std::thread::spawn(move || {
@@ -48,7 +49,14 @@ pub async fn exec_linux(
.expect("Failed to create runtime");
rt.block_on(async {
apply_sandbox_policy_to_current_thread(sandbox_policy)?;
if sandbox_policy.is_network_restricted() {
install_network_seccomp_filter_on_current_thread()?;
}
if sandbox_policy.is_file_write_restricted() {
install_filesystem_landlock_rules_on_current_thread(writable_roots_copy)?;
}
exec(params, ctrl_c_copy).await
})
})
@@ -64,30 +72,6 @@ pub async fn exec_linux(
}
}
/// Apply sandbox policies inside this thread so only the child inherits
/// them, not the entire CLI process.
pub fn apply_sandbox_policy_to_current_thread(sandbox_policy: SandboxPolicy) -> Result<()> {
if !sandbox_policy.has_full_network_access() {
install_network_seccomp_filter_on_current_thread()?;
}
if !sandbox_policy.has_full_disk_write_access() {
let writable_roots = sandbox_policy.get_writable_roots();
install_filesystem_landlock_rules_on_current_thread(writable_roots)?;
}
// TODO(ragona): Add appropriate restrictions if
// `sandbox_policy.has_full_disk_read_access()` is `false`.
Ok(())
}
/// Installs Landlock file-system rules on the current thread allowing read
/// access to the entire file-system while restricting write access to
/// `/dev/null` and the provided list of `writable_roots`.
///
/// # Errors
/// Returns [`CodexErr::Sandbox`] variants when the ruleset fails to apply.
fn install_filesystem_landlock_rules_on_current_thread(writable_roots: Vec<PathBuf>) -> Result<()> {
let abi = ABI::V5;
let access_rw = AccessFs::from_all(abi);
@@ -114,8 +98,6 @@ fn install_filesystem_landlock_rules_on_current_thread(writable_roots: Vec<PathB
Ok(())
}
/// Installs a seccomp filter that blocks outbound network access except for
/// AF_UNIX domain sockets.
fn install_network_seccomp_filter_on_current_thread() -> std::result::Result<(), SandboxErr> {
// Build rule map.
let mut rules: BTreeMap<i64, Vec<SeccompRule>> = BTreeMap::new();
@@ -192,14 +174,15 @@ mod tests_linux {
workdir: None,
timeout_ms: Some(timeout_ms),
};
let sandbox_policy =
SandboxPolicy::new_read_only_policy_with_writable_roots(writable_roots);
let ctrl_c = Arc::new(Notify::new());
let res =
process_exec_tool_call(params, SandboxType::LinuxSeccomp, ctrl_c, &sandbox_policy)
.await
.unwrap();
let res = process_exec_tool_call(
params,
SandboxType::LinuxSeccomp,
writable_roots,
Arc::new(Notify::new()),
SandboxPolicy::NetworkAndFileWriteRestricted,
)
.await
.unwrap();
if res.exit_code != 0 {
println!("stdout:\n{}", res.stdout);
@@ -242,9 +225,7 @@ mod tests_linux {
&format!("echo blah > {}", file_path.to_string_lossy()),
],
&[tmpdir.path().to_path_buf()],
// We have seen timeouts when running this test in CI on GitHub,
// so we are using a generous timeout until we can diagnose further.
1_000,
500,
)
.await;
}
@@ -268,11 +249,14 @@ mod tests_linux {
timeout_ms: Some(2_000),
};
let sandbox_policy = SandboxPolicy::new_read_only_policy();
let ctrl_c = Arc::new(Notify::new());
let result =
process_exec_tool_call(params, SandboxType::LinuxSeccomp, ctrl_c, &sandbox_policy)
.await;
let result = process_exec_tool_call(
params,
SandboxType::LinuxSeccomp,
&[],
Arc::new(Notify::new()),
SandboxPolicy::NetworkRestricted,
)
.await;
let (exit_code, stdout, stderr) = match result {
Ok(output) => (output.exit_code, output.stdout, output.stderr),

View File

@@ -93,159 +93,44 @@ pub enum AskForApproval {
}
/// Determines execution restrictions for model shell commands
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SandboxPolicy {
permissions: Vec<SandboxPermission>,
}
impl From<Vec<SandboxPermission>> for SandboxPolicy {
fn from(permissions: Vec<SandboxPermission>) -> Self {
Self { permissions }
}
pub enum SandboxPolicy {
/// Network syscalls will be blocked
NetworkRestricted,
/// Filesystem writes will be restricted
FileWriteRestricted,
/// Network and filesystem writes will be restricted
#[default]
NetworkAndFileWriteRestricted,
/// No restrictions; full "unsandboxed" mode
DangerousNoRestrictions,
}
impl SandboxPolicy {
pub fn new_read_only_policy() -> Self {
Self {
permissions: vec![SandboxPermission::DiskFullReadAccess],
pub fn is_dangerous(&self) -> bool {
match self {
SandboxPolicy::NetworkRestricted => false,
SandboxPolicy::FileWriteRestricted => false,
SandboxPolicy::NetworkAndFileWriteRestricted => false,
SandboxPolicy::DangerousNoRestrictions => true,
}
}
pub fn new_read_only_policy_with_writable_roots(writable_roots: &[PathBuf]) -> Self {
let mut permissions = Self::new_read_only_policy().permissions;
permissions.extend(writable_roots.iter().map(|folder| {
SandboxPermission::DiskWriteFolder {
folder: folder.clone(),
}
}));
Self { permissions }
pub fn is_network_restricted(&self) -> bool {
matches!(
self,
SandboxPolicy::NetworkRestricted | SandboxPolicy::NetworkAndFileWriteRestricted
)
}
pub fn new_full_auto_policy() -> Self {
Self {
permissions: vec![
SandboxPermission::DiskFullReadAccess,
SandboxPermission::DiskWritePlatformUserTempFolder,
SandboxPermission::DiskWriteCwd,
],
}
}
pub fn has_full_disk_read_access(&self) -> bool {
self.permissions
.iter()
.any(|perm| matches!(perm, SandboxPermission::DiskFullReadAccess))
}
pub fn has_full_disk_write_access(&self) -> bool {
self.permissions
.iter()
.any(|perm| matches!(perm, SandboxPermission::DiskFullWriteAccess))
}
pub fn has_full_network_access(&self) -> bool {
self.permissions
.iter()
.any(|perm| matches!(perm, SandboxPermission::NetworkFullAccess))
}
pub fn get_writable_roots(&self) -> Vec<PathBuf> {
let mut writable_roots = Vec::<PathBuf>::new();
for perm in &self.permissions {
use SandboxPermission::*;
match perm {
DiskWritePlatformUserTempFolder => {
if cfg!(target_os = "macos") {
if let Some(tempdir) = std::env::var_os("TMPDIR") {
// Likely something that starts with /var/folders/...
let tmpdir_path = PathBuf::from(&tempdir);
if tmpdir_path.is_absolute() {
writable_roots.push(tmpdir_path.clone());
match tmpdir_path.canonicalize() {
Ok(canonicalized) => {
// Likely something that starts with /private/var/folders/...
if canonicalized != tmpdir_path {
writable_roots.push(canonicalized);
}
}
Err(e) => {
tracing::error!("Failed to canonicalize TMPDIR: {e}");
}
}
} else {
tracing::error!("TMPDIR is not an absolute path: {tempdir:?}");
}
}
}
// For Linux, should this be XDG_RUNTIME_DIR, /run/user/<uid>, or something else?
}
DiskWritePlatformGlobalTempFolder => {
if cfg!(unix) {
writable_roots.push(PathBuf::from("/tmp"));
}
}
DiskWriteCwd => match std::env::current_dir() {
Ok(cwd) => writable_roots.push(cwd),
Err(err) => {
tracing::error!("Failed to get current working directory: {err}");
}
},
DiskWriteFolder { folder } => {
writable_roots.push(folder.clone());
}
DiskFullReadAccess | NetworkFullAccess => {}
DiskFullWriteAccess => {
// Currently, we expect callers to only invoke this method
// after verifying has_full_disk_write_access() is false.
}
}
}
writable_roots
}
pub fn is_unrestricted(&self) -> bool {
self.has_full_disk_read_access()
&& self.has_full_disk_write_access()
&& self.has_full_network_access()
pub fn is_file_write_restricted(&self) -> bool {
matches!(
self,
SandboxPolicy::FileWriteRestricted | SandboxPolicy::NetworkAndFileWriteRestricted
)
}
}
/// Permissions that should be granted to the sandbox in which the agent
/// operates.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum SandboxPermission {
/// Is allowed to read all files on disk.
DiskFullReadAccess,
/// Is allowed to write to the operating system's temp dir that
/// is restricted to the user the agent is running as. For
/// example, on macOS, this is generally something under
/// `/var/folders` as opposed to `/tmp`.
DiskWritePlatformUserTempFolder,
/// Is allowed to write to the operating system's shared temp
/// dir. On UNIX, this is generally `/tmp`.
DiskWritePlatformGlobalTempFolder,
/// Is allowed to write to the current working directory (in practice, this
/// is the `cwd` where `codex` was spawned).
DiskWriteCwd,
/// Is allowed to the specified folder. `PathBuf` must be an
/// absolute path, though it is up to the caller to canonicalize
/// it if the path contains symlinks.
DiskWriteFolder { folder: PathBuf },
/// Is allowed to write to any file on disk.
DiskFullWriteAccess,
/// Can make arbitrary network requests.
NetworkFullAccess,
}
/// User input
#[non_exhaustive]
#[derive(Debug, Clone, Deserialize, Serialize)]

View File

@@ -65,7 +65,7 @@ pub fn assess_patch_safety(
pub fn assess_command_safety(
command: &[String],
approval_policy: AskForApproval,
sandbox_policy: &SandboxPolicy,
sandbox_policy: SandboxPolicy,
approved: &HashSet<Vec<String>>,
) -> SafetyCheck {
let approve_without_sandbox = || SafetyCheck::AutoApprove {
@@ -81,10 +81,11 @@ pub fn assess_command_safety(
}
// Command was not known-safe or allow-listed
if sandbox_policy.is_unrestricted() {
approve_without_sandbox()
} else {
match get_platform_sandbox() {
match sandbox_policy {
// Only the dangerous sandbox policy will run arbitrary commands outside a sandbox
SandboxPolicy::DangerousNoRestrictions => approve_without_sandbox(),
// All other policies try to run the command in a sandbox if it is available
_ => match get_platform_sandbox() {
// We have a sandbox, so we can approve the command in all modes
Some(sandbox_type) => SafetyCheck::AutoApprove { sandbox_type },
None => {
@@ -98,7 +99,7 @@ pub fn assess_command_safety(
_ => SafetyCheck::AskUser,
}
}
}
},
}
}

View File

@@ -6,6 +6,9 @@
; start with closed-by-default
(deny default)
; allow read-only file operations
(allow file-read*)
; child processes inherit the policy of their parent
(allow process-exec)
(allow process-fork)

View File

@@ -55,7 +55,7 @@ async fn spawn_codex() -> Codex {
model: config.model,
instructions: None,
approval_policy: config.approval_policy,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
sandbox_policy: SandboxPolicy::NetworkAndFileWriteRestricted,
disable_response_storage: false,
},
})

View File

@@ -95,7 +95,7 @@ async fn keeps_previous_response_id_between_tasks() {
model: config.model,
instructions: None,
approval_policy: config.approval_policy,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
sandbox_policy: SandboxPolicy::NetworkAndFileWriteRestricted,
disable_response_storage: false,
},
})

View File

@@ -78,7 +78,7 @@ async fn retries_on_early_close() {
model: config.model,
instructions: None,
approval_policy: config.approval_policy,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
sandbox_policy: SandboxPolicy::NetworkAndFileWriteRestricted,
disable_response_storage: false,
},
})

View File

@@ -1,6 +1,6 @@
[package]
name = "codex-exec"
version = { workspace = true }
version = "0.1.0"
edition = "2021"
[[bin]]
@@ -13,11 +13,8 @@ path = "src/lib.rs"
[dependencies]
anyhow = "1"
chrono = "0.4.40"
clap = { version = "4", features = ["derive"] }
codex-core = { path = "../core", features = ["cli"] }
owo-colors = "4.2.0"
shlex = "1.3.0"
tokio = { version = "1", features = [
"io-std",
"macros",

View File

@@ -1,9 +1,10 @@
use clap::Parser;
use clap::ValueEnum;
use codex_core::SandboxPermissionOption;
use codex_core::SandboxModeCliArg;
use std::path::PathBuf;
#[derive(Parser, Debug)]
/// Command-line interface for the non-interactive `codex-exec` agent.
///
#[derive(Parser, Debug, Clone)]
#[command(version)]
pub struct Cli {
/// Optional image(s) to attach to the initial prompt.
@@ -14,12 +15,11 @@ pub struct Cli {
#[arg(long, short = 'm')]
pub model: Option<String>,
/// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR)
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
#[clap(flatten)]
pub sandbox: SandboxPermissionOption,
/// Configure the process restrictions when a command is executed.
///
/// Uses OS-specific sandboxing tools; Seatbelt on OSX, landlock+seccomp on Linux.
#[arg(long = "sandbox", short = 's')]
pub sandbox_policy: Option<SandboxModeCliArg>,
/// Allow running Codex outside a Git repository.
#[arg(long = "skip-git-repo-check", default_value_t = false)]
@@ -29,19 +29,38 @@ pub struct Cli {
#[arg(long = "disable-response-storage", default_value_t = false)]
pub disable_response_storage: bool,
/// Specifies color settings for use in the output.
#[arg(long = "color", value_enum, default_value_t = Color::Auto)]
pub color: Color,
/// Initial instructions for the agent.
pub prompt: String,
pub prompt: Option<String>,
}
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, ValueEnum)]
#[value(rename_all = "kebab-case")]
pub enum Color {
Always,
Never,
#[default]
Auto,
impl Cli {
/// This is effectively the opposite of Clap; we want the ability to take
/// a structured `Cli` object, and then pass it to a binary as argv[].
pub fn to_args(&self) -> Vec<String> {
let mut args = Vec::new();
for img in &self.images {
args.push("--image".into());
args.push(img.to_string_lossy().into_owned());
}
if let Some(model) = &self.model {
args.push("--model".into());
args.push(model.clone());
}
if self.skip_git_repo_check {
args.push("--skip-git-repo-check".into());
}
if self.disable_response_storage {
args.push("--disable-response-storage".into());
}
if let Some(prompt) = &self.prompt {
args.push(prompt.clone());
}
args
}
}

View File

@@ -1,307 +0,0 @@
use chrono::Utc;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::FileChange;
use owo_colors::OwoColorize;
use owo_colors::Style;
use shlex::try_join;
use std::collections::HashMap;
/// This should be configurable. When used in CI, users may not want to impose
/// a limit so they can see the full transcript.
const MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL: usize = 20;
pub(crate) struct EventProcessor {
call_id_to_command: HashMap<String, ExecCommandBegin>,
call_id_to_patch: HashMap<String, PatchApplyBegin>,
// To ensure that --color=never is respected, ANSI escapes _must_ be added
// using .style() with one of these fields. If you need a new style, add a
// new field here.
bold: Style,
dimmed: Style,
magenta: Style,
red: Style,
green: Style,
}
impl EventProcessor {
pub(crate) fn create_with_ansi(with_ansi: bool) -> Self {
let call_id_to_command = HashMap::new();
let call_id_to_patch = HashMap::new();
if with_ansi {
Self {
call_id_to_command,
call_id_to_patch,
bold: Style::new().bold(),
dimmed: Style::new().dimmed(),
magenta: Style::new().magenta(),
red: Style::new().red(),
green: Style::new().green(),
}
} else {
Self {
call_id_to_command,
call_id_to_patch,
bold: Style::new(),
dimmed: Style::new(),
magenta: Style::new(),
red: Style::new(),
green: Style::new(),
}
}
}
}
struct ExecCommandBegin {
command: Vec<String>,
start_time: chrono::DateTime<Utc>,
}
struct PatchApplyBegin {
start_time: chrono::DateTime<Utc>,
auto_approved: bool,
}
macro_rules! ts_println {
($($arg:tt)*) => {{
let now = Utc::now();
let formatted = now.format("%Y-%m-%dT%H:%M:%S").to_string();
print!("[{}] ", formatted);
println!($($arg)*);
}};
}
impl EventProcessor {
pub(crate) fn process_event(&mut self, event: Event) {
let Event { id, msg } = event;
match msg {
EventMsg::Error { message } => {
let prefix = "ERROR:".style(self.red);
ts_println!("{prefix} {message}");
}
EventMsg::BackgroundEvent { message } => {
ts_println!("{}", message.style(self.dimmed));
}
EventMsg::TaskStarted => {
let msg = format!("Task started: {id}");
ts_println!("{}", msg.style(self.dimmed));
}
EventMsg::TaskComplete => {
let msg = format!("Task complete: {id}");
ts_println!("{}", msg.style(self.bold));
}
EventMsg::AgentMessage { message } => {
let prefix = "Agent message:".style(self.bold);
ts_println!("{prefix} {message}");
}
EventMsg::ExecCommandBegin {
call_id,
command,
cwd,
} => {
self.call_id_to_command.insert(
call_id.clone(),
ExecCommandBegin {
command: command.clone(),
start_time: Utc::now(),
},
);
ts_println!(
"{} {} in {}",
"exec".style(self.magenta),
escape_command(&command).style(self.bold),
cwd,
);
}
EventMsg::ExecCommandEnd {
call_id,
stdout,
stderr,
exit_code,
} => {
let exec_command = self.call_id_to_command.remove(&call_id);
let (duration, call) = if let Some(ExecCommandBegin {
command,
start_time,
}) = exec_command
{
(
format_duration(start_time),
format!("{}", escape_command(&command).style(self.bold)),
)
} else {
("".to_string(), format!("exec('{call_id}')"))
};
let output = if exit_code == 0 { stdout } else { stderr };
let truncated_output = output
.lines()
.take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL)
.collect::<Vec<_>>()
.join("\n");
match exit_code {
0 => {
let title = format!("{call} succeded{duration}:");
ts_println!("{}", title.style(self.green));
}
_ => {
let title = format!("{call} exited {exit_code}{duration}:");
ts_println!("{}", title.style(self.red));
}
}
println!("{}", truncated_output.style(self.dimmed));
}
EventMsg::PatchApplyBegin {
call_id,
auto_approved,
changes,
} => {
// Store metadata so we can calculate duration later when we
// receive the corresponding PatchApplyEnd event.
self.call_id_to_patch.insert(
call_id.clone(),
PatchApplyBegin {
start_time: Utc::now(),
auto_approved,
},
);
ts_println!(
"{} auto_approved={}:",
"apply_patch".style(self.magenta),
auto_approved,
);
// Pretty-print the patch summary with colored diff markers so
// its easy to scan in the terminal output.
for (path, change) in changes.iter() {
match change {
FileChange::Add { content } => {
let header = format!(
"{} {}",
format_file_change(change),
path.to_string_lossy()
);
println!("{}", header.style(self.magenta));
for line in content.lines() {
println!("{}", line.style(self.green));
}
}
FileChange::Delete => {
let header = format!(
"{} {}",
format_file_change(change),
path.to_string_lossy()
);
println!("{}", header.style(self.magenta));
}
FileChange::Update {
unified_diff,
move_path,
} => {
let header = if let Some(dest) = move_path {
format!(
"{} {} -> {}",
format_file_change(change),
path.to_string_lossy(),
dest.to_string_lossy()
)
} else {
format!("{} {}", format_file_change(change), path.to_string_lossy())
};
println!("{}", header.style(self.magenta));
// Colorize diff lines. We keep file header lines
// (--- / +++) without extra coloring so they are
// still readable.
for diff_line in unified_diff.lines() {
if diff_line.starts_with('+') && !diff_line.starts_with("+++") {
println!("{}", diff_line.style(self.green));
} else if diff_line.starts_with('-')
&& !diff_line.starts_with("---")
{
println!("{}", diff_line.style(self.red));
} else {
println!("{diff_line}");
}
}
}
}
}
}
EventMsg::PatchApplyEnd {
call_id,
stdout,
stderr,
success,
} => {
let patch_begin = self.call_id_to_patch.remove(&call_id);
// Compute duration and summary label similar to exec commands.
let (duration, label) = if let Some(PatchApplyBegin {
start_time,
auto_approved,
}) = patch_begin
{
(
format_duration(start_time),
format!("apply_patch(auto_approved={})", auto_approved),
)
} else {
(String::new(), format!("apply_patch('{call_id}')"))
};
let (exit_code, output, title_style) = if success {
(0, stdout, self.green)
} else {
(1, stderr, self.red)
};
let title = format!("{label} exited {exit_code}{duration}:");
ts_println!("{}", title.style(title_style));
for line in output.lines() {
println!("{}", line.style(self.dimmed));
}
}
EventMsg::ExecApprovalRequest { .. } => {
// Should we exit?
}
EventMsg::ApplyPatchApprovalRequest { .. } => {
// Should we exit?
}
_ => {
// Ignore event.
}
}
}
}
fn escape_command(command: &[String]) -> String {
try_join(command.iter().map(|s| s.as_str())).unwrap_or_else(|_| command.join(" "))
}
fn format_file_change(change: &FileChange) -> &'static str {
match change {
FileChange::Add { .. } => "A",
FileChange::Delete => "D",
FileChange::Update {
move_path: Some(_), ..
} => "R",
FileChange::Update {
move_path: None, ..
} => "M",
}
}
/// Format the elapsed time since `start_time` as e.g. " in 250ms" or
/// " in 1.25s" (note the leading space, callers append this to a title).
fn format_duration(start_time: chrono::DateTime<Utc>) -> String {
    let ms = Utc::now().signed_duration_since(start_time).num_milliseconds();
    if ms < 1000 {
        format!(" in {ms}ms")
    } else {
        format!(" in {:.2}s", ms as f64 / 1000.0)
    }
}

View File

@@ -1,7 +1,4 @@
mod cli;
mod event_processor;
use std::io::IsTerminal;
use std::sync::Arc;
pub use cli::Cli;
@@ -11,79 +8,58 @@ use codex_core::config::ConfigOverrides;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::FileChange;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_core::util::is_inside_git_repo;
use event_processor::EventProcessor;
use owo_colors::OwoColorize;
use owo_colors::Style;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing_subscriber::EnvFilter;
pub async fn run_main(cli: Cli) -> anyhow::Result<()> {
let Cli {
images,
model,
full_auto,
sandbox,
skip_git_repo_check,
disable_response_storage,
color,
prompt,
} = cli;
let (stdout_with_ansi, stderr_with_ansi) = match color {
cli::Color::Always => (true, true),
cli::Color::Never => (false, false),
cli::Color::Auto => (
std::io::stdout().is_terminal(),
std::io::stderr().is_terminal(),
),
};
assert_api_key(stderr_with_ansi);
if !skip_git_repo_check && !is_inside_git_repo() {
eprintln!("Not inside a Git repo and --skip-git-repo-check was not specified.");
std::process::exit(1);
}
// TODO(mbolin): Take a more thoughtful approach to logging.
let default_level = "error";
let allow_ansi = true;
let _ = tracing_subscriber::fmt()
.with_env_filter(
EnvFilter::try_from_default_env()
.or_else(|_| EnvFilter::try_new(default_level))
.unwrap(),
)
.with_ansi(stderr_with_ansi)
.with_ansi(allow_ansi)
.with_writer(std::io::stderr)
.try_init();
let sandbox_policy = if full_auto {
Some(SandboxPolicy::new_full_auto_policy())
} else {
sandbox.permissions.clone().map(Into::into)
};
let Cli {
images,
model,
sandbox_policy,
skip_git_repo_check,
disable_response_storage,
prompt,
..
} = cli;
if !skip_git_repo_check && !is_inside_git_repo() {
eprintln!("Not inside a Git repo and --skip-git-repo-check was not specified.");
std::process::exit(1);
} else if images.is_empty() && prompt.is_none() {
eprintln!("No images or prompt specified.");
std::process::exit(1);
}
// Load configuration and determine approval policy
let overrides = ConfigOverrides {
model,
model: model.clone(),
// This CLI is intended to be headless and has no affordances for asking
// the user for approval.
approval_policy: Some(AskForApproval::Never),
sandbox_policy,
disable_response_storage: if disable_response_storage {
Some(true)
} else {
None
},
sandbox_policy: sandbox_policy.map(Into::into),
};
let config = Config::load_with_overrides(overrides)?;
let (codex_wrapper, event, ctrl_c) = codex_wrapper::init_codex(config).await?;
let (codex_wrapper, event, ctrl_c) =
codex_wrapper::init_codex(config, disable_response_storage).await?;
let codex = Arc::new(codex_wrapper);
info!("Codex initialized with event: {event:?}");
@@ -109,6 +85,7 @@ pub async fn run_main(cli: Cli) -> anyhow::Result<()> {
res = codex.next_event() => match res {
Ok(event) => {
debug!("Received event: {event:?}");
process_event(&event);
if let Err(e) = tx.send(event) {
error!("Error sending event: {e:?}");
break;
@@ -124,8 +101,8 @@ pub async fn run_main(cli: Cli) -> anyhow::Result<()> {
});
}
// Send images first, if any.
if !images.is_empty() {
// Send images first.
let items: Vec<InputItem> = images
.into_iter()
.map(|path| InputItem::LocalImage { path })
@@ -139,56 +116,101 @@ pub async fn run_main(cli: Cli) -> anyhow::Result<()> {
}
}
// Send the prompt.
let items: Vec<InputItem> = vec![InputItem::Text { text: prompt }];
let initial_prompt_task_id = codex.submit(Op::UserInput { items }).await?;
info!("Sent prompt with event ID: {initial_prompt_task_id}");
// Run the loop until the task is complete.
let mut event_processor = EventProcessor::create_with_ansi(stdout_with_ansi);
while let Some(event) = rx.recv().await {
let last_event =
event.id == initial_prompt_task_id && matches!(event.msg, EventMsg::TaskComplete);
event_processor.process_event(event);
if last_event {
break;
if let Some(prompt) = prompt {
// Send the prompt.
let items: Vec<InputItem> = vec![InputItem::Text { text: prompt }];
let initial_prompt_task_id = codex.submit(Op::UserInput { items }).await?;
info!("Sent prompt with event ID: {initial_prompt_task_id}");
while let Some(event) = rx.recv().await {
if event.id == initial_prompt_task_id && matches!(event.msg, EventMsg::TaskComplete) {
break;
}
}
}
Ok(())
}
/// If a valid API key is not present in the environment, print an error to
/// stderr and exits with 1; otherwise, does nothing.
fn assert_api_key(stderr_with_ansi: bool) {
if !has_api_key() {
let (msg_style, var_style, url_style) = if stderr_with_ansi {
(
Style::new().red(),
Style::new().bold(),
Style::new().bold().underline(),
)
} else {
(Style::new(), Style::new(), Style::new())
};
eprintln!(
"\n{msg}\n\nSet the environment variable {var} and re-run this command.\nYou can create a key here: {url}\n",
msg = "Missing OpenAI API key.".style(msg_style),
var = "OPENAI_API_KEY".style(var_style),
url = "https://platform.openai.com/account/api-keys".style(url_style),
);
std::process::exit(1);
/// Print a human-readable line (or short block) on stdout for each agent
/// event; used by the headless exec CLI's event loop.
fn process_event(event: &Event) {
    let Event { id, msg } = event;
    match msg {
        EventMsg::Error { message } => {
            println!("Error: {message}");
        }
        EventMsg::BackgroundEvent { .. } => {
            // Ignore these for now.
        }
        EventMsg::TaskStarted => {
            println!("Task started: {id}");
        }
        EventMsg::TaskComplete => {
            println!("Task complete: {id}");
        }
        EventMsg::AgentMessage { message } => {
            println!("Agent message: {message}");
        }
        EventMsg::ExecCommandBegin {
            call_id,
            command,
            cwd,
        } => {
            println!("exec('{call_id}'): {:?} in {cwd}", command);
        }
        EventMsg::ExecCommandEnd {
            call_id,
            stdout,
            stderr,
            exit_code,
        } => {
            // Show stdout on success, stderr on failure; cap at 5 lines to
            // keep the console output compact.
            let output = if *exit_code == 0 { stdout } else { stderr };
            let truncated_output = output.lines().take(5).collect::<Vec<_>>().join("\n");
            println!("exec('{call_id}') exited {exit_code}:\n{truncated_output}");
        }
        EventMsg::PatchApplyBegin {
            call_id,
            auto_approved,
            changes,
        } => {
            // One "X path" line per change, where X is the A/D/R/M marker
            // from `format_file_change`.
            let changes = changes
                .iter()
                .map(|(path, change)| {
                    format!("{} {}", format_file_change(change), path.to_string_lossy())
                })
                .collect::<Vec<_>>()
                .join("\n");
            println!("apply_patch('{call_id}') auto_approved={auto_approved}:\n{changes}");
        }
        EventMsg::PatchApplyEnd {
            call_id,
            stdout,
            stderr,
            success,
        } => {
            // Map success/failure onto a synthetic 0/1 exit code for display.
            let (exit_code, output) = if *success { (0, stdout) } else { (1, stderr) };
            let truncated_output = output.lines().take(5).collect::<Vec<_>>().join("\n");
            println!("apply_patch('{call_id}') exited {exit_code}:\n{truncated_output}");
        }
        EventMsg::ExecApprovalRequest { .. } => {
            // Should we exit?
        }
        EventMsg::ApplyPatchApprovalRequest { .. } => {
            // Should we exit?
        }
        _ => {
            // Ignore event.
        }
    }
}
/// Returns `true` if a recognized API key is present in the environment.
///
/// At present we only support `OPENAI_API_KEY`, mirroring the behavior of the
/// Node-based `codex-cli`. Additional providers can be added here when the
/// Rust implementation gains first-class support for them.
fn has_api_key() -> bool {
std::env::var("OPENAI_API_KEY")
.map(|s| !s.trim().is_empty())
.unwrap_or(false)
/// One-letter change marker in the style of `git status`:
/// A = added, D = deleted, R = renamed (moved), M = modified in place.
fn format_file_change(change: &FileChange) -> &'static str {
    match change {
        FileChange::Delete => "D",
        FileChange::Add { .. } => "A",
        FileChange::Update { move_path, .. } => match move_path {
            Some(_) => "R",
            None => "M",
        },
    }
}

View File

@@ -0,0 +1,24 @@
[package]
name = "codex-interactive"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "codex-interactive"
path = "src/main.rs"
[lib]
name = "codex_interactive"
path = "src/lib.rs"
[dependencies]
anyhow = "1"
clap = { version = "4", features = ["derive"] }
codex-core = { path = "../core", features = ["cli"] }
tokio = { version = "1", features = [
"io-std",
"macros",
"process",
"rt-multi-thread",
"signal",
] }

View File

@@ -0,0 +1,33 @@
use clap::Parser;
use codex_core::ApprovalModeCliArg;
use codex_core::SandboxModeCliArg;
use std::path::PathBuf;
// Command-line arguments for the interactive codex agent.
//
// NOTE: the `///` doc comments on fields double as clap help text shown to
// users in `--help`, so they are user-facing strings — edit with care.
#[derive(Parser, Debug, Clone)]
#[command(version)]
pub struct Cli {
    /// Optional image(s) to attach to the initial prompt.
    #[arg(long = "image", short = 'i', value_name = "FILE", value_delimiter = ',', num_args = 1..)]
    pub images: Vec<PathBuf>,
    /// Model the agent should use.
    #[arg(long, short = 'm')]
    pub model: Option<String>,
    /// Configure when the model requires human approval before executing a command.
    #[arg(long = "ask-for-approval", short = 'a', value_enum, default_value_t = ApprovalModeCliArg::OnFailure)]
    pub approval_policy: ApprovalModeCliArg,
    /// Configure the process restrictions when a command is executed.
    ///
    /// Uses OS-specific sandboxing tools; Seatbelt on OSX, landlock+seccomp on Linux.
    #[arg(long = "sandbox", short = 's')]
    pub sandbox_policy: Option<SandboxModeCliArg>,
    /// Allow running Codex outside a Git repository.
    #[arg(long = "skip-git-repo-check", default_value_t = false)]
    pub skip_git_repo_check: bool,
    /// Initial instructions for the agent.
    // Positional argument (no #[arg] attribute with a long/short name).
    pub prompt: Option<String>,
}

View File

@@ -0,0 +1,7 @@
mod cli;
pub use cli::Cli;
/// Entry point used by the `codex-interactive` binary. Interactive mode is a
/// placeholder for now: it prints a notice to stderr and exits non-zero.
pub async fn run_main(_cli: Cli) -> anyhow::Result<()> {
    eprintln!("Interactive mode is not implemented yet.");
    std::process::exit(1);
}

View File

@@ -0,0 +1,11 @@
use clap::Parser;
use codex_interactive::run_main;
use codex_interactive::Cli;
/// Binary entry point: parse argv and delegate to the library's `run_main`.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    run_main(Cli::parse()).await
}

View File

@@ -1,6 +1,6 @@
[package]
name = "codex-repl"
version = { workspace = true }
version = "0.1.0"
edition = "2021"
[[bin]]
@@ -25,4 +25,4 @@ tokio = { version = "1", features = [
"signal",
] }
tracing = { version = "0.1.41", features = ["log"] }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }

View File

@@ -1,11 +1,13 @@
use clap::ArgAction;
use clap::Parser;
use clap::ValueEnum;
use codex_core::ApprovalModeCliArg;
use codex_core::SandboxPermissionOption;
use codex_core::SandboxModeCliArg;
use std::path::PathBuf;
/// Commandline arguments.
#[derive(Debug, Parser)]
/// Command-line interface for the interactive `codex-repl` agent.
#[derive(Debug, Parser, Clone)]
#[command(
author,
version,
@@ -37,12 +39,11 @@ pub struct Cli {
#[arg(long = "ask-for-approval", short = 'a')]
pub approval_policy: Option<ApprovalModeCliArg>,
/// Convenience alias for low-friction sandboxed automatic execution (-a on-failure, network-disabled sandbox that can write to cwd and TMPDIR)
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
#[clap(flatten)]
pub sandbox: SandboxPermissionOption,
/// Configure the process restrictions when a command is executed.
///
/// Uses OS-specific sandboxing tools; Seatbelt on OSX, landlock+seccomp on Linux.
#[arg(long = "sandbox", short = 's')]
pub sandbox_policy: Option<SandboxModeCliArg>,
/// Allow running Codex outside a Git repository. By default the CLI
/// aborts early when the current working directory is **not** inside a
@@ -63,3 +64,70 @@ pub struct Cli {
#[arg(short = 'E', long)]
pub record_events: Option<PathBuf>,
}
impl Cli {
    /// This is effectively the opposite of Clap; we want the ability to take
    /// a structured `Cli` object, and then pass it to a binary as argv[].
    ///
    /// Every flag the user could have supplied is re-emitted so the spawned
    /// agent sees an equivalent command line.
    pub fn to_args(&self) -> Vec<String> {
        let mut args = vec![];
        if let Some(model) = &self.model {
            args.push("--model".into());
            args.push(model.clone());
        }
        for img in &self.images {
            args.push("--image".into());
            args.push(img.to_string_lossy().into_owned());
        }
        if self.no_ansi {
            args.push("--no-ansi".into());
        }
        // `-v` is counted (repeatable), so emit one flag per level.
        for _ in 0..self.verbose {
            args.push("-v".into());
        }
        args.push("--ask-for-approval".into());
        args.push(
            self.approval_policy
                .to_possible_value()
                // ValueEnum variants always expose a possible value unless
                // explicitly skipped, which ours are not.
                .expect("approval policy should have a CLI representation")
                .get_name()
                .to_string(),
        );
        args.push("--sandbox".into());
        args.push(
            self.sandbox_policy
                .to_possible_value()
                .expect("sandbox policy should have a CLI representation")
                .get_name()
                .to_string(),
        );
        if self.allow_no_git_exec {
            args.push("--allow-no-git-exec".into());
        }
        if self.disable_response_storage {
            args.push("--disable-response-storage".into());
        }
        if let Some(path) = &self.record_submissions {
            args.push("--record-submissions".into());
            args.push(path.to_string_lossy().into_owned());
        }
        if let Some(path) = &self.record_events {
            args.push("--record-events".into());
            args.push(path.to_string_lossy().into_owned());
        }
        // The positional prompt goes last so it cannot be mistaken for the
        // value of a preceding flag.
        if let Some(prompt) = &self.prompt {
            args.push(prompt.clone());
        }
        args
    }
}

View File

@@ -6,9 +6,7 @@ use std::sync::Arc;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::protocol;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::FileChange;
use codex_core::protocol::SandboxPolicy;
use codex_core::util::is_inside_git_repo;
use codex_core::util::notify_on_sigint;
use codex_core::Codex;
@@ -78,26 +76,11 @@ pub async fn run_main(cli: Cli) -> anyhow::Result<()> {
// Initialize logging before any other work so early errors are captured.
init_logger(cli.verbose, !cli.no_ansi);
let (sandbox_policy, approval_policy) = if cli.full_auto {
(
Some(SandboxPolicy::new_full_auto_policy()),
Some(AskForApproval::OnFailure),
)
} else {
let sandbox_policy = cli.sandbox.permissions.clone().map(Into::into);
(sandbox_policy, cli.approval_policy.map(Into::into))
};
// Load config file and apply CLI overrides (model & approval policy)
let overrides = ConfigOverrides {
model: cli.model.clone(),
approval_policy,
sandbox_policy,
disable_response_storage: if cli.disable_response_storage {
Some(true)
} else {
None
},
approval_policy: cli.approval_policy.map(Into::into),
sandbox_policy: cli.sandbox_policy.map(Into::into),
};
let config = Config::load_with_overrides(overrides)?;
@@ -121,7 +104,7 @@ async fn codex_main(cli: Cli, cfg: Config, ctrl_c: Arc<Notify>) -> anyhow::Resul
instructions: cfg.instructions,
approval_policy: cfg.approval_policy,
sandbox_policy: cfg.sandbox_policy,
disable_response_storage: cfg.disable_response_storage,
disable_response_storage: cli.disable_response_storage,
},
};

View File

@@ -0,0 +1,56 @@
[package]
name = "codex-session"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "codex-session"
path = "src/main.rs"
[lib]
name = "codex_session"
path = "src/lib.rs"
[dependencies]
anyhow = "1"
clap = { version = "4", features = ["derive"] }
codex-core = { path = "../core" }
tokio = { version = "1", features = [
"io-std",
"macros",
"process",
"rt-multi-thread",
"signal",
] }
tracing = { version = "0.1.41", features = ["log"] }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
uuid = { version = "1", features = ["v4"] }
chrono = { version = "0.4", features = ["serde"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
dirs = "6"
sysinfo = "0.29"
tabwriter = "1.3"
names = { version = "0.14", default-features = false }
nix = { version = "0.28", default-features = false, features = ["process", "signal", "term", "fs"] }
petname = "2.0.2"
rand = "0.9.1"
# Re-use the codex-exec library for its CLI definition
codex_exec = { package = "codex-exec", path = "../exec" }
codex_repl = { package = "codex-repl", path = "../repl" }
humansize = "2.1.3"
command-group = { version = "5.0.1", features = ["with-tokio"] }
[dev-dependencies]
tempfile = "3"
[target.'cfg(unix)'.dependencies]
libc = "0.2"
[target.'cfg(windows)'.dependencies]
windows-sys = { version = "0.48", features = [
"Win32_Foundation",
"Win32_System_Console",
"Win32_System_Threading",
] }

18
codex-rs/session/build.rs Normal file
View File

@@ -0,0 +1,18 @@
// build.rs -- embed the current git commit hash into the binary so the code
// can record it in the session metadata file.
fn main() {
    // `git rev-parse HEAD` fails when the source tree is not a git
    // repository (e.g. during `cargo publish`); fall back to "unknown" in
    // that case rather than breaking the build.
    let output = std::process::Command::new("git")
        .args(["rev-parse", "HEAD"])
        .output();
    let git_sha = match output {
        Ok(out) if out.status.success() => match String::from_utf8(out.stdout) {
            Ok(text) => text.trim().to_owned(),
            Err(_) => "unknown".into(),
        },
        _ => "unknown".into(),
    };
    println!("cargo:rustc-env=GIT_SHA={git_sha}");
}

View File

@@ -0,0 +1,9 @@
//! Build-time information helpers (git commit hash, version, ...).
/// Return the git commit hash that was recorded at compile time via the
/// `build.rs` build-script. Falls back to the static string "unknown" when the
/// build script failed to determine the hash (e.g. when building from a
/// source tarball without the `.git` directory).
pub fn git_sha() -> &'static str {
    // Baked in at compile time by build.rs via `cargo:rustc-env=GIT_SHA=...`.
    env!("GIT_SHA")
}

477
codex-rs/session/src/cli.rs Normal file
View File

@@ -0,0 +1,477 @@
//! CLI command definitions and implementation for `codex-session`.
//!
//! The session manager can spawn two different Codex agent flavors:
//!
//! * `codex-exec` -- non-interactive single-turn agent
//! * `codex-repl` -- interactive multi-turn agent
//!
//! The `create` command therefore has mutually exclusive sub-commands so the appropriate
//! arguments can be forwarded to the underlying agent binaries.
use crate::meta::SessionMeta;
use crate::spawn;
use crate::store;
use anyhow::Context;
use anyhow::Result;
use chrono::SecondsFormat;
use clap::Args;
use clap::Parser;
use clap::Subcommand;
use petname::Generator;
use petname::Petnames;
use serde::Serialize;
#[cfg(unix)]
use codex_repl as _;
// Top-level CLI: a single required subcommand selects the operation.
#[derive(Parser)]
#[command(
    name = "codex-session",
    about = "Manage background Codex agent sessions"
)]
pub struct Cli {
    #[command(subcommand)]
    cmd: Commands,
}
impl Cli {
    /// Dispatch to the selected subcommand's async `run()` implementation.
    pub async fn dispatch(self) -> Result<()> {
        match self.cmd {
            Commands::Create(x) => x.run().await,
            Commands::Attach(x) => x.run().await,
            Commands::Delete(x) => x.run().await,
            Commands::Logs(x) => x.run().await,
            Commands::List(x) => x.run().await,
            Commands::Get(x) => x.run().await,
        }
    }
}
// Subcommand set; the `///` docs become the user-visible clap help text.
#[derive(Subcommand)]
enum Commands {
    /// Spawn a new background session.
    Create(CreateCmd),
    /// Attach the current terminal to a running interactive session.
    Attach(AttachCmd),
    /// Terminate a session and remove its on-disk state.
    Delete(DeleteCmd),
    /// Show (and optionally follow) the stdout / stderr logs of a session.
    Logs(LogsCmd),
    /// List all known sessions.
    List(ListCmd),
    /// Print the raw metadata JSON for a session.
    Get(GetCmd),
}
// Which agent flavor a `create` should spawn. The repl variant is cfg-gated
// to unix because attaching relies on a FIFO stdin pipe (see spawn/attach).
#[derive(Subcommand)]
enum AgentKind {
    /// Non-interactive execution agent.
    Exec(ExecCreateCmd),
    /// Interactive Read-Eval-Print-Loop agent.
    #[cfg(unix)]
    Repl(ReplCreateCmd),
}
#[derive(Args)]
pub struct CreateCmd {
    /// Explicit session name. If omitted, a memorable random one is generated.
    #[arg(long)]
    id: Option<String>,
    #[command(subcommand)]
    agent: AgentKind,
}
// Forwards the full `codex-exec` CLI surface verbatim via clap's flatten.
#[derive(Args)]
pub struct ExecCreateCmd {
    #[clap(flatten)]
    exec_cli: codex_exec::Cli,
}
// Forwards the full `codex-repl` CLI surface verbatim via clap's flatten.
#[cfg(unix)]
#[derive(Args)]
pub struct ReplCreateCmd {
    #[clap(flatten)]
    repl_cli: codex_repl::Cli,
}
impl CreateCmd {
    /// Create a new session: pick (or generate) an ID, prepare the on-disk
    /// session directory, spawn the requested agent detached, and persist
    /// `meta.json`. On success the session ID is printed to stdout.
    pub async fn run(self) -> Result<()> {
        let id = match &self.id {
            Some(explicit) => explicit.clone(),
            None => generate_session_id()?,
        };
        let paths = store::paths_for(&id)?;
        // Prepare session directory *before* spawning the agent so stdout/
        // stderr redirection works even when the child process itself fails
        // immediately.
        store::prepare_dirs(&paths)?;
        // Spawn underlying agent.
        //
        // IMPORTANT: If the spawn call fails we end up with an empty (or
        // almost empty) directory inside ~/.codex/sessions/. To avoid
        // confusing stale entries we attempt to purge the directory before
        // bubbling up the error to the caller.
        //
        // Capture the child PID *and* the full CLI config so we can persist it
        // in the metadata file.
        let spawn_result: Result<(
            u32,                // pid
            Option<String>,     // prompt preview
            store::SessionKind, // kind
            Vec<String>,        // raw argv used to spawn the agent
        )> = (|| match self.agent {
            AgentKind::Exec(cmd) => {
                let args = cmd.exec_cli.to_args();
                let child = spawn::spawn_exec(&paths, &args)?;
                let preview = cmd.exec_cli.prompt.as_ref().map(|p| truncate_preview(p));
                Ok((
                    // Falls back to 0 when the PID is unavailable; `list`
                    // displays a 0 pid as status "unknown".
                    child.id().unwrap_or_default(),
                    preview,
                    store::SessionKind::Exec,
                    args.clone(),
                ))
            }
            #[cfg(unix)]
            AgentKind::Repl(cmd) => {
                let args = cmd.repl_cli.to_args();
                let child = spawn::spawn_repl(&paths, &args)?;
                let preview = cmd.repl_cli.prompt.as_ref().map(|p| truncate_preview(p));
                Ok((
                    child.id().unwrap_or_default(),
                    preview,
                    store::SessionKind::Repl,
                    args.clone(),
                ))
            }
        })();
        let (pid, prompt_preview, kind, argv) = match spawn_result {
            Ok(tuple) => tuple,
            Err(err) => {
                // Best effort clean-up -- ignore failures so we don't mask the
                // original spawn error.
                let _ = store::purge(&id);
                return Err(err);
            }
        };
        // Persist metadata **after** the process has been spawned so we can record its PID.
        let meta = SessionMeta::new(id.clone(), pid, kind, argv, prompt_preview);
        store::write_meta(&paths, &meta)?;
        println!("{id}");
        Ok(())
    }
}
/// Return at most the first 40 characters of `p`, appending "..." only when
/// the input was actually truncated.
fn truncate_preview(p: &str) -> String {
    let slice: String = p.chars().take(40).collect();
    // Compare *character* counts, not `p.len()` (bytes): byte length
    // over-reports for multi-byte UTF-8 and would mark short strings as
    // truncated (appending "..." to an already-complete preview).
    if p.chars().count() > 40 {
        format!("{}...", slice)
    } else {
        slice
    }
}
/// Generate a new unique session identifier.
///
/// We use the `petname` crate to create short, memorable names consisting of
/// two random words separated by a dash (e.g. "autumn-panda"). In the rare
/// event of a collision with an existing session directory we retry until we
/// find an unused ID.
fn generate_session_id() -> Result<String> {
    // Keep only short words so the combined "word-word" ID stays compact.
    let mut shortnames = Petnames::default();
    shortnames.retain(|s| s.len() <= 5);
    loop {
        let id = shortnames
            .generate_one(2, "-")
            .context("failed to generate session ID")?;
        // Collision check: only accept an ID whose session directory does
        // not already exist; otherwise loop and generate a new one.
        if !store::paths_for(&id)?.dir.exists() {
            return Ok(id);
        }
    }
}
// Arguments for `codex-session attach`.
#[derive(Args)]
pub struct AttachCmd {
    /// Session selector (index, id or prefix) to attach to.
    id: String,
    /// Also print stderr stream in addition to stdout.
    #[arg(long)]
    stderr: bool,
}
impl AttachCmd {
    /// Resolve the selector and connect this terminal to the session:
    /// stdin lines are forwarded into the session's stdin FIFO while the
    /// session's log files are tailed to our stdout.
    pub async fn run(self) -> Result<()> {
        let id = store::resolve_selector(&self.id)?;
        let paths = store::paths_for(&id)?;
        self.attach_line_oriented(&id, &paths).await
    }
    /// Line-oriented attach loop. Returns when the user closes stdin
    /// (Ctrl-D); reaching end-of-file on a log is not an exit condition --
    /// the reader just sleeps and polls for more data.
    async fn attach_line_oriented(&self, id: &str, paths: &store::Paths) -> Result<()> {
        use tokio::io::AsyncBufReadExt;
        use tokio::io::AsyncWriteExt;
        use tokio::time::sleep;
        use tokio::time::Duration;
        // Ensure stdin pipe exists.
        if !paths.stdin.exists() {
            anyhow::bail!("session '{id}' is not interactive (stdin pipe missing)");
        }
        // Open writer to the session's stdin pipe.
        let mut pipe = tokio::fs::OpenOptions::new()
            .write(true)
            .open(&paths.stdin)
            .await
            .with_context(|| format!("failed to open stdin pipe for session '{id}'"))?;
        // Log tailing setup
        //
        // Always open stdout so the select! branches below stay simple.
        let file_out = tokio::fs::File::open(&paths.stdout).await?;
        let mut reader_out = tokio::io::BufReader::new(file_out).lines();
        // Conditionally open stderr if the user asked for it. Keeping the
        // reader in an `Option` allows us to reuse the same select! loop -- the
        // helper future simply parks forever when stderr is disabled.
        let mut reader_err = if self.stderr {
            let file_err = tokio::fs::File::open(&paths.stderr).await?;
            Some(tokio::io::BufReader::new(file_err).lines())
        } else {
            None
        };
        let mut stdin_lines = tokio::io::BufReader::new(tokio::io::stdin()).lines();
        loop {
            tokio::select! {
                // User supplied input (stdin -> session stdin pipe)
                line = stdin_lines.next_line() => {
                    match line? {
                        Some(mut l) => {
                            l.push('\n');
                            pipe.write_all(l.as_bytes()).await?;
                            pipe.flush().await?;
                        }
                        // Ctrl-D -- end of interactive input
                        None => {
                            break;
                        }
                    }
                }
                // stdout updates; `None` means end-of-file for now, so back
                // off briefly before the next poll instead of busy-looping.
                out_line = reader_out.next_line() => {
                    match out_line? {
                        Some(l) => println!("{l}"),
                        None => sleep(Duration::from_millis(200)).await,
                    }
                }
                // stderr updates (optional)
                //
                // To keep `tokio::select!` happy we always supply a branch -- when the
                // user did *not* request stderr we hand it a future that will never
                // finish (pending forever). This avoids `Option` juggling within the
                // select! macro.
                err_line = async {
                    if let Some(reader) = &mut reader_err {
                        reader.next_line().await
                    } else {
                        // Never resolves -- equivalent to `futures::future::pending()`
                        std::future::pending().await
                    }
                } => {
                    if let Some(line) = err_line? {
                        // Use a visible prefix so users can distinguish the streams.
                        println!("[stderr] {line}");
                    } else {
                        sleep(Duration::from_millis(200)).await;
                    }
                }
            }
        }
        Ok(())
    }
}
// Arguments for `codex-session delete`: a single session selector.
#[derive(Args)]
pub struct DeleteCmd {
    id: String,
}
impl DeleteCmd {
    /// Kill the session's process (if still running) and then remove its
    /// on-disk state directory.
    pub async fn run(self) -> Result<()> {
        let session_id = store::resolve_selector(&self.id)?;
        store::kill_session(&session_id).await?;
        store::purge(&session_id)?;
        Ok(())
    }
}
// Arguments for `codex-session logs`.
#[derive(Args)]
pub struct LogsCmd {
    id: String,
    #[arg(long)]
    stderr: bool,
}
impl LogsCmd {
    /// Dump the selected session's stdout (or, with --stderr, its stderr)
    /// log file to our stdout.
    pub async fn run(self) -> Result<()> {
        let session_id = store::resolve_selector(&self.id)?;
        let paths = store::paths_for(&session_id)?;
        // Pick which of the two log streams to print.
        let log_path = if self.stderr { &paths.stderr } else { &paths.stdout };
        // Stream the complete file to stdout. Users can pipe to `tail -f`,
        // `less +F`, etc. if they only want live updates.
        let mut reader = tokio::io::BufReader::new(tokio::fs::File::open(log_path).await?);
        tokio::io::copy(&mut reader, &mut tokio::io::stdout()).await?;
        Ok(())
    }
}
// `list` takes no arguments; the empty struct exists for clap symmetry.
#[derive(Args)]
pub struct ListCmd {}
// -----------------------------------------------------------------------------
// get -- print a session's raw metadata as pretty JSON
// -----------------------------------------------------------------------------
#[derive(Args)]
pub struct GetCmd {
    /// Session selector (index, id or prefix) to print metadata for.
    id: String,
}
impl GetCmd {
    /// Resolve the selector, read the session's `meta.json`, and pretty-print
    /// it to stdout.
    pub async fn run(self) -> Result<()> {
        // Re-use the same selector resolution that `attach`, `delete`, … use so users can refer
        // to sessions by index or prefix.
        let id = store::resolve_selector(&self.id)?;
        let paths = store::paths_for(&id)?;
        let bytes = std::fs::read(&paths.meta)
            .with_context(|| format!("failed to read metadata for session '{id}'"))?;
        // We *could* just write the file contents as-is but parsing + re-serialising guarantees
        // the output is valid and nicely formatted even when the on-disk representation ever
        // switches away from pretty-printed JSON.
        let meta: SessionMeta =
            serde_json::from_slice(&bytes).context("failed to deserialize session metadata")?;
        let pretty = serde_json::to_string_pretty(&meta)?;
        println!("{pretty}");
        Ok(())
    }
}
// One row of the session status table printed by `list`; also serializable
// for machine consumption.
#[derive(Serialize)]
#[allow(missing_docs)]
pub struct StatusRow {
    pub idx: usize,
    pub id: String,
    pub pid: u32,
    pub kind: String,
    pub status: String, // "running" | "exited" | "unknown"
    pub created: String,
    pub prompt: String,
    pub out: String, // humanized stdout log size ("-" when unavailable)
    pub err: String, // humanized stderr log size ("-" when unavailable)
}
impl ListCmd {
    /// Build and print a status table covering every known session.
    pub async fn run(self) -> Result<()> {
        use sysinfo::PidExt;
        use sysinfo::SystemExt;
        let metas = store::list_sessions_sorted()?;
        // Snapshot the process table once so the per-row liveness check is
        // a cheap in-memory lookup.
        let mut sys = sysinfo::System::new();
        sys.refresh_processes();
        let bytes_formatter = humansize::make_format(humansize::DECIMAL);
        let rows: Vec<StatusRow> = metas
            .into_iter()
            .enumerate()
            .map(|(idx, m)| {
                // pid == 0 is the "PID never recorded at spawn" sentinel
                // (see CreateCmd, which uses `unwrap_or_default()`).
                let status = if m.pid == 0 {
                    "unknown"
                } else if sys.process(sysinfo::Pid::from_u32(m.pid)).is_some() {
                    "running"
                } else {
                    "exited"
                };
                let paths = store::paths_for(&m.id).ok();
                // Humanized log sizes; "-" when the session paths cannot be
                // resolved. Missing log files count as size 0.
                let (out, err) = if let Some(p) = &paths {
                    let osz = std::fs::metadata(&p.stdout).map(|m| m.len()).unwrap_or(0);
                    let esz = std::fs::metadata(&p.stderr).map(|m| m.len()).unwrap_or(0);
                    (bytes_formatter(osz), bytes_formatter(esz))
                } else {
                    ("-".into(), "-".into())
                };
                StatusRow {
                    idx,
                    id: m.id,
                    pid: m.pid,
                    kind: format!("{:?}", m.kind).to_lowercase(),
                    status: status.into(),
                    created: m.created_at.to_rfc3339_opts(SecondsFormat::Secs, true),
                    prompt: m.prompt_preview.unwrap_or_default(),
                    out,
                    err,
                }
            })
            .collect();
        print_table(&rows)?;
        Ok(())
    }
}
/// Render the status rows as an aligned, tab-separated table on stdout.
pub fn print_table(rows: &[StatusRow]) -> Result<()> {
    use std::io::Write;
    use tabwriter::TabWriter;
    let mut writer = TabWriter::new(Vec::new()).padding(2);
    writeln!(writer, "#\tID\tPID\tTYPE\tSTATUS\tOUT\tERR\tCREATED\tPROMPT")?;
    for row in rows {
        writeln!(
            writer,
            "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}",
            row.idx,
            row.id,
            row.pid,
            row.kind,
            row.status,
            row.out,
            row.err,
            row.created,
            row.prompt
        )?;
    }
    let rendered = String::from_utf8(writer.into_inner()?)?;
    print!("{rendered}");
    Ok(())
}

View File

@@ -0,0 +1,20 @@
//! Library entry-point re-exporting the CLI so the binary can stay tiny.
//! Manage background `codex-exec` agents.
//!
//! This library is thin: it only re-exports the clap CLI and helpers so
//! the binary can stay small and unit tests can call into pure Rust APIs.
pub mod build;
pub mod cli;
pub mod meta;
// Signal handling and process spawning are implementation details and stay
// private to the crate.
mod sig;
mod spawn;
pub mod store;
pub use cli::Cli;
/// Entry used by the bin crate.
pub async fn run_main(cli: Cli) -> anyhow::Result<()> {
    cli.dispatch().await
}

View File

@@ -0,0 +1,11 @@
use clap::Parser;
use codex_session::run_main;
use codex_session::Cli;
/// Binary entry point: parse argv and delegate to the library's `run_main`.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    run_main(Cli::parse()).await
}

View File

@@ -0,0 +1,72 @@
//! Lightweight on-disk session metadata.
//!
//! The metadata is persisted as `meta.json` inside each session directory so
//! users -- or other tooling -- can inspect **how** a session was started even
//! months later. Instead of serialising the full, typed CLI structs (which
//! would force every agent crate to depend on `serde`) we only keep the raw
//! argument vector that was passed to the spawned process. This keeps the
//! public API surface minimal while still giving us reproducibility -- a
//! session can always be re-spawned with `codex <args...>`.
use chrono::DateTime;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use crate::store::SessionKind;
/// JSON envelope version. Bump when the structure changes in a
/// backwards-incompatible way.
pub const CURRENT_VERSION: u8 = 1;
/// Persisted session metadata.
#[derive(Debug, Serialize, Deserialize)]
pub struct SessionMeta {
    /// Unique identifier (also doubles as directory name).
    pub id: String,
    /// Leader process id (PID). `0` means the PID could not be determined
    /// when the session was spawned.
    pub pid: u32,
    /// Whether the session is an `exec` or `repl` one.
    pub kind: SessionKind,
    /// Raw command-line arguments that were used to spawn the agent
    /// (`codex-exec ...` or `codex-repl ...`).
    pub argv: Vec<String>,
    /// Short preview of the user prompt (if any).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_preview: Option<String>,
    /// Wall-clock timestamp when the session was created.
    pub created_at: DateTime<Utc>,
    /// Git commit hash of the build that produced this file.
    pub codex_commit: String,
    /// Schema version (see [`CURRENT_VERSION`]).
    pub version: u8,
}
impl SessionMeta {
    /// Construct a fresh metadata record for a just-spawned session, stamping
    /// it with the current time, the build's git commit, and the current
    /// schema version.
    ///
    /// Note: the stale `#[allow(clippy::too_many_arguments)]` was removed --
    /// the lint only fires at 8+ parameters and this takes 5.
    pub fn new(
        id: String,
        pid: u32,
        kind: SessionKind,
        argv: Vec<String>,
        prompt_preview: Option<String>,
    ) -> Self {
        Self {
            id,
            pid,
            kind,
            argv,
            prompt_preview,
            created_at: Utc::now(),
            codex_commit: crate::build::git_sha().to_owned(),
            version: CURRENT_VERSION,
        }
    }
}

View File

@@ -0,0 +1,25 @@
//! Small safe wrappers around a handful of `nix::sys::signal` calls that are
//! considered `unsafe` by the `nix` crate. By concentrating the `unsafe` blocks
//! in a single, well-audited module we can keep the rest of the codebase — and
//! in particular `spawn.rs` — entirely `unsafe`-free.
#[cfg(unix)]
use nix::sys::signal::signal as nix_signal;
#[cfg(unix)]
use nix::sys::signal::SigHandler;
#[cfg(unix)]
use nix::sys::signal::Signal;
/// Safely ignore `SIGHUP` for the current process.
///
/// Internally this delegates to `nix::sys::signal::signal(…, SigIgn)` which is
/// marked *unsafe* because changing signal handlers can break invariants in
/// foreign code. In our very controlled environment we *only* ever install the
/// predefined, always-safe `SIG_IGN` handler, which is guaranteed not to cause
/// undefined behaviour. Therefore it is sound to wrap the call in `unsafe` and
/// expose it as a safe function.
#[cfg(unix)]
pub fn ignore_sighup() -> nix::Result<()> {
    // SAFETY: Installing the built-in `SIG_IGN` handler is always safe.
    // The previous handler returned by `signal` is intentionally discarded.
    unsafe { nix_signal(Signal::SIGHUP, SigHandler::SigIgn) }.map(|_| ())
}

View File

@@ -0,0 +1,118 @@
//! Spawn detached Codex agent processes for exec and repl sessions.
use crate::store::Paths;
use anyhow::Context;
use anyhow::Result;
use std::fs::OpenOptions;
use tokio::process::Child;
use tokio::process::Command;
#[cfg(unix)]
use command_group::AsyncCommandGroup;
#[cfg(unix)]
use nix::errno::Errno;
#[cfg(unix)]
use nix::sys::stat::Mode;
#[cfg(unix)]
use nix::unistd::mkfifo;
/// Open (append-create) the two log files that stdout / stderr of the
/// spawned agent will be redirected to.
fn open_log_files(paths: &Paths) -> Result<(std::fs::File, std::fs::File)> {
    // Both files share identical open options, so build them via one helper.
    let open_append = |path: &std::path::Path| {
        OpenOptions::new().create(true).append(true).open(path)
    };
    let stdout_log = open_append(&paths.stdout)?;
    let stderr_log = open_append(&paths.stderr)?;
    Ok((stdout_log, stderr_log))
}
/// Configure a `tokio::process::Command` with the common options that are the
/// same for both `codex-exec` and `codex-repl` sessions: null stdin and
/// stdout/stderr redirected into the session's log files.
fn base_command(bin: &str, paths: &Paths) -> Result<Command> {
    let (out_log, err_log) = open_log_files(paths)?;
    let mut command = Command::new(bin);
    command
        .stdin(std::process::Stdio::null())
        .stdout(out_log)
        .stderr(err_log);
    Ok(command)
}
/// Spawn a detached, non-interactive `codex-exec` agent whose stdout/stderr
/// are redirected to the session's log files.
///
/// On Unix the child is started as a process-group leader (via
/// `group_spawn`) and `SIGHUP` is ignored in the parent so the child survives
/// terminal hang-ups. On Windows detachment is requested through the
/// `DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP` creation flags instead.
#[allow(dead_code)]
pub fn spawn_exec(paths: &Paths, exec_args: &[String]) -> Result<Child> {
    #[cfg(unix)]
    {
        // Build the base command and add the user-supplied arguments.
        let mut cmd = base_command("codex-exec", paths)?;
        cmd.args(exec_args);
        // exec is non-interactive, use /dev/null for stdin.
        let stdin = OpenOptions::new().read(true).open("/dev/null")?;
        cmd.stdin(stdin);
        // Spawn the child as a process group / new session leader.
        let child = cmd
            .group_spawn()
            .context("failed to spawn codex-exec")?
            .into_inner();
        crate::sig::ignore_sighup()?;
        Ok(child)
    }
    #[cfg(windows)]
    {
        const DETACHED_PROCESS: u32 = 0x00000008;
        const CREATE_NEW_PROCESS_GROUP: u32 = 0x00000200;
        let mut cmd = base_command("codex-exec", paths)?;
        cmd.args(exec_args)
            .creation_flags(DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP);
        let child = cmd.spawn().context("failed to spawn codex-exec")?;
        Ok(child)
    }
}
/// Spawn a detached, interactive `codex-repl` agent (Unix only).
///
/// A FIFO at `paths.stdin` serves as the agent's stdin so other processes can
/// write input to the running session later.
#[cfg(unix)]
pub fn spawn_repl(paths: &Paths, repl_args: &[String]) -> Result<Child> {
    // Ensure a FIFO exists at `paths.stdin` with permissions rw-------
    if !paths.stdin.exists() {
        if let Err(e) = mkfifo(&paths.stdin, Mode::from_bits_truncate(0o600)) {
            // If the FIFO already exists we silently accept, just as the
            // previous implementation did. (EEXIST can race past the
            // `exists()` check above.)
            if e != Errno::EEXIST {
                return Err(std::io::Error::from(e)).context("mkfifo failed");
            }
        }
    }
    // Open the FIFO for *both* reading and writing so we don't deadlock
    // when there is no writer yet (mimics the previous behaviour).
    let stdin = OpenOptions::new()
        .read(true)
        .write(true)
        .open(&paths.stdin)?;
    // Build the command.
    let mut cmd = base_command("codex-repl", paths)?;
    cmd.args(repl_args).stdin(stdin);
    // Detached spawn: new process group, and ignore SIGHUP in the parent so
    // the child survives terminal hang-ups.
    let child = cmd
        .group_spawn()
        .context("failed to spawn codex-repl")?
        .into_inner();
    crate::sig::ignore_sighup()?;
    Ok(child)
}

View File

@@ -0,0 +1,299 @@
//! Session bookkeeping helpers.
//!
//! A session lives in `~/.codex/sessions/<id>/` and contains:
//! * stdout.log / stderr.log - redirect of agent io
//! * meta.json - small struct saved by `write_meta`.
use anyhow::Context;
use anyhow::Result;
// The rich metadata envelope lives in its own module so other parts of the
// crate can import it without pulling in the whole `store` implementation.
use crate::meta::SessionMeta;
use serde::Deserialize;
use serde::Serialize;
use std::path::PathBuf;
/// Canonical on-disk locations for a single session's files.
#[derive(Clone, Debug)]
pub struct Paths {
    /// Session root directory: `~/.codex/sessions/<id>/`.
    pub dir: PathBuf,
    /// Append-only redirect of the agent's stdout (`stdout.log`).
    pub stdout: PathBuf,
    /// Append-only redirect of the agent's stderr (`stderr.log`).
    pub stderr: PathBuf,
    /// Named pipe used for interactive stdin when the session runs a `codex-repl` agent.
    ///
    /// The file is **only** created for repl sessions. Exec sessions ignore the path.
    pub stdin: PathBuf,
    /// Metadata envelope written after spawn (`meta.json`).
    pub meta: PathBuf,
}
/// Build a [`Paths`] struct for a given session identifier.
///
/// The function validates the input to avoid path-traversal attacks or
/// accidental creation of nested directories. Only the following ASCII
/// characters are accepted:
///
/// * `A-Z`, `a-z`, `0-9`
/// * underscore (`_`)
/// * hyphen (`-`)
///
/// Any other byte -- especially path separators such as `/` or `\\` -- results
/// in an error.
///
/// Keeping the validation local to this helper ensures that *all* call-sites
/// (CLI, library, tests) get the same guarantees.
pub fn paths_for(id: &str) -> Result<Paths> {
    validate_id(id)?;
    // Pure path arithmetic -- nothing touches the filesystem here.
    let dir = base_dir()?.join(id);
    let stdout = dir.join("stdout.log");
    let stderr = dir.join("stderr.log");
    let stdin = dir.join("stdin.pipe");
    let meta = dir.join("meta.json");
    Ok(Paths {
        dir,
        stdout,
        stderr,
        stdin,
        meta,
    })
}
/// Internal helper: ensure the supplied session id is well-formed.
///
/// Accepts only ASCII alphanumerics, `_`, and `-`; anything else (notably
/// path separators) is rejected.
fn validate_id(id: &str) -> Result<()> {
    if id.is_empty() {
        anyhow::bail!("session id must not be empty");
    }
    let allowed = |b: &u8| b.is_ascii_alphanumeric() || *b == b'_' || *b == b'-';
    if let Some(bad) = id.bytes().find(|b| !allowed(b)) {
        anyhow::bail!("invalid character in session id: {:?}", bad as char);
    }
    Ok(())
}
/// Root directory that holds all session directories: `~/.codex/sessions`.
///
/// Errors only when the home directory cannot be resolved (via the `dirs`
/// crate). No filesystem access or directory creation happens here.
fn base_dir() -> Result<PathBuf> {
    // ~/.codex/sessions
    let home = dirs::home_dir().context("could not resolve home directory")?;
    Ok(home.join(".codex").join("sessions"))
}
// Keep the original `SessionKind` enum here so we don't need a breaking change
// in all call-sites. The enum is re-exported so other modules (e.g. the newly
// added `meta` module) can still rely on the single source of truth.
/// Which flavour of agent a session runs; serialized as lowercase strings
/// (`"exec"` / `"repl"`) in `meta.json`.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum SessionKind {
    /// Non-interactive batch session -- `codex-exec`.
    #[default]
    Exec,
    /// Line-oriented interactive session -- `codex-repl`.
    Repl,
}
/// Create the session directory and make sure both (empty) log files exist.
///
/// Does **not** write metadata; the caller persists that once the child
/// process has actually been spawned so the recorded PID is real.
pub fn prepare_dirs(paths: &Paths) -> Result<()> {
    std::fs::create_dir_all(&paths.dir)?;
    // Touch both log files up front so readers can open them before the
    // spawned agent produces any output.
    let touch = |p: &std::path::PathBuf| -> std::io::Result<()> {
        std::fs::OpenOptions::new().create(true).append(true).open(p)?;
        Ok(())
    };
    touch(&paths.stdout)?;
    touch(&paths.stderr)?;
    Ok(())
}
/// Persist the session metadata as pretty-printed JSON at `paths.meta`.
///
/// Called after a successful spawn so the stored PID refers to a live process.
pub fn write_meta(paths: &Paths, meta: &SessionMeta) -> Result<()> {
    let json = serde_json::to_vec_pretty(meta)?;
    std::fs::write(&paths.meta, json)?;
    Ok(())
}
/// Enumerate all sessions by loading each `meta.json`.
///
/// Entries whose metadata is missing, unreadable, or malformed are skipped
/// silently; a nonexistent base directory yields an empty list.
pub fn list_sessions() -> Result<Vec<SessionMeta>> {
    let base = base_dir()?;
    if !base.exists() {
        return Ok(Vec::new());
    }
    let mut sessions = Vec::new();
    for entry in std::fs::read_dir(base)? {
        let meta_path = entry?.path().join("meta.json");
        if let Ok(bytes) = std::fs::read(&meta_path) {
            if let Ok(meta) = serde_json::from_slice::<SessionMeta>(&bytes) {
                sessions.push(meta);
            }
        }
    }
    Ok(sessions)
}
/// Newest-first list of sessions (`created_at` descending).
pub fn list_sessions_sorted() -> Result<Vec<SessionMeta>> {
    let mut sessions = list_sessions()?;
    // Stable sort, reversed key => most recent first.
    sessions.sort_by_key(|m| std::cmp::Reverse(m.created_at));
    Ok(sessions)
}
/// Resolve a user-supplied selector to a concrete session id.
///
/// Rules:
/// 1. Pure integer ⇒ index into newest-first list (0 = most recent)
/// 2. Otherwise try exact id match, then unique prefix match.
pub fn resolve_selector(sel: &str) -> Result<String> {
    let list = list_sessions_sorted()?;
    // Rule 1: numeric index into the newest-first list.
    if let Ok(idx) = sel.parse::<usize>() {
        return list
            .get(idx)
            .map(|m| m.id.clone())
            .context(format!("no session at index {idx}"));
    }
    // Rule 2: exact id match wins over any prefix interpretation.
    if let Some(found) = list.iter().find(|m| m.id == sel) {
        return Ok(found.id.clone());
    }
    // Rule 3: a prefix is accepted only when it is unambiguous.
    let prefixed: Vec<&SessionMeta> =
        list.iter().filter(|m| m.id.starts_with(sel)).collect();
    match prefixed.as_slice() {
        [only] => Ok(only.id.clone()),
        [] => anyhow::bail!("no session matching '{sel}'"),
        many => anyhow::bail!("selector '{sel}' is ambiguous ({} matches)", many.len()),
    }
}
/// Attempt to terminate the process (group) that belongs to the given session id.
///
/// Behaviour
/// 1. A *graceful* `SIGTERM` (or `CTRL-BREAK` on Windows) is sent to the **process group**
///    that was created when the agent was spawned (`setsid` / `CREATE_NEW_PROCESS_GROUP`).
/// 2. We wait for a short grace period so the process can exit cleanly.
/// 3. If the process (identified by the original PID) is still alive we force-kill it
///    with `SIGKILL` (or the Win32 `TerminateProcess` API).
/// 4. Calling it again when the session is already terminated bails with an error so
///    callers can decide whether they still need to clean up the directory
///    (`store::purge`).
///
/// NOTE: only a very small amount of asynchronous work is required (the sleeps between
/// TERM → KILL). We keep the function `async` so the public signature stays unchanged.
pub async fn kill_session(id: &str) -> Result<()> {
    use std::time::Duration;
    // Resolve paths and read metadata so we know the target PID.
    let paths = paths_for(id)?;
    // Load meta.json -- we need the PID written at spawn time.
    let bytes = std::fs::read(&paths.meta)
        .with_context(|| format!("could not read metadata for session '{id}'"))?;
    let meta: SessionMeta =
        serde_json::from_slice(&bytes).context("failed to deserialize session metadata")?;
    let pid_u32 = meta.pid;
    // Helper -- cross-platform liveness probe based on the `sysinfo` crate.
    fn is_alive(pid: u32) -> bool {
        use sysinfo::PidExt;
        use sysinfo::SystemExt;
        let mut sys = sysinfo::System::new();
        sys.refresh_process(sysinfo::Pid::from_u32(pid));
        sys.process(sysinfo::Pid::from_u32(pid)).is_some()
    }
    // If the process is already gone we bail out so the caller knows the session
    // directory might need manual clean-up.
    let mut still_running = is_alive(pid_u32);
    if !still_running {
        anyhow::bail!(
            "session process (PID {pid_u32}) is not running -- directory cleanup still required"
        );
    }
    // Step 1 -- send graceful termination.
    #[cfg(unix)]
    {
        // Negative PID = process-group.
        let pgid = -(pid_u32 as i32);
        unsafe {
            libc::kill(pgid, libc::SIGTERM);
        }
    }
    #[cfg(windows)]
    {
        use windows_sys::Win32::System::Console::GenerateConsoleCtrlEvent;
        const CTRL_BREAK_EVENT: u32 = 1; // Using BREAK instead of C for detached groups.
        // The process group id on Windows *is* the pid that we passed to CREATE_NEW_PROCESS_GROUP.
        // NOTE(review): GenerateConsoleCtrlEvent only reaches processes that
        // share the caller's console; verify this works for DETACHED_PROCESS children.
        unsafe {
            GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, pid_u32);
        }
    }
    // Give the process up to 2 seconds to exit.
    let grace_period = Duration::from_secs(2);
    let poll_interval = Duration::from_millis(100);
    let start = std::time::Instant::now();
    while start.elapsed() < grace_period {
        if !is_alive(pid_u32) {
            still_running = false;
            break;
        }
        tokio::time::sleep(poll_interval).await;
    }
    // Step 2 -- force kill if necessary.
    if still_running {
        #[cfg(unix)]
        {
            let pgid = -(pid_u32 as i32);
            unsafe {
                libc::kill(pgid, libc::SIGKILL);
            }
        }
        #[cfg(windows)]
        {
            use windows_sys::Win32::Foundation::CloseHandle;
            use windows_sys::Win32::Foundation::HANDLE;
            use windows_sys::Win32::System::Threading::OpenProcess;
            use windows_sys::Win32::System::Threading::TerminateProcess;
            use windows_sys::Win32::System::Threading::PROCESS_TERMINATE;
            unsafe {
                // Kill failures are ignored deliberately: a zero handle means
                // the process already exited (or access was denied).
                let handle: HANDLE = OpenProcess(PROCESS_TERMINATE, 0, pid_u32);
                if handle != 0 {
                    TerminateProcess(handle, 1);
                    CloseHandle(handle);
                }
            }
        }
    }
    Ok(())
}
/// Remove the session directory and all its contents.
///
/// A missing directory is not an error -- the call is a no-op in that case.
pub fn purge(id: &str) -> Result<()> {
    let dir = paths_for(id)?.dir;
    if dir.exists() {
        std::fs::remove_dir_all(&dir)?;
    }
    Ok(())
}

View File

@@ -37,6 +37,7 @@ impl App<'_> {
initial_prompt: Option<String>,
show_git_warning: bool,
initial_images: Vec<std::path::PathBuf>,
disable_response_storage: bool,
) -> Self {
let (app_event_tx, app_event_rx) = channel();
let scroll_event_helper = ScrollEventHelper::new(app_event_tx.clone());
@@ -80,6 +81,7 @@ impl App<'_> {
app_event_tx.clone(),
initial_prompt.clone(),
initial_images,
disable_response_storage,
);
let app_state = if show_git_warning {

View File

@@ -49,6 +49,7 @@ impl ChatWidget<'_> {
app_event_tx: Sender<AppEvent>,
initial_prompt: Option<String>,
initial_images: Vec<std::path::PathBuf>,
disable_response_storage: bool,
) -> Self {
let (codex_op_tx, mut codex_op_rx) = unbounded_channel::<Op>();
@@ -61,14 +62,15 @@ impl ChatWidget<'_> {
// Create the Codex asynchronously so the UI loads as quickly as possible.
let config_for_agent_loop = config.clone();
tokio::spawn(async move {
let (codex, session_event, _ctrl_c) = match init_codex(config_for_agent_loop).await {
Ok(vals) => vals,
Err(e) => {
// TODO: surface this error to the user.
tracing::error!("failed to initialize codex: {e}");
return;
}
};
let (codex, session_event, _ctrl_c) =
match init_codex(config_for_agent_loop, disable_response_storage).await {
Ok(vals) => vals,
Err(e) => {
// TODO: surface this error to the user.
tracing::error!("failed to initialize codex: {e}");
return;
}
};
// Forward the captured `SessionInitialized` event that was consumed
// inside `init_codex()` so it can be rendered in the UI.

View File

@@ -1,6 +1,6 @@
use clap::Parser;
use codex_core::ApprovalModeCliArg;
use codex_core::SandboxPermissionOption;
use codex_core::SandboxModeCliArg;
use std::path::PathBuf;
#[derive(Parser, Debug)]
@@ -21,12 +21,11 @@ pub struct Cli {
#[arg(long = "ask-for-approval", short = 'a')]
pub approval_policy: Option<ApprovalModeCliArg>,
/// Convenience alias for low-friction sandboxed automatic execution (-a on-failure, network-disabled sandbox that can write to cwd and TMPDIR)
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
#[clap(flatten)]
pub sandbox: SandboxPermissionOption,
/// Configure the process restrictions when a command is executed.
///
/// Uses OS-specific sandboxing tools; Seatbelt on OSX, landlock+seccomp on Linux.
#[arg(long = "sandbox", short = 's')]
pub sandbox_policy: Option<SandboxModeCliArg>,
/// Allow running Codex outside a Git repository.
#[arg(long = "skip-git-repo-check", default_value_t = false)]
@@ -35,4 +34,12 @@ pub struct Cli {
/// Disable serverside response storage (sends the full conversation context with every request)
#[arg(long = "disable-response-storage", default_value_t = false)]
pub disable_response_storage: bool,
/// Convenience alias for low-friction sandboxed automatic execution (-a on-failure, -s network-and-file-write-restricted)
#[arg(long = "full-auto", default_value_t = true)]
pub full_auto: bool,
/// Convenience alias for supervised sandboxed execution (-a unless-allow-listed, -s network-and-file-write-restricted)
#[arg(long = "suggest", default_value_t = false)]
pub suggest: bool,
}

View File

@@ -48,11 +48,11 @@ impl ConversationHistoryWidget {
self.scroll_down(1);
true
}
KeyCode::PageUp | KeyCode::Char('b') => {
KeyCode::PageUp | KeyCode::Char('b') | KeyCode::Char('u') | KeyCode::Char('U') => {
self.scroll_page_up();
true
}
KeyCode::PageDown | KeyCode::Char(' ') => {
KeyCode::PageDown | KeyCode::Char(' ') | KeyCode::Char('d') | KeyCode::Char('D') => {
self.scroll_page_down();
true
}
@@ -238,7 +238,7 @@ impl WidgetRef for ConversationHistoryWidget {
fn render_ref(&self, area: Rect, buf: &mut Buffer) {
let (title, border_style) = if self.has_input_focus {
(
"Messages (↑/↓ or j/k = line, b/space = page)",
"Messages (↑/↓ or j/k = line, b/u = PgUp, space/d = PgDn)",
Style::default().fg(Color::LightYellow),
)
} else {

View File

@@ -6,8 +6,6 @@
use app::App;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_core::util::is_inside_git_repo;
use log_layer::TuiLogLayer;
use std::fs::OpenOptions;
@@ -35,27 +33,12 @@ pub use cli::Cli;
pub fn run_main(cli: Cli) -> std::io::Result<()> {
assert_env_var_set();
let (sandbox_policy, approval_policy) = if cli.full_auto {
(
Some(SandboxPolicy::new_full_auto_policy()),
Some(AskForApproval::OnFailure),
)
} else {
let sandbox_policy = cli.sandbox.permissions.clone().map(Into::into);
(sandbox_policy, cli.approval_policy.map(Into::into))
};
let config = {
// Load configuration and support CLI overrides.
let overrides = ConfigOverrides {
model: cli.model.clone(),
approval_policy,
sandbox_policy,
disable_response_storage: if cli.disable_response_storage {
Some(true)
} else {
None
},
approval_policy: cli.approval_policy.map(Into::into),
sandbox_policy: cli.sandbox_policy.map(Into::into),
};
#[allow(clippy::print_stderr)]
match Config::load_with_overrides(overrides) {
@@ -151,8 +134,19 @@ fn run_ratatui_app(
let mut terminal = tui::init()?;
terminal.clear()?;
let Cli { prompt, images, .. } = cli;
let mut app = App::new(config.clone(), prompt, show_git_warning, images);
let Cli {
prompt,
images,
disable_response_storage,
..
} = cli;
let mut app = App::new(
config,
prompt,
show_git_warning,
images,
disable_response_storage,
);
// Bridge log receiver into the AppEvent channel so latest log lines update the UI.
{

View File

@@ -1,152 +0,0 @@
#!/usr/bin/env python3
"""
Automate the release procedure documented in `../README.md → Releasing codex`.
Run this script from the repository *root*:
```bash
python release_codex.py
```
It performs the same steps that the README lists manually:
1. Create and switch to a `bump-version-<timestamp>` branch.
2. Bump the timestamp-based version in `codex-cli/package.json` **and**
`codex-cli/src/utils/session.ts`.
3. Commit with a DCO sign-off.
4. Copy the top-level `README.md` into `codex-cli/` (npm consumers see it).
5. Run `pnpm release` (copies README again, builds, publishes to npm).
6. Push the branch so you can open a PR that merges the version bump.
The current directory can live anywhere; all paths are resolved relative to
this file so moving it elsewhere (e.g. into `scripts/`) still works.
"""
from __future__ import annotations
import datetime as _dt
import json as _json
import os
import re
import shutil
import subprocess as _sp
import sys
from pathlib import Path
# ---------------------------------------------------------------------------
# Paths
# ---------------------------------------------------------------------------
# repo-root/
# ├── codex-cli/
# ├── scripts/ <-- you are here
# └── README.md
REPO_ROOT = Path(__file__).resolve().parent.parent
CODEX_CLI = REPO_ROOT / "codex-cli"
PKG_JSON = CODEX_CLI / "package.json"
SESSION_TS = CODEX_CLI / "src" / "utils" / "session.ts"
README_SRC = REPO_ROOT / "README.md"
README_DST = CODEX_CLI / "README.md"
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def sh(cmd: list[str] | str, *, cwd: Path | None = None) -> None:
    """Run *cmd* printing it first and exit on non-zero status.

    A string command runs through the shell; a list runs directly.
    `check=True` raises ``CalledProcessError`` on a non-zero exit.
    """
    shown = cmd if isinstance(cmd, str) else " ".join(cmd)
    print("+", shown)
    _sp.run(cmd, cwd=cwd, shell=isinstance(cmd, str), check=True)
def _new_version() -> str:
"""Return a new timestamp version string such as `0.1.2504301234`."""
return "0.1." + _dt.datetime.utcnow().strftime("%y%m%d%H%M")
def bump_version() -> str:
    """Update ``package.json`` and ``session.ts``, returning the new version.

    Writes the new timestamp version into ``PKG_JSON`` and rewrites the
    ``CLI_VERSION`` constant in ``SESSION_TS``. If the constant cannot be
    found a warning is printed to stderr but the function still returns the
    new version string.
    """
    new_ver = _new_version()
    # ---- package.json
    data = _json.loads(PKG_JSON.read_text())
    old_ver = data.get("version", "<unknown>")
    data["version"] = new_ver
    PKG_JSON.write_text(_json.dumps(data, indent=2) + "\n")
    # ---- session.ts
    # BUG FIX: the previous pattern used "\\." and "\\d" inside a *raw*
    # string, which the regex engine reads as a literal backslash followed by
    # '.' / 'd' -- it could never match the real `CLI_VERSION = "0.1.NNNNNNNNNN"`
    # line, so session.ts was silently left un-bumped.
    pattern = r'CLI_VERSION = "0\.1\.\d{10}"'
    repl = f'CLI_VERSION = "{new_ver}"'
    text = SESSION_TS.read_text()
    if re.search(pattern, text):
        SESSION_TS.write_text(re.sub(pattern, repl, text))
    else:
        print(
            "WARNING: CLI_VERSION constant not found -- file format may have changed",
            file=sys.stderr,
        )
    print(f"Version bump: {old_ver} -> {new_ver}")
    return new_ver
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main() -> None:  # noqa: C901 -- readable top-level flow is desired
    """Run the full release flow: branch, bump, commit, publish, push.

    Side effects: creates a git branch, commits, copies README into
    ``codex-cli/``, runs ``pnpm install`` / ``pnpm release`` (which publishes
    to npm), and pushes the branch to ``origin``. Aborts via ``sys.exit`` if
    any required path is missing; ``sh`` raises on any failed subprocess.
    """
    # Ensure we can locate required files.
    for p in (CODEX_CLI, PKG_JSON, SESSION_TS, README_SRC):
        if not p.exists():
            sys.exit(f"Required path missing: {p.relative_to(REPO_ROOT)}")
    # All subsequent git commands assume the repo root as CWD.
    os.chdir(REPO_ROOT)
    # ------------------------------- create release branch
    branch = "bump-version-" + _dt.datetime.utcnow().strftime("%Y%m%d-%H%M")
    sh(["git", "checkout", "-b", branch])
    # ------------------------------- bump version + commit
    new_ver = bump_version()
    sh(
        [
            "git",
            "add",
            str(PKG_JSON.relative_to(REPO_ROOT)),
            str(SESSION_TS.relative_to(REPO_ROOT)),
        ]
    )
    # -s adds the DCO sign-off required by the contribution guidelines.
    sh(["git", "commit", "-s", "-m", f"chore(release): codex-cli v{new_ver}"])
    # ------------------------------- copy README (shown on npmjs.com)
    shutil.copyfile(README_SRC, README_DST)
    # ------------------------------- build + publish via pnpm script
    sh(["pnpm", "install"], cwd=CODEX_CLI)
    sh(["pnpm", "release"], cwd=CODEX_CLI)
    # ------------------------------- push branch
    sh(["git", "push", "-u", "origin", branch])
    print("\n✅ Release script finished!")
    print(f"   • npm publish run by pnpm script (branch: {branch})")
    print("   • Open a PR to merge the version bump once CI passes.")
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: exit with a short message instead of a traceback.
        sys.exit("\nCancelled by user")