Compare commits


1 Commit

Author         SHA1         Message                            Date
aibrahim-oai   39401b49cf   Add /compact command to Rust CLI   2025-07-09 13:37:15 -07:00
61 changed files with 785 additions and 1357 deletions

View File

@@ -10,7 +10,7 @@
"devDependencies": {
"@types/bun": "^1.2.11",
"@types/node": "^22.15.21",
"prettier": "^3.6.2",
"prettier": "^3.5.3",
"typescript": "^5.8.3",
},
},
@@ -60,7 +60,7 @@
"once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
"prettier": ["prettier@3.6.2", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ=="],
"prettier": ["prettier@3.5.3", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw=="],
"tunnel": ["tunnel@0.0.6", "", {}, "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg=="],

View File

@@ -15,7 +15,7 @@
"devDependencies": {
"@types/bun": "^1.2.11",
"@types/node": "^22.15.21",
"prettier": "^3.6.2",
"prettier": "^3.5.3",
"typescript": "^5.8.3"
}
}

View File

@@ -1,26 +0,0 @@
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference#package-ecosystem-
version: 2
updates:
- package-ecosystem: bun
directory: .github/actions/codex
schedule:
interval: weekly
- package-ecosystem: cargo
directories:
- codex-rs
- codex-rs/*
schedule:
interval: weekly
- package-ecosystem: devcontainers
directory: /
schedule:
interval: weekly
- package-ecosystem: docker
directory: codex-cli
schedule:
interval: weekly
- package-ecosystem: github-actions
directory: /
schedule:
interval: weekly

View File

@@ -70,7 +70,7 @@ jobs:
- name: Install dependencies
run: pnpm install
- uses: dtolnay/rust-toolchain@1.88
- uses: dtolnay/rust-toolchain@1.87
with:
targets: x86_64-unknown-linux-gnu
components: clippy

View File

@@ -26,7 +26,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
- uses: dtolnay/rust-toolchain@1.87
with:
components: rustfmt
- name: cargo fmt
@@ -64,7 +64,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
- uses: dtolnay/rust-toolchain@1.87
with:
targets: ${{ matrix.target }}
components: clippy

View File

@@ -73,7 +73,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
- uses: dtolnay/rust-toolchain@1.87
with:
targets: ${{ matrix.target }}

View File

@@ -1,4 +1,4 @@
FROM node:24-slim
FROM node:20-slim
ARG TZ
ENV TZ="$TZ"

View File

@@ -41,7 +41,6 @@ if (wantsNative) {
let targetTriple = null;
switch (platform) {
case "linux":
case "android":
switch (arch) {
case "x64":
targetTriple = "x86_64-unknown-linux-musl";

codex-rs/Cargo.lock (generated): 1168 lines changed. File diff suppressed because it is too large.

View File

@@ -12,10 +12,11 @@ workspace = true
[dependencies]
anyhow = "1"
serde_json = "1.0.110"
similar = "2.7.0"
thiserror = "2.0.12"
tree-sitter = "0.25.3"
tree-sitter-bash = "0.25.0"
tree-sitter-bash = "0.23.3"
[dev-dependencies]
pretty_assertions = "1.4.1"

View File

@@ -633,7 +633,7 @@ mod tests {
/// Helper to construct a patch with the given body.
fn wrap_patch(body: &str) -> String {
format!("*** Begin Patch\n{body}\n*** End Patch")
format!("*** Begin Patch\n{}\n*** End Patch", body)
}
fn strs_to_strings(strs: &[&str]) -> Vec<String> {
@@ -661,7 +661,7 @@ mod tests {
}]
);
}
result => panic!("expected MaybeApplyPatch::Body got {result:?}"),
result => panic!("expected MaybeApplyPatch::Body got {:?}", result),
}
}
@@ -688,7 +688,7 @@ PATCH"#,
}]
);
}
result => panic!("expected MaybeApplyPatch::Body got {result:?}"),
result => panic!("expected MaybeApplyPatch::Body got {:?}", result),
}
}

View File

@@ -1,21 +0,0 @@
[package]
name = "codex-chatgpt"
version = { workspace = true }
edition = "2024"
[lints]
workspace = true
[dependencies]
anyhow = "1"
clap = { version = "4", features = ["derive"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
codex-common = { path = "../common", features = ["cli"] }
codex-core = { path = "../core" }
codex-login = { path = "../login" }
reqwest = { version = "0.12", features = ["json", "stream"] }
tokio = { version = "1", features = ["full"] }
[dev-dependencies]
tempfile = "3"

View File

@@ -1,5 +0,0 @@
# ChatGPT
This crate pertains to first party ChatGPT APIs and products such as Codex agent.
This crate should be primarily built and maintained by OpenAI employees. Please reach out to a maintainer before making an external contribution.

View File

@@ -1,89 +0,0 @@
use clap::Parser;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
use crate::get_task::GetTaskResponse;
use crate::get_task::OutputItem;
use crate::get_task::PrOutputItem;
use crate::get_task::get_task;
/// Applies the latest diff from a Codex agent task.
#[derive(Debug, Parser)]
pub struct ApplyCommand {
pub task_id: String,
#[clap(flatten)]
pub config_overrides: CliConfigOverrides,
}
pub async fn run_apply_command(apply_cli: ApplyCommand) -> anyhow::Result<()> {
let config = Config::load_with_cli_overrides(
apply_cli
.config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?,
ConfigOverrides::default(),
)?;
init_chatgpt_token_from_auth(&config.codex_home).await?;
let task_response = get_task(&config, apply_cli.task_id).await?;
apply_diff_from_task(task_response).await
}
pub async fn apply_diff_from_task(task_response: GetTaskResponse) -> anyhow::Result<()> {
let diff_turn = match task_response.current_diff_task_turn {
Some(turn) => turn,
None => anyhow::bail!("No diff turn found"),
};
let output_diff = diff_turn.output_items.iter().find_map(|item| match item {
OutputItem::Pr(PrOutputItem { output_diff }) => Some(output_diff),
_ => None,
});
match output_diff {
Some(output_diff) => apply_diff(&output_diff.diff).await,
None => anyhow::bail!("No PR output item found"),
}
}
async fn apply_diff(diff: &str) -> anyhow::Result<()> {
let toplevel_output = tokio::process::Command::new("git")
.args(vec!["rev-parse", "--show-toplevel"])
.output()
.await?;
if !toplevel_output.status.success() {
anyhow::bail!("apply must be run from a git repository.");
}
let repo_root = String::from_utf8(toplevel_output.stdout)?
.trim()
.to_string();
let mut git_apply_cmd = tokio::process::Command::new("git")
.args(vec!["apply", "--3way"])
.current_dir(&repo_root)
.stdin(std::process::Stdio::piped())
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.spawn()?;
if let Some(mut stdin) = git_apply_cmd.stdin.take() {
tokio::io::AsyncWriteExt::write_all(&mut stdin, diff.as_bytes()).await?;
drop(stdin);
}
let output = git_apply_cmd.wait_with_output().await?;
if !output.status.success() {
anyhow::bail!(
"Git apply failed with status {}: {}",
output.status,
String::from_utf8_lossy(&output.stderr)
);
}
println!("Successfully applied diff");
Ok(())
}

View File

@@ -1,45 +0,0 @@
use codex_core::config::Config;
use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
use anyhow::Context;
use serde::de::DeserializeOwned;
/// Make a GET request to the ChatGPT backend API.
pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
config: &Config,
path: String,
) -> anyhow::Result<T> {
let chatgpt_base_url = &config.chatgpt_base_url;
init_chatgpt_token_from_auth(&config.codex_home).await?;
// Make direct HTTP request to ChatGPT backend API with the token
let client = reqwest::Client::new();
let url = format!("{chatgpt_base_url}{path}");
let token =
get_chatgpt_token_data().ok_or_else(|| anyhow::anyhow!("ChatGPT token not available"))?;
let response = client
.get(&url)
.bearer_auth(&token.access_token)
.header("chatgpt-account-id", &token.account_id)
.header("Content-Type", "application/json")
.header("User-Agent", "codex-cli")
.send()
.await
.context("Failed to send request")?;
if response.status().is_success() {
let result: T = response
.json()
.await
.context("Failed to parse JSON response")?;
Ok(result)
} else {
let status = response.status();
let body = response.text().await.unwrap_or_default();
anyhow::bail!("Request failed with status {}: {}", status, body)
}
}

View File

@@ -1,24 +0,0 @@
use std::path::Path;
use std::sync::LazyLock;
use std::sync::RwLock;
use codex_login::TokenData;
static CHATGPT_TOKEN: LazyLock<RwLock<Option<TokenData>>> = LazyLock::new(|| RwLock::new(None));
pub fn get_chatgpt_token_data() -> Option<TokenData> {
CHATGPT_TOKEN.read().ok()?.clone()
}
pub fn set_chatgpt_token_data(value: TokenData) {
if let Ok(mut guard) = CHATGPT_TOKEN.write() {
*guard = Some(value);
}
}
/// Initialize the ChatGPT token from auth.json file
pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
let auth_json = codex_login::try_read_auth_json(codex_home).await?;
set_chatgpt_token_data(auth_json.tokens.clone());
Ok(())
}
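
The deleted module above caches the token process-wide behind a `LazyLock<RwLock<Option<TokenData>>>`. A minimal self-contained sketch of that pattern, with illustrative names rather than the crate's own:

```rust
use std::sync::LazyLock;
use std::sync::RwLock;

// Lazily initialized on first access; shared across threads, None until set.
static TOKEN: LazyLock<RwLock<Option<String>>> = LazyLock::new(|| RwLock::new(None));

fn get_token() -> Option<String> {
    // A poisoned lock degrades to None rather than panicking.
    TOKEN.read().ok()?.clone()
}

fn set_token(value: String) {
    if let Ok(mut guard) = TOKEN.write() {
        *guard = Some(value);
    }
}
```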

View File

@@ -1,40 +0,0 @@
use codex_core::config::Config;
use serde::Deserialize;
use crate::chatgpt_client::chatgpt_get_request;
#[derive(Debug, Deserialize)]
pub struct GetTaskResponse {
pub current_diff_task_turn: Option<AssistantTurn>,
}
// Only relevant fields for our extraction
#[derive(Debug, Deserialize)]
pub struct AssistantTurn {
pub output_items: Vec<OutputItem>,
}
#[derive(Debug, Deserialize)]
#[serde(tag = "type")]
pub enum OutputItem {
#[serde(rename = "pr")]
Pr(PrOutputItem),
#[serde(other)]
Other,
}
#[derive(Debug, Deserialize)]
pub struct PrOutputItem {
pub output_diff: OutputDiff,
}
#[derive(Debug, Deserialize)]
pub struct OutputDiff {
pub diff: String,
}
pub(crate) async fn get_task(config: &Config, task_id: String) -> anyhow::Result<GetTaskResponse> {
let path = format!("/wham/tasks/{task_id}");
chatgpt_get_request(config, path).await
}
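
A sketch of how the internally tagged enum above behaves on task payloads; the JSON here is a trimmed, hypothetical snippet, not the fixture shipped with the tests:

```rust
fn decode_items() -> serde_json::Result<()> {
    let items: Vec<OutputItem> = serde_json::from_str(
        r#"[
            {"type": "pr", "output_diff": {"diff": "diff --git a/f b/f\n..."}},
            {"type": "message", "role": "assistant"}
        ]"#,
    )?;
    // "pr" hits the renamed Pr variant; any unknown tag such as "message"
    // falls through to Other thanks to #[serde(other)].
    assert!(matches!(items[0], OutputItem::Pr(_)));
    assert!(matches!(items[1], OutputItem::Other));
    Ok(())
}
```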

View File

@@ -1,4 +0,0 @@
pub mod apply_command;
mod chatgpt_client;
mod chatgpt_token;
pub mod get_task;

View File

@@ -1,191 +0,0 @@
#![expect(clippy::expect_used)]
use codex_chatgpt::apply_command::apply_diff_from_task;
use codex_chatgpt::get_task::GetTaskResponse;
use std::path::Path;
use tempfile::TempDir;
use tokio::process::Command;
/// Creates a temporary git repository with initial commit
async fn create_temp_git_repo() -> anyhow::Result<TempDir> {
let temp_dir = TempDir::new()?;
let repo_path = temp_dir.path();
let output = Command::new("git")
.args(["init"])
.current_dir(repo_path)
.output()
.await?;
if !output.status.success() {
anyhow::bail!(
"Failed to initialize git repo: {}",
String::from_utf8_lossy(&output.stderr)
);
}
Command::new("git")
.args(["config", "user.email", "test@example.com"])
.current_dir(repo_path)
.output()
.await?;
Command::new("git")
.args(["config", "user.name", "Test User"])
.current_dir(repo_path)
.output()
.await?;
std::fs::write(repo_path.join("README.md"), "# Test Repo\n")?;
Command::new("git")
.args(["add", "README.md"])
.current_dir(repo_path)
.output()
.await?;
let output = Command::new("git")
.args(["commit", "-m", "Initial commit"])
.current_dir(repo_path)
.output()
.await?;
if !output.status.success() {
anyhow::bail!(
"Failed to create initial commit: {}",
String::from_utf8_lossy(&output.stderr)
);
}
Ok(temp_dir)
}
async fn mock_get_task_with_fixture() -> anyhow::Result<GetTaskResponse> {
let fixture_path = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/task_turn_fixture.json");
let fixture_content = std::fs::read_to_string(fixture_path)?;
let response: GetTaskResponse = serde_json::from_str(&fixture_content)?;
Ok(response)
}
#[tokio::test]
async fn test_apply_command_creates_fibonacci_file() {
let temp_repo = create_temp_git_repo()
.await
.expect("Failed to create temp git repo");
let repo_path = temp_repo.path();
let task_response = mock_get_task_with_fixture()
.await
.expect("Failed to load fixture");
let original_dir = std::env::current_dir().expect("Failed to get current dir");
std::env::set_current_dir(repo_path).expect("Failed to change directory");
struct DirGuard(std::path::PathBuf);
impl Drop for DirGuard {
fn drop(&mut self) {
let _ = std::env::set_current_dir(&self.0);
}
}
let _guard = DirGuard(original_dir);
apply_diff_from_task(task_response)
.await
.expect("Failed to apply diff from task");
// Assert that fibonacci.js was created in scripts/ directory
let fibonacci_path = repo_path.join("scripts/fibonacci.js");
assert!(fibonacci_path.exists(), "fibonacci.js was not created");
// Verify the file contents match expected
let contents = std::fs::read_to_string(&fibonacci_path).expect("Failed to read fibonacci.js");
assert!(
contents.contains("function fibonacci(n)"),
"fibonacci.js doesn't contain expected function"
);
assert!(
contents.contains("#!/usr/bin/env node"),
"fibonacci.js doesn't have shebang"
);
assert!(
contents.contains("module.exports = fibonacci;"),
"fibonacci.js doesn't export function"
);
// Verify file has correct number of lines (31 as specified in fixture)
let line_count = contents.lines().count();
assert_eq!(
line_count, 31,
"fibonacci.js should have 31 lines, got {line_count}",
);
}
#[tokio::test]
async fn test_apply_command_with_merge_conflicts() {
let temp_repo = create_temp_git_repo()
.await
.expect("Failed to create temp git repo");
let repo_path = temp_repo.path();
// Create conflicting fibonacci.js file first
let scripts_dir = repo_path.join("scripts");
std::fs::create_dir_all(&scripts_dir).expect("Failed to create scripts directory");
let conflicting_content = r#"#!/usr/bin/env node
// This is a different fibonacci implementation
function fib(num) {
if (num <= 1) return num;
return fib(num - 1) + fib(num - 2);
}
console.log("Running fibonacci...");
console.log(fib(10));
"#;
let fibonacci_path = scripts_dir.join("fibonacci.js");
std::fs::write(&fibonacci_path, conflicting_content).expect("Failed to write conflicting file");
Command::new("git")
.args(["add", "scripts/fibonacci.js"])
.current_dir(repo_path)
.output()
.await
.expect("Failed to add fibonacci.js");
Command::new("git")
.args(["commit", "-m", "Add conflicting fibonacci implementation"])
.current_dir(repo_path)
.output()
.await
.expect("Failed to commit conflicting file");
let original_dir = std::env::current_dir().expect("Failed to get current dir");
std::env::set_current_dir(repo_path).expect("Failed to change directory");
struct DirGuard(std::path::PathBuf);
impl Drop for DirGuard {
fn drop(&mut self) {
let _ = std::env::set_current_dir(&self.0);
}
}
let _guard = DirGuard(original_dir);
let task_response = mock_get_task_with_fixture()
.await
.expect("Failed to load fixture");
let apply_result = apply_diff_from_task(task_response).await;
assert!(
apply_result.is_err(),
"Expected apply to fail due to merge conflicts"
);
let contents = std::fs::read_to_string(&fibonacci_path).expect("Failed to read fibonacci.js");
assert!(
contents.contains("<<<<<<< HEAD")
|| contents.contains("=======")
|| contents.contains(">>>>>>> "),
"fibonacci.js should contain merge conflict markers, got: {contents}",
);
}

View File

@@ -1,65 +0,0 @@
{
"current_diff_task_turn": {
"output_items": [
{
"type": "pr",
"pr_title": "Add fibonacci script",
"pr_message": "## Summary\n- add a basic Fibonacci script under `scripts/`\n\n## Testing\n- `node scripts/fibonacci.js 10`\n- `npm run lint` *(fails: next not found)*",
"output_diff": {
"type": "output_diff",
"repo_id": "/workspace/rddit-vercel",
"base_commit_sha": "1a2e9baf2ce2fdd0c126b47b1bcfd512de2a9f7b",
"diff": "diff --git a/scripts/fibonacci.js b/scripts/fibonacci.js\nnew file mode 100644\nindex 0000000000000000000000000000000000000000..6c9fdfdbf8669b7968936411050525b995d0a9a6\n--- /dev/null\n+++ b/scripts/fibonacci.js\n@@ -0,0 +1,31 @@\n+#!/usr/bin/env node\n+\n+function fibonacci(n) {\n+ if (n < 0) {\n+ throw new Error(\"n must be non-negative\");\n+ }\n+ let a = 0;\n+ let b = 1;\n+ for (let i = 0; i < n; i++) {\n+ const next = a + b;\n+ a = b;\n+ b = next;\n+ }\n+ return a;\n+}\n+\n+function printUsage() {\n+ console.log(\"Usage: node scripts/fibonacci.js <n>\");\n+}\n+\n+if (require.main === module) {\n+ const arg = process.argv[2];\n+ if (arg === undefined || isNaN(Number(arg))) {\n+ printUsage();\n+ process.exit(1);\n+ }\n+ const n = Number(arg);\n+ console.log(fibonacci(n));\n+}\n+\n+module.exports = fibonacci;\n",
"external_storage_diff": {
"file_id": "file_00000000114c61f786900f8c2130ace7",
"ttl": null
},
"files_modified": 1,
"lines_added": 31,
"lines_removed": 0,
"commit_message": "Add fibonacci script"
}
},
{
"type": "message",
"role": "assistant",
"content": [
{
"content_type": "text",
"text": "**Summary**\n\n- Created a command-line Fibonacci script that validates input and prints the result when executed with Node"
},
{
"content_type": "repo_file_citation",
"path": "scripts/fibonacci.js",
"line_range_start": 1,
"line_range_end": 31
},
{
"content_type": "text",
"text": "\n\n**Testing**\n\n- ❌ `npm run lint` (failed to run `next lint`)"
},
{
"content_type": "terminal_chunk_citation",
"terminal_chunk_id": "7dd543",
"line_range_start": 1,
"line_range_end": 5
},
{
"content_type": "text",
"text": "\n- ✅ `node scripts/fibonacci.js 10` produced “55”"
},
{
"content_type": "terminal_chunk_citation",
"terminal_chunk_id": "6ee559",
"line_range_start": 1,
"line_range_end": 3
},
{
"content_type": "text",
"text": "\n\nCodex couldn't run certain commands due to environment limitations. Consider configuring a setup script or internet access in your Codex environment to install dependencies."
}
]
}
]
}
}

View File

@@ -18,7 +18,6 @@ workspace = true
anyhow = "1"
clap = { version = "4", features = ["derive"] }
clap_complete = "4"
codex-chatgpt = { path = "../chatgpt" }
codex-core = { path = "../core" }
codex-common = { path = "../common", features = ["cli"] }
codex-exec = { path = "../exec" }

View File

@@ -2,8 +2,6 @@ use clap::CommandFactory;
use clap::Parser;
use clap_complete::Shell;
use clap_complete::generate;
use codex_chatgpt::apply_command::ApplyCommand;
use codex_chatgpt::apply_command::run_apply_command;
use codex_cli::LandlockCommand;
use codex_cli::SeatbeltCommand;
use codex_cli::login::run_login_with_chatgpt;
@@ -57,10 +55,6 @@ enum Subcommand {
/// Internal debugging commands.
Debug(DebugArgs),
/// Apply the latest diff produced by Codex agent as a `git apply` to your local working tree.
#[clap(visible_alias = "a")]
Apply(ApplyCommand),
}
#[derive(Debug, Parser)]
@@ -143,10 +137,6 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
.await?;
}
},
Some(Subcommand::Apply(mut apply_cli)) => {
prepend_config_flags(&mut apply_cli.config_overrides, cli.config_overrides);
run_apply_command(apply_cli).await?;
}
}
Ok(())
@@ -165,6 +155,6 @@ fn prepend_config_flags(
fn print_completion(cmd: CompletionCommand) {
let mut app = MultitoolCli::command();
let name = "codex";
let name = app.get_name().to_string();
generate(cmd.shell, &mut app, name, &mut std::io::stdout());
}

View File

@@ -9,7 +9,7 @@ workspace = true
[dependencies]
clap = { version = "4", features = ["derive", "wrap_help"], optional = true }
codex-core = { path = "../core" }
toml = { version = "0.9", optional = true }
toml = { version = "0.8", optional = true }
serde = { version = "1", optional = true }
[features]

View File

@@ -20,7 +20,7 @@ pub fn format_duration(duration: Duration) -> String {
fn format_elapsed_millis(millis: i64) -> String {
if millis < 1000 {
format!("{millis}ms")
format!("{}ms", millis)
} else if millis < 60_000 {
format!("{:.2}s", millis as f64 / 1000.0)
} else {

View File

@@ -206,14 +206,6 @@ To disable reasoning summaries, set `model_reasoning_summary` to `"none"` in you
model_reasoning_summary = "none" # disable reasoning summaries
```
## model_supports_reasoning_summaries
By default, `reasoning` is only set on requests to OpenAI models that are known to support it. To force `reasoning` to be set on requests to the current model, set the following in `config.toml`:
```toml
model_supports_reasoning_summaries = true
```
## sandbox_mode
Codex executes model-generated shell commands inside an OS-level sandbox.

View File

@@ -13,21 +13,26 @@ workspace = true
[dependencies]
anyhow = "1"
async-channel = "2.3.1"
base64 = "0.22"
base64 = "0.21"
bytes = "1.10.1"
codex-apply-patch = { path = "../apply-patch" }
codex-login = { path = "../login" }
codex-mcp-client = { path = "../mcp-client" }
dirs = "6"
env-flags = "0.1.1"
eventsource-stream = "0.2.3"
fs2 = "0.4.3"
fs-err = "3.1.0"
futures = "0.3"
mcp-types = { path = "../mcp-types" }
mime_guess = "2.0"
patch = "0.7"
path-absolutize = "3.1.1"
rand = "0.9"
reqwest = { version = "0.12", features = ["json", "stream"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
strum = "0.27.1"
strum_macros = "0.27.1"
thiserror = "2.0.12"
time = { version = "0.3", features = ["formatting", "local-offset", "macros"] }
@@ -39,10 +44,10 @@ tokio = { version = "1", features = [
"signal",
] }
tokio-util = "0.7.14"
toml = "0.9.1"
toml = "0.8.20"
tracing = { version = "0.1.41", features = ["log"] }
tree-sitter = "0.25.3"
tree-sitter-bash = "0.25.0"
tree-sitter-bash = "0.23.3"
uuid = { version = "1", features = ["serde", "v4"] }
wildmatch = "2.4.0"

View File

@@ -462,66 +462,3 @@ pub(crate) trait AggregateStreamExt: Stream<Item = Result<ResponseEvent>> + Size
}
impl<T> AggregateStreamExt for T where T: Stream<Item = Result<ResponseEvent>> + Sized {}
#[cfg(test)]
mod tests {
use super::*;
use bytes::Bytes;
use futures::stream;
use tokio::sync::mpsc;
#[tokio::test]
async fn merges_function_call_chunks_and_completes() {
let chunks = vec![
"data: {\"choices\":[{\"delta\":{\"content\":\"Hello \"}}]}\n\n",
"data: {\"choices\":[{\"delta\":{\"content\":\"world\"}}]}\n\n",
"data: {\"choices\":[{\"delta\":{\"tool_calls\":[{\"index\":0,\"id\":\"call1\",\"type\":\"function\",\"function\":{\"name\":\"foo\"}}]}}]}\n\n",
"data: {\"choices\":[{\"delta\":{\"tool_calls\":[{\"index\":0,\"function\":{\"arguments\":\"{\\\"a\\\": \"}}]}}]}\n\n",
"data: {\"choices\":[{\"delta\":{\"tool_calls\":[{\"index\":0,\"function\":{\"arguments\":\"1}\"}}]},\"finish_reason\":\"tool_calls\"}]}\n\n",
];
let byte_stream = stream::iter(chunks.into_iter().map(|s| Ok(Bytes::from(s))));
let (tx, mut rx) = mpsc::channel::<Result<ResponseEvent>>(8);
tokio::spawn(process_chat_sse(byte_stream, tx));
let mut events = Vec::new();
while let Some(ev) = rx.recv().await {
match ev {
Ok(event) => events.push(event),
Err(e) => panic!("stream error: {e}"),
}
}
assert_eq!(events.len(), 4);
let mut text = String::new();
for (i, event) in events.iter().take(2).enumerate() {
match event {
ResponseEvent::OutputItemDone(ResponseItem::Message { role, content })
if role == "assistant" =>
{
if let Some(ContentItem::OutputText { text: t }) = content.first() {
text.push_str(t);
}
}
other => panic!("unexpected event {i}: {other:?}"),
}
}
assert_eq!(text, "Hello world");
match &events[2] {
ResponseEvent::OutputItemDone(ResponseItem::FunctionCall {
name,
arguments,
call_id,
}) => {
assert_eq!(name, "foo");
assert_eq!(call_id, "call1");
assert_eq!(arguments, "{\"a\": 1}");
}
other => panic!("unexpected third event: {other:?}"),
}
assert!(matches!(events[3], ResponseEvent::Completed { .. }));
}
}

View File

@@ -23,7 +23,6 @@ use crate::client_common::ResponseEvent;
use crate::client_common::ResponseStream;
use crate::client_common::ResponsesApiRequest;
use crate::client_common::create_reasoning_param_for_request;
use crate::config::Config;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::CodexErr;
@@ -37,11 +36,10 @@ use crate::models::ResponseItem;
use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::TokenUsage;
use crate::util::backoff;
use std::sync::Arc;
#[derive(Clone)]
pub struct ModelClient {
config: Arc<Config>,
model: String,
client: reqwest::Client,
provider: ModelProviderInfo,
effort: ReasoningEffortConfig,
@@ -50,13 +48,13 @@ pub struct ModelClient {
impl ModelClient {
pub fn new(
config: Arc<Config>,
model: impl ToString,
provider: ModelProviderInfo,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
) -> Self {
Self {
config,
model: model.to_string(),
client: reqwest::Client::new(),
provider,
effort,
@@ -72,13 +70,9 @@ impl ModelClient {
WireApi::Responses => self.stream_responses(prompt).await,
WireApi::Chat => {
// Create the raw streaming connection first.
let response_stream = stream_chat_completions(
prompt,
&self.config.model,
&self.client,
&self.provider,
)
.await?;
let response_stream =
stream_chat_completions(prompt, &self.model, &self.client, &self.provider)
.await?;
// Wrap it with the aggregation adapter so callers see *only*
// the final assistant message per turn (matching the
@@ -112,11 +106,11 @@ impl ModelClient {
return stream_from_fixture(path).await;
}
let full_instructions = prompt.get_full_instructions(&self.config.model);
let tools_json = create_tools_json_for_responses_api(prompt, &self.config.model)?;
let reasoning = create_reasoning_param_for_request(&self.config, self.effort, self.summary);
let full_instructions = prompt.get_full_instructions(&self.model);
let tools_json = create_tools_json_for_responses_api(prompt, &self.model)?;
let reasoning = create_reasoning_param_for_request(&self.model, self.effort, self.summary);
let payload = ResponsesApiRequest {
model: &self.config.model,
model: &self.model,
instructions: &full_instructions,
input: &prompt.input,
tools: &tools_json,

View File

@@ -37,7 +37,7 @@ pub struct Prompt {
}
impl Prompt {
pub(crate) fn get_full_instructions(&self, model: &str) -> Cow<'_, str> {
pub(crate) fn get_full_instructions(&self, model: &str) -> Cow<str> {
let mut sections: Vec<&str> = vec![BASE_INSTRUCTIONS];
if let Some(ref user) = self.user_instructions {
sections.push(user);
@@ -131,16 +131,15 @@ pub(crate) struct ResponsesApiRequest<'a> {
pub(crate) stream: bool,
}
use crate::config::Config;
pub(crate) fn create_reasoning_param_for_request(
config: &Config,
model: &str,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
if model_supports_reasoning_summaries(config) {
let effort: Option<OpenAiReasoningEffort> = effort.into();
let effort = effort?;
let effort: Option<OpenAiReasoningEffort> = effort.into();
let effort = effort?;
if model_supports_reasoning_summaries(model) {
Some(Reasoning {
effort,
summary: summary.into(),
@@ -150,24 +149,19 @@ pub(crate) fn create_reasoning_param_for_request(
}
}
pub fn model_supports_reasoning_summaries(config: &Config) -> bool {
// Currently, we hardcode this rule to decide whether to enable reasoning.
pub fn model_supports_reasoning_summaries(model: &str) -> bool {
// Currently, we hardcode this rule to decide whether enable reasoning.
// We expect reasoning to apply only to OpenAI models, but we do not want
// users to have to mess with their config to disable reasoning for models
// that do not support it, such as `gpt-4.1`.
//
// Though if a user is using Codex with non-OpenAI models that, say, happen
// to start with "o", then they can set `model_reasoning_effort = "none"` in
// to start with "o", then they can set `model_reasoning_effort = "none` in
// config.toml to disable reasoning.
//
// Converseley, if a user has a non-OpenAI provider that supports reasoning,
// they can set the top-level `model_supports_reasoning_summaries = true`
// config option to enable reasoning.
if config.model_supports_reasoning_summaries {
return true;
}
let model = &config.model;
// Ultimately, this should also be configurable in config.toml, but we
// need to have defaults that "just work." Perhaps we could have a
// "reasoning models pattern" as part of ModelProviderInfo?
model.starts_with("o") || model.starts_with("codex")
}
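
With the `&str`-based variant in this hunk, the heuristic keys off the model-name prefix alone; for example (model names below are illustrative):

```rust
#[test]
fn reasoning_prefix_heuristic() {
    // Per the prefix rule above: "o*" and "codex*" models enable reasoning.
    assert!(model_supports_reasoning_summaries("o3"));
    assert!(model_supports_reasoning_summaries("codex-mini"));
    // Non-matching names such as gpt-4.1 do not.
    assert!(!model_supports_reasoning_summaries("gpt-4.1"));
}
```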

View File

@@ -586,7 +586,7 @@ async fn submission_loop(
}
let client = ModelClient::new(
config.clone(),
model.clone(),
provider.clone(),
model_reasoning_effort,
model_reasoning_summary,
@@ -1297,7 +1297,7 @@ async fn handle_function_call(
ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: format!("unsupported call: {name}"),
content: format!("unsupported call: {}", name),
success: None,
},
}
@@ -1489,7 +1489,8 @@ async fn handle_sandbox_error(
call_id,
output: FunctionCallOutputPayload {
content: format!(
"failed in sandbox {sandbox_type:?} with execution error: {error}"
"failed in sandbox {:?} with execution error: {error}",
sandbox_type
),
success: Some(false),
},

View File

@@ -130,13 +130,6 @@ pub struct Config {
/// If not "none", the value to use for `reasoning.summary` when making a
/// request using the Responses API.
pub model_reasoning_summary: ReasoningSummary,
/// When set to `true`, overrides the default heuristic and forces
/// `model_supports_reasoning_summaries()` to return `true`.
pub model_supports_reasoning_summaries: bool,
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: String,
}
impl Config {
@@ -315,12 +308,6 @@ pub struct ConfigToml {
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
/// Override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: Option<String>,
}
impl ConfigToml {
@@ -485,15 +472,6 @@ impl Config {
.model_reasoning_summary
.or(cfg.model_reasoning_summary)
.unwrap_or_default(),
model_supports_reasoning_summaries: cfg
.model_supports_reasoning_summaries
.unwrap_or(false),
chatgpt_base_url: config_profile
.chatgpt_base_url
.or(cfg.chatgpt_base_url)
.unwrap_or("https://chatgpt.com/backend-api/".to_string()),
};
Ok(config)
}
@@ -798,8 +776,6 @@ disable_response_storage = true
hide_agent_reasoning: false,
model_reasoning_effort: ReasoningEffort::High,
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: false,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
},
o3_profile_config
);
@@ -844,8 +820,6 @@ disable_response_storage = true
hide_agent_reasoning: false,
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: false,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
};
assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -905,8 +879,6 @@ disable_response_storage = true
hide_agent_reasoning: false,
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: false,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
};
assert_eq!(expected_zdr_profile_config, zdr_profile_config);
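
The removed fields follow the same precedence chain used throughout config loading: profile value, then top-level `config.toml` value, then a built-in default. A standalone sketch of that chain (the function name is illustrative):

```rust
fn resolve_chatgpt_base_url(profile: Option<String>, toml: Option<String>) -> String {
    // Profile overrides config.toml, which overrides the hard-coded default.
    profile
        .or(toml)
        .unwrap_or_else(|| "https://chatgpt.com/backend-api/".to_string())
}
```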

View File

@@ -16,5 +16,4 @@ pub struct ConfigProfile {
pub disable_response_storage: Option<bool>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub chatgpt_base_url: Option<String>,
}

View File

@@ -240,7 +240,8 @@ mod tests {
] {
assert!(
!is_safe_to_call_with_exec(&args),
"expected {args:?} to be unsafe"
"expected {:?} to be unsafe",
args
);
}
}

View File

@@ -145,7 +145,7 @@ impl From<Vec<InputItem>> for ResponseInputItem {
.unwrap_or_else(|| "application/octet-stream".to_string());
let encoded = base64::engine::general_purpose::STANDARD.encode(bytes);
Some(ContentItem::InputImage {
image_url: format!("data:{mime};base64,{encoded}"),
image_url: format!("data:{};base64,{}", mime, encoded),
})
}
Err(err) => {

View File

@@ -46,7 +46,8 @@ impl Match for HasPrevId {
fn sse_completed(id: &str) -> String {
format!(
"event: response.completed\n\
data: {{\"type\":\"response.completed\",\"response\":{{\"id\":\"{id}\",\"output\":[]}}}}\n\n\n"
data: {{\"type\":\"response.completed\",\"response\":{{\"id\":\"{}\",\"output\":[]}}}}\n\n\n",
id
)
}

View File

@@ -29,7 +29,8 @@ fn sse_incomplete() -> String {
fn sse_completed(id: &str) -> String {
format!(
"event: response.completed\n\
data: {{\"type\":\"response.completed\",\"response\":{{\"id\":\"{id}\",\"output\":[]}}}}\n\n\n"
data: {{\"type\":\"response.completed\",\"response\":{{\"id\":\"{}\",\"output\":[]}}}}\n\n\n",
id
)
}

View File

@@ -25,6 +25,7 @@ codex-common = { path = "../common", features = [
"sandbox_summary",
] }
codex-linux-sandbox = { path = "../linux-sandbox" }
mcp-types = { path = "../mcp-types" }
owo-colors = "4.2.0"
serde_json = "1"
shlex = "1.3.0"

View File

@@ -139,7 +139,7 @@ impl EventProcessor {
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
];
if config.model_provider.wire_api == WireApi::Responses
&& model_supports_reasoning_summaries(config)
&& model_supports_reasoning_summaries(&config.model)
{
entries.push((
"reasoning effort",
@@ -415,7 +415,7 @@ impl EventProcessor {
{
(
format!(" in {}", format_elapsed(start_time)),
format!("apply_patch(auto_approved={auto_approved})"),
format!("apply_patch(auto_approved={})", auto_approved),
)
} else {
(String::new(), format!("apply_patch('{call_id}')"))

View File

@@ -19,7 +19,7 @@ anyhow = "1"
starlark = "0.13.0"
allocative = "0.3.3"
clap = { version = "4", features = ["derive"] }
derive_more = { version = "2", features = ["display"] }
derive_more = { version = "1", features = ["display"] }
env_logger = "0.11.5"
log = "0.4"
multimap = "0.10.0"
@@ -28,6 +28,4 @@ regex-lite = "0.1"
serde = { version = "1.0.194", features = ["derive"] }
serde_json = "1.0.110"
serde_with = { version = "3", features = ["macros"] }
[dev-dependencies]
tempfile = "3.13.0"

View File

@@ -21,7 +21,7 @@ impl Display for ExecCall {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.program)?;
for arg in &self.args {
write!(f, " {arg}")?;
write!(f, " {}", arg)?;
}
Ok(())
}

View File

@@ -89,7 +89,7 @@ fn main() -> Result<()> {
let (output, exit_code) = check_command(&policy, exec, args.require_safe);
let json = serde_json::to_string(&output)?;
println!("{json}");
println!("{}", json);
std::process::exit(exit_code);
}

View File

@@ -63,7 +63,7 @@ impl Policy {
arg: arg.clone(),
exec_call: exec_call.clone(),
},
reason: format!("arg `{arg}` contains forbidden substring"),
reason: format!("arg `{}` contains forbidden substring", arg),
});
}
}

View File

@@ -101,7 +101,7 @@ impl PolicyBuilder {
}
fn add_program_spec(&self, program_spec: ProgramSpec) {
info!("adding program spec: {program_spec:?}");
info!("adding program spec: {:?}", program_spec);
let name = program_spec.program.clone();
let mut programs = self.programs.borrow_mut();
programs.insert(name.clone(), program_spec);

View File

@@ -156,7 +156,7 @@ pub fn run(
let mut override_builder = OverrideBuilder::new(search_directory);
for exclude in exclude {
// The `!` prefix is used to indicate an exclude pattern.
let exclude_pattern = format!("!{exclude}");
let exclude_pattern = format!("!{}", exclude);
override_builder.add(&exclude_pattern)?;
}
let override_matcher = override_builder.build()?;

View File

@@ -43,12 +43,12 @@ impl Reporter for StdioReporter {
match indices_iter.peek() {
Some(next) if **next == i as u32 => {
// ANSI escape code for bold: \x1b[1m ... \x1b[0m
print!("\x1b[1m{c}\x1b[0m");
print!("\x1b[1m{}\x1b[0m", c);
// advance the iterator since we've consumed this index
indices_iter.next();
}
_ => {
print!("{c}");
print!("{}", c);
}
}
}

View File

@@ -81,7 +81,7 @@ async fn test_root_write() {
let tmpfile = NamedTempFile::new().unwrap();
let tmpfile_path = tmpfile.path().to_string_lossy();
run_cmd(
&["bash", "-lc", &format!("echo blah > {tmpfile_path}")],
&["bash", "-lc", &format!("echo blah > {}", tmpfile_path)],
&[],
SHORT_TIMEOUT_MS,
)
@@ -158,7 +158,7 @@ async fn assert_network_blocked(cmd: &[&str]) {
(exit_code, stdout, stderr)
}
_ => {
panic!("expected sandbox denied error, got: {result:?}");
panic!("expected sandbox denied error, got: {:?}", result);
}
};
@@ -171,7 +171,10 @@ async fn assert_network_blocked(cmd: &[&str]) {
// If—*and only if*—the command exits 0 we consider the sandbox breached.
if exit_code == 0 {
panic!("Network sandbox FAILED - {cmd:?} exited 0\nstdout:\n{stdout}\nstderr:\n{stderr}",);
panic!(
"Network sandbox FAILED - {:?} exited 0\nstdout:\n{}\nstderr:\n{}",
cmd, stdout, stderr
);
}
}

View File

@@ -59,13 +59,6 @@ pub async fn login_with_chatgpt(
/// Attempt to read the `OPENAI_API_KEY` from the `auth.json` file in the given
/// `CODEX_HOME` directory, refreshing it, if necessary.
pub async fn try_read_openai_api_key(codex_home: &Path) -> std::io::Result<String> {
let auth_dot_json = try_read_auth_json(codex_home).await?;
Ok(auth_dot_json.openai_api_key)
}
/// Attempt to read and refresh the `auth.json` file in the given `CODEX_HOME` directory.
/// Returns the full AuthDotJson structure after refreshing if necessary.
pub async fn try_read_auth_json(codex_home: &Path) -> std::io::Result<AuthDotJson> {
let auth_path = codex_home.join("auth.json");
let mut file = std::fs::File::open(&auth_path)?;
let mut contents = String::new();
@@ -95,9 +88,9 @@ pub async fn try_read_auth_json(codex_home: &Path) -> std::io::Result<AuthDotJso
file.flush()?;
}
Ok(auth_dot_json)
Ok(auth_dot_json.openai_api_key)
} else {
Ok(auth_dot_json)
Ok(auth_dot_json.openai_api_key)
}
}
@@ -153,24 +146,23 @@ struct RefreshResponse {
/// Expected structure for $CODEX_HOME/auth.json.
#[derive(Deserialize, Serialize)]
pub struct AuthDotJson {
struct AuthDotJson {
#[serde(rename = "OPENAI_API_KEY")]
pub openai_api_key: String,
openai_api_key: String,
pub tokens: TokenData,
tokens: TokenData,
pub last_refresh: DateTime<Utc>,
last_refresh: DateTime<Utc>,
}
#[derive(Deserialize, Serialize, Clone)]
pub struct TokenData {
#[derive(Deserialize, Serialize)]
struct TokenData {
/// This is a JWT.
pub id_token: String,
id_token: String,
/// This is a JWT.
pub access_token: String,
#[allow(dead_code)]
access_token: String,
pub refresh_token: String,
pub account_id: String,
refresh_token: String,
}

View File

@@ -51,7 +51,6 @@ class TokenData:
id_token: str
access_token: str
refresh_token: str
account_id: str
@dataclass
@@ -241,26 +240,20 @@ class _ApiKeyHTTPHandler(http.server.BaseHTTPRequestHandler):
)
) as resp:
payload = json.loads(resp.read().decode())
# Extract chatgpt_account_id from id_token
id_token_parts = payload["id_token"].split(".")
if len(id_token_parts) != 3:
raise ValueError("Invalid ID token")
id_token_claims = _decode_jwt_segment(id_token_parts[1])
auth_claims = id_token_claims.get("https://api.openai.com/auth", {})
chatgpt_account_id = auth_claims.get("chatgpt_account_id", "")
token_data = TokenData(
id_token=payload["id_token"],
access_token=payload["access_token"],
refresh_token=payload["refresh_token"],
account_id=chatgpt_account_id,
)
id_token_parts = token_data.id_token.split(".")
if len(id_token_parts) != 3:
raise ValueError("Invalid ID token")
access_token_parts = token_data.access_token.split(".")
if len(access_token_parts) != 3:
raise ValueError("Invalid access token")
id_token_claims = _decode_jwt_segment(id_token_parts[1])
access_token_claims = _decode_jwt_segment(access_token_parts[1])
token_claims = id_token_claims.get("https://api.openai.com/auth", {})
@@ -382,7 +375,6 @@ def _write_auth_file(*, auth: AuthBundle, codex_home: str) -> bool:
"id_token": auth.token_data.id_token,
"access_token": auth.token_data.access_token,
"refresh_token": auth.token_data.refresh_token,
"account_id": auth.token_data.account_id,
},
"last_refresh": auth.last_refresh,
}

View File

@@ -21,3 +21,6 @@ tokio = { version = "1", features = [
"sync",
"time",
] }
[dev-dependencies]
pretty_assertions = "1.4.1"

View File

@@ -22,7 +22,7 @@ mcp-types = { path = "../mcp-types" }
schemars = "0.8.22"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
toml = "0.9"
toml = "0.8"
tracing = { version = "0.1.41", features = ["log"] }
tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] }
tokio = { version = "1", features = [

View File

@@ -54,11 +54,13 @@ tokio = { version = "1", features = [
tracing = { version = "0.1.41", features = ["log"] }
tracing-appender = "0.2.3"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tui-input = "0.14.0"
tui-input = "0.11.1"
tui-markdown = "0.3.3"
tui-textarea = "0.7.0"
unicode-segmentation = "1.12.0"
uuid = "1"
reqwest = { version = "0.12", features = ["json"] }
serde = { version = "1", features = ["derive"] }
[dev-dependencies]
pretty_assertions = "1"

View File

@@ -277,6 +277,11 @@ impl<'a> App<'a> {
widget.add_diff_output(text);
}
}
SlashCommand::Compact => {
if let AppState::Chat { widget } = &mut self.app_state {
widget.compact().await;
}
}
},
AppEvent::StartFileSearch(query) => {
self.file_search.on_user_query(query);

View File

@@ -90,10 +90,13 @@ impl ChatComposer<'_> {
// percentage.
100
};
// When https://github.com/openai/codex/issues/1257 is resolved,
// check if `percent_remaining < 25`, and if so, recommend
// /compact.
format!("{BASE_PLACEHOLDER_TEXT}{percent_remaining}% context left")
if percent_remaining > 25 {
format!("{BASE_PLACEHOLDER_TEXT}{percent_remaining}% context left")
} else {
format!(
"{BASE_PLACEHOLDER_TEXT}{percent_remaining}% context left (consider /compact)"
)
}
}
(total_tokens, None) => {
format!("{BASE_PLACEHOLDER_TEXT}{total_tokens} tokens used")
@@ -675,7 +678,8 @@ mod tests {
let result = ChatComposer::current_at_token(&textarea);
assert_eq!(
result, expected,
"Failed for case: {description} - input: '{input}', cursor: {cursor_pos}"
"Failed for case: {} - input: '{}', cursor: {}",
description, input, cursor_pos
);
}
}

View File

@@ -39,6 +39,7 @@ use crate::conversation_history_widget::ConversationHistoryWidget;
use crate::history_cell::PatchEventType;
use crate::user_approval_widget::ApprovalRequest;
use codex_file_search::FileMatch;
use crate::compact::{generate_compact_summary, Role, TranscriptEntry};
pub(crate) struct ChatWidget<'a> {
app_event_tx: AppEventSender,
@@ -49,6 +50,7 @@ pub(crate) struct ChatWidget<'a> {
config: Config,
initial_user_message: Option<UserMessage>,
token_usage: TokenUsage,
transcript: Vec<TranscriptEntry>,
}
#[derive(Clone, Copy, Eq, PartialEq)]
@@ -135,6 +137,7 @@ impl ChatWidget<'_> {
initial_images,
),
token_usage: TokenUsage::default(),
transcript: Vec::new(),
}
}
@@ -208,6 +211,7 @@ impl ChatWidget<'_> {
// Only show text portion in conversation history for now.
if !text.is_empty() {
self.conversation_history.add_user_message(text);
self.transcript.push(TranscriptEntry { role: Role::User, text });
}
self.conversation_history.scroll_to_bottom();
}
@@ -236,6 +240,7 @@ impl ChatWidget<'_> {
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
self.conversation_history
.add_agent_message(&self.config, message);
self.transcript.push(TranscriptEntry { role: Role::Assistant, text: message });
self.request_redraw();
}
EventMsg::AgentReasoning(AgentReasoningEvent { text }) => {
@@ -410,6 +415,19 @@ impl ChatWidget<'_> {
self.bottom_pane.on_file_search_result(query, matches);
}
pub(crate) async fn compact(&mut self) {
let Ok(summary) = generate_compact_summary(&self.transcript, &self.config.model, &self.config).await else {
self.conversation_history.add_error("Failed to compact context".to_string());
self.request_redraw();
return;
};
self.conversation_history = ConversationHistoryWidget::new();
self.conversation_history.add_agent_message(&self.config, summary.clone());
self.transcript = vec![TranscriptEntry { role: Role::Assistant, text: summary }];
self.request_redraw();
}
/// Handle Ctrl-C key press.
/// Returns true if the key press was handled, false if it was not.
/// If the key press was not handled, the caller should handle it (likely by exiting the process).

View File

@@ -0,0 +1,90 @@
use anyhow::{anyhow, Result};
use serde::Serialize;
use codex_core::config::Config;
use codex_core::openai_api_key::get_openai_api_key;
#[derive(Clone)]
pub enum Role {
User,
Assistant,
}
#[derive(Clone)]
pub struct TranscriptEntry {
pub role: Role,
pub text: String,
}
impl TranscriptEntry {
fn role_str(&self) -> &'static str {
match self.role {
Role::User => "user",
Role::Assistant => "assistant",
}
}
}
#[derive(Serialize)]
struct Message<'a> {
role: &'a str,
content: String,
}
#[derive(Serialize)]
struct Payload<'a> {
model: &'a str,
messages: Vec<Message<'a>>,
}
/// Generate a concise summary of the provided transcript using the OpenAI chat
/// completions API.
pub async fn generate_compact_summary(
transcript: &[TranscriptEntry],
model: &str,
config: &Config,
) -> Result<String> {
let conversation_text = transcript
.iter()
.map(|e| format!("{}: {}", e.role_str(), e.text))
.collect::<Vec<_>>()
.join("\n");
let messages = vec![
Message {
role: "assistant",
content: "You are an expert coding assistant. Your goal is to generate a concise, structured summary of the conversation below that captures all essential information needed to continue development after context replacement. Include tasks performed, code areas modified or reviewed, key decisions or assumptions, test results or errors, and outstanding tasks or next steps.".to_string(),
},
Message {
role: "user",
content: format!(
"Here is the conversation so far:\n{conversation_text}\n\nPlease summarize this conversation, covering:\n1. Tasks performed and outcomes\n2. Code files, modules, or functions modified or examined\n3. Important decisions or assumptions made\n4. Errors encountered and test or build results\n5. Remaining tasks, open questions, or next steps\nProvide the summary in a clear, concise format."
),
},
];
let api_key = get_openai_api_key().ok_or_else(|| anyhow!("OpenAI API key not set"))?;
let client = reqwest::Client::new();
let base = config.model_provider.base_url.trim_end_matches('/');
let url = format!("{}/chat/completions", base);
let payload = Payload { model, messages };
let res = client
.post(url)
.bearer_auth(api_key)
.json(&payload)
.send()
.await?;
let body: serde_json::Value = res.json().await?;
if let Some(summary) = body
.get("choices")
.and_then(|c| c.get(0))
.and_then(|c| c.get("message"))
.and_then(|m| m.get("content"))
.and_then(|v| v.as_str())
{
Ok(summary.to_string())
} else {
Ok("Unable to generate summary.".to_string())
}
}
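
For orientation, a hedged sketch of how `ChatWidget::compact` above drives this helper; the transcript contents are invented, and `config` is assumed to be an already-loaded `codex_core::config::Config`:

```rust
async fn compact_example(config: &codex_core::config::Config) -> anyhow::Result<String> {
    // Invented transcript entries; the real widget accumulates these from
    // user and agent messages as they arrive.
    let transcript = vec![
        TranscriptEntry { role: Role::User, text: "Add a /compact command".to_string() },
        TranscriptEntry { role: Role::Assistant, text: "Implemented in compact.rs".to_string() },
    ];
    generate_compact_summary(&transcript, &config.model, config).await
}
```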

View File

@@ -56,7 +56,7 @@ pub(crate) fn get_git_diff() -> io::Result<(bool, String)> {
}
}
Ok((true, format!("{tracked_diff}{untracked_diff}")))
Ok((true, format!("{}{}", tracked_diff, untracked_diff)))
}
/// Helper that executes `git` with the given `args` and returns `stdout` as a

View File

@@ -140,7 +140,7 @@ impl HistoryCell {
Line::from(vec![
"OpenAI ".into(),
"Codex".bold(),
format!(" v{VERSION}").into(),
format!(" v{}", VERSION).into(),
" (research preview)".dim(),
]),
Line::from(""),
@@ -159,7 +159,7 @@ impl HistoryCell {
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
];
if config.model_provider.wire_api == WireApi::Responses
&& model_supports_reasoning_summaries(config)
&& model_supports_reasoning_summaries(&config.model)
{
entries.push((
"reasoning effort",
@@ -185,7 +185,7 @@ impl HistoryCell {
let lines = vec![
Line::from("model changed:".magenta().bold()),
Line::from(format!("requested: {}", config.model)),
Line::from(format!("used: {model}")),
Line::from(format!("used: {}", model)),
Line::from(""),
];
HistoryCell::SessionInfo {
@@ -276,7 +276,7 @@ impl HistoryCell {
}
let remaining = lines_iter.count();
if remaining > 0 {
lines.push(Line::from(format!("... {remaining} additional lines")).dim());
lines.push(Line::from(format!("... {} additional lines", remaining)).dim());
}
lines.push(Line::from(""));

View File

@@ -26,6 +26,7 @@ mod bottom_pane;
mod cell_widget;
mod chatwidget;
mod citation_regex;
mod compact;
mod cli;
mod conversation_history_widget;
mod exec_command;
@@ -216,7 +217,8 @@ fn run_ratatui_app(
fn restore() {
if let Err(err) = tui::restore() {
eprintln!(
"failed to restore terminal. Run `reset` or restart your terminal to recover: {err}"
"failed to restore terminal. Run `reset` or restart your terminal to recover: {}",
err
);
}
}

View File

@@ -53,7 +53,7 @@ where
impl Visit for Visitor<'_> {
fn record_debug(&mut self, _field: &Field, value: &dyn std::fmt::Debug) {
let _ = write!(self.buf, " {value:?}");
let _ = write!(self.buf, " {:?}", value);
}
}

View File

@@ -14,6 +14,7 @@ pub enum SlashCommand {
// more frequently used commands should be listed first.
New,
Diff,
Compact,
Quit,
ToggleMouseMode,
}
@@ -30,6 +31,7 @@ impl SlashCommand {
SlashCommand::Diff => {
"Show git diff of the working directory (including untracked files)"
}
SlashCommand::Compact => "Condense context into a summary.",
}
}

View File

@@ -85,7 +85,7 @@ pub(crate) fn truncate_text(text: &str, max_graphemes: usize) -> String {
let mut truncate_graphemes = text.grapheme_indices(true);
if let Some((truncate_byte_index, _)) = truncate_graphemes.nth(max_graphemes - 3) {
let truncated = &text[..truncate_byte_index];
format!("{truncated}...")
format!("{}...", truncated)
} else {
text.to_string()
}