mirror of https://github.com/openai/codex.git
synced 2026-02-02 15:03:38 +00:00

Compare commits: otel-traci... → patch-squa... (1 commit)

Commit: 8aa5e7770c

1 .github/workflows/codespell.yml (vendored)
@@ -25,4 +25,3 @@ jobs:
uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2
with:
ignore_words_file: .codespellignore
skip: frame*.txt
2 .github/workflows/rust-ci.yml (vendored)
@@ -62,8 +62,6 @@ jobs:
components: rustfmt
- name: cargo fmt
run: cargo fmt -- --config imports_granularity=Item --check
- name: Verify codegen for mcp-types
run: ./mcp-types/check_lib_rs.py

cargo_shear:
name: cargo shear
67 .github/workflows/rust-release.yml (vendored)
@@ -167,12 +167,6 @@ jobs:
needs: build
name: release
runs-on: ubuntu-latest
permissions:
contents: write
actions: read
outputs:
version: ${{ steps.release_name.outputs.name }}
tag: ${{ github.ref_name }}

steps:
- name: Checkout repository
@@ -225,64 +219,3 @@ jobs:
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json

# Publish to npm using OIDC authentication.
# July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
# npm docs: https://docs.npmjs.com/trusted-publishers
publish-npm:
# Skip this step for pre-releases (alpha/beta).
if: ${{ !contains(needs.release.outputs.version, '-') }}
name: publish-npm
needs: release
runs-on: ubuntu-latest
permissions:
id-token: write # Required for OIDC
contents: read

steps:
- name: Setup Node.js
uses: actions/setup-node@v5
with:
node-version: 22
registry-url: "https://registry.npmjs.org"
scope: "@openai"

# Trusted publishing requires npm CLI version 11.5.1 or later.
- name: Update npm
run: npm install -g npm@latest

- name: Download npm tarball from release
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euo pipefail
version="${{ needs.release.outputs.version }}"
tag="${{ needs.release.outputs.tag }}"
mkdir -p dist/npm
gh release download "$tag" \
--repo "${GITHUB_REPOSITORY}" \
--pattern "codex-npm-${version}.tgz" \
--dir dist/npm

# No NODE_AUTH_TOKEN needed because we use OIDC.
- name: Publish to npm
run: npm publish "${GITHUB_WORKSPACE}/dist/npm/codex-npm-${{ needs.release.outputs.version }}.tgz"

update-branch:
name: Update latest-alpha-cli branch
permissions:
contents: write
needs: release
runs-on: ubuntu-latest

steps:
- name: Update latest-alpha-cli branch
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euo pipefail
gh api \
repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
-X PATCH \
-f sha="${GITHUB_SHA}" \
-F force=true
@@ -4,7 +4,6 @@ In the codex-rs folder where the rust code lives:

- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`.
- When using `format!`, if you can inline variables into `{}`, always do that (see the sketch after this list).
- Install any commands the repo relies on (for example `just`, `rg`, or `cargo-insta`) if they aren't already available before running instructions here.
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
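A minimal sketch of the format-inlining rule above (illustrative, not part of the diff; the variable name is made up). The `uninlined_format_args` clippy lint that appears in the Cargo.toml hunk further down enforces it:

fn main() {
    let crate_name = "codex-core";
    // Rejected by clippy::uninlined_format_args: println!("crate: {}", crate_name);
    // Preferred: inline the variable directly into the braces.
    println!("crate: {crate_name}");
}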
@@ -2,10 +2,7 @@

<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p>

<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
</br>
</br>If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a>
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p>
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, see <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>

<p align="center">
<img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
@@ -15,8 +15,7 @@
  ],
  "repository": {
    "type": "git",
    "url": "git+https://github.com/openai/codex.git",
    "directory": "codex-cli"
    "url": "git+https://github.com/openai/codex.git"
  },
  "dependencies": {
    "@vscode/ripgrep": "^1.15.14"
630 codex-rs/Cargo.lock (generated)
File diff suppressed because it is too large.
@@ -17,7 +17,6 @@ members = [
"ollama",
"protocol",
"protocol-ts",
"otel",
"tui",
]
resolver = "2"
@@ -35,7 +34,6 @@ rust = {}

[workspace.lints.clippy]
expect_used = "deny"
redundant_clone = "deny"
uninlined_format_args = "deny"
unwrap_used = "deny"
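For context, a minimal sketch of what the `unwrap_used`/`expect_used` lints above forbid and the compliant alternative (the helper function is an assumption, not from this diff):

use std::path::Path;

fn read_config(path: &Path) -> anyhow::Result<String> {
    // let text = std::fs::read_to_string(path).unwrap(); // denied by clippy::unwrap_used
    let text = std::fs::read_to_string(path)?; // compliant: propagate the error
    Ok(text)
}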
@@ -40,11 +40,6 @@ pub enum ApplyPatchError {
    /// Error that occurs while computing replacements when applying patch chunks
    #[error("{0}")]
    ComputeReplacements(String),
    /// A raw patch body was provided without an explicit `apply_patch` invocation.
    #[error(
        "patch detected without explicit call to apply_patch. Rerun as [\"apply_patch\", \"<patch>\"]"
    )]
    ImplicitInvocation,
}

impl From<std::io::Error> for ApplyPatchError {
@@ -98,12 +93,10 @@ pub struct ApplyPatchArgs {

pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
    match argv {
        // Direct invocation: apply_patch <patch>
        [cmd, body] if APPLY_PATCH_COMMANDS.contains(&cmd.as_str()) => match parse_patch(body) {
            Ok(source) => MaybeApplyPatch::Body(source),
            Err(e) => MaybeApplyPatch::PatchParseError(e),
        },
        // Bash heredoc form: (optional `cd <path> &&`) apply_patch <<'EOF' ...
        [bash, flag, script] if bash == "bash" && flag == "-lc" => {
            match extract_apply_patch_from_bash(script) {
                Ok((body, workdir)) => match parse_patch(&body) {
@@ -214,26 +207,6 @@ impl ApplyPatchAction {
/// cwd must be an absolute path so that we can resolve relative paths in the
/// patch.
pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApplyPatchVerified {
    // Detect a raw patch body passed directly as the command or as the body of a bash -lc
    // script. In these cases, report an explicit error rather than applying the patch.
    match argv {
        [body] => {
            if parse_patch(body).is_ok() {
                return MaybeApplyPatchVerified::CorrectnessError(
                    ApplyPatchError::ImplicitInvocation,
                );
            }
        }
        [bash, flag, script] if bash == "bash" && flag == "-lc" => {
            if parse_patch(script).is_ok() {
                return MaybeApplyPatchVerified::CorrectnessError(
                    ApplyPatchError::ImplicitInvocation,
                );
            }
        }
        _ => {}
    }

    match maybe_parse_apply_patch(argv) {
        MaybeApplyPatch::Body(ApplyPatchArgs {
            patch,
@@ -760,8 +733,6 @@ fn compute_replacements(
        }
    }

    replacements.sort_by(|(lhs_idx, _, _), (rhs_idx, _, _)| lhs_idx.cmp(rhs_idx));

    Ok(replacements)
}
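A hypothetical caller-side sketch of the verified parse (the `Body` and `CorrectnessError` variants come from the hunks above; the handling shown here, and the assumption that the remaining variants can be ignored, are illustrative only):

use std::path::Path;

fn handle(argv: &[String], cwd: &Path) {
    match maybe_parse_apply_patch_verified(argv, cwd) {
        // Explicit ["apply_patch", "<patch>"] invocation parsed successfully.
        MaybeApplyPatchVerified::Body(_action) => println!("apply the parsed patch here"),
        // E.g. ApplyPatchError::ImplicitInvocation for a raw patch body.
        MaybeApplyPatchVerified::CorrectnessError(err) => eprintln!("{err}"),
        // Remaining variants (not an apply_patch call, shell parse failures, ...).
        _ => {}
    }
}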
@@ -902,28 +873,6 @@ mod tests {
    ));
}

#[test]
fn test_implicit_patch_single_arg_is_error() {
    let patch = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch".to_string();
    let args = vec![patch];
    let dir = tempdir().unwrap();
    assert!(matches!(
        maybe_parse_apply_patch_verified(&args, dir.path()),
        MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation)
    ));
}

#[test]
fn test_implicit_patch_bash_script_is_error() {
    let script = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch";
    let args = args_bash(script);
    let dir = tempdir().unwrap();
    assert!(matches!(
        maybe_parse_apply_patch_verified(&args, dir.path()),
        MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation)
    ));
}

#[test]
fn test_literal() {
    let args = strs_to_strings(&[
@@ -1267,33 +1216,6 @@ PATCH"#,
    assert_eq!(contents, "a\nB\nc\nd\nE\nf\ng\n");
}

#[test]
fn test_pure_addition_chunk_followed_by_removal() {
    let dir = tempdir().unwrap();
    let path = dir.path().join("panic.txt");
    fs::write(&path, "line1\nline2\nline3\n").unwrap();
    let patch = wrap_patch(&format!(
        r#"*** Update File: {}
@@
+after-context
+second-line
@@
line1
-line2
-line3
+line2-replacement"#,
        path.display()
    ));
    let mut stdout = Vec::new();
    let mut stderr = Vec::new();
    apply_patch(&patch, &mut stdout, &mut stderr).unwrap();
    let contents = fs::read_to_string(path).unwrap();
    assert_eq!(
        contents,
        "line1\nline2-replacement\nafter-context\nsecond-line\n"
    );
}

/// Ensure that patches authored with ASCII characters can update lines that
/// contain typographic Unicode punctuation (e.g. EN DASH, NON-BREAKING
/// HYPHEN). Historically `git apply` succeeds in such scenarios but our
@@ -617,7 +617,7 @@ fn test_parse_patch_lenient() {
    assert_eq!(
        parse_patch_text(&patch_text_in_double_quoted_heredoc, ParseMode::Lenient),
        Ok(ApplyPatchArgs {
            hunks: expected_patch,
            hunks: expected_patch.clone(),
            patch: patch_text.to_string(),
            workdir: None,
        })
@@ -637,7 +637,7 @@ fn test_parse_patch_lenient() {
        "<<EOF\n*** Begin Patch\n*** Update File: file2.py\nEOF\n".to_string();
    assert_eq!(
        parse_patch_text(&patch_text_with_missing_closing_heredoc, ParseMode::Strict),
        Err(expected_error)
        Err(expected_error.clone())
    );
    assert_eq!(
        parse_patch_text(&patch_text_with_missing_closing_heredoc, ParseMode::Lenient),
@@ -11,6 +11,7 @@ anyhow = "1"
clap = { version = "4", features = ["derive"] }
codex-common = { path = "../common", features = ["cli"] }
codex-core = { path = "../core" }
codex-protocol = { path = "../protocol" }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1", features = ["full"] }
@@ -1,4 +1,5 @@
use codex_core::CodexAuth;
use codex_protocol::mcp_protocol::AuthMode;
use std::path::Path;
use std::sync::LazyLock;
use std::sync::RwLock;
@@ -19,7 +20,7 @@ pub fn set_chatgpt_token_data(value: TokenData) {

/// Initialize the ChatGPT token from auth.json file
pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
    let auth = CodexAuth::from_codex_home(codex_home)?;
    let auth = CodexAuth::from_codex_home(codex_home, AuthMode::ChatGPT)?;
    if let Some(auth) = auth {
        let token_data = auth.get_token_data().await?;
        set_chatgpt_token_data(token_data);
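A hedged sketch of calling the initializer after this change (the CODEX_HOME path is illustrative; as the hunk shows, the AuthMode argument is now supplied inside the helper rather than by the caller):

use std::path::Path;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // from_codex_home is now invoked with AuthMode::ChatGPT within this helper.
    init_chatgpt_token_from_auth(Path::new("/home/user/.codex")).await
}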
@@ -36,11 +36,5 @@ tokio = { version = "1", features = [
"signal",
] }
tracing = "0.1.41"
tracing-subscriber = "0.3.20"
tracing-subscriber = "0.3.19"
codex-protocol-ts = { path = "../protocol-ts" }

[dev-dependencies]
assert_cmd = "2"
predicates = "3"
pretty_assertions = "1"
tempfile = "3"
@@ -64,6 +64,7 @@ async fn run_command_under_sandbox(
    sandbox_type: SandboxType,
) -> anyhow::Result<()> {
    let sandbox_mode = create_sandbox_mode(full_auto);
    let cwd = std::env::current_dir()?;
    let config = Config::load_with_cli_overrides(
        config_overrides
            .parse_overrides()
@@ -74,29 +75,13 @@ async fn run_command_under_sandbox(
            ..Default::default()
        },
    )?;

    // In practice, this should be `std::env::current_dir()` because this CLI
    // does not support `--cwd`, but let's use the config value for consistency.
    let cwd = config.cwd.clone();
    // For now, we always use the same cwd for both the command and the
    // sandbox policy. In the future, we could add a CLI option to set them
    // separately.
    let sandbox_policy_cwd = cwd.clone();

    let stdio_policy = StdioPolicy::Inherit;
    let env = create_env(&config.shell_environment_policy);

    let mut child = match sandbox_type {
        SandboxType::Seatbelt => {
            spawn_command_under_seatbelt(
                command,
                cwd,
                &config.sandbox_policy,
                sandbox_policy_cwd.as_path(),
                stdio_policy,
                env,
            )
            .await?
            spawn_command_under_seatbelt(command, &config.sandbox_policy, cwd, stdio_policy, env)
                .await?
        }
        SandboxType::Landlock => {
            #[expect(clippy::expect_used)]
@@ -106,9 +91,8 @@ async fn run_command_under_sandbox(
            spawn_command_under_linux_sandbox(
                codex_linux_sandbox_exe,
                command,
                cwd,
                &config.sandbox_policy,
                sandbox_policy_cwd.as_path(),
                cwd,
                stdio_policy,
                env,
            )
@@ -1,6 +1,7 @@
use codex_common::CliConfigOverrides;
use codex_core::CodexAuth;
use codex_core::auth::CLIENT_ID;
use codex_core::auth::OPENAI_API_KEY_ENV_VAR;
use codex_core::auth::login_with_api_key;
use codex_core::auth::logout;
use codex_core::config::Config;
@@ -8,6 +9,7 @@ use codex_core::config::ConfigOverrides;
use codex_login::ServerOptions;
use codex_login::run_login_server;
use codex_protocol::mcp_protocol::AuthMode;
use std::env;
use std::path::PathBuf;

pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
@@ -58,11 +60,19 @@ pub async fn run_login_with_api_key(
pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
    let config = load_config_or_exit(cli_config_overrides);

    match CodexAuth::from_codex_home(&config.codex_home) {
    match CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method) {
        Ok(Some(auth)) => match auth.mode {
            AuthMode::ApiKey => match auth.get_token().await {
                Ok(api_key) => {
                    eprintln!("Logged in using an API key - {}", safe_format_key(&api_key));

                    if let Ok(env_api_key) = env::var(OPENAI_API_KEY_ENV_VAR)
                        && env_api_key == api_key
                    {
                        eprintln!(
                            " API loaded from OPENAI_API_KEY environment variable or .env file"
                        );
                    }
                    std::process::exit(0);
                }
                Err(e) => {
@@ -17,9 +17,6 @@ use codex_exec::Cli as ExecCli;
use codex_tui::Cli as TuiCli;
use std::path::PathBuf;

mod mcp_cmd;

use crate::mcp_cmd::McpCli;
use crate::proto::ProtoCli;

/// Codex CLI
@@ -59,8 +56,8 @@ enum Subcommand {
    /// Remove stored authentication credentials.
    Logout(LogoutCommand),

    /// [experimental] Run Codex as an MCP server and manage MCP servers.
    Mcp(McpCli),
    /// Experimental: run Codex as an MCP server.
    Mcp,

    /// Run the Protocol stream via stdin/stdout
    #[clap(visible_alias = "p")]
@@ -76,9 +73,6 @@ enum Subcommand {
    #[clap(visible_alias = "a")]
    Apply(ApplyCommand),

    /// Resume a previous interactive session (picker by default; use --last to continue the most recent).
    Resume(ResumeCommand),

    /// Internal: generate TypeScript protocol bindings.
    #[clap(hide = true)]
    GenerateTs(GenerateTsCommand),
@@ -91,21 +85,6 @@ struct CompletionCommand {
    shell: Shell,
}

#[derive(Debug, Parser)]
struct ResumeCommand {
    /// Conversation/session id (UUID). When provided, resumes this session.
    /// If omitted, use --last to pick the most recent recorded session.
    #[arg(value_name = "SESSION_ID")]
    session_id: Option<String>,

    /// Continue the most recent session without showing the picker.
    #[arg(long = "last", default_value_t = false, conflicts_with = "session_id")]
    last: bool,

    #[clap(flatten)]
    config_overrides: TuiCli,
}

#[derive(Debug, Parser)]
struct DebugArgs {
    #[command(subcommand)]
@@ -164,57 +143,26 @@ fn main() -> anyhow::Result<()> {
}

async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> {
    let MultitoolCli {
        config_overrides: root_config_overrides,
        mut interactive,
        subcommand,
    } = MultitoolCli::parse();
    let cli = MultitoolCli::parse();

    match subcommand {
    match cli.subcommand {
        None => {
            prepend_config_flags(
                &mut interactive.config_overrides,
                root_config_overrides.clone(),
            );
            let usage = codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
            if !usage.token_usage.is_zero() {
                println!(
                    "{}",
                    codex_core::protocol::FinalOutput::from(usage.token_usage)
                );
            let mut tui_cli = cli.interactive;
            prepend_config_flags(&mut tui_cli.config_overrides, cli.config_overrides);
            let usage = codex_tui::run_main(tui_cli, codex_linux_sandbox_exe).await?;
            if !usage.is_zero() {
                println!("{}", codex_core::protocol::FinalOutput::from(usage));
            }
        }
        Some(Subcommand::Exec(mut exec_cli)) => {
            prepend_config_flags(
                &mut exec_cli.config_overrides,
                root_config_overrides.clone(),
            );
            prepend_config_flags(&mut exec_cli.config_overrides, cli.config_overrides);
            codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?;
        }
        Some(Subcommand::Mcp(mut mcp_cli)) => {
            // Propagate any root-level config overrides (e.g. `-c key=value`).
            prepend_config_flags(&mut mcp_cli.config_overrides, root_config_overrides.clone());
            mcp_cli.run(codex_linux_sandbox_exe).await?;
        }
        Some(Subcommand::Resume(ResumeCommand {
            session_id,
            last,
            config_overrides,
        })) => {
            interactive = finalize_resume_interactive(
                interactive,
                root_config_overrides.clone(),
                session_id,
                last,
                config_overrides,
            );
            codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
        Some(Subcommand::Mcp) => {
            codex_mcp_server::run_main(codex_linux_sandbox_exe, cli.config_overrides).await?;
        }
        Some(Subcommand::Login(mut login_cli)) => {
            prepend_config_flags(
                &mut login_cli.config_overrides,
                root_config_overrides.clone(),
            );
            prepend_config_flags(&mut login_cli.config_overrides, cli.config_overrides);
            match login_cli.action {
                Some(LoginSubcommand::Status) => {
                    run_login_status(login_cli.config_overrides).await;
@@ -229,17 +177,11 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()>
                }
            }
        Some(Subcommand::Logout(mut logout_cli)) => {
            prepend_config_flags(
                &mut logout_cli.config_overrides,
                root_config_overrides.clone(),
            );
            prepend_config_flags(&mut logout_cli.config_overrides, cli.config_overrides);
            run_logout(logout_cli.config_overrides).await;
        }
        Some(Subcommand::Proto(mut proto_cli)) => {
            prepend_config_flags(
                &mut proto_cli.config_overrides,
                root_config_overrides.clone(),
            );
            prepend_config_flags(&mut proto_cli.config_overrides, cli.config_overrides);
            proto::run_main(proto_cli).await?;
        }
        Some(Subcommand::Completion(completion_cli)) => {
@@ -247,10 +189,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()>
        }
        Some(Subcommand::Debug(debug_args)) => match debug_args.cmd {
            DebugCommand::Seatbelt(mut seatbelt_cli) => {
                prepend_config_flags(
                    &mut seatbelt_cli.config_overrides,
                    root_config_overrides.clone(),
                );
                prepend_config_flags(&mut seatbelt_cli.config_overrides, cli.config_overrides);
                codex_cli::debug_sandbox::run_command_under_seatbelt(
                    seatbelt_cli,
                    codex_linux_sandbox_exe,
@@ -258,10 +197,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()>
                .await?;
            }
            DebugCommand::Landlock(mut landlock_cli) => {
                prepend_config_flags(
                    &mut landlock_cli.config_overrides,
                    root_config_overrides.clone(),
                );
                prepend_config_flags(&mut landlock_cli.config_overrides, cli.config_overrides);
                codex_cli::debug_sandbox::run_command_under_landlock(
                    landlock_cli,
                    codex_linux_sandbox_exe,
@@ -270,10 +206,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()>
            }
        },
        Some(Subcommand::Apply(mut apply_cli)) => {
            prepend_config_flags(
                &mut apply_cli.config_overrides,
                root_config_overrides.clone(),
            );
            prepend_config_flags(&mut apply_cli.config_overrides, cli.config_overrides);
            run_apply_command(apply_cli, None).await?;
        }
        Some(Subcommand::GenerateTs(gen_cli)) => {
@@ -295,208 +228,8 @@ fn prepend_config_flags(
        .splice(0..0, cli_config_overrides.raw_overrides);
}

/// Build the final `TuiCli` for a `codex resume` invocation.
fn finalize_resume_interactive(
    mut interactive: TuiCli,
    root_config_overrides: CliConfigOverrides,
    session_id: Option<String>,
    last: bool,
    resume_cli: TuiCli,
) -> TuiCli {
    // Start with the parsed interactive CLI so resume shares the same
    // configuration surface area as `codex` without additional flags.
    let resume_session_id = session_id;
    interactive.resume_picker = resume_session_id.is_none() && !last;
    interactive.resume_last = last;
    interactive.resume_session_id = resume_session_id;

    // Merge resume-scoped flags and overrides with highest precedence.
    merge_resume_cli_flags(&mut interactive, resume_cli);

    // Propagate any root-level config overrides (e.g. `-c key=value`).
    prepend_config_flags(&mut interactive.config_overrides, root_config_overrides);

    interactive
}

/// Merge flags provided to `codex resume` so they take precedence over any
/// root-level flags. Only overrides fields explicitly set on the resume-scoped
/// CLI. Also appends `-c key=value` overrides with highest precedence.
fn merge_resume_cli_flags(interactive: &mut TuiCli, resume_cli: TuiCli) {
    if let Some(model) = resume_cli.model {
        interactive.model = Some(model);
    }
    if resume_cli.oss {
        interactive.oss = true;
    }
    if let Some(profile) = resume_cli.config_profile {
        interactive.config_profile = Some(profile);
    }
    if let Some(sandbox) = resume_cli.sandbox_mode {
        interactive.sandbox_mode = Some(sandbox);
    }
    if let Some(approval) = resume_cli.approval_policy {
        interactive.approval_policy = Some(approval);
    }
    if resume_cli.full_auto {
        interactive.full_auto = true;
    }
    if resume_cli.dangerously_bypass_approvals_and_sandbox {
        interactive.dangerously_bypass_approvals_and_sandbox = true;
    }
    if let Some(cwd) = resume_cli.cwd {
        interactive.cwd = Some(cwd);
    }
    if resume_cli.web_search {
        interactive.web_search = true;
    }
    if !resume_cli.images.is_empty() {
        interactive.images = resume_cli.images;
    }
    if let Some(prompt) = resume_cli.prompt {
        interactive.prompt = Some(prompt);
    }

    interactive
        .config_overrides
        .raw_overrides
        .extend(resume_cli.config_overrides.raw_overrides);
}

fn print_completion(cmd: CompletionCommand) {
    let mut app = MultitoolCli::command();
    let name = "codex";
    generate(cmd.shell, &mut app, name, &mut std::io::stdout());
}

#[cfg(test)]
mod tests {
    use super::*;

    fn finalize_from_args(args: &[&str]) -> TuiCli {
        let cli = MultitoolCli::try_parse_from(args).expect("parse");
        let MultitoolCli {
            interactive,
            config_overrides: root_overrides,
            subcommand,
        } = cli;

        let Subcommand::Resume(ResumeCommand {
            session_id,
            last,
            config_overrides: resume_cli,
        }) = subcommand.expect("resume present")
        else {
            unreachable!()
        };

        finalize_resume_interactive(interactive, root_overrides, session_id, last, resume_cli)
    }

    #[test]
    fn resume_model_flag_applies_when_no_root_flags() {
        let interactive = finalize_from_args(["codex", "resume", "-m", "gpt-5-test"].as_ref());

        assert_eq!(interactive.model.as_deref(), Some("gpt-5-test"));
        assert!(interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
    }

    #[test]
    fn resume_picker_logic_none_and_not_last() {
        let interactive = finalize_from_args(["codex", "resume"].as_ref());
        assert!(interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
    }

    #[test]
    fn resume_picker_logic_last() {
        let interactive = finalize_from_args(["codex", "resume", "--last"].as_ref());
        assert!(!interactive.resume_picker);
        assert!(interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
    }

    #[test]
    fn resume_picker_logic_with_session_id() {
        let interactive = finalize_from_args(["codex", "resume", "1234"].as_ref());
        assert!(!interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id.as_deref(), Some("1234"));
    }

    #[test]
    fn resume_merges_option_flags_and_full_auto() {
        let interactive = finalize_from_args(
            [
                "codex",
                "resume",
                "sid",
                "--oss",
                "--full-auto",
                "--search",
                "--sandbox",
                "workspace-write",
                "--ask-for-approval",
                "on-request",
                "-m",
                "gpt-5-test",
                "-p",
                "my-profile",
                "-C",
                "/tmp",
                "-i",
                "/tmp/a.png,/tmp/b.png",
            ]
            .as_ref(),
        );

        assert_eq!(interactive.model.as_deref(), Some("gpt-5-test"));
        assert!(interactive.oss);
        assert_eq!(interactive.config_profile.as_deref(), Some("my-profile"));
        assert!(matches!(
            interactive.sandbox_mode,
            Some(codex_common::SandboxModeCliArg::WorkspaceWrite)
        ));
        assert!(matches!(
            interactive.approval_policy,
            Some(codex_common::ApprovalModeCliArg::OnRequest)
        ));
        assert!(interactive.full_auto);
        assert_eq!(
            interactive.cwd.as_deref(),
            Some(std::path::Path::new("/tmp"))
        );
        assert!(interactive.web_search);
        let has_a = interactive
            .images
            .iter()
            .any(|p| p == std::path::Path::new("/tmp/a.png"));
        let has_b = interactive
            .images
            .iter()
            .any(|p| p == std::path::Path::new("/tmp/b.png"));
        assert!(has_a && has_b);
        assert!(!interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id.as_deref(), Some("sid"));
    }

    #[test]
    fn resume_merges_dangerously_bypass_flag() {
        let interactive = finalize_from_args(
            [
                "codex",
                "resume",
                "--dangerously-bypass-approvals-and-sandbox",
            ]
            .as_ref(),
        );
        assert!(interactive.dangerously_bypass_approvals_and_sandbox);
        assert!(interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
    }
}
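The precedence rules in `finalize_resume_interactive` and `merge_resume_cli_flags` can be summarized with one more hypothetical check in the style of the tests above (not part of the diff, and assuming clap accepts `-m` both at the root and on the resume subcommand):

#[test]
fn resume_scoped_model_overrides_root_model() {
    // Root-level -m parses into `interactive`; the resume-scoped -m wins the merge.
    let interactive =
        finalize_from_args(["codex", "-m", "gpt-5", "resume", "-m", "gpt-5-test"].as_ref());
    assert_eq!(interactive.model.as_deref(), Some("gpt-5-test"));
}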
@@ -1,370 +0,0 @@
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::path::PathBuf;

use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::find_codex_home;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::write_global_mcp_servers;
use codex_core::config_types::McpServerConfig;

/// [experimental] Launch Codex as an MCP server or manage configured MCP servers.
///
/// Subcommands:
/// - `serve` — run the MCP server on stdio
/// - `list` — list configured servers (with `--json`)
/// - `get` — show a single server (with `--json`)
/// - `add` — add a server launcher entry to `~/.codex/config.toml`
/// - `remove` — delete a server entry
#[derive(Debug, clap::Parser)]
pub struct McpCli {
    #[clap(flatten)]
    pub config_overrides: CliConfigOverrides,

    #[command(subcommand)]
    pub cmd: Option<McpSubcommand>,
}

#[derive(Debug, clap::Subcommand)]
pub enum McpSubcommand {
    /// [experimental] Run the Codex MCP server (stdio transport).
    Serve,

    /// [experimental] List configured MCP servers.
    List(ListArgs),

    /// [experimental] Show details for a configured MCP server.
    Get(GetArgs),

    /// [experimental] Add a global MCP server entry.
    Add(AddArgs),

    /// [experimental] Remove a global MCP server entry.
    Remove(RemoveArgs),
}

#[derive(Debug, clap::Parser)]
pub struct ListArgs {
    /// Output the configured servers as JSON.
    #[arg(long)]
    pub json: bool,
}

#[derive(Debug, clap::Parser)]
pub struct GetArgs {
    /// Name of the MCP server to display.
    pub name: String,

    /// Output the server configuration as JSON.
    #[arg(long)]
    pub json: bool,
}

#[derive(Debug, clap::Parser)]
pub struct AddArgs {
    /// Name for the MCP server configuration.
    pub name: String,

    /// Environment variables to set when launching the server.
    #[arg(long, value_parser = parse_env_pair, value_name = "KEY=VALUE")]
    pub env: Vec<(String, String)>,

    /// Command to launch the MCP server.
    #[arg(trailing_var_arg = true, num_args = 1..)]
    pub command: Vec<String>,
}

#[derive(Debug, clap::Parser)]
pub struct RemoveArgs {
    /// Name of the MCP server configuration to remove.
    pub name: String,
}

impl McpCli {
    pub async fn run(self, codex_linux_sandbox_exe: Option<PathBuf>) -> Result<()> {
        let McpCli {
            config_overrides,
            cmd,
        } = self;
        let subcommand = cmd.unwrap_or(McpSubcommand::Serve);

        match subcommand {
            McpSubcommand::Serve => {
                codex_mcp_server::run_main(codex_linux_sandbox_exe, config_overrides).await?;
            }
            McpSubcommand::List(args) => {
                run_list(&config_overrides, args)?;
            }
            McpSubcommand::Get(args) => {
                run_get(&config_overrides, args)?;
            }
            McpSubcommand::Add(args) => {
                run_add(&config_overrides, args)?;
            }
            McpSubcommand::Remove(args) => {
                run_remove(&config_overrides, args)?;
            }
        }

        Ok(())
    }
}

fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> {
    // Validate any provided overrides even though they are not currently applied.
    config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;

    let AddArgs { name, env, command } = add_args;

    validate_server_name(&name)?;

    let mut command_parts = command.into_iter();
    let command_bin = command_parts
        .next()
        .ok_or_else(|| anyhow!("command is required"))?;
    let command_args: Vec<String> = command_parts.collect();

    let env_map = if env.is_empty() {
        None
    } else {
        let mut map = HashMap::new();
        for (key, value) in env {
            map.insert(key, value);
        }
        Some(map)
    };

    let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
    let mut servers = load_global_mcp_servers(&codex_home)
        .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?;

    let new_entry = McpServerConfig {
        command: command_bin,
        args: command_args,
        env: env_map,
        startup_timeout_ms: None,
    };

    servers.insert(name.clone(), new_entry);

    write_global_mcp_servers(&codex_home, &servers)
        .with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?;

    println!("Added global MCP server '{name}'.");

    Ok(())
}

fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> {
    config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;

    let RemoveArgs { name } = remove_args;

    validate_server_name(&name)?;

    let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
    let mut servers = load_global_mcp_servers(&codex_home)
        .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?;

    let removed = servers.remove(&name).is_some();

    if removed {
        write_global_mcp_servers(&codex_home, &servers)
            .with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?;
    }

    if removed {
        println!("Removed global MCP server '{name}'.");
    } else {
        println!("No MCP server named '{name}' found.");
    }

    Ok(())
}

fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> {
    let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
        .context("failed to load configuration")?;

    let mut entries: Vec<_> = config.mcp_servers.iter().collect();
    entries.sort_by(|(a, _), (b, _)| a.cmp(b));

    if list_args.json {
        let json_entries: Vec<_> = entries
            .into_iter()
            .map(|(name, cfg)| {
                let env = cfg.env.as_ref().map(|env| {
                    env.iter()
                        .map(|(k, v)| (k.clone(), v.clone()))
                        .collect::<BTreeMap<_, _>>()
                });
                serde_json::json!({
                    "name": name,
                    "command": cfg.command,
                    "args": cfg.args,
                    "env": env,
                    "startup_timeout_ms": cfg.startup_timeout_ms,
                })
            })
            .collect();
        let output = serde_json::to_string_pretty(&json_entries)?;
        println!("{output}");
        return Ok(());
    }

    if entries.is_empty() {
        println!("No MCP servers configured yet. Try `codex mcp add my-tool -- my-command`.");
        return Ok(());
    }

    let mut rows: Vec<[String; 4]> = Vec::new();
    for (name, cfg) in entries {
        let args = if cfg.args.is_empty() {
            "-".to_string()
        } else {
            cfg.args.join(" ")
        };

        let env = match cfg.env.as_ref() {
            None => "-".to_string(),
            Some(map) if map.is_empty() => "-".to_string(),
            Some(map) => {
                let mut pairs: Vec<_> = map.iter().collect();
                pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
                pairs
                    .into_iter()
                    .map(|(k, v)| format!("{k}={v}"))
                    .collect::<Vec<_>>()
                    .join(", ")
            }
        };

        rows.push([name.clone(), cfg.command.clone(), args, env]);
    }

    let mut widths = ["Name".len(), "Command".len(), "Args".len(), "Env".len()];
    for row in &rows {
        for (i, cell) in row.iter().enumerate() {
            widths[i] = widths[i].max(cell.len());
        }
    }

    println!(
        "{:<name_w$} {:<cmd_w$} {:<args_w$} {:<env_w$}",
        "Name",
        "Command",
        "Args",
        "Env",
        name_w = widths[0],
        cmd_w = widths[1],
        args_w = widths[2],
        env_w = widths[3],
    );

    for row in rows {
        println!(
            "{:<name_w$} {:<cmd_w$} {:<args_w$} {:<env_w$}",
            row[0],
            row[1],
            row[2],
            row[3],
            name_w = widths[0],
            cmd_w = widths[1],
            args_w = widths[2],
            env_w = widths[3],
        );
    }

    Ok(())
}

fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> {
    let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
        .context("failed to load configuration")?;

    let Some(server) = config.mcp_servers.get(&get_args.name) else {
        bail!("No MCP server named '{name}' found.", name = get_args.name);
    };

    if get_args.json {
        let env = server.env.as_ref().map(|env| {
            env.iter()
                .map(|(k, v)| (k.clone(), v.clone()))
                .collect::<BTreeMap<_, _>>()
        });
        let output = serde_json::to_string_pretty(&serde_json::json!({
            "name": get_args.name,
            "command": server.command,
            "args": server.args,
            "env": env,
            "startup_timeout_ms": server.startup_timeout_ms,
        }))?;
        println!("{output}");
        return Ok(());
    }

    println!("{}", get_args.name);
    println!(" command: {}", server.command);
    let args = if server.args.is_empty() {
        "-".to_string()
    } else {
        server.args.join(" ")
    };
    println!(" args: {args}");
    let env_display = match server.env.as_ref() {
        None => "-".to_string(),
        Some(map) if map.is_empty() => "-".to_string(),
        Some(map) => {
            let mut pairs: Vec<_> = map.iter().collect();
            pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
            pairs
                .into_iter()
                .map(|(k, v)| format!("{k}={v}"))
                .collect::<Vec<_>>()
                .join(", ")
        }
    };
    println!(" env: {env_display}");
    if let Some(timeout) = server.startup_timeout_ms {
        println!(" startup_timeout_ms: {timeout}");
    }
    println!(" remove: codex mcp remove {}", get_args.name);

    Ok(())
}

fn parse_env_pair(raw: &str) -> Result<(String, String), String> {
    let mut parts = raw.splitn(2, '=');
    let key = parts
        .next()
        .map(str::trim)
        .filter(|s| !s.is_empty())
        .ok_or_else(|| "environment entries must be in KEY=VALUE form".to_string())?;
    let value = parts
        .next()
        .map(str::to_string)
        .ok_or_else(|| "environment entries must be in KEY=VALUE form".to_string())?;

    Ok((key.to_string(), value))
}

fn validate_server_name(name: &str) -> Result<()> {
    let is_valid = !name.is_empty()
        && name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_');

    if is_valid {
        Ok(())
    } else {
        bail!("invalid server name '{name}' (use letters, numbers, '-', '_')");
    }
}
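A quick assert-style sketch of `parse_env_pair` behavior as defined above (illustrative values, not part of the diff):

assert_eq!(parse_env_pair("TOKEN=secret"), Ok(("TOKEN".to_string(), "secret".to_string())));
// splitn(2, '=') splits only once, so the value may itself contain '='.
assert_eq!(parse_env_pair("URL=a=b"), Ok(("URL".to_string(), "a=b".to_string())));
// A missing '=' yields the KEY=VALUE error.
assert!(parse_env_pair("NOVALUE").is_err());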
@@ -37,8 +37,10 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {

    let config = Config::load_with_cli_overrides(overrides_vec, ConfigOverrides::default())?;
    // Use conversation_manager API to start a conversation
    let conversation_manager =
        ConversationManager::new(AuthManager::shared(config.codex_home.clone()));
    let conversation_manager = ConversationManager::new(AuthManager::shared(
        config.codex_home.clone(),
        config.preferred_auth_method,
    ));
    let NewConversation {
        conversation_id: _,
        conversation,
@@ -1,86 +0,0 @@
use std::path::Path;

use anyhow::Result;
use codex_core::config::load_global_mcp_servers;
use predicates::str::contains;
use pretty_assertions::assert_eq;
use tempfile::TempDir;

fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
    let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
    cmd.env("CODEX_HOME", codex_home);
    Ok(cmd)
}

#[test]
fn add_and_remove_server_updates_global_config() -> Result<()> {
    let codex_home = TempDir::new()?;

    let mut add_cmd = codex_command(codex_home.path())?;
    add_cmd
        .args(["mcp", "add", "docs", "--", "echo", "hello"])
        .assert()
        .success()
        .stdout(contains("Added global MCP server 'docs'."));

    let servers = load_global_mcp_servers(codex_home.path())?;
    assert_eq!(servers.len(), 1);
    let docs = servers.get("docs").expect("server should exist");
    assert_eq!(docs.command, "echo");
    assert_eq!(docs.args, vec!["hello".to_string()]);
    assert!(docs.env.is_none());

    let mut remove_cmd = codex_command(codex_home.path())?;
    remove_cmd
        .args(["mcp", "remove", "docs"])
        .assert()
        .success()
        .stdout(contains("Removed global MCP server 'docs'."));

    let servers = load_global_mcp_servers(codex_home.path())?;
    assert!(servers.is_empty());

    let mut remove_again_cmd = codex_command(codex_home.path())?;
    remove_again_cmd
        .args(["mcp", "remove", "docs"])
        .assert()
        .success()
        .stdout(contains("No MCP server named 'docs' found."));

    let servers = load_global_mcp_servers(codex_home.path())?;
    assert!(servers.is_empty());

    Ok(())
}

#[test]
fn add_with_env_preserves_key_order_and_values() -> Result<()> {
    let codex_home = TempDir::new()?;

    let mut add_cmd = codex_command(codex_home.path())?;
    add_cmd
        .args([
            "mcp",
            "add",
            "envy",
            "--env",
            "FOO=bar",
            "--env",
            "ALPHA=beta",
            "--",
            "python",
            "server.py",
        ])
        .assert()
        .success();

    let servers = load_global_mcp_servers(codex_home.path())?;
    let envy = servers.get("envy").expect("server should exist");
    let env = envy.env.as_ref().expect("env should be present");

    assert_eq!(env.len(), 2);
    assert_eq!(env.get("FOO"), Some(&"bar".to_string()));
    assert_eq!(env.get("ALPHA"), Some(&"beta".to_string()));

    Ok(())
}
@@ -1,106 +0,0 @@
use std::path::Path;

use anyhow::Result;
use predicates::str::contains;
use pretty_assertions::assert_eq;
use serde_json::Value as JsonValue;
use tempfile::TempDir;

fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
    let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
    cmd.env("CODEX_HOME", codex_home);
    Ok(cmd)
}

#[test]
fn list_shows_empty_state() -> Result<()> {
    let codex_home = TempDir::new()?;

    let mut cmd = codex_command(codex_home.path())?;
    let output = cmd.args(["mcp", "list"]).output()?;
    assert!(output.status.success());
    let stdout = String::from_utf8(output.stdout)?;
    assert!(stdout.contains("No MCP servers configured yet."));

    Ok(())
}

#[test]
fn list_and_get_render_expected_output() -> Result<()> {
    let codex_home = TempDir::new()?;

    let mut add = codex_command(codex_home.path())?;
    add.args([
        "mcp",
        "add",
        "docs",
        "--env",
        "TOKEN=secret",
        "--",
        "docs-server",
        "--port",
        "4000",
    ])
    .assert()
    .success();

    let mut list_cmd = codex_command(codex_home.path())?;
    let list_output = list_cmd.args(["mcp", "list"]).output()?;
    assert!(list_output.status.success());
    let stdout = String::from_utf8(list_output.stdout)?;
    assert!(stdout.contains("Name"));
    assert!(stdout.contains("docs"));
    assert!(stdout.contains("docs-server"));
    assert!(stdout.contains("TOKEN=secret"));

    let mut list_json_cmd = codex_command(codex_home.path())?;
    let json_output = list_json_cmd.args(["mcp", "list", "--json"]).output()?;
    assert!(json_output.status.success());
    let stdout = String::from_utf8(json_output.stdout)?;
    let parsed: JsonValue = serde_json::from_str(&stdout)?;
    let array = parsed.as_array().expect("expected array");
    assert_eq!(array.len(), 1);
    let entry = &array[0];
    assert_eq!(entry.get("name"), Some(&JsonValue::String("docs".into())));
    assert_eq!(
        entry.get("command"),
        Some(&JsonValue::String("docs-server".into()))
    );

    let args = entry
        .get("args")
        .and_then(|v| v.as_array())
        .expect("args array");
    assert_eq!(
        args,
        &vec![
            JsonValue::String("--port".into()),
            JsonValue::String("4000".into())
        ]
    );

    let env = entry
        .get("env")
        .and_then(|v| v.as_object())
        .expect("env map");
    assert_eq!(env.get("TOKEN"), Some(&JsonValue::String("secret".into())));

    let mut get_cmd = codex_command(codex_home.path())?;
    let get_output = get_cmd.args(["mcp", "get", "docs"]).output()?;
    assert!(get_output.status.success());
    let stdout = String::from_utf8(get_output.stdout)?;
    assert!(stdout.contains("docs"));
    assert!(stdout.contains("command: docs-server"));
    assert!(stdout.contains("args: --port 4000"));
    assert!(stdout.contains("env: TOKEN=secret"));
    assert!(stdout.contains("remove: codex mcp remove docs"));

    let mut get_json_cmd = codex_command(codex_home.path())?;
    get_json_cmd
        .args(["mcp", "get", "docs", "--json"])
        .assert()
        .success()
        .stdout(contains("\"name\": \"docs\""));

    Ok(())
}
@@ -17,10 +17,7 @@ pub fn create_config_summary_entries(config: &Config) -> Vec<(&'static str, String)>
    {
        entries.push((
            "reasoning effort",
            config
                .model_reasoning_effort
                .map(|effort| effort.to_string())
                .unwrap_or_else(|| "none".to_string()),
            config.model_reasoning_effort.to_string(),
        ));
        entries.push((
            "reasoning summaries",
@@ -2,7 +2,7 @@ use std::time::Duration;
use std::time::Instant;

/// Returns a string representing the elapsed time since `start_time` like
/// "1m 15s" or "1.50s".
/// "1m15s" or "1.50s".
pub fn format_elapsed(start_time: Instant) -> String {
    format_duration(start_time.elapsed())
}
@@ -12,7 +12,7 @@ pub fn format_elapsed(start_time: Instant) -> String {
/// Formatting rules:
/// * < 1 s -> "{milli}ms"
/// * < 60 s -> "{sec:.2}s" (two decimal places)
/// * >= 60 s -> "{min}m {sec:02}s"
/// * >= 60 s -> "{min}m{sec:02}s"
pub fn format_duration(duration: Duration) -> String {
    let millis = duration.as_millis() as i64;
    format_elapsed_millis(millis)
@@ -26,7 +26,7 @@ fn format_elapsed_millis(millis: i64) -> String {
    } else {
        let minutes = millis / 60_000;
        let seconds = (millis % 60_000) / 1000;
        format!("{minutes}m {seconds:02}s")
        format!("{minutes}m{seconds:02}s")
    }
}

@@ -61,18 +61,12 @@ mod tests {
    fn test_format_duration_minutes() {
        // Durations ≥ 1 minute should be printed mmss.
        let dur = Duration::from_millis(75_000); // 1m15s
        assert_eq!(format_duration(dur), "1m 15s");
        assert_eq!(format_duration(dur), "1m15s");

        let dur_exact = Duration::from_millis(60_000); // 1m0s
        assert_eq!(format_duration(dur_exact), "1m 00s");
        assert_eq!(format_duration(dur_exact), "1m00s");

        let dur_long = Duration::from_millis(3_601_000);
        assert_eq!(format_duration(dur_long), "60m 01s");
    }

    #[test]
    fn test_format_duration_one_hour_has_space() {
        let dur_hour = Duration::from_millis(3_600_000);
        assert_eq!(format_duration(dur_hour), "60m 00s");
        assert_eq!(format_duration(dur_long), "60m01s");
    }
}
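A few illustrative cases of the new formatting, mirroring the rules in the doc comment above (a sketch, not part of the diff):

use std::time::Duration;

fn demo() {
    assert_eq!(format_duration(Duration::from_millis(250)), "250ms"); // < 1 s
    assert_eq!(format_duration(Duration::from_millis(1_500)), "1.50s"); // < 60 s, two decimals
    assert_eq!(format_duration(Duration::from_millis(75_000)), "1m15s"); // >= 60 s, space removed by this change
}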
@@ -1,6 +1,4 @@
use codex_core::config::GPT_5_CODEX_MEDIUM_MODEL;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_protocol::mcp_protocol::AuthMode;

/// A simple preset pairing a model slug with a reasoning effort.
#[derive(Debug, Clone, Copy)]
@@ -14,68 +12,43 @@ pub struct ModelPreset {
    /// Model slug (e.g., "gpt-5").
    pub model: &'static str,
    /// Reasoning effort to apply for this preset.
    pub effort: Option<ReasoningEffort>,
    pub effort: ReasoningEffort,
}

const PRESETS: &[ModelPreset] = &[
    ModelPreset {
        id: "gpt-5-codex-low",
        label: "gpt-5-codex low",
        description: "",
        model: "gpt-5-codex",
        effort: Some(ReasoningEffort::Low),
    },
    ModelPreset {
        id: "gpt-5-codex-medium",
        label: "gpt-5-codex medium",
        description: "",
        model: "gpt-5-codex",
        effort: None,
    },
    ModelPreset {
        id: "gpt-5-codex-high",
        label: "gpt-5-codex high",
        description: "",
        model: "gpt-5-codex",
        effort: Some(ReasoningEffort::High),
    },
    ModelPreset {
        id: "gpt-5-minimal",
        label: "gpt-5 minimal",
        description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks",
        model: "gpt-5",
        effort: Some(ReasoningEffort::Minimal),
    },
    ModelPreset {
        id: "gpt-5-low",
        label: "gpt-5 low",
        description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
        model: "gpt-5",
        effort: Some(ReasoningEffort::Low),
    },
    ModelPreset {
        id: "gpt-5-medium",
        label: "gpt-5 medium",
        description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
        model: "gpt-5",
        effort: Some(ReasoningEffort::Medium),
    },
    ModelPreset {
        id: "gpt-5-high",
        label: "gpt-5 high",
        description: "— maximizes reasoning depth for complex or ambiguous problems",
        model: "gpt-5",
        effort: Some(ReasoningEffort::High),
    },
];

pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
    match auth_mode {
        Some(AuthMode::ApiKey) => PRESETS
            .iter()
            .copied()
            .filter(|p| p.model != GPT_5_CODEX_MEDIUM_MODEL)
            .collect(),
        _ => PRESETS.to_vec(),
    }
/// Built-in list of model presets that pair a model with a reasoning effort.
///
/// Keep this UI-agnostic so it can be reused by both TUI and MCP server.
pub fn builtin_model_presets() -> &'static [ModelPreset] {
    // Order reflects effort from minimal to high.
    const PRESETS: &[ModelPreset] = &[
        ModelPreset {
            id: "gpt-5-minimal",
            label: "gpt-5 minimal",
            description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks",
            model: "gpt-5",
            effort: ReasoningEffort::Minimal,
        },
        ModelPreset {
            id: "gpt-5-low",
            label: "gpt-5 low",
            description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
            model: "gpt-5",
            effort: ReasoningEffort::Low,
        },
        ModelPreset {
            id: "gpt-5-medium",
            label: "gpt-5 medium",
            description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
            model: "gpt-5",
            effort: ReasoningEffort::Medium,
        },
        ModelPreset {
            id: "gpt-5-high",
            label: "gpt-5 high",
            description: "— maximizes reasoning depth for complex or ambiguous problems",
            model: "gpt-5",
            effort: ReasoningEffort::High,
        },
    ];
    PRESETS
}
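A hedged sketch of consuming the rewritten API (the preset id comes from the list above; the lookup itself is illustrative, not part of the diff):

if let Some(preset) = builtin_model_presets().iter().find(|p| p.id == "gpt-5-medium") {
    assert_eq!(preset.model, "gpt-5");
    // After this change, effort is a plain ReasoningEffort rather than an Option.
    assert!(matches!(preset.effort, ReasoningEffort::Medium));
}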
@@ -4,25 +4,22 @@ name = "codex-core"
version = { workspace = true }

[lib]
doctest = false
name = "codex_core"
path = "src/lib.rs"
doctest = false

[lints]
workspace = true

[dependencies]
anyhow = "1"
askama = "0.12"
async-channel = "2.3.1"
base64 = "0.22"
bytes = "1.10.1"
chrono = { version = "0.4", features = ["serde"] }
codex-apply-patch = { path = "../apply-patch" }
codex-file-search = { path = "../file-search" }
codex-mcp-client = { path = "../mcp-client" }
codex-protocol = { path = "../protocol" }
codex-otel = { path = "../otel", features = ["otel"] }
dirs = "6"
env-flags = "0.1.1"
eventsource-stream = "0.2.3"
@@ -42,12 +39,7 @@ similar = "2.7.0"
strum_macros = "0.27.2"
tempfile = "3"
thiserror = "2.0.16"
time = { version = "0.3", features = [
"formatting",
"parsing",
"local-offset",
"macros",
] }
time = { version = "0.3", features = ["formatting", "parsing", "local-offset", "macros"] }
tokio = { version = "1", features = [
"io-std",
"macros",
@@ -63,7 +55,7 @@ tree-sitter = "0.25.9"
tree-sitter-bash = "0.25.0"
uuid = { version = "1", features = ["serde", "v4"] }
which = "6"
wildmatch = "2.5.0"
wildmatch = "2.4.0"

[target.'cfg(target_os = "linux")'.dependencies]
@@ -88,7 +80,6 @@ tempfile = "3"
tokio-test = "0.4"
walkdir = "2.5.0"
wiremock = "0.6"
tracing-test = { version = "0.2.5", features = ["no-env-filter"] }

[package.metadata.cargo-shear]
ignored = ["openssl-sys"]
@@ -1,104 +0,0 @@
You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.

## General

- The arguments to `shell` will be passed to execvp(). Most terminal commands should be prefixed with ["bash", "-lc"].
- Always set the `workdir` param when using the shell function. Do not use `cd` unless absolutely necessary.
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)

## Editing constraints

- Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them.
- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare.
- You may be in a dirty git worktree.
    * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
    * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
    * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
    * If the changes are in unrelated files, just ignore them and don't revert them.
- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.

## Plan tool

When using the planning tool:
- Skip using the planning tool for straightforward tasks (roughly the easiest 25%).
- Do not make single-step plans.
- When you have made a plan, update it after having performed one of the sub-tasks that you shared on the plan.

## Codex CLI harness, sandboxing, and approvals

The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.

Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
- **read-only**: The sandbox only permits reading files.
- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
- **danger-full-access**: No filesystem sandboxing - all commands are permitted.

Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
- **restricted**: Requires approval
- **enabled**: No approval needed

Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are:
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `with_escalated_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)

When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.

Although approvals introduce friction because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task, unless the approval policy is set to "never", in which case never ask for approvals.

When requesting approval to execute a command that will require escalated privileges:
- Provide the `with_escalated_permissions` parameter with the boolean value true
- Include a short, 1-sentence explanation for why you need to enable `with_escalated_permissions` in the justification parameter

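The approval rules above reduce to a small decision function. A hedged sketch, using hypothetical enum names that only mirror the configuration options listed in this prompt, not any actual crate types:

```rust
// Hypothetical mirrors of the `approval_policy` and `sandbox_mode` options.
#[derive(Clone, Copy, PartialEq)]
enum ApprovalPolicy { Untrusted, OnFailure, OnRequest, Never }

#[derive(Clone, Copy, PartialEq)]
enum SandboxMode { ReadOnly, WorkspaceWrite, DangerFullAccess }

/// True when a command should be escalated to the user before running.
fn needs_approval(policy: ApprovalPolicy, sandbox: SandboxMode, is_safe_read: bool) -> bool {
    match policy {
        // Never ask; work within the sandbox's constraints instead.
        ApprovalPolicy::Never => false,
        // Escalate everything except the allowlist of safe "read" commands.
        ApprovalPolicy::Untrusted => !is_safe_read,
        // on-failure escalates after a sandboxed failure and on-request only
        // when explicitly asked, so neither needs up-front approval except
        // for non-read commands under a read-only sandbox.
        ApprovalPolicy::OnFailure | ApprovalPolicy::OnRequest => {
            sandbox == SandboxMode::ReadOnly && !is_safe_read
        }
    }
}
```
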
## Special user requests

- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.

## Presenting your work and final message

You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.

- Default: be very concise; friendly coding teammate tone.
- Ask only when needed; suggest ideas; mirror the user's style.
- For substantial work, summarize clearly; follow final‑answer formatting.
- Skip heavy formatting for simple confirmations.
- Don't dump large files you've written; reference paths only.
- No "save/copy this file" - User is on the same machine.
- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something.
- For code changes:
    * Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in.
    * If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps.
    * When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number.
- The user does not see command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result.

### Final answer structure and style guidelines

- Plain text; CLI handles styling. Use structure only when it helps scanability.
- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help.
- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent.
- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **.
- Code samples or multi-line snippets should be wrapped in fenced code blocks; add a language hint whenever obvious.
- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task.
- Tone: collaborative, concise, factual; present tense, active voice; self‑contained; no "above/below"; parallel wording.
- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers.
- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets.
- File References: When referencing files in your response, make sure to include the relevant start line and always follow the below rules:
    * Use inline code to make file paths clickable.
    * Each reference should have a standalone path. Even if it's the same file.
    * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix.
    * Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
    * Do not use URIs like file://, vscode://, or https://.
    * Do not provide a range of lines.
    * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5
@@ -251,16 +251,6 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Apply to inline examples and to bullet keywords if the keyword itself is a literal file/command.
- Never mix monospace and bold markers; choose one based on whether it’s a keyword (`**`) or inline code/path (`` ` ``).

**File References**
When referencing files in your response, make sure to include the relevant start line and always follow the below rules:
* Use inline code to make file paths clickable.
* Each reference should have a stand alone path. Even if it's the same file.
* Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix.
* Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
* Do not use URIs like file://, vscode://, or https://.
* Do not provide range of lines
* Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5

**Structure**

- Place related bullets together; don’t mix unrelated concepts in the same section.

@@ -1,87 +0,0 @@
# Review guidelines:

You are acting as a reviewer for a proposed code change made by another engineer.

Below are some default guidelines for determining whether the original author would appreciate the issue being flagged.

These are not the final word in determining whether an issue is a bug. In many cases, you will encounter other, more specific guidelines. These may be present elsewhere in a developer message, a user message, a file, or even elsewhere in this system message.
Those guidelines should be considered to override these general instructions.

Here are the general guidelines for determining whether something is a bug and should be flagged.

1. It meaningfully impacts the accuracy, performance, security, or maintainability of the code.
2. The bug is discrete and actionable (i.e. not a general issue with the codebase or a combination of multiple issues).
3. Fixing the bug does not demand a level of rigor that is not present in the rest of the codebase (e.g. one doesn't need very detailed comments and input validation in a repository of one-off scripts in personal projects).
4. The bug was introduced in the commit (pre-existing bugs should not be flagged).
5. The author of the original PR would likely fix the issue if they were made aware of it.
6. The bug does not rely on unstated assumptions about the codebase or author's intent.
7. It is not enough to speculate that a change may disrupt another part of the codebase; to be considered a bug, one must identify the other parts of the code that are provably affected.
8. The bug is clearly not just an intentional change by the original author.

When flagging a bug, you will also provide an accompanying comment. Once again, these guidelines are not the final word on how to construct a comment -- defer to any subsequent guidelines that you encounter.

1. The comment should be clear about why the issue is a bug.
2. The comment should appropriately communicate the severity of the issue. It should not claim that an issue is more severe than it actually is.
3. The comment should be brief. The body should be at most 1 paragraph. It should not introduce line breaks within the natural language flow unless it is necessary for the code fragment.
4. The comment should not include any chunks of code longer than 3 lines. Any code chunks should be wrapped in markdown inline code tags or a code block.
5. The comment should clearly and explicitly communicate the scenarios, environments, or inputs that are necessary for the bug to arise. The comment should immediately indicate that the issue's severity depends on these factors.
6. The comment's tone should be matter-of-fact and not accusatory or overly positive. It should read as a helpful AI assistant suggestion without sounding too much like a human reviewer.
7. The comment should be written such that the original author can immediately grasp the idea without close reading.
8. The comment should avoid excessive flattery and comments that are not helpful to the original author. The comment should avoid phrasing like "Great job ...", "Thanks for ...".

Below are some more detailed guidelines that you should apply to this specific review.

HOW MANY FINDINGS TO RETURN:

Output all findings that the original author would fix if they knew about them. If there is no finding that a person would definitely love to see and fix, prefer outputting no findings. Do not stop at the first qualifying finding. Continue until you've listed every qualifying finding.

GUIDELINES:

- Ignore trivial style unless it obscures meaning or violates documented standards.
- Use one comment per distinct issue (or a multi-line range if necessary).
- Use ```suggestion blocks ONLY for concrete replacement code (minimal lines; no commentary inside the block).
- In every ```suggestion block, preserve the exact leading whitespace of the replaced lines (spaces vs tabs, number of spaces).
- Do NOT introduce or remove outer indentation levels unless that is the actual fix.

The comments will be presented in the code review as inline comments. You should avoid providing unnecessary location details in the comment body. Always keep the line range as short as possible for interpreting the issue. Avoid ranges longer than 5–10 lines; instead, choose the most suitable subrange that pinpoints the problem.

At the beginning of the finding title, tag the bug with priority level. For example "[P1] Un-padding slices along wrong tensor dimensions". [P0] – Drop everything to fix. Blocking release, operations, or major usage. Only use for universal issues that do not depend on any assumptions about the inputs. · [P1] – Urgent. Should be addressed in the next cycle · [P2] – Normal. To be fixed eventually · [P3] – Low. Nice to have.

Additionally, include a numeric priority field in the JSON output for each finding: set "priority" to 0 for P0, 1 for P1, 2 for P2, or 3 for P3. If a priority cannot be determined, omit the field or use null.

At the end of your findings, output an "overall correctness" verdict of whether or not the patch should be considered "correct".
Correct implies that existing code and tests will not break, and the patch is free of bugs and other blocking issues.
Ignore non-blocking issues such as style, formatting, typos, documentation, and other nits.

FORMATTING GUIDELINES:
The finding description should be one paragraph.

OUTPUT FORMAT:

## Output schema — MUST MATCH *exactly*

```json
{
  "findings": [
    {
      "title": "<≤ 80 chars, imperative>",
      "body": "<valid Markdown explaining *why* this is a problem; cite files/lines/functions>",
      "confidence_score": <float 0.0-1.0>,
      "priority": <int 0-3, optional>,
      "code_location": {
        "absolute_file_path": "<file path>",
        "line_range": {"start": <int>, "end": <int>}
      }
    }
  ],
  "overall_correctness": "patch is correct" | "patch is incorrect",
  "overall_explanation": "<1-3 sentence explanation justifying the overall_correctness verdict>",
  "overall_confidence_score": <float 0.0-1.0>
}
```

* **Do not** wrap the JSON in markdown fences or extra prose.
* The code_location field is required and must include absolute_file_path and line_range.
* Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5–10 lines; pick the most suitable subrange).
* The code_location should overlap with the diff.
* Do not generate a PR fix.

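For readers wiring this schema into Rust, a hedged sketch of serde types that would deserialize the output above (illustrative only; field names mirror the JSON, not any actual crate types):

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct LineRange { start: u32, end: u32 }

#[derive(Deserialize)]
struct CodeLocation {
    absolute_file_path: String,
    line_range: LineRange,
}

#[derive(Deserialize)]
struct Finding {
    title: String,
    body: String,
    confidence_score: f32,
    #[serde(default)]
    priority: Option<u8>, // 0 = P0 .. 3 = P3; absent when undetermined
    code_location: CodeLocation,
}

#[derive(Deserialize)]
struct ReviewOutput {
    findings: Vec<Finding>,
    overall_correctness: String, // "patch is correct" | "patch is incorrect"
    overall_explanation: String,
    overall_confidence_score: f32,
}
```
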
@@ -52,13 +52,12 @@ pub(crate) async fn apply_patch(
        &turn_context.sandbox_policy,
        &turn_context.cwd,
    ) {
        SafetyCheck::AutoApprove {
            user_explicitly_approved,
            ..
        } => InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
            action,
            user_explicitly_approved_this_action: user_explicitly_approved,
        }),
        SafetyCheck::AutoApprove { .. } => {
            InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
                action,
                user_explicitly_approved_this_action: false,
            })
        }
        SafetyCheck::AskUser => {
            // Compute a readable summary of path changes to include in the
            // approval request so the user can make an informed decision.

@@ -17,7 +17,6 @@ use std::time::Duration;

use codex_protocol::mcp_protocol::AuthMode;

use crate::token_data::PlanType;
use crate::token_data::TokenData;
use crate::token_data::parse_id_token;

@@ -71,9 +70,13 @@ impl CodexAuth {
        Ok(access)
    }

    /// Loads the available auth information from the auth.json.
    pub fn from_codex_home(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
        load_auth(codex_home)
    /// Loads the available auth information from the auth.json or
    /// OPENAI_API_KEY environment variable.
    pub fn from_codex_home(
        codex_home: &Path,
        preferred_auth_method: AuthMode,
    ) -> std::io::Result<Option<CodexAuth>> {
        load_auth(codex_home, true, preferred_auth_method)
    }

    pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> {
@@ -132,12 +135,13 @@ impl CodexAuth {
    }

    pub fn get_account_id(&self) -> Option<String> {
        self.get_current_token_data().and_then(|t| t.account_id)
        self.get_current_token_data()
            .and_then(|t| t.account_id.clone())
    }

    pub(crate) fn get_plan_type(&self) -> Option<PlanType> {
    pub fn get_plan_type(&self) -> Option<String> {
        self.get_current_token_data()
            .and_then(|t| t.id_token.chatgpt_plan_type)
            .and_then(|t| t.id_token.chatgpt_plan_type.as_ref().map(|p| p.as_string()))
    }

    fn get_current_auth_json(&self) -> Option<AuthDotJson> {
@@ -146,7 +150,7 @@ impl CodexAuth {
    }

    fn get_current_token_data(&self) -> Option<TokenData> {
        self.get_current_auth_json().and_then(|t| t.tokens)
        self.get_current_auth_json().and_then(|t| t.tokens.clone())
    }

    /// Consider this private to integration tests.
@@ -189,11 +193,10 @@ impl CodexAuth {

pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";

pub fn read_openai_api_key_from_env() -> Option<String> {
fn read_openai_api_key_from_env() -> Option<String> {
    env::var(OPENAI_API_KEY_ENV_VAR)
        .ok()
        .map(|value| value.trim().to_string())
        .filter(|value| !value.is_empty())
        .filter(|s| !s.is_empty())
}

pub fn get_auth_file(codex_home: &Path) -> PathBuf {
@@ -211,7 +214,7 @@ pub fn logout(codex_home: &Path) -> std::io::Result<bool> {
    }
}

/// Writes an `auth.json` that contains only the API key.
/// Writes an `auth.json` that contains only the API key. Intended for CLI use.
pub fn login_with_api_key(codex_home: &Path, api_key: &str) -> std::io::Result<()> {
    let auth_dot_json = AuthDotJson {
        openai_api_key: Some(api_key.to_string()),
@@ -221,11 +224,28 @@ pub fn login_with_api_key(codex_home: &Path, api_key: &str) -> std::io::Result<(
    write_auth_json(&get_auth_file(codex_home), &auth_dot_json)
}

fn load_auth(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
fn load_auth(
    codex_home: &Path,
    include_env_var: bool,
    preferred_auth_method: AuthMode,
) -> std::io::Result<Option<CodexAuth>> {
    // First, check to see if there is a valid auth.json file. If not, we fall
    // back to AuthMode::ApiKey using the OPENAI_API_KEY environment variable
    // (if it is set).
    let auth_file = get_auth_file(codex_home);
    let client = crate::default_client::create_client();
    let auth_dot_json = match try_read_auth_json(&auth_file) {
        Ok(auth) => auth,
        // If auth.json does not exist, try to read the OPENAI_API_KEY from the
        // environment variable.
        Err(e) if e.kind() == std::io::ErrorKind::NotFound && include_env_var => {
            return match read_openai_api_key_from_env() {
                Some(api_key) => Ok(Some(CodexAuth::from_api_key_with_client(&api_key, client))),
                None => Ok(None),
            };
        }
        // Though if auth.json exists but is malformed, do not fall back to the
        // env var because the user may be expecting to use AuthMode::ChatGPT.
        Err(e) => {
            return Err(e);
        }
@@ -237,11 +257,32 @@ fn load_auth(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
        last_refresh,
    } = auth_dot_json;

    // Prefer AuthMode.ApiKey if it's set in the auth.json.
    // If the auth.json has an API key AND does not appear to be on a plan that
    // should prefer AuthMode::ChatGPT, use AuthMode::ApiKey.
    if let Some(api_key) = &auth_json_api_key {
        return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
        // Should any of these be AuthMode::ChatGPT with the api_key set?
        // Does AuthMode::ChatGPT indicate that there is an auth.json that is
        // "refreshable" even if we are using the API key for auth?
        match &tokens {
            Some(tokens) => {
                if tokens.should_use_api_key(preferred_auth_method, tokens.is_openai_email()) {
                    return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
                } else {
                    // Ignore the API key and fall through to ChatGPT auth.
                }
            }
            None => {
                // We have an API key but no tokens in the auth.json file.
                // Perhaps the user ran `codex login --api-key <KEY>` or updated
                // auth.json by hand. Either way, let's assume they are trying
                // to use their API key.
                return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
            }
        }
    }

    // For the AuthMode::ChatGPT variant, perhaps neither api_key nor
    // openai_api_key should exist?
    Ok(Some(CodexAuth {
        api_key: None,
        mode: AuthMode::ChatGPT,
@@ -291,10 +332,10 @@ async fn update_tokens(
    let tokens = auth_dot_json.tokens.get_or_insert_with(TokenData::default);
    tokens.id_token = parse_id_token(&id_token).map_err(std::io::Error::other)?;
    if let Some(access_token) = access_token {
        tokens.access_token = access_token;
        tokens.access_token = access_token.to_string();
    }
    if let Some(refresh_token) = refresh_token {
        tokens.refresh_token = refresh_token;
        tokens.refresh_token = refresh_token.to_string();
    }
    auth_dot_json.last_refresh = Some(Utc::now());
    write_auth_json(auth_file, &auth_dot_json)?;
@@ -371,6 +412,7 @@ use std::sync::RwLock;
/// Internal cached auth state.
#[derive(Clone, Debug)]
struct CachedAuth {
    preferred_auth_mode: AuthMode,
    auth: Option<CodexAuth>,
}

@@ -408,32 +450,6 @@ mod tests {
        assert_eq!(auth_dot_json, same_auth_dot_json);
    }

    #[test]
    fn login_with_api_key_overwrites_existing_auth_json() {
        let dir = tempdir().unwrap();
        let auth_path = dir.path().join("auth.json");
        let stale_auth = json!({
            "OPENAI_API_KEY": "sk-old",
            "tokens": {
                "id_token": "stale.header.payload",
                "access_token": "stale-access",
                "refresh_token": "stale-refresh",
                "account_id": "stale-acc"
            }
        });
        std::fs::write(
            &auth_path,
            serde_json::to_string_pretty(&stale_auth).unwrap(),
        )
        .unwrap();

        super::login_with_api_key(dir.path(), "sk-new").expect("login_with_api_key should succeed");

        let auth = super::try_read_auth_json(&auth_path).expect("auth.json should parse");
        assert_eq!(auth.openai_api_key.as_deref(), Some("sk-new"));
        assert!(auth.tokens.is_none(), "tokens should be cleared");
    }

    #[tokio::test]
    async fn pro_account_with_no_api_key_uses_chatgpt_auth() {
        let codex_home = tempdir().unwrap();
@@ -452,7 +468,9 @@ mod tests {
            auth_dot_json,
            auth_file: _,
            ..
        } = super::load_auth(codex_home.path()).unwrap().unwrap();
        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
            .unwrap()
            .unwrap();
        assert_eq!(None, api_key);
        assert_eq!(AuthMode::ChatGPT, mode);

@@ -481,6 +499,88 @@ mod tests {
        )
    }

    /// Even if the OPENAI_API_KEY is set in auth.json, if the plan is not in
    /// [`TokenData::is_plan_that_should_use_api_key`], it should use
    /// [`AuthMode::ChatGPT`].
    #[tokio::test]
    async fn pro_account_with_api_key_still_uses_chatgpt_auth() {
        let codex_home = tempdir().unwrap();
        let fake_jwt = write_auth_file(
            AuthFileParams {
                openai_api_key: Some("sk-test-key".to_string()),
                chatgpt_plan_type: "pro".to_string(),
            },
            codex_home.path(),
        )
        .expect("failed to write auth file");

        let CodexAuth {
            api_key,
            mode,
            auth_dot_json,
            auth_file: _,
            ..
        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
            .unwrap()
            .unwrap();
        assert_eq!(None, api_key);
        assert_eq!(AuthMode::ChatGPT, mode);

        let guard = auth_dot_json.lock().unwrap();
        let auth_dot_json = guard.as_ref().expect("AuthDotJson should exist");
        assert_eq!(
            &AuthDotJson {
                openai_api_key: None,
                tokens: Some(TokenData {
                    id_token: IdTokenInfo {
                        email: Some("user@example.com".to_string()),
                        chatgpt_plan_type: Some(PlanType::Known(KnownPlan::Pro)),
                        raw_jwt: fake_jwt,
                    },
                    access_token: "test-access-token".to_string(),
                    refresh_token: "test-refresh-token".to_string(),
                    account_id: None,
                }),
                last_refresh: Some(
                    DateTime::parse_from_rfc3339(LAST_REFRESH)
                        .unwrap()
                        .with_timezone(&Utc)
                ),
            },
            auth_dot_json
        )
    }

    /// If the OPENAI_API_KEY is set in auth.json and it is an enterprise
    /// account, then it should use [`AuthMode::ApiKey`].
    #[tokio::test]
    async fn enterprise_account_with_api_key_uses_apikey_auth() {
        let codex_home = tempdir().unwrap();
        write_auth_file(
            AuthFileParams {
                openai_api_key: Some("sk-test-key".to_string()),
                chatgpt_plan_type: "enterprise".to_string(),
            },
            codex_home.path(),
        )
        .expect("failed to write auth file");

        let CodexAuth {
            api_key,
            mode,
            auth_dot_json,
            auth_file: _,
            ..
        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
            .unwrap()
            .unwrap();
        assert_eq!(Some("sk-test-key".to_string()), api_key);
        assert_eq!(AuthMode::ApiKey, mode);

        let guard = auth_dot_json.lock().expect("should unwrap");
        assert!(guard.is_none(), "auth_dot_json should be None");
    }

    #[tokio::test]
    async fn loads_api_key_from_auth_json() {
        let dir = tempdir().unwrap();
@@ -491,7 +591,9 @@ mod tests {
        )
        .unwrap();

        let auth = super::load_auth(dir.path()).unwrap().unwrap();
        let auth = super::load_auth(dir.path(), false, AuthMode::ChatGPT)
            .unwrap()
            .unwrap();
        assert_eq!(auth.mode, AuthMode::ApiKey);
        assert_eq!(auth.api_key, Some("sk-test-key".to_string()));

@@ -581,17 +683,26 @@ impl AuthManager {
    /// preferred auth method. Errors loading auth are swallowed; `auth()` will
    /// simply return `None` in that case so callers can treat it as an
    /// unauthenticated state.
    pub fn new(codex_home: PathBuf) -> Self {
        let auth = CodexAuth::from_codex_home(&codex_home).ok().flatten();
    pub fn new(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Self {
        let auth = CodexAuth::from_codex_home(&codex_home, preferred_auth_mode)
            .ok()
            .flatten();
        Self {
            codex_home,
            inner: RwLock::new(CachedAuth { auth }),
            inner: RwLock::new(CachedAuth {
                preferred_auth_mode,
                auth,
            }),
        }
    }

    /// Create an AuthManager with a specific CodexAuth, for testing only.
    pub fn from_auth_for_testing(auth: CodexAuth) -> Arc<Self> {
        let cached = CachedAuth { auth: Some(auth) };
        let preferred_auth_mode = auth.mode;
        let cached = CachedAuth {
            preferred_auth_mode,
            auth: Some(auth),
        };
        Arc::new(Self {
            codex_home: PathBuf::new(),
            inner: RwLock::new(cached),
@@ -603,10 +714,21 @@ impl AuthManager {
        self.inner.read().ok().and_then(|c| c.auth.clone())
    }

    /// Force a reload of the auth information from auth.json. Returns
    /// Preferred auth method used when (re)loading.
    pub fn preferred_auth_method(&self) -> AuthMode {
        self.inner
            .read()
            .map(|c| c.preferred_auth_mode)
            .unwrap_or(AuthMode::ApiKey)
    }

    /// Force a reload using the existing preferred auth method. Returns
    /// whether the auth value changed.
    pub fn reload(&self) -> bool {
        let new_auth = CodexAuth::from_codex_home(&self.codex_home).ok().flatten();
        let preferred = self.preferred_auth_method();
        let new_auth = CodexAuth::from_codex_home(&self.codex_home, preferred)
            .ok()
            .flatten();
        if let Ok(mut guard) = self.inner.write() {
            let changed = !AuthManager::auths_equal(&guard.auth, &new_auth);
            guard.auth = new_auth;
@@ -625,8 +747,8 @@ impl AuthManager {
    }

    /// Convenience constructor returning an `Arc` wrapper.
    pub fn shared(codex_home: PathBuf) -> Arc<Self> {
        Arc::new(Self::new(codex_home))
    pub fn shared(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Arc<Self> {
        Arc::new(Self::new(codex_home, preferred_auth_mode))
    }

    /// Attempt to refresh the current auth token (if any). On success, reload

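Taken together, the hunks above thread a preferred `AuthMode` through construction and reload. A hedged usage sketch against the post-change signatures shown in this diff (not verified against the full crate):

```rust
// Assumes the `AuthManager::shared` / `reload` signatures from the diff above.
use std::path::PathBuf;

fn example(codex_home: PathBuf) {
    // Construct once with the preferred mode; load errors become a `None` auth.
    let manager = AuthManager::shared(codex_home, AuthMode::ChatGPT);

    // Later, after auth.json may have changed on disk, reload with the same
    // preferred mode and learn whether the cached auth actually changed.
    if manager.reload() {
        // react to login/logout, e.g. refresh UI state
    }
}
```
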
@@ -73,9 +73,6 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
        }
    }

    // Walk uses a stack (LIFO), so re-sort by position to restore source order.
    command_nodes.sort_by_key(|node| node.start_byte());

    let mut commands = Vec::new();
    for node in command_nodes {
        if let Some(words) = parse_plain_command_from_node(node, src) {
@@ -153,10 +150,10 @@ mod tests {
        let src = "ls && pwd; echo 'hi there' | wc -l";
        let cmds = parse_seq(src).unwrap();
        let expected: Vec<Vec<String>> = vec![
            vec!["ls".to_string()],
            vec!["pwd".to_string()],
            vec!["echo".to_string(), "hi there".to_string()],
            vec!["wc".to_string(), "-l".to_string()],
            vec!["echo".to_string(), "hi there".to_string()],
            vec!["pwd".to_string()],
            vec!["ls".to_string()],
        ];
        assert_eq!(cmds, expected);
    }

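The hunk above removes the re-sort, and the updated test expectation reflects the raw stack (LIFO) traversal order instead of source order. For reference, the removed fix-up reduces to a one-line sort; a minimal sketch, with `start_byte` standing in for tree-sitter's `Node::start_byte`:

```rust
// Nodes collected from a stack-based walk arrive roughly in reverse;
// sorting by each node's starting byte restores source order.
struct Node { start_byte: usize }

fn restore_source_order(mut nodes: Vec<Node>) -> Vec<Node> {
    nodes.sort_by_key(|n| n.start_byte);
    nodes
}
```
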
@@ -1,20 +1,6 @@
use std::time::Duration;
use std::time::Instant;

use crate::ModelProviderInfo;
use crate::client_common::Prompt;
use crate::client_common::ResponseEvent;
use crate::client_common::ResponseStream;
use crate::error::CodexErr;
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::openai_tools::create_tools_json_for_chat_completions_api;
use crate::util::backoff;
use bytes::Bytes;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemContent;
use codex_protocol::models::ResponseItem;
use eventsource_stream::Eventsource;
use futures::Stream;
use futures::StreamExt;
@@ -29,13 +15,25 @@ use tokio::time::timeout;
use tracing::debug;
use tracing::trace;

use crate::ModelProviderInfo;
use crate::client_common::Prompt;
use crate::client_common::ResponseEvent;
use crate::client_common::ResponseStream;
use crate::error::CodexErr;
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::openai_tools::create_tools_json_for_chat_completions_api;
use crate::util::backoff;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemContent;
use codex_protocol::models::ResponseItem;

/// Implementation for the classic Chat Completions API.
pub(crate) async fn stream_chat_completions(
    prompt: &Prompt,
    model_family: &ModelFamily,
    client: &reqwest::Client,
    provider: &ModelProviderInfo,
    otel_event_manager: &OtelEventManager,
) -> Result<ResponseStream> {
    // Build messages array
    let mut messages = Vec::<serde_json::Value>::new();
@@ -290,30 +288,12 @@ pub(crate) async fn stream_chat_completions(

    let req_builder = provider.create_request_builder(client, &None).await?;

    let start = Instant::now();

    let res = req_builder
        .header(reqwest::header::ACCEPT, "text/event-stream")
        .json(&payload)
        .send()
        .await;

    let cf_ray = if let Ok(resp) = &res {
        let cf_ray = resp
            .headers()
            .get("cf-ray")
            .map(|v| v.to_str().unwrap_or_default())
            .unwrap_or_default()
            .to_string();

        trace!("Response status: {}, cf-ray: {}", resp.status(), cf_ray);
        Some(cf_ray)
    } else {
        None
    };

    otel_event_manager.request(cf_ray, attempt, start.elapsed(), &res);

    match res {
        Ok(resp) if resp.status().is_success() => {
            let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600);
@@ -322,7 +302,6 @@ pub(crate) async fn stream_chat_completions(
                stream,
                tx_event,
                provider.stream_idle_timeout(),
                otel_event_manager.clone(),
            ));
            return Ok(ResponseStream { rx_event });
        }
@@ -366,7 +345,6 @@ async fn process_chat_sse<S>(
    stream: S,
    tx_event: mpsc::Sender<Result<ResponseEvent>>,
    idle_timeout: Duration,
    otel_event_manager: OtelEventManager,
) where
    S: Stream<Item = Result<Bytes>> + Unpin,
{
@@ -390,13 +368,12 @@ async fn process_chat_sse<S>(
    let mut reasoning_text = String::new();

    loop {
        let start = Instant::now();
        let sse = match timeout(idle_timeout, stream.next()).await {
            Ok(Some(Ok(ev))) => ev,
            Ok(Some(Err(e))) => {
                let error = e.to_string();
                otel_event_manager.sse_event_failed(None, start.elapsed(), error.as_str());
                let _ = tx_event.send(Err(CodexErr::Stream(error, None))).await;
                let _ = tx_event
                    .send(Err(CodexErr::Stream(e.to_string(), None)))
                    .await;
                return;
            }
            Ok(None) => {
@@ -410,10 +387,11 @@ async fn process_chat_sse<S>(
                return;
            }
            Err(_) => {
                let error = "idle timeout waiting for SSE";
                otel_event_manager.sse_event_failed(None, start.elapsed(), error);
                let _ = tx_event
                    .send(Err(CodexErr::Stream(error.into(), None)))
                    .send(Err(CodexErr::Stream(
                        "idle timeout waiting for SSE".into(),
                        None,
                    )))
                    .await;
                return;
            }
@@ -421,7 +399,6 @@ async fn process_chat_sse<S>(

        // OpenAI Chat streaming sends a literal string "[DONE]" when finished.
        if sse.data.trim() == "[DONE]" {
            otel_event_manager.sse_event(&sse.event, start.elapsed());
            // Emit any finalized items before closing so downstream consumers receive
            // terminal events for both assistant content and raw reasoning.
            if !assistant_text.is_empty() {
@@ -459,14 +436,9 @@ async fn process_chat_sse<S>(
        // Parse JSON chunk
        let chunk: serde_json::Value = match serde_json::from_str(&sse.data) {
            Ok(v) => v,
            Err(e) => {
                let error = format!("Failed to parse SSE event: {e}, data: {}", &sse.data);
                otel_event_manager.sse_event_failed(None, start.elapsed(), error.as_str());
                continue;
            }
            Err(_) => continue,
        };
        trace!("chat_completions received SSE chunk: {chunk:?}");
        otel_event_manager.sse_event(&sse.event, start.elapsed());

        let choice_opt = chunk.get("choices").and_then(|c| c.get(0));

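The loop above wraps every `stream.next()` in `tokio::time::timeout` so a stalled SSE connection surfaces as a stream error instead of hanging forever. The pattern in isolation, as a hedged sketch:

```rust
use std::time::Duration;
use futures::{Stream, StreamExt};

/// Yield the next item, or an error if nothing arrives within `idle`.
async fn next_or_idle_timeout<S, T>(stream: &mut S, idle: Duration) -> Result<Option<T>, &'static str>
where
    S: Stream<Item = T> + Unpin,
{
    match tokio::time::timeout(idle, stream.next()).await {
        Ok(item) => Ok(item), // Some(event), or None on a clean end-of-stream
        Err(_) => Err("idle timeout waiting for SSE"),
    }
}
```
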
File diff suppressed because it is too large
@@ -10,14 +10,14 @@ use codex_protocol::models::ResponseItem;
use futures::Stream;
use serde::Serialize;
use std::borrow::Cow;
use std::ops::Deref;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use tokio::sync::mpsc;

/// Review thread system prompt. Edit `core/src/review_prompt.md` to customize.
pub const REVIEW_PROMPT: &str = include_str!("../review_prompt.md");
/// The `instructions` field in the payload sent to a model should always start
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");

/// API request payload for a single model turn
#[derive(Default, Debug, Clone)]
@@ -34,14 +34,15 @@ pub struct Prompt {
}

impl Prompt {
    pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelFamily) -> Cow<'a, str> {
    pub(crate) fn get_full_instructions(&self, model: &ModelFamily) -> Cow<'_, str> {
        let base = self
            .base_instructions_override
            .as_deref()
            .unwrap_or(model.base_instructions.deref());
        // When there are no custom instructions, add apply_patch_tool_instructions if:
        // - the model needs special instructions (4.1)
        // AND
            .unwrap_or(BASE_INSTRUCTIONS);
        let mut sections: Vec<&str> = vec![base];

        // When there are no custom instructions, add apply_patch_tool_instructions if either:
        // - the model needs special instructions (4.1), or
        // - there is no apply_patch tool present
        let is_apply_patch_tool_present = self.tools.iter().any(|tool| match tool {
            OpenAiTool::Function(f) => f.name == "apply_patch",
@@ -49,13 +50,11 @@ impl Prompt {
            _ => false,
        });
        if self.base_instructions_override.is_none()
            && model.needs_special_apply_patch_instructions
            && !is_apply_patch_tool_present
            && (model.needs_special_apply_patch_instructions || !is_apply_patch_tool_present)
        {
            Cow::Owned(format!("{base}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}"))
        } else {
            Cow::Borrowed(base)
            sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
        }
        Cow::Owned(sections.join("\n"))
    }

    pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
@@ -82,10 +81,8 @@ pub enum ResponseEvent {

#[derive(Debug, Serialize)]
pub(crate) struct Reasoning {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) effort: Option<ReasoningEffortConfig>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) summary: Option<ReasoningSummaryConfig>,
    pub(crate) effort: ReasoningEffortConfig,
    pub(crate) summary: ReasoningSummaryConfig,
}

/// Controls under the `text` field in the Responses API for GPT-5.
@@ -139,17 +136,14 @@ pub(crate) struct ResponsesApiRequest<'a> {

pub(crate) fn create_reasoning_param_for_request(
    model_family: &ModelFamily,
    effort: Option<ReasoningEffortConfig>,
    effort: ReasoningEffortConfig,
    summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
    if !model_family.supports_reasoning_summaries {
        return None;
    if model_family.supports_reasoning_summaries {
        Some(Reasoning { effort, summary })
    } else {
        None
    }

    Some(Reasoning {
        effort,
        summary: Some(summary),
    })
}

pub(crate) fn create_text_param_for_request(
@@ -175,64 +169,18 @@ impl Stream for ResponseStream {
#[cfg(test)]
mod tests {
    use crate::model_family::find_family_for_model;
    use pretty_assertions::assert_eq;

    use super::*;

    struct InstructionsTestCase {
        pub slug: &'static str,
        pub expects_apply_patch_instructions: bool,
    }
    #[test]
    fn get_full_instructions_no_user_content() {
        let prompt = Prompt {
            ..Default::default()
        };
        let test_cases = vec![
            InstructionsTestCase {
                slug: "gpt-3.5",
                expects_apply_patch_instructions: true,
            },
            InstructionsTestCase {
                slug: "gpt-4.1",
                expects_apply_patch_instructions: true,
            },
            InstructionsTestCase {
                slug: "gpt-4o",
                expects_apply_patch_instructions: true,
            },
            InstructionsTestCase {
                slug: "gpt-5",
                expects_apply_patch_instructions: true,
            },
            InstructionsTestCase {
                slug: "codex-mini-latest",
                expects_apply_patch_instructions: true,
            },
            InstructionsTestCase {
                slug: "gpt-oss:120b",
                expects_apply_patch_instructions: false,
            },
            InstructionsTestCase {
                slug: "gpt-5-codex",
                expects_apply_patch_instructions: false,
            },
        ];
        for test_case in test_cases {
            let model_family = find_family_for_model(test_case.slug).expect("known model slug");
            let expected = if test_case.expects_apply_patch_instructions {
                format!(
                    "{}\n{}",
                    model_family.clone().base_instructions,
                    APPLY_PATCH_TOOL_INSTRUCTIONS
                )
            } else {
                model_family.clone().base_instructions
            };

            let full = prompt.get_full_instructions(&model_family);
            assert_eq!(full, expected);
        }
        let expected = format!("{BASE_INSTRUCTIONS}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}");
        let model_family = find_family_for_model("gpt-4.1").expect("known model slug");
        let full = prompt.get_full_instructions(&model_family);
        assert_eq!(full, expected);
    }

    #[test]

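The `Cow<'_, str>` return type above supports the classic borrow-or-own idiom: borrow the base instructions untouched when nothing is appended, and allocate only when an extra section is added (note the rewritten version always allocates via `join`). The idiom in isolation, as a sketch:

```rust
use std::borrow::Cow;

/// Borrow `base` as-is, or own a new string with `extra` appended.
fn with_optional_section<'a>(base: &'a str, extra: Option<&str>) -> Cow<'a, str> {
    match extra {
        Some(extra) => Cow::Owned(format!("{base}\n{extra}")),
        None => Cow::Borrowed(base),
    }
}
```
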
File diff suppressed because it is too large
@@ -1,399 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::AgentTask;
|
||||
use super::Session;
|
||||
use super::TurnContext;
|
||||
use super::get_last_assistant_message_from_turn;
|
||||
use crate::Prompt;
|
||||
use crate::client_common::ResponseEvent;
|
||||
use crate::error::CodexErr;
|
||||
use crate::error::Result as CodexResult;
|
||||
use crate::protocol::AgentMessageEvent;
|
||||
use crate::protocol::CompactedItem;
|
||||
use crate::protocol::ErrorEvent;
|
||||
use crate::protocol::Event;
|
||||
use crate::protocol::EventMsg;
|
||||
use crate::protocol::InputItem;
|
||||
use crate::protocol::InputMessageKind;
|
||||
use crate::protocol::TaskCompleteEvent;
|
||||
use crate::protocol::TaskStartedEvent;
|
||||
use crate::protocol::TurnContextItem;
|
||||
use crate::util::backoff;
|
||||
use askama::Template;
|
||||
use codex_protocol::models::ContentItem;
|
||||
use codex_protocol::models::ResponseInputItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::protocol::RolloutItem;
|
||||
use futures::prelude::*;
|
||||
|
||||
pub(super) const COMPACT_TRIGGER_TEXT: &str = "Start Summarization";
|
||||
const SUMMARIZATION_PROMPT: &str = include_str!("../../templates/compact/prompt.md");
|
||||
|
||||
#[derive(Template)]
|
||||
#[template(path = "compact/history_bridge.md", escape = "none")]
|
||||
struct HistoryBridgeTemplate<'a> {
|
||||
user_messages_text: &'a str,
|
||||
summary_text: &'a str,
|
||||
}
|
||||
|
||||
pub(super) async fn spawn_compact_task(
|
||||
sess: Arc<Session>,
|
||||
turn_context: Arc<TurnContext>,
|
||||
sub_id: String,
|
||||
input: Vec<InputItem>,
|
||||
) {
|
||||
let task = AgentTask::compact(
|
||||
sess.clone(),
|
||||
turn_context,
|
||||
sub_id,
|
||||
input,
|
||||
SUMMARIZATION_PROMPT.to_string(),
|
||||
);
|
||||
sess.set_task(task).await;
|
||||
}
|
||||
|
||||
pub(super) async fn run_inline_auto_compact_task(
|
||||
sess: Arc<Session>,
|
||||
turn_context: Arc<TurnContext>,
|
||||
) {
|
||||
let sub_id = sess.next_internal_sub_id();
|
||||
let input = vec![InputItem::Text {
|
||||
text: COMPACT_TRIGGER_TEXT.to_string(),
|
||||
}];
|
||||
run_compact_task_inner(
|
||||
sess,
|
||||
turn_context,
|
||||
sub_id,
|
||||
input,
|
||||
SUMMARIZATION_PROMPT.to_string(),
|
||||
false,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
pub(super) async fn run_compact_task(
|
||||
sess: Arc<Session>,
|
||||
turn_context: Arc<TurnContext>,
|
||||
sub_id: String,
|
||||
input: Vec<InputItem>,
|
||||
compact_instructions: String,
|
||||
) {
|
||||
let start_event = Event {
|
||||
id: sub_id.clone(),
|
||||
msg: EventMsg::TaskStarted(TaskStartedEvent {
|
||||
model_context_window: turn_context.client.get_model_context_window(),
|
||||
}),
|
||||
};
|
||||
sess.send_event(start_event).await;
|
||||
run_compact_task_inner(
|
||||
sess.clone(),
|
||||
turn_context,
|
||||
sub_id.clone(),
|
||||
input,
|
||||
compact_instructions,
|
||||
true,
|
||||
)
|
||||
.await;
|
||||
let event = Event {
|
||||
id: sub_id,
|
||||
msg: EventMsg::TaskComplete(TaskCompleteEvent {
|
||||
last_agent_message: None,
|
||||
}),
|
||||
};
|
||||
sess.send_event(event).await;
|
||||
}
|
||||
|
||||
async fn run_compact_task_inner(
|
||||
sess: Arc<Session>,
|
||||
turn_context: Arc<TurnContext>,
|
||||
sub_id: String,
|
||||
input: Vec<InputItem>,
|
||||
compact_instructions: String,
|
||||
remove_task_on_completion: bool,
|
||||
) {
|
||||
let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
|
||||
let instructions_override = compact_instructions;
|
||||
let turn_input = sess
|
||||
.turn_input_with_history(vec![initial_input_for_turn.clone().into()])
|
||||
.await;
|
||||
|
||||
let prompt = Prompt {
|
||||
input: turn_input,
|
||||
tools: Vec::new(),
|
||||
base_instructions_override: Some(instructions_override),
|
||||
};
|
||||
|
||||
let max_retries = turn_context.client.get_provider().stream_max_retries();
|
||||
let mut retries = 0;
|
||||
|
||||
let rollout_item = RolloutItem::TurnContext(TurnContextItem {
|
||||
cwd: turn_context.cwd.clone(),
|
||||
approval_policy: turn_context.approval_policy,
|
||||
sandbox_policy: turn_context.sandbox_policy.clone(),
|
||||
model: turn_context.client.get_model(),
|
||||
effort: turn_context.client.get_reasoning_effort(),
|
||||
summary: turn_context.client.get_reasoning_summary(),
|
||||
});
|
||||
sess.persist_rollout_items(&[rollout_item]).await;
|
||||
|
||||
loop {
|
||||
let attempt_result = drain_to_completed(&sess, turn_context.as_ref(), &prompt).await;
|
||||
|
||||
match attempt_result {
|
||||
Ok(()) => {
|
||||
break;
|
||||
}
|
||||
Err(CodexErr::Interrupted) => {
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
if retries < max_retries {
|
||||
retries += 1;
|
||||
let delay = backoff(retries);
|
||||
sess.notify_stream_error(
|
||||
&sub_id,
|
||||
format!(
|
||||
"stream error: {e}; retrying {retries}/{max_retries} in {delay:?}…"
|
||||
),
|
||||
)
|
||||
.await;
|
||||
tokio::time::sleep(delay).await;
|
||||
continue;
|
||||
} else {
|
||||
let event = Event {
|
||||
id: sub_id.clone(),
|
||||
msg: EventMsg::Error(ErrorEvent {
|
||||
message: e.to_string(),
|
||||
}),
|
||||
};
|
||||
sess.send_event(event).await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if remove_task_on_completion {
|
||||
sess.remove_task(&sub_id).await;
|
||||
}
|
||||
let history_snapshot = {
|
||||
let state = sess.state.lock().await;
|
||||
state.history.contents()
|
||||
};
|
||||
let summary_text = get_last_assistant_message_from_turn(&history_snapshot).unwrap_or_default();
|
||||
let user_messages = collect_user_messages(&history_snapshot);
|
||||
let initial_context = sess.build_initial_context(turn_context.as_ref());
|
||||
let new_history = build_compacted_history(initial_context, &user_messages, &summary_text);
|
||||
{
|
||||
let mut state = sess.state.lock().await;
|
||||
state.history.replace(new_history);
|
||||
}
|
||||
|
||||
let rollout_item = RolloutItem::Compacted(CompactedItem {
|
||||
message: summary_text.clone(),
|
||||
});
|
||||
sess.persist_rollout_items(&[rollout_item]).await;
|
||||
|
||||
let event = Event {
|
||||
id: sub_id.clone(),
|
||||
msg: EventMsg::AgentMessage(AgentMessageEvent {
|
||||
message: "Compact task completed".to_string(),
|
||||
}),
|
||||
};
|
||||
sess.send_event(event).await;
|
||||
}
|
||||
|
||||
pub fn content_items_to_text(content: &[ContentItem]) -> Option<String> {
|
||||
let mut pieces = Vec::new();
|
||||
for item in content {
|
||||
match item {
|
||||
ContentItem::InputText { text } | ContentItem::OutputText { text } => {
|
||||
if !text.is_empty() {
|
||||
pieces.push(text.as_str());
|
||||
}
|
||||
}
|
||||
ContentItem::InputImage { .. } => {}
|
||||
}
|
||||
}
|
||||
if pieces.is_empty() {
|
||||
None
|
||||
} else {
|
||||
        Some(pieces.join("\n"))
    }
}

pub(crate) fn collect_user_messages(items: &[ResponseItem]) -> Vec<String> {
    items
        .iter()
        .filter_map(|item| match item {
            ResponseItem::Message { role, content, .. } if role == "user" => {
                content_items_to_text(content)
            }
            _ => None,
        })
        .filter(|text| !is_session_prefix_message(text))
        .collect()
}

pub fn is_session_prefix_message(text: &str) -> bool {
    matches!(
        InputMessageKind::from(("user", text)),
        InputMessageKind::UserInstructions | InputMessageKind::EnvironmentContext
    )
}

pub(crate) fn build_compacted_history(
    initial_context: Vec<ResponseItem>,
    user_messages: &[String],
    summary_text: &str,
) -> Vec<ResponseItem> {
    let mut history = initial_context;
    let user_messages_text = if user_messages.is_empty() {
        "(none)".to_string()
    } else {
        user_messages.join("\n\n")
    };
    let summary_text = if summary_text.is_empty() {
        "(no summary available)".to_string()
    } else {
        summary_text.to_string()
    };
    let Ok(bridge) = HistoryBridgeTemplate {
        user_messages_text: &user_messages_text,
        summary_text: &summary_text,
    }
    .render() else {
        return vec![];
    };
    history.push(ResponseItem::Message {
        id: None,
        role: "user".to_string(),
        content: vec![ContentItem::InputText { text: bridge }],
    });
    history
}

async fn drain_to_completed(
    sess: &Session,
    turn_context: &TurnContext,
    prompt: &Prompt,
) -> CodexResult<()> {
    let mut stream = turn_context.client.clone().stream(prompt).await?;
    loop {
        let maybe_event = stream.next().await;
        let Some(event) = maybe_event else {
            return Err(CodexErr::Stream(
                "stream closed before response.completed".into(),
                None,
            ));
        };
        match event {
            Ok(ResponseEvent::OutputItemDone(item)) => {
                let mut state = sess.state.lock().await;
                state.history.record_items(std::slice::from_ref(&item));
            }
            Ok(ResponseEvent::Completed { .. }) => {
                return Ok(());
            }
            Ok(_) => continue,
            Err(e) => return Err(e),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn content_items_to_text_joins_non_empty_segments() {
        let items = vec![
            ContentItem::InputText {
                text: "hello".to_string(),
            },
            ContentItem::OutputText {
                text: String::new(),
            },
            ContentItem::OutputText {
                text: "world".to_string(),
            },
        ];

        let joined = content_items_to_text(&items);

        assert_eq!(Some("hello\nworld".to_string()), joined);
    }

    #[test]
    fn content_items_to_text_ignores_image_only_content() {
        let items = vec![ContentItem::InputImage {
            image_url: "file://image.png".to_string(),
        }];

        let joined = content_items_to_text(&items);

        assert_eq!(None, joined);
    }

    #[test]
    fn collect_user_messages_extracts_user_text_only() {
        let items = vec![
            ResponseItem::Message {
                id: Some("assistant".to_string()),
                role: "assistant".to_string(),
                content: vec![ContentItem::OutputText {
                    text: "ignored".to_string(),
                }],
            },
            ResponseItem::Message {
                id: Some("user".to_string()),
                role: "user".to_string(),
                content: vec![
                    ContentItem::InputText {
                        text: "first".to_string(),
                    },
                    ContentItem::OutputText {
                        text: "second".to_string(),
                    },
                ],
            },
            ResponseItem::Other,
        ];

        let collected = collect_user_messages(&items);

        assert_eq!(vec!["first\nsecond".to_string()], collected);
    }

    #[test]
    fn collect_user_messages_filters_session_prefix_entries() {
        let items = vec![
            ResponseItem::Message {
                id: None,
                role: "user".to_string(),
                content: vec![ContentItem::InputText {
                    text: "<user_instructions>do things</user_instructions>".to_string(),
                }],
            },
            ResponseItem::Message {
                id: None,
                role: "user".to_string(),
                content: vec![ContentItem::InputText {
                    text: "<ENVIRONMENT_CONTEXT>cwd=/tmp</ENVIRONMENT_CONTEXT>".to_string(),
                }],
            },
            ResponseItem::Message {
                id: None,
                role: "user".to_string(),
                content: vec![ContentItem::InputText {
                    text: "real user message".to_string(),
                }],
            },
        ];

        let collected = collect_user_messages(&items);

        assert_eq!(vec!["real user message".to_string()], collected);
    }
}

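(For orientation, a sketch of how the two helpers above are meant to compose during compaction; `recompact` and its arguments are illustrative, not part of the diff.)

// Sketch: rebuild a trimmed history after a compaction turn.
fn recompact(
    initial_context: Vec<ResponseItem>,
    transcript: &[ResponseItem],
    summary: &str,
) -> Vec<ResponseItem> {
    // Session-prefix items (user instructions, environment context) are
    // filtered out by collect_user_messages, so only real user turns survive.
    let user_messages = collect_user_messages(transcript);
    build_compacted_history(initial_context, &user_messages, summary)
}
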
@@ -1,7 +1,6 @@
use crate::config_profile::ConfigProfile;
use crate::config_types::History;
use crate::config_types::McpServerConfig;
use crate::config_types::Notifications;
use crate::config_types::ReasoningSummaryFormat;
use crate::config_types::SandboxWorkspaceWrite;
use crate::config_types::ShellEnvironmentPolicy;
@@ -10,36 +9,29 @@ use crate::config_types::Tui;
use crate::config_types::UriBasedFileOpener;
use crate::git_info::resolve_root_git_project_for_trust;
use crate::model_family::ModelFamily;
use crate::model_family::derive_default_model_family;
use crate::model_family::find_family_for_model;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::built_in_model_providers;
use crate::openai_model_info::get_model_info;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use anyhow::Context;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::mcp_protocol::AuthMode;
use codex_protocol::mcp_protocol::Tools;
use codex_protocol::mcp_protocol::UserSavedConfig;
use dirs::home_dir;
use serde::Deserialize;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use toml::Value as TomlValue;
use toml_edit::Array as TomlArray;
use toml_edit::DocumentMut;
use toml_edit::Item as TomlItem;
use toml_edit::Table as TomlTable;

const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5-codex";
pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5-codex";

/// Maximum number of bytes of the documentation that will be embedded. Larger
/// files are *silently truncated* to this size so we do not take up too much of
@@ -48,17 +40,12 @@ pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB

pub(crate) const CONFIG_TOML_FILE: &str = "config.toml";

const DEFAULT_OTEL_ENVIRONMENT: &str = "dev";

/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
    /// Optional override of model selection.
    pub model: String,

    /// Model used specifically for review sessions. Defaults to "gpt-5".
    pub review_model: String,

    pub model_family: ModelFamily,

    /// Size of the context window for the model, in tokens.
@@ -67,9 +54,6 @@ pub struct Config {
    /// Maximum number of output tokens.
    pub model_max_output_tokens: Option<u64>,

    /// Token usage threshold triggering auto-compaction of conversation history.
    pub model_auto_compact_token_limit: Option<i64>,

    /// Key into the model_providers map that specifies which provider to use.
    pub model_provider_id: String,

@@ -120,10 +104,6 @@ pub struct Config {
    /// If unset the feature is disabled.
    pub notify: Option<Vec<String>>,

    /// TUI notifications preference. When set, the TUI will send OSC 9 notifications on approvals
    /// and turn completions when not focused.
    pub tui_notifications: Notifications,

    /// The directory that should be treated as the current working directory
    /// for the session. All relative paths inside the business-logic layer are
    /// resolved against this path.
@@ -149,6 +129,9 @@ pub struct Config {
    /// output will be hyperlinked using the specified URI scheme.
    pub file_opener: UriBasedFileOpener,

    /// Collection of settings that are specific to the TUI.
    pub tui: Tui,

    /// Path to the `codex-linux-sandbox` executable. This must be set if
    /// [`crate::exec::SandboxType::LinuxSeccomp`] is used. Note that this
    /// cannot be set in the config file: it must be set in code via
@@ -159,7 +142,7 @@ pub struct Config {

    /// Value to use for `reasoning.effort` when making a request using the
    /// Responses API.
    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_effort: ReasoningEffort,

    /// If not "none", the value to use for `reasoning.summary` when making a
    /// request using the Responses API.
@@ -171,6 +154,9 @@ pub struct Config {
    /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
    pub chatgpt_base_url: String,

    /// Experimental rollout resume path (absolute path to .jsonl; undocumented).
    pub experimental_resume: Option<PathBuf>,

    /// Include an experimental plan tool that the model can use to update its current plan and status of each step.
    pub include_plan_tool: bool,

@@ -181,6 +167,9 @@ pub struct Config {

    pub tools_web_search_request: bool,

    /// If set to `true`, the API key will be signed with the `originator` header.
    pub preferred_auth_method: AuthMode,

    pub use_experimental_streamable_shell_tool: bool,

    /// If set to `true`, used only the experimental unified exec tool.
@@ -196,9 +185,6 @@ pub struct Config {
    /// All characters are inserted as they are received, and no buffering
    /// or placeholder replacement will occur for fast keypress bursts.
    pub disable_paste_burst: bool,

    /// OTEL configuration (exporter type, endpoint, headers, etc.).
    pub otel: crate::config_types::OtelConfig,
}

impl Config {
@@ -278,88 +264,6 @@ pub fn load_config_as_toml(codex_home: &Path) -> std::io::Result<TomlValue> {
    }
}

pub fn load_global_mcp_servers(
    codex_home: &Path,
) -> std::io::Result<BTreeMap<String, McpServerConfig>> {
    let root_value = load_config_as_toml(codex_home)?;
    let Some(servers_value) = root_value.get("mcp_servers") else {
        return Ok(BTreeMap::new());
    };

    servers_value
        .clone()
        .try_into()
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
}

pub fn write_global_mcp_servers(
    codex_home: &Path,
    servers: &BTreeMap<String, McpServerConfig>,
) -> std::io::Result<()> {
    let config_path = codex_home.join(CONFIG_TOML_FILE);
    let mut doc = match std::fs::read_to_string(&config_path) {
        Ok(contents) => contents
            .parse::<DocumentMut>()
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => DocumentMut::new(),
        Err(e) => return Err(e),
    };

    doc.as_table_mut().remove("mcp_servers");

    if !servers.is_empty() {
        let mut table = TomlTable::new();
        table.set_implicit(true);
        doc["mcp_servers"] = TomlItem::Table(table);

        for (name, config) in servers {
            let mut entry = TomlTable::new();
            entry.set_implicit(false);
            entry["command"] = toml_edit::value(config.command.clone());

            if !config.args.is_empty() {
                let mut args = TomlArray::new();
                for arg in &config.args {
                    args.push(arg.clone());
                }
                entry["args"] = TomlItem::Value(args.into());
            }

            if let Some(env) = &config.env
                && !env.is_empty()
            {
                let mut env_table = TomlTable::new();
                env_table.set_implicit(false);
                let mut pairs: Vec<_> = env.iter().collect();
                pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
                for (key, value) in pairs {
                    env_table.insert(key, toml_edit::value(value.clone()));
                }
                entry["env"] = TomlItem::Table(env_table);
            }

            if let Some(timeout) = config.startup_timeout_ms {
                let timeout = i64::try_from(timeout).map_err(|_| {
                    std::io::Error::new(
                        std::io::ErrorKind::InvalidData,
                        "startup_timeout_ms exceeds supported range",
                    )
                })?;
                entry["startup_timeout_ms"] = toml_edit::value(timeout);
            }

            doc["mcp_servers"][name.as_str()] = TomlItem::Table(entry);
        }
    }

    std::fs::create_dir_all(codex_home)?;
    let tmp_file = NamedTempFile::new_in(codex_home)?;
    std::fs::write(tmp_file.path(), doc.to_string())?;
    tmp_file.persist(config_path).map_err(|err| err.error)?;

    Ok(())
}

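(A minimal round-trip sketch for the two helpers above; the "docs" entry is hypothetical.)

// Sketch: read the global MCP server map, add an entry, persist atomically.
let mut servers = load_global_mcp_servers(codex_home)?;
servers.insert(
    "docs".to_string(),
    McpServerConfig {
        command: "echo".to_string(),
        args: vec!["hello".to_string()],
        env: None,
        startup_timeout_ms: None,
    },
);
write_global_mcp_servers(codex_home, &servers)?;
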
fn set_project_trusted_inner(doc: &mut DocumentMut, project_path: &Path) -> anyhow::Result<()> {
    // Ensure we render a human-friendly structure:
    //
@@ -451,117 +355,6 @@ pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Re
    Ok(())
}

fn ensure_profile_table<'a>(
    doc: &'a mut DocumentMut,
    profile_name: &str,
) -> anyhow::Result<&'a mut toml_edit::Table> {
    let mut created_profiles_table = false;
    {
        let root = doc.as_table_mut();
        let needs_table = !root.contains_key("profiles")
            || root
                .get("profiles")
                .and_then(|item| item.as_table())
                .is_none();
        if needs_table {
            root.insert("profiles", toml_edit::table());
            created_profiles_table = true;
        }
    }

    let Some(profiles_table) = doc["profiles"].as_table_mut() else {
        return Err(anyhow::anyhow!(
            "profiles table missing after initialization"
        ));
    };

    if created_profiles_table {
        profiles_table.set_implicit(true);
    }

    let needs_profile_table = !profiles_table.contains_key(profile_name)
        || profiles_table
            .get(profile_name)
            .and_then(|item| item.as_table())
            .is_none();
    if needs_profile_table {
        profiles_table.insert(profile_name, toml_edit::table());
    }

    let Some(profile_table) = profiles_table
        .get_mut(profile_name)
        .and_then(|item| item.as_table_mut())
    else {
        return Err(anyhow::anyhow!(format!(
            "profile table missing for {profile_name}"
        )));
    };

    profile_table.set_implicit(false);
    Ok(profile_table)
}

// TODO(jif) refactor config persistence.
pub async fn persist_model_selection(
    codex_home: &Path,
    active_profile: Option<&str>,
    model: &str,
    effort: Option<ReasoningEffort>,
) -> anyhow::Result<()> {
    let config_path = codex_home.join(CONFIG_TOML_FILE);
    let serialized = match tokio::fs::read_to_string(&config_path).await {
        Ok(contents) => contents,
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => String::new(),
        Err(err) => return Err(err.into()),
    };

    let mut doc = if serialized.is_empty() {
        DocumentMut::new()
    } else {
        serialized.parse::<DocumentMut>()?
    };

    if let Some(profile_name) = active_profile {
        let profile_table = ensure_profile_table(&mut doc, profile_name)?;
        profile_table["model"] = toml_edit::value(model);
        match effort {
            Some(effort) => {
                profile_table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
            }
            None => {
                profile_table.remove("model_reasoning_effort");
            }
        }
    } else {
        let table = doc.as_table_mut();
        table["model"] = toml_edit::value(model);
        match effort {
            Some(effort) => {
                table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
            }
            None => {
                table.remove("model_reasoning_effort");
            }
        }
    }

    // TODO(jif) refactor the home creation
    tokio::fs::create_dir_all(codex_home)
        .await
        .with_context(|| {
            format!(
                "failed to create Codex home directory at {}",
                codex_home.display()
            )
        })?;

    tokio::fs::write(&config_path, doc.to_string())
        .await
        .with_context(|| format!("failed to persist config.toml at {}", config_path.display()))?;

    Ok(())
}

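(How persist_model_selection is typically driven; a sketch only, with a hypothetical "dev" profile.)

// Sketch: write the model into [profiles.dev], creating the table on demand
// via ensure_profile_table; passing None for the effort removes the key.
persist_model_selection(codex_home, Some("dev"), "gpt-5-codex", Some(ReasoningEffort::High)).await?;
persist_model_selection(codex_home, None, "gpt-5-codex", None).await?;
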
/// Apply a single dotted-path override onto a TOML value.
fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) {
    use toml::value::Table;
@@ -606,12 +399,10 @@ fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) {
}

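(A quick illustration of the dotted-path form that apply_toml_override consumes; the key and value here are only examples.)

// Sketch: an override such as `model_providers.openai.wire_api = "responses"`
// walks/creates nested tables and sets the leaf value.
let mut root = TomlValue::Table(toml::value::Table::new());
apply_toml_override(
    &mut root,
    "model_providers.openai.wire_api",
    TomlValue::String("responses".to_string()),
);
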
/// Base config deserialized from ~/.codex/config.toml.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ConfigToml {
    /// Optional override of model selection.
    pub model: Option<String>,
    /// Review model override used by the `/review` feature.
    pub review_model: Option<String>,

    /// Provider to use from the model_providers map.
    pub model_provider: Option<String>,
@@ -622,9 +413,6 @@ pub struct ConfigToml {
    /// Maximum number of output tokens.
    pub model_max_output_tokens: Option<u64>,

    /// Token usage threshold triggering auto-compaction of conversation history.
    pub model_auto_compact_token_limit: Option<i64>,

    /// Default approval policy for executing commands.
    pub approval_policy: Option<AskForApproval>,

@@ -695,6 +483,9 @@ pub struct ConfigToml {
    /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
    pub chatgpt_base_url: Option<String>,

    /// Experimental rollout resume path (absolute path to .jsonl; undocumented).
    pub experimental_resume: Option<PathBuf>,

    /// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS.
    pub experimental_instructions_file: Option<PathBuf>,

@@ -703,6 +494,9 @@ pub struct ConfigToml {

    pub projects: Option<HashMap<String, ProjectConfig>>,

    /// If set to `true`, the API key will be signed with the `originator` header.
    pub preferred_auth_method: Option<AuthMode>,

    /// Nested tools section for feature toggles
    pub tools: Option<ToolsToml>,

@@ -710,9 +504,6 @@ pub struct ConfigToml {
    /// All characters are inserted as they are received, and no buffering
    /// or placeholder replacement will occur for fast keypress bursts.
    pub disable_paste_burst: Option<bool>,

    /// OTEL configuration.
    pub otel: Option<crate::config_types::OtelConfigToml>,
}

impl From<ConfigToml> for UserSavedConfig {
@@ -743,7 +534,7 @@ pub struct ProjectConfig {
    pub trust_level: Option<String>,
}

#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ToolsToml {
    #[serde(default, alias = "web_search_request")]
    pub web_search: Option<bool>,
@@ -840,7 +631,6 @@ impl ConfigToml {
#[derive(Default, Debug, Clone)]
pub struct ConfigOverrides {
    pub model: Option<String>,
    pub review_model: Option<String>,
    pub cwd: Option<PathBuf>,
    pub approval_policy: Option<AskForApproval>,
    pub sandbox_mode: Option<SandboxMode>,
@@ -868,7 +658,6 @@ impl Config {
        // Destructure ConfigOverrides fully to ensure all overrides are applied.
        let ConfigOverrides {
            model,
            review_model: override_review_model,
            cwd,
            approval_policy,
            sandbox_mode,
@@ -959,8 +748,15 @@ impl Config {
            .or(cfg.model)
            .unwrap_or_else(default_model);

        let mut model_family =
            find_family_for_model(&model).unwrap_or_else(|| derive_default_model_family(&model));
        let mut model_family = find_family_for_model(&model).unwrap_or_else(|| ModelFamily {
            slug: model.clone(),
            family: model.clone(),
            needs_special_apply_patch_instructions: false,
            supports_reasoning_summaries: false,
            reasoning_summary_format: ReasoningSummaryFormat::None,
            uses_local_shell_tool: false,
            apply_patch_tool_type: None,
        });

        if let Some(supports_reasoning_summaries) = cfg.model_supports_reasoning_summaries {
            model_family.supports_reasoning_summaries = supports_reasoning_summaries;
@@ -978,11 +774,8 @@ impl Config {
            .as_ref()
            .map(|info| info.max_output_tokens)
        });
        let model_auto_compact_token_limit = cfg.model_auto_compact_token_limit.or_else(|| {
            openai_model_info
                .as_ref()
                .and_then(|info| info.auto_compact_token_limit)
        });

        let experimental_resume = cfg.experimental_resume;

        // Load base instructions override from a file if specified. If the
        // path is relative, resolve it against the effective cwd so the
@@ -995,18 +788,11 @@ impl Config {
            Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
        let base_instructions = base_instructions.or(file_base_instructions);

        // Default review model when not set in config; allow CLI override to take precedence.
        let review_model = override_review_model
            .or(cfg.review_model)
            .unwrap_or_else(default_review_model);

        let config = Self {
            model,
            review_model,
            model_family,
            model_context_window,
            model_max_output_tokens,
            model_auto_compact_token_limit,
            model_provider_id,
            model_provider,
            cwd: resolved_cwd,
@@ -1025,6 +811,7 @@ impl Config {
            codex_home,
            history,
            file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
            tui: cfg.tui.unwrap_or_default(),
            codex_linux_sandbox_exe,

            hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false),
@@ -1034,7 +821,8 @@ impl Config {
                .unwrap_or(false),
            model_reasoning_effort: config_profile
                .model_reasoning_effort
                .or(cfg.model_reasoning_effort),
                .or(cfg.model_reasoning_effort)
                .unwrap_or_default(),
            model_reasoning_summary: config_profile
                .model_reasoning_summary
                .or(cfg.model_reasoning_summary)
@@ -1044,39 +832,21 @@ impl Config {
                .chatgpt_base_url
                .or(cfg.chatgpt_base_url)
                .unwrap_or("https://chatgpt.com/backend-api/".to_string()),

            experimental_resume,
            include_plan_tool: include_plan_tool.unwrap_or(false),
            include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
            tools_web_search_request,
            preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
            use_experimental_streamable_shell_tool: cfg
                .experimental_use_exec_command_tool
                .unwrap_or(false),
            use_experimental_unified_exec_tool: cfg
                .experimental_use_unified_exec_tool
                .unwrap_or(false),
                .unwrap_or(true),
            include_view_image_tool,
            active_profile: active_profile_name,
            disable_paste_burst: cfg.disable_paste_burst.unwrap_or(false),
            tui_notifications: cfg
                .tui
                .as_ref()
                .map(|t| t.notifications.clone())
                .unwrap_or_default(),
            otel: {
                use crate::config_types::OtelConfig;
                use crate::config_types::OtelConfigToml;
                use crate::config_types::OtelExporterKind;
                let t: OtelConfigToml = cfg.otel.unwrap_or_default();
                let log_user_prompt = t.log_user_prompt.unwrap_or(false);
                let environment = t
                    .environment
                    .unwrap_or(DEFAULT_OTEL_ENVIRONMENT.to_string());
                let exporter = t.exporter.unwrap_or(OtelExporterKind::None);
                OtelConfig {
                    log_user_prompt,
                    environment,
                    exporter,
                }
            },
        };
        Ok(config)
    }
@@ -1145,10 +915,6 @@ fn default_model() -> String {
    OPENAI_DEFAULT_MODEL.to_string()
}

fn default_review_model() -> String {
    OPENAI_DEFAULT_REVIEW_MODEL.to_string()
}

/// Returns the path to the Codex configuration directory, which can be
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.codex`.
@@ -1187,11 +953,9 @@ pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {
#[cfg(test)]
mod tests {
    use crate::config_types::HistoryPersistence;
    use crate::config_types::Notifications;

    use super::*;
    use pretty_assertions::assert_eq;

    use tempfile::TempDir;

    #[test]
@@ -1226,19 +990,6 @@ persistence = "none"
        );
    }

    #[test]
    fn tui_config_missing_notifications_field_defaults_to_disabled() {
        let cfg = r#"
[tui]
"#;

        let parsed = toml::from_str::<ConfigToml>(cfg)
            .expect("TUI config without notifications should succeed");
        let tui = parsed.tui.expect("config should include tui section");

        assert_eq!(tui.notifications, Notifications::Enabled(false));
    }

    #[test]
    fn test_sandbox_config_parsing() {
        let sandbox_full_access = r#"
@@ -1295,189 +1046,6 @@ exclude_slash_tmp = true
        );
    }

    #[test]
    fn load_global_mcp_servers_returns_empty_if_missing() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let servers = load_global_mcp_servers(codex_home.path())?;
        assert!(servers.is_empty());

        Ok(())
    }

    #[test]
    fn write_global_mcp_servers_round_trips_entries() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let mut servers = BTreeMap::new();
        servers.insert(
            "docs".to_string(),
            McpServerConfig {
                command: "echo".to_string(),
                args: vec!["hello".to_string()],
                env: None,
                startup_timeout_ms: None,
            },
        );

        write_global_mcp_servers(codex_home.path(), &servers)?;

        let loaded = load_global_mcp_servers(codex_home.path())?;
        assert_eq!(loaded.len(), 1);
        let docs = loaded.get("docs").expect("docs entry");
        assert_eq!(docs.command, "echo");
        assert_eq!(docs.args, vec!["hello".to_string()]);

        let empty = BTreeMap::new();
        write_global_mcp_servers(codex_home.path(), &empty)?;
        let loaded = load_global_mcp_servers(codex_home.path())?;
        assert!(loaded.is_empty());

        Ok(())
    }

    #[tokio::test]
    async fn persist_model_selection_updates_defaults() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        persist_model_selection(
            codex_home.path(),
            None,
            "gpt-5-codex",
            Some(ReasoningEffort::High),
        )
        .await?;

        let serialized =
            tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?;
        let parsed: ConfigToml = toml::from_str(&serialized)?;

        assert_eq!(parsed.model.as_deref(), Some("gpt-5-codex"));
        assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High));

        Ok(())
    }

    #[tokio::test]
    async fn persist_model_selection_overwrites_existing_model() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;
        let config_path = codex_home.path().join(CONFIG_TOML_FILE);

        tokio::fs::write(
            &config_path,
            r#"
model = "gpt-5"
model_reasoning_effort = "medium"

[profiles.dev]
model = "gpt-4.1"
"#,
        )
        .await?;

        persist_model_selection(
            codex_home.path(),
            None,
            "o4-mini",
            Some(ReasoningEffort::High),
        )
        .await?;

        let serialized = tokio::fs::read_to_string(config_path).await?;
        let parsed: ConfigToml = toml::from_str(&serialized)?;

        assert_eq!(parsed.model.as_deref(), Some("o4-mini"));
        assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High));
        assert_eq!(
            parsed
                .profiles
                .get("dev")
                .and_then(|profile| profile.model.as_deref()),
            Some("gpt-4.1"),
        );

        Ok(())
    }

    #[tokio::test]
    async fn persist_model_selection_updates_profile() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        persist_model_selection(
            codex_home.path(),
            Some("dev"),
            "gpt-5-codex",
            Some(ReasoningEffort::Medium),
        )
        .await?;

        let serialized =
            tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?;
        let parsed: ConfigToml = toml::from_str(&serialized)?;
        let profile = parsed
            .profiles
            .get("dev")
            .expect("profile should be created");

        assert_eq!(profile.model.as_deref(), Some("gpt-5-codex"));
        assert_eq!(
            profile.model_reasoning_effort,
            Some(ReasoningEffort::Medium)
        );

        Ok(())
    }

    #[tokio::test]
    async fn persist_model_selection_updates_existing_profile() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;
        let config_path = codex_home.path().join(CONFIG_TOML_FILE);

        tokio::fs::write(
            &config_path,
            r#"
[profiles.dev]
model = "gpt-4"
model_reasoning_effort = "medium"

[profiles.prod]
model = "gpt-5"
"#,
        )
        .await?;

        persist_model_selection(
            codex_home.path(),
            Some("dev"),
            "o4-high",
            Some(ReasoningEffort::Medium),
        )
        .await?;

        let serialized = tokio::fs::read_to_string(config_path).await?;
        let parsed: ConfigToml = toml::from_str(&serialized)?;

        let dev_profile = parsed
            .profiles
            .get("dev")
            .expect("dev profile should survive updates");
        assert_eq!(dev_profile.model.as_deref(), Some("o4-high"));
        assert_eq!(
            dev_profile.model_reasoning_effort,
            Some(ReasoningEffort::Medium)
        );

        assert_eq!(
            parsed
                .profiles
                .get("prod")
                .and_then(|profile| profile.model.as_deref()),
            Some("gpt-5"),
        );

        Ok(())
    }

    struct PrecedenceTestFixture {
        cwd: TempDir,
        codex_home: TempDir,
@@ -1619,11 +1187,9 @@ model_verbosity = "high"
        assert_eq!(
            Config {
                model: "o3".to_string(),
                review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
                model_family: find_family_for_model("o3").expect("known model slug"),
                model_context_window: Some(200_000),
                model_max_output_tokens: Some(100_000),
                model_auto_compact_token_limit: None,
                model_provider_id: "openai".to_string(),
                model_provider: fixture.openai_provider.clone(),
                approval_policy: AskForApproval::Never,
@@ -1638,28 +1204,25 @@ model_verbosity = "high"
                codex_home: fixture.codex_home(),
                history: History::default(),
                file_opener: UriBasedFileOpener::VsCode,
                tui: Tui::default(),
                codex_linux_sandbox_exe: None,
                hide_agent_reasoning: false,
                show_raw_agent_reasoning: false,
                model_reasoning_effort: Some(ReasoningEffort::High),
                model_reasoning_effort: ReasoningEffort::High,
                model_reasoning_summary: ReasoningSummary::Detailed,
                model_verbosity: None,
                chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
                experimental_resume: None,
                base_instructions: None,
                include_plan_tool: false,
                include_apply_patch_tool: false,
                tools_web_search_request: false,
                preferred_auth_method: AuthMode::ChatGPT,
                use_experimental_streamable_shell_tool: false,
                use_experimental_unified_exec_tool: false,
                use_experimental_unified_exec_tool: true,
                include_view_image_tool: true,
                active_profile: Some("o3".to_string()),
                disable_paste_burst: false,
                tui_notifications: Default::default(),
                otel: crate::config_types::OtelConfig {
                    log_user_prompt: false,
                    environment: DEFAULT_OTEL_ENVIRONMENT.to_string(),
                    exporter: crate::config_types::OtelExporterKind::None,
                },
            },
            o3_profile_config
        );
@@ -1682,11 +1245,9 @@ model_verbosity = "high"
        )?;
        let expected_gpt3_profile_config = Config {
            model: "gpt-3.5-turbo".to_string(),
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            model_family: find_family_for_model("gpt-3.5-turbo").expect("known model slug"),
            model_context_window: Some(16_385),
            model_max_output_tokens: Some(4_096),
            model_auto_compact_token_limit: None,
            model_provider_id: "openai-chat-completions".to_string(),
            model_provider: fixture.openai_chat_completions_provider.clone(),
            approval_policy: AskForApproval::UnlessTrusted,
@@ -1701,28 +1262,25 @@ model_verbosity = "high"
            codex_home: fixture.codex_home(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            tui: Tui::default(),
            codex_linux_sandbox_exe: None,
            hide_agent_reasoning: false,
            show_raw_agent_reasoning: false,
            model_reasoning_effort: None,
            model_reasoning_effort: ReasoningEffort::default(),
            model_reasoning_summary: ReasoningSummary::default(),
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            tools_web_search_request: false,
            preferred_auth_method: AuthMode::ChatGPT,
            use_experimental_streamable_shell_tool: false,
            use_experimental_unified_exec_tool: false,
            use_experimental_unified_exec_tool: true,
            include_view_image_tool: true,
            active_profile: Some("gpt3".to_string()),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            otel: crate::config_types::OtelConfig {
                log_user_prompt: false,
                environment: DEFAULT_OTEL_ENVIRONMENT.to_string(),
                exporter: crate::config_types::OtelExporterKind::None,
            },
        };

        assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -1760,11 +1318,9 @@ model_verbosity = "high"
        )?;
        let expected_zdr_profile_config = Config {
            model: "o3".to_string(),
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            model_family: find_family_for_model("o3").expect("known model slug"),
            model_context_window: Some(200_000),
            model_max_output_tokens: Some(100_000),
            model_auto_compact_token_limit: None,
            model_provider_id: "openai".to_string(),
            model_provider: fixture.openai_provider.clone(),
            approval_policy: AskForApproval::OnFailure,
@@ -1779,28 +1335,25 @@ model_verbosity = "high"
            codex_home: fixture.codex_home(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            tui: Tui::default(),
            codex_linux_sandbox_exe: None,
            hide_agent_reasoning: false,
            show_raw_agent_reasoning: false,
            model_reasoning_effort: None,
            model_reasoning_effort: ReasoningEffort::default(),
            model_reasoning_summary: ReasoningSummary::default(),
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            tools_web_search_request: false,
            preferred_auth_method: AuthMode::ChatGPT,
            use_experimental_streamable_shell_tool: false,
            use_experimental_unified_exec_tool: false,
            use_experimental_unified_exec_tool: true,
            include_view_image_tool: true,
            active_profile: Some("zdr".to_string()),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            otel: crate::config_types::OtelConfig {
                log_user_prompt: false,
                environment: DEFAULT_OTEL_ENVIRONMENT.to_string(),
                exporter: crate::config_types::OtelExporterKind::None,
            },
        };

        assert_eq!(expected_zdr_profile_config, zdr_profile_config);
@@ -1824,11 +1377,9 @@ model_verbosity = "high"
        )?;
        let expected_gpt5_profile_config = Config {
            model: "gpt-5".to_string(),
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            model_family: find_family_for_model("gpt-5").expect("known model slug"),
            model_context_window: Some(272_000),
            model_max_output_tokens: Some(128_000),
            model_auto_compact_token_limit: None,
            model_provider_id: "openai".to_string(),
            model_provider: fixture.openai_provider.clone(),
            approval_policy: AskForApproval::OnFailure,
@@ -1843,28 +1394,25 @@ model_verbosity = "high"
            codex_home: fixture.codex_home(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            tui: Tui::default(),
            codex_linux_sandbox_exe: None,
            hide_agent_reasoning: false,
            show_raw_agent_reasoning: false,
            model_reasoning_effort: Some(ReasoningEffort::High),
            model_reasoning_effort: ReasoningEffort::High,
            model_reasoning_summary: ReasoningSummary::Detailed,
            model_verbosity: Some(Verbosity::High),
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            tools_web_search_request: false,
            preferred_auth_method: AuthMode::ChatGPT,
            use_experimental_streamable_shell_tool: false,
            use_experimental_unified_exec_tool: false,
            use_experimental_unified_exec_tool: true,
            include_view_image_tool: true,
            active_profile: Some("gpt5".to_string()),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            otel: crate::config_types::OtelConfig {
                log_user_prompt: false,
                environment: DEFAULT_OTEL_ENVIRONMENT.to_string(),
                exporter: crate::config_types::OtelExporterKind::None,
            },
        };

        assert_eq!(expected_gpt5_profile_config, gpt5_profile_config);
@@ -1968,46 +1516,3 @@ trust_level = "trusted"
        Ok(())
    }
}

#[cfg(test)]
mod notifications_tests {
    use crate::config_types::Notifications;
    use serde::Deserialize;

    #[derive(Deserialize, Debug, PartialEq)]
    struct TuiTomlTest {
        notifications: Notifications,
    }

    #[derive(Deserialize, Debug, PartialEq)]
    struct RootTomlTest {
        tui: TuiTomlTest,
    }

    #[test]
    fn test_tui_notifications_true() {
        let toml = r#"
[tui]
notifications = true
"#;
        let parsed: RootTomlTest = toml::from_str(toml).expect("deserialize notifications=true");
        assert!(matches!(
            parsed.tui.notifications,
            Notifications::Enabled(true)
        ));
    }

    #[test]
    fn test_tui_notifications_custom_array() {
        let toml = r#"
[tui]
notifications = ["foo"]
"#;
        let parsed: RootTomlTest =
            toml::from_str(toml).expect("deserialize notifications=[\"foo\"]");
        assert!(matches!(
            parsed.tui.notifications,
            Notifications::Custom(ref v) if v == &vec!["foo".to_string()]
        ));
    }
}

@@ -7,12 +7,6 @@ use toml_edit::DocumentMut;
pub const CONFIG_KEY_MODEL: &str = "model";
pub const CONFIG_KEY_EFFORT: &str = "model_reasoning_effort";

#[derive(Copy, Clone)]
enum NoneBehavior {
    Skip,
    Remove,
}

/// Persist overrides into `config.toml` using explicit key segments per
/// override. This avoids ambiguity with keys that contain dots or spaces.
pub async fn persist_overrides(
@@ -20,12 +14,47 @@ pub async fn persist_overrides(
    profile: Option<&str>,
    overrides: &[(&[&str], &str)],
) -> Result<()> {
    let with_options: Vec<(&[&str], Option<&str>)> = overrides
        .iter()
        .map(|(segments, value)| (*segments, Some(*value)))
        .collect();
    let config_path = codex_home.join(CONFIG_TOML_FILE);

    persist_overrides_with_behavior(codex_home, profile, &with_options, NoneBehavior::Skip).await
    let mut doc = match tokio::fs::read_to_string(&config_path).await {
        Ok(s) => s.parse::<DocumentMut>()?,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            tokio::fs::create_dir_all(codex_home).await?;
            DocumentMut::new()
        }
        Err(e) => return Err(e.into()),
    };

    let effective_profile = if let Some(p) = profile {
        Some(p.to_owned())
    } else {
        doc.get("profile")
            .and_then(|i| i.as_str())
            .map(|s| s.to_string())
    };

    for (segments, val) in overrides.iter().copied() {
        let value = toml_edit::value(val);
        if let Some(ref name) = effective_profile {
            if segments.first().copied() == Some("profiles") {
                apply_toml_edit_override_segments(&mut doc, segments, value);
            } else {
                let mut seg_buf: Vec<&str> = Vec::with_capacity(2 + segments.len());
                seg_buf.push("profiles");
                seg_buf.push(name.as_str());
                seg_buf.extend_from_slice(segments);
                apply_toml_edit_override_segments(&mut doc, &seg_buf, value);
            }
        } else {
            apply_toml_edit_override_segments(&mut doc, segments, value);
        }
    }

    let tmp_file = NamedTempFile::new_in(codex_home)?;
    tokio::fs::write(tmp_file.path(), doc.to_string()).await?;
    tmp_file.persist(config_path)?;

    Ok(())
}

/// Persist overrides where values may be optional. Any entries with `None`
@@ -36,17 +65,16 @@ pub async fn persist_non_null_overrides(
    profile: Option<&str>,
    overrides: &[(&[&str], Option<&str>)],
) -> Result<()> {
    persist_overrides_with_behavior(codex_home, profile, overrides, NoneBehavior::Skip).await
}
    let filtered: Vec<(&[&str], &str)> = overrides
        .iter()
        .filter_map(|(k, v)| v.map(|vv| (*k, vv)))
        .collect();

/// Persist overrides where `None` values clear any existing values from the
/// configuration file.
pub async fn persist_overrides_and_clear_if_none(
    codex_home: &Path,
    profile: Option<&str>,
    overrides: &[(&[&str], Option<&str>)],
) -> Result<()> {
    persist_overrides_with_behavior(codex_home, profile, overrides, NoneBehavior::Remove).await
    if filtered.is_empty() {
        return Ok(());
    }

    persist_overrides(codex_home, profile, &filtered).await
}

/// Apply a single override onto a `toml_edit` document while preserving
@@ -93,125 +121,6 @@ fn apply_toml_edit_override_segments(
    current[last] = value;
}

async fn persist_overrides_with_behavior(
    codex_home: &Path,
    profile: Option<&str>,
    overrides: &[(&[&str], Option<&str>)],
    none_behavior: NoneBehavior,
) -> Result<()> {
    if overrides.is_empty() {
        return Ok(());
    }

    let should_skip = match none_behavior {
        NoneBehavior::Skip => overrides.iter().all(|(_, value)| value.is_none()),
        NoneBehavior::Remove => false,
    };

    if should_skip {
        return Ok(());
    }

    let config_path = codex_home.join(CONFIG_TOML_FILE);

    let read_result = tokio::fs::read_to_string(&config_path).await;
    let mut doc = match read_result {
        Ok(contents) => contents.parse::<DocumentMut>()?,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            if overrides
                .iter()
                .all(|(_, value)| value.is_none() && matches!(none_behavior, NoneBehavior::Remove))
            {
                return Ok(());
            }

            tokio::fs::create_dir_all(codex_home).await?;
            DocumentMut::new()
        }
        Err(e) => return Err(e.into()),
    };

    let effective_profile = if let Some(p) = profile {
        Some(p.to_owned())
    } else {
        doc.get("profile")
            .and_then(|i| i.as_str())
            .map(|s| s.to_string())
    };

    let mut mutated = false;

    for (segments, value) in overrides.iter().copied() {
        let mut seg_buf: Vec<&str> = Vec::new();
        let segments_to_apply: &[&str];

        if let Some(ref name) = effective_profile {
            if segments.first().copied() == Some("profiles") {
                segments_to_apply = segments;
            } else {
                seg_buf.reserve(2 + segments.len());
                seg_buf.push("profiles");
                seg_buf.push(name.as_str());
                seg_buf.extend_from_slice(segments);
                segments_to_apply = seg_buf.as_slice();
            }
        } else {
            segments_to_apply = segments;
        }

        match value {
            Some(v) => {
                let item_value = toml_edit::value(v);
                apply_toml_edit_override_segments(&mut doc, segments_to_apply, item_value);
                mutated = true;
            }
            None => {
                if matches!(none_behavior, NoneBehavior::Remove)
                    && remove_toml_edit_segments(&mut doc, segments_to_apply)
                {
                    mutated = true;
                }
            }
        }
    }

    if !mutated {
        return Ok(());
    }

    let tmp_file = NamedTempFile::new_in(codex_home)?;
    tokio::fs::write(tmp_file.path(), doc.to_string()).await?;
    tmp_file.persist(config_path)?;

    Ok(())
}

fn remove_toml_edit_segments(doc: &mut DocumentMut, segments: &[&str]) -> bool {
    use toml_edit::Item;

    if segments.is_empty() {
        return false;
    }

    let mut current = doc.as_table_mut();
    for seg in &segments[..segments.len() - 1] {
        let Some(item) = current.get_mut(seg) else {
            return false;
        };

        match item {
            Item::Table(table) => {
                current = table;
            }
            _ => {
                return false;
            }
        }
    }

    current.remove(segments[segments.len() - 1]).is_some()
}

#[cfg(test)]
mod tests {
    use super::*;
@@ -665,81 +574,6 @@ model = "o3"
        assert_eq!(contents, expected);
    }

    #[tokio::test]
    async fn persist_clear_none_removes_top_level_value() {
        let tmpdir = tempdir().expect("tmp");
        let codex_home = tmpdir.path();

        let seed = r#"model = "gpt-5"
model_reasoning_effort = "medium"
"#;
        tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
            .await
            .expect("seed write");

        persist_overrides_and_clear_if_none(
            codex_home,
            None,
            &[
                (&[CONFIG_KEY_MODEL], None),
                (&[CONFIG_KEY_EFFORT], Some("high")),
            ],
        )
        .await
        .expect("persist");

        let contents = read_config(codex_home).await;
        let expected = "model_reasoning_effort = \"high\"\n";
        assert_eq!(contents, expected);
    }

    #[tokio::test]
    async fn persist_clear_none_respects_active_profile() {
        let tmpdir = tempdir().expect("tmp");
        let codex_home = tmpdir.path();

        let seed = r#"profile = "team"

[profiles.team]
model = "gpt-4"
model_reasoning_effort = "minimal"
"#;
        tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
            .await
            .expect("seed write");

        persist_overrides_and_clear_if_none(
            codex_home,
            None,
            &[
                (&[CONFIG_KEY_MODEL], None),
                (&[CONFIG_KEY_EFFORT], Some("high")),
            ],
        )
        .await
        .expect("persist");

        let contents = read_config(codex_home).await;
        let expected = r#"profile = "team"

[profiles.team]
model_reasoning_effort = "high"
"#;
        assert_eq!(contents, expected);
    }

    #[tokio::test]
    async fn persist_clear_none_noop_when_file_missing() {
        let tmpdir = tempdir().expect("tmp");
        let codex_home = tmpdir.path();

        persist_overrides_and_clear_if_none(codex_home, None, &[(&[CONFIG_KEY_MODEL], None)])
            .await
            .expect("persist");

        assert!(!codex_home.join(CONFIG_TOML_FILE).exists());
    }

    // Test helper moved to bottom per review guidance.
    async fn read_config(codex_home: &Path) -> String {
        let p = codex_home.join(CONFIG_TOML_FILE);

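(The clearing variant in use, mirroring the tests above; `codex_home` is assumed to point at a seeded config.)

// Sketch: remove the top-level model and raise the effort in a single
// atomic rewrite of config.toml.
persist_overrides_and_clear_if_none(
    codex_home,
    None,
    &[
        (&[CONFIG_KEY_MODEL], None),          // cleared if present
        (&[CONFIG_KEY_EFFORT], Some("high")), // written or overwritten
    ],
)
.await?;
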
@@ -76,75 +76,9 @@ pub enum HistoryPersistence {
    None,
}

// ===== OTEL configuration =====

#[derive(Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelHttpProtocol {
    /// Binary payload
    Binary,
    /// JSON payload
    Json,
}

/// Which OTEL exporter to use.
#[derive(Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelExporterKind {
    None,
    OtlpHttp {
        endpoint: String,
        headers: HashMap<String, String>,
        protocol: OtelHttpProtocol,
    },
    OtlpGrpc {
        endpoint: String,
        headers: HashMap<String, String>,
    },
}

/// OTEL settings loaded from config.toml. Fields are optional so we can apply defaults.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct OtelConfigToml {
    /// Log user prompt in traces
    pub log_user_prompt: Option<bool>,

    /// Mark traces with environment (dev, staging, prod, test). Defaults to dev.
    pub environment: Option<String>,

    /// Exporter to use. Defaults to `otlp-file`.
    pub exporter: Option<OtelExporterKind>,
}

/// Effective OTEL settings after defaults are applied.
#[derive(Debug, Clone, PartialEq)]
pub struct OtelConfig {
    pub log_user_prompt: bool,
    pub environment: String,
    pub exporter: OtelExporterKind,
}

#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(untagged)]
pub enum Notifications {
    Enabled(bool),
    Custom(Vec<String>),
}

impl Default for Notifications {
    fn default() -> Self {
        Self::Enabled(false)
    }
}

/// Collection of settings that are specific to the TUI.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {
    /// Enable desktop notifications from the TUI when the terminal is unfocused.
    /// Defaults to `false`.
    #[serde(default)]
    pub notifications: Notifications,
}
pub struct Tui {}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct SandboxWorkspaceWrite {

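(Given the serde shapes above, the OTEL section of config.toml can look like the sketch below; the endpoint and header values are made up.)

# Sketch (TOML): enable the OTLP/HTTP exporter with a JSON payload.
[otel]
log_user_prompt = false
environment = "staging"

[otel.exporter.otlp-http]
endpoint = "https://otel.example.com/v1/logs"
protocol = "json"

[otel.exporter.otlp-http.headers]
authorization = "Bearer <token>"
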
@@ -32,8 +32,32 @@ impl ConversationHistory {
        }
    }

    pub(crate) fn replace(&mut self, items: Vec<ResponseItem>) {
        self.items = items;
    pub(crate) fn keep_last_messages(&mut self, n: usize) {
        if n == 0 {
            self.items.clear();
            return;
        }

        // Collect the last N message items (assistant/user), newest to oldest.
        let mut kept: Vec<ResponseItem> = Vec::with_capacity(n);
        for item in self.items.iter().rev() {
            if let ResponseItem::Message { role, content, .. } = item {
                kept.push(ResponseItem::Message {
                    // we need to remove the id or the model will complain that messages are sent without
                    // their reasonings
                    id: None,
                    role: role.clone(),
                    content: content.clone(),
                });
                if kept.len() == n {
                    break;
                }
            }
        }

        // Preserve chronological order (oldest to newest) within the kept slice.
        kept.reverse();
        self.items = kept;
    }
}

@@ -47,9 +71,8 @@ fn is_api_message(message: &ResponseItem) -> bool {
        | ResponseItem::CustomToolCall { .. }
        | ResponseItem::CustomToolCallOutput { .. }
        | ResponseItem::LocalShellCall { .. }
        | ResponseItem::Reasoning { .. }
        | ResponseItem::WebSearchCall { .. } => true,
        ResponseItem::Other => false,
        | ResponseItem::Reasoning { .. } => true,
        ResponseItem::WebSearchCall { .. } | ResponseItem::Other => false,
    }
}

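(Behavior of keep_last_messages in miniature; `history` is assumed to be a populated ConversationHistory.)

// Sketch: keep only the two most recent user/assistant messages; tool calls
// and reasoning items are dropped, and ids are cleared so the model does not
// expect matching reasoning items.
history.keep_last_messages(2);
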
@@ -3,8 +3,6 @@ use crate::CodexAuth;
|
||||
use crate::codex::Codex;
|
||||
use crate::codex::CodexSpawnOk;
|
||||
use crate::codex::INITIAL_SUBMIT_ID;
|
||||
use crate::codex::compact::content_items_to_text;
|
||||
use crate::codex::compact::is_session_prefix_message;
|
||||
use crate::codex_conversation::CodexConversation;
|
||||
use crate::config::Config;
|
||||
use crate::error::CodexErr;
|
||||
@@ -61,11 +59,21 @@ impl ConversationManager {
|
||||
config: Config,
|
||||
auth_manager: Arc<AuthManager>,
|
||||
) -> CodexResult<NewConversation> {
|
||||
let CodexSpawnOk {
|
||||
codex,
|
||||
conversation_id,
|
||||
} = Codex::spawn(config, auth_manager, InitialHistory::New).await?;
|
||||
self.finalize_spawn(codex, conversation_id).await
|
||||
// TO BE REFACTORED: use the config experimental_resume field until we have a mainstream way.
|
||||
if let Some(resume_path) = config.experimental_resume.as_ref() {
|
||||
let initial_history = RolloutRecorder::get_rollout_history(resume_path).await?;
|
||||
let CodexSpawnOk {
|
||||
codex,
|
||||
conversation_id,
|
||||
} = Codex::spawn(config, auth_manager, initial_history).await?;
|
||||
self.finalize_spawn(codex, conversation_id).await
|
||||
} else {
|
||||
let CodexSpawnOk {
|
||||
codex,
|
||||
conversation_id,
|
||||
} = Codex::spawn(config, auth_manager, InitialHistory::New).await?;
|
||||
self.finalize_spawn(codex, conversation_id).await
|
||||
}
|
||||
}
|
||||
|
||||
async fn finalize_spawn(
|
||||
@@ -136,19 +144,19 @@ impl ConversationManager {
|
||||
self.conversations.write().await.remove(conversation_id)
|
||||
}
|
||||
|
||||
/// Fork an existing conversation by taking messages up to the given position
|
||||
/// (not including the message at the given position) and starting a new
|
||||
/// Fork an existing conversation by dropping the last `drop_last_messages`
|
||||
/// user/assistant messages from its transcript and starting a new
|
||||
/// conversation with identical configuration (unless overridden by the
|
||||
/// caller's `config`). The new conversation will have a fresh id.
|
||||
pub async fn fork_conversation(
|
||||
&self,
|
||||
nth_user_message: usize,
|
||||
num_messages_to_drop: usize,
|
||||
config: Config,
|
||||
path: PathBuf,
|
||||
) -> CodexResult<NewConversation> {
|
||||
// Compute the prefix up to the cut point.
|
||||
let history = RolloutRecorder::get_rollout_history(&path).await?;
|
||||
let history = truncate_before_nth_user_message(history, nth_user_message);
let history = truncate_after_dropping_last_messages(history, num_messages_to_drop);

// Spawn a new conversation with the computed initial history.
let auth_manager = self.auth_manager.clone();
@@ -161,30 +169,33 @@ impl ConversationManager {
}
}

/// Return a prefix of `items` obtained by cutting strictly before the nth user message
/// (0-based) and all items that follow it.
fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> InitialHistory {
// Work directly on rollout items, and cut the vector at the nth user message input.
/// Return a prefix of `items` obtained by dropping the last `n` user messages
/// and all items that follow them.
fn truncate_after_dropping_last_messages(history: InitialHistory, n: usize) -> InitialHistory {
if n == 0 {
return InitialHistory::Forked(history.get_rollout_items());
}

// Work directly on rollout items, and cut the vector at the nth-from-last user message input.
let items: Vec<RolloutItem> = history.get_rollout_items();

// Find indices of user message inputs in rollout order.
let mut user_positions: Vec<usize> = Vec::new();
for (idx, item) in items.iter().enumerate() {
if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. }) = item
if let RolloutItem::ResponseItem(ResponseItem::Message { role, .. }) = item
&& role == "user"
&& content_items_to_text(content).is_some_and(|text| !is_session_prefix_message(&text))
{
user_positions.push(idx);
}
}

// If fewer than or equal to n user messages exist, treat as empty (out of range).
if user_positions.len() <= n {
// If fewer than n user messages exist, treat as empty.
if user_positions.len() < n {
return InitialHistory::New;
}

// Cut strictly before the nth user message (do not keep the nth itself).
let cut_idx = user_positions[n];
// Cut strictly before the nth-from-last user message (do not keep the nth itself).
let cut_idx = user_positions[user_positions.len() - n];
let rolled: Vec<RolloutItem> = items.into_iter().take(cut_idx).collect();

if rolled.is_empty() {
@@ -197,11 +208,9 @@ fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> Initia
#[cfg(test)]
mod tests {
use super::*;
use crate::codex::make_session_and_context;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemReasoningSummary;
use codex_protocol::models::ResponseItem;
use pretty_assertions::assert_eq;

fn user_msg(text: &str) -> ResponseItem {
ResponseItem::Message {
@@ -253,7 +262,7 @@ mod tests {
.cloned()
.map(RolloutItem::ResponseItem)
.collect();
let truncated = truncate_before_nth_user_message(InitialHistory::Forked(initial), 1);
let truncated = truncate_after_dropping_last_messages(InitialHistory::Forked(initial), 1);
let got_items = truncated.get_rollout_items();
let expected_items = vec![
RolloutItem::ResponseItem(items[0].clone()),
@@ -270,37 +279,7 @@ mod tests {
.cloned()
.map(RolloutItem::ResponseItem)
.collect();
let truncated2 = truncate_before_nth_user_message(InitialHistory::Forked(initial2), 2);
let truncated2 = truncate_after_dropping_last_messages(InitialHistory::Forked(initial2), 2);
assert!(matches!(truncated2, InitialHistory::New));
}

#[test]
fn ignores_session_prefix_messages_when_truncating() {
let (session, turn_context) = make_session_and_context();
let mut items = session.build_initial_context(&turn_context);
items.push(user_msg("feature request"));
items.push(assistant_msg("ack"));
items.push(user_msg("second question"));
items.push(assistant_msg("answer"));

let rollout_items: Vec<RolloutItem> = items
.iter()
.cloned()
.map(RolloutItem::ResponseItem)
.collect();

let truncated = truncate_before_nth_user_message(InitialHistory::Forked(rollout_items), 1);
let got_items = truncated.get_rollout_items();

let expected: Vec<RolloutItem> = vec![
RolloutItem::ResponseItem(items[0].clone()),
RolloutItem::ResponseItem(items[1].clone()),
RolloutItem::ResponseItem(items[2].clone()),
];

assert_eq!(
serde_json::to_value(&got_items).unwrap(),
serde_json::to_value(&expected).unwrap()
);
}
}

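Aside: a minimal, self-contained sketch (hypothetical names, not part of this diff) of the cut-index arithmetic the truncation helpers above rely on — collect user-message positions, pick a cut point, keep the strict prefix:

// Hypothetical standalone sketch of the cut-index arithmetic.
// `user_positions` are indices of user messages in rollout order.
fn cut_index(user_positions: &[usize], n_from_last: usize) -> Option<usize> {
    if n_from_last == 0 || user_positions.len() < n_from_last {
        return None; // nothing to drop, or out of range
    }
    // Cut strictly before the nth-from-last user message.
    Some(user_positions[user_positions.len() - n_from_last])
}

fn main() {
    // User messages at rollout indices 2, 5, 9; drop the last 1.
    assert_eq!(cut_index(&[2, 5, 9], 1), Some(9)); // keep items[..9]
    assert_eq!(cut_index(&[2, 5, 9], 3), Some(2)); // keep items[..2]
    assert_eq!(cut_index(&[2, 5, 9], 4), None); // out of range
}
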
@@ -2,7 +2,6 @@ use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display as DeriveDisplay;

use crate::codex::TurnContext;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::shell::Shell;
@@ -64,7 +63,7 @@ impl EnvironmentContext {
if writable_roots.is_empty() {
None
} else {
Some(writable_roots)
Some(writable_roots.clone())
}
}
_ => None,
@@ -72,39 +71,6 @@ impl EnvironmentContext {
shell,
}
}

/// Compares two environment contexts, ignoring the shell. Useful when
/// comparing turn to turn, since the initial environment_context will
/// include the shell, and then it is not configurable from turn to turn.
pub fn equals_except_shell(&self, other: &EnvironmentContext) -> bool {
let EnvironmentContext {
cwd,
approval_policy,
sandbox_mode,
network_access,
writable_roots,
// should compare all fields except shell
shell: _,
} = other;

self.cwd == *cwd
&& self.approval_policy == *approval_policy
&& self.sandbox_mode == *sandbox_mode
&& self.network_access == *network_access
&& self.writable_roots == *writable_roots
}
}

impl From<&TurnContext> for EnvironmentContext {
fn from(turn_context: &TurnContext) -> Self {
Self::new(
Some(turn_context.cwd.clone()),
Some(turn_context.approval_policy),
Some(turn_context.sandbox_policy.clone()),
// Shell is not configurable from turn to turn
None,
)
}
}

impl EnvironmentContext {
@@ -174,9 +140,6 @@ impl From<EnvironmentContext> for ResponseItem {

#[cfg(test)]
mod tests {
use crate::shell::BashShell;
use crate::shell::ZshShell;

use super::*;
use pretty_assertions::assert_eq;

@@ -247,82 +210,4 @@ mod tests {

assert_eq!(context.serialize_to_xml(), expected);
}

#[test]
fn equals_except_shell_compares_approval_policy() {
// Approval policy
let context1 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo"], false)),
None,
);
let context2 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some(AskForApproval::Never),
Some(workspace_write_policy(vec!["/repo"], true)),
None,
);
assert!(!context1.equals_except_shell(&context2));
}

#[test]
fn equals_except_shell_compares_sandbox_policy() {
let context1 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::new_read_only_policy()),
None,
);
let context2 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::new_workspace_write_policy()),
None,
);

assert!(!context1.equals_except_shell(&context2));
}

#[test]
fn equals_except_shell_compares_workspace_write_policy() {
let context1 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo", "/tmp", "/var"], false)),
None,
);
let context2 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo", "/tmp"], true)),
None,
);

assert!(!context1.equals_except_shell(&context2));
}

#[test]
fn equals_except_shell_ignores_shell() {
let context1 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo"], false)),
Some(Shell::Bash(BashShell {
shell_path: "/bin/bash".into(),
bashrc_path: "/home/user/.bashrc".into(),
})),
);
let context2 = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo"], false)),
Some(Shell::Zsh(ZshShell {
shell_path: "/bin/zsh".into(),
zshrc_path: "/home/user/.zshrc".into(),
})),
);

assert!(context1.equals_except_shell(&context2));
}
}

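Aside: a hedged usage sketch of `equals_except_shell`, assuming the codex-core types and imports from the hunks above are in scope (it mirrors the tests in this file):

#[test]
fn shell_is_ignored_in_comparison() {
    // Two contexts that differ only in shell compare equal "except shell".
    let ctx_bash = EnvironmentContext::new(
        Some(PathBuf::from("/repo")),
        Some(AskForApproval::OnRequest),
        Some(SandboxPolicy::new_read_only_policy()),
        Some(Shell::Bash(BashShell {
            shell_path: "/bin/bash".into(),
            bashrc_path: "/home/user/.bashrc".into(),
        })),
    );
    let ctx_no_shell = EnvironmentContext::new(
        Some(PathBuf::from("/repo")),
        Some(AskForApproval::OnRequest),
        Some(SandboxPolicy::new_read_only_policy()),
        None, // shell absent; ignored by the comparison
    );
    assert!(ctx_bash.equals_except_shell(&ctx_no_shell));
}
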
@@ -1,6 +1,3 @@
use crate::exec::ExecToolCallOutput;
use crate::token_data::KnownPlan;
use crate::token_data::PlanType;
use codex_protocol::mcp_protocol::ConversationId;
use reqwest::StatusCode;
use serde_json;
@@ -14,11 +11,8 @@ pub type Result<T> = std::result::Result<T, CodexErr>;
#[derive(Error, Debug)]
pub enum SandboxErr {
/// Error from sandbox execution
#[error(
"sandbox denied exec error, exit code: {}, stdout: {}, stderr: {}",
.output.exit_code, .output.stdout.text, .output.stderr.text
)]
Denied { output: Box<ExecToolCallOutput> },
#[error("sandbox denied exec error, exit code: {0}, stdout: {1}, stderr: {2}")]
Denied(i32, String, String),

/// Error from linux seccomp filter setup
#[cfg(target_os = "linux")]
@@ -32,7 +26,7 @@ pub enum SandboxErr {

/// Command timed out
#[error("command timed out")]
Timeout { output: Box<ExecToolCallOutput> },
Timeout,

/// Command was killed by a signal
#[error("command was killed by a signal")]
@@ -133,58 +127,38 @@ pub enum CodexErr {

#[derive(Debug)]
pub struct UsageLimitReachedError {
pub(crate) plan_type: Option<PlanType>,
pub(crate) resets_in_seconds: Option<u64>,
pub plan_type: Option<String>,
pub resets_in_seconds: Option<u64>,
}

impl std::fmt::Display for UsageLimitReachedError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let message = match self.plan_type.as_ref() {
Some(PlanType::Known(KnownPlan::Plus)) => format!(
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing){}",
retry_suffix_after_or(self.resets_in_seconds)
),
Some(PlanType::Known(KnownPlan::Team)) | Some(PlanType::Known(KnownPlan::Business)) => {
format!(
"You've hit your usage limit. To get more access now, send a request to your admin{}",
retry_suffix_after_or(self.resets_in_seconds)
)
// Base message differs slightly for legacy ChatGPT Plus plan users.
if let Some(plan_type) = &self.plan_type
&& plan_type == "plus"
{
write!(
f,
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again"
)?;
if let Some(secs) = self.resets_in_seconds {
let reset_duration = format_reset_duration(secs);
write!(f, " in {reset_duration}.")?;
} else {
write!(f, " later.")?;
}
Some(PlanType::Known(KnownPlan::Free)) => {
"To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
.to_string()
} else {
write!(f, "You've hit your usage limit.")?;

if let Some(secs) = self.resets_in_seconds {
let reset_duration = format_reset_duration(secs);
write!(f, " Try again in {reset_duration}.")?;
} else {
write!(f, " Try again later.")?;
}
Some(PlanType::Known(KnownPlan::Pro))
| Some(PlanType::Known(KnownPlan::Enterprise))
| Some(PlanType::Known(KnownPlan::Edu)) => format!(
"You've hit your usage limit.{}",
retry_suffix(self.resets_in_seconds)
),
Some(PlanType::Unknown(_)) | None => format!(
"You've hit your usage limit.{}",
retry_suffix(self.resets_in_seconds)
),
};
}

write!(f, "{message}")
}
}

fn retry_suffix(resets_in_seconds: Option<u64>) -> String {
if let Some(secs) = resets_in_seconds {
let reset_duration = format_reset_duration(secs);
format!(" Try again in {reset_duration}.")
} else {
" Try again later.".to_string()
}
}

fn retry_suffix_after_or(resets_in_seconds: Option<u64>) -> String {
if let Some(secs) = resets_in_seconds {
let reset_duration = format_reset_duration(secs);
format!(" or try again in {reset_duration}.")
} else {
" or try again later.".to_string()
Ok(())
}
}

@@ -249,12 +223,9 @@ impl CodexErr {

pub fn get_error_message_ui(e: &CodexErr) -> String {
match e {
CodexErr::Sandbox(SandboxErr::Denied { output }) => output.stderr.text.clone(),
CodexErr::Sandbox(SandboxErr::Denied(_, _, stderr)) => stderr.to_string(),
// Timeouts are not sandbox errors from a UX perspective; present them plainly
CodexErr::Sandbox(SandboxErr::Timeout { output }) => format!(
"error: command timed out after {} ms",
output.duration.as_millis()
),
CodexErr::Sandbox(SandboxErr::Timeout) => "error: command timed out".to_string(),
_ => e.to_string(),
}
}
@@ -266,7 +237,7 @@ mod tests {
#[test]
fn usage_limit_reached_error_formats_plus_plan() {
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Plus)),
plan_type: Some("plus".to_string()),
resets_in_seconds: None,
};
assert_eq!(
@@ -275,18 +246,6 @@ mod tests {
);
}

#[test]
fn usage_limit_reached_error_formats_free_plan() {
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Free)),
resets_in_seconds: Some(3600),
};
assert_eq!(
err.to_string(),
"To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
);
}

#[test]
fn usage_limit_reached_error_formats_default_when_none() {
let err = UsageLimitReachedError {
@@ -299,34 +258,10 @@ mod tests {
);
}

#[test]
fn usage_limit_reached_error_formats_team_plan() {
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Team)),
resets_in_seconds: Some(3600),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. To get more access now, send a request to your admin or try again in 1 hour."
);
}

#[test]
fn usage_limit_reached_error_formats_business_plan_without_reset() {
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Business)),
resets_in_seconds: None,
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. To get more access now, send a request to your admin or try again later."
);
}

#[test]
fn usage_limit_reached_error_formats_default_for_other_plans() {
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Pro)),
plan_type: Some("pro".to_string()),
resets_in_seconds: None,
};
assert_eq!(
@@ -350,7 +285,7 @@ mod tests {
#[test]
fn usage_limit_reached_includes_hours_and_minutes() {
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Plus)),
plan_type: Some("plus".to_string()),
resets_in_seconds: Some(3 * 3600 + 32 * 60),
};
assert_eq!(

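Aside: a hedged, crate-internal sketch of how the plan prefix composes with the retry-suffix helpers above (the expected strings are taken from the tests in the hunks):

#[test]
fn usage_limit_messages_compose() {
    // Plus plan uses the "or try again ..." suffix after the upgrade link.
    let plus = UsageLimitReachedError {
        plan_type: Some(PlanType::Known(KnownPlan::Plus)),
        resets_in_seconds: Some(3600),
    };
    assert_eq!(
        plus.to_string(),
        "You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again in 1 hour."
    );

    // Unknown or absent plan types fall back to the plain suffix.
    let unknown = UsageLimitReachedError {
        plan_type: None,
        resets_in_seconds: None,
    };
    assert_eq!(
        unknown.to_string(),
        "You've hit your usage limit. Try again later."
    );
}
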
@@ -159,7 +159,7 @@ mod tests {
EventMsg::UserMessage(user) => {
assert_eq!(user.message, "Hello world");
assert!(matches!(user.kind, Some(InputMessageKind::Plain)));
assert_eq!(user.images, Some(vec![img1, img2]));
assert_eq!(user.images, Some(vec![img1.clone(), img2.clone()]));
}
other => panic!("expected UserMessage, got {other:?}"),
}

@@ -3,7 +3,6 @@ use std::os::unix::process::ExitStatusExt;

use std::collections::HashMap;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::process::ExitStatus;
use std::time::Duration;
@@ -35,7 +34,6 @@ const DEFAULT_TIMEOUT_MS: u64 = 10_000;
const SIGKILL_CODE: i32 = 9;
const TIMEOUT_CODE: i32 = 64;
const EXIT_CODE_SIGNAL_BASE: i32 = 128; // conventional shell: 128 + signal
const EXEC_TIMEOUT_EXIT_CODE: i32 = 124; // conventional timeout exit code

// I/O buffer sizing
const READ_CHUNK_SIZE: usize = 8192; // bytes per read
@@ -45,7 +43,7 @@ const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB
/// Aggregation still collects full output; only the live event stream is capped.
pub(crate) const MAX_EXEC_OUTPUT_DELTAS_PER_CALL: usize = 10_000;

#[derive(Clone, Debug)]
#[derive(Debug, Clone)]
pub struct ExecParams {
pub command: Vec<String>,
pub cwd: PathBuf,
@@ -83,41 +81,33 @@ pub async fn process_exec_tool_call(
params: ExecParams,
sandbox_type: SandboxType,
sandbox_policy: &SandboxPolicy,
sandbox_cwd: &Path,
codex_linux_sandbox_exe: &Option<PathBuf>,
stdout_stream: Option<StdoutStream>,
) -> Result<ExecToolCallOutput> {
let start = Instant::now();

let timeout_duration = params.timeout_duration();

let raw_output_result: std::result::Result<RawExecToolCallOutput, CodexErr> = match sandbox_type
{
SandboxType::None => exec(params, sandbox_policy, stdout_stream.clone()).await,
SandboxType::MacosSeatbelt => {
let timeout = params.timeout_duration();
let ExecParams {
command,
cwd: command_cwd,
env,
..
command, cwd, env, ..
} = params;
let child = spawn_command_under_seatbelt(
command,
command_cwd,
sandbox_policy,
sandbox_cwd,
cwd,
StdioPolicy::RedirectForShellTool,
env,
)
.await?;
consume_truncated_output(child, timeout_duration, stdout_stream.clone()).await
consume_truncated_output(child, timeout, stdout_stream.clone()).await
}
SandboxType::LinuxSeccomp => {
let timeout = params.timeout_duration();
let ExecParams {
command,
cwd: command_cwd,
env,
..
command, cwd, env, ..
} = params;

let codex_linux_sandbox_exe = codex_linux_sandbox_exe
@@ -126,64 +116,48 @@ pub async fn process_exec_tool_call(
let child = spawn_command_under_linux_sandbox(
codex_linux_sandbox_exe,
command,
command_cwd,
sandbox_policy,
sandbox_cwd,
cwd,
StdioPolicy::RedirectForShellTool,
env,
)
.await?;

consume_truncated_output(child, timeout_duration, stdout_stream).await
consume_truncated_output(child, timeout, stdout_stream).await
}
};
let duration = start.elapsed();
match raw_output_result {
Ok(raw_output) => {
#[allow(unused_mut)]
let mut timed_out = raw_output.timed_out;

#[cfg(target_family = "unix")]
{
if let Some(signal) = raw_output.exit_status.signal() {
if signal == TIMEOUT_CODE {
timed_out = true;
} else {
return Err(CodexErr::Sandbox(SandboxErr::Signal(signal)));
}
}
}

let mut exit_code = raw_output.exit_status.code().unwrap_or(-1);
if timed_out {
exit_code = EXEC_TIMEOUT_EXIT_CODE;
}

let stdout = raw_output.stdout.from_utf8_lossy();
let stderr = raw_output.stderr.from_utf8_lossy();
let aggregated_output = raw_output.aggregated_output.from_utf8_lossy();
let exec_output = ExecToolCallOutput {

#[cfg(target_family = "unix")]
match raw_output.exit_status.signal() {
Some(TIMEOUT_CODE) => return Err(CodexErr::Sandbox(SandboxErr::Timeout)),
Some(signal) => {
return Err(CodexErr::Sandbox(SandboxErr::Signal(signal)));
}
None => {}
}

let exit_code = raw_output.exit_status.code().unwrap_or(-1);

if exit_code != 0 && is_likely_sandbox_denied(sandbox_type, exit_code) {
return Err(CodexErr::Sandbox(SandboxErr::Denied(
exit_code,
stdout.text,
stderr.text,
)));
}

Ok(ExecToolCallOutput {
exit_code,
stdout,
stderr,
aggregated_output,
aggregated_output: raw_output.aggregated_output.from_utf8_lossy(),
duration,
timed_out,
};

if timed_out {
return Err(CodexErr::Sandbox(SandboxErr::Timeout {
output: Box::new(exec_output),
}));
}

if exit_code != 0 && is_likely_sandbox_denied(sandbox_type, exit_code) {
return Err(CodexErr::Sandbox(SandboxErr::Denied {
output: Box::new(exec_output),
}));
}

Ok(exec_output)
})
}
Err(err) => {
tracing::error!("exec error: {err}");
@@ -223,7 +197,6 @@ struct RawExecToolCallOutput {
pub stdout: StreamOutput<Vec<u8>>,
pub stderr: StreamOutput<Vec<u8>>,
pub aggregated_output: StreamOutput<Vec<u8>>,
pub timed_out: bool,
}

impl StreamOutput<String> {
@@ -256,7 +229,6 @@ pub struct ExecToolCallOutput {
pub stderr: StreamOutput<String>,
pub aggregated_output: StreamOutput<String>,
pub duration: Duration,
pub timed_out: bool,
}

async fn exec(
@@ -326,24 +298,22 @@ async fn consume_truncated_output(
Some(agg_tx.clone()),
));

let (exit_status, timed_out) = tokio::select! {
let exit_status = tokio::select! {
result = tokio::time::timeout(timeout, child.wait()) => {
match result {
Ok(status_result) => {
let exit_status = status_result?;
(exit_status, false)
}
Ok(Ok(exit_status)) => exit_status,
Ok(e) => e?,
Err(_) => {
// timeout
child.start_kill()?;
// Debatable whether `child.wait().await` should be called here.
(synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE), true)
synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE)
}
}
}
_ = tokio::signal::ctrl_c() => {
child.start_kill()?;
(synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + SIGKILL_CODE), false)
synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + SIGKILL_CODE)
}
};

@@ -366,7 +336,6 @@ async fn consume_truncated_output(
stdout,
stderr,
aggregated_output,
timed_out,
})
}


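Aside: a minimal standalone sketch (assumes tokio with the process, time, rt, and macros features; not part of this diff) of the timeout pattern used above — race the child against a deadline, kill it on expiry, and synthesize a result instead of erroring immediately:

use std::time::Duration;
use tokio::process::Command;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut child = Command::new("sleep").arg("5").spawn()?;
    // Race the child's exit against a 100 ms deadline.
    let timed_out = match tokio::time::timeout(Duration::from_millis(100), child.wait()).await {
        Ok(status) => {
            println!("exited: {:?}", status?);
            false
        }
        Err(_) => {
            child.start_kill()?; // best-effort kill on timeout
            true
        }
    };
    println!("timed_out = {timed_out}");
    Ok(())
}
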
@@ -38,20 +38,16 @@ impl ExecCommandSession {
writer_handle: JoinHandle<()>,
wait_handle: JoinHandle<()>,
exit_status: std::sync::Arc<std::sync::atomic::AtomicBool>,
) -> (Self, broadcast::Receiver<Vec<u8>>) {
let initial_output_rx = output_tx.subscribe();
(
Self {
writer_tx,
output_tx,
killer: StdMutex::new(Some(killer)),
reader_handle: StdMutex::new(Some(reader_handle)),
writer_handle: StdMutex::new(Some(writer_handle)),
wait_handle: StdMutex::new(Some(wait_handle)),
exit_status,
},
initial_output_rx,
)
) -> Self {
Self {
writer_tx,
output_tx,
killer: StdMutex::new(Some(killer)),
reader_handle: StdMutex::new(Some(reader_handle)),
writer_handle: StdMutex::new(Some(writer_handle)),
wait_handle: StdMutex::new(Some(wait_handle)),
exit_status,
}
}

pub(crate) fn writer_sender(&self) -> mpsc::Sender<Vec<u8>> {

@@ -93,16 +93,18 @@ impl SessionManager {
.fetch_add(1, std::sync::atomic::Ordering::SeqCst),
);

let (session, mut output_rx, mut exit_rx) = create_exec_command_session(params.clone())
.await
.map_err(|err| {
format!(
"failed to create exec command session for session id {}: {err}",
session_id.0
)
})?;
let (session, mut exit_rx) =
create_exec_command_session(params.clone())
.await
.map_err(|err| {
format!(
"failed to create exec command session for session id {}: {err}",
session_id.0
)
})?;

// Insert into session map.
let mut output_rx = session.output_receiver();
self.sessions.lock().await.insert(session_id, session);

// Collect output until either timeout expires or process exits.
@@ -243,11 +245,7 @@ impl SessionManager {
/// Spawn PTY and child process per spawn_exec_command_session logic.
async fn create_exec_command_session(
params: ExecCommandParams,
) -> anyhow::Result<(
ExecCommandSession,
tokio::sync::broadcast::Receiver<Vec<u8>>,
oneshot::Receiver<i32>,
)> {
) -> anyhow::Result<(ExecCommandSession, oneshot::Receiver<i32>)> {
let ExecCommandParams {
cmd,
yield_time_ms: _,
@@ -281,6 +279,7 @@ async fn create_exec_command_session(
let (writer_tx, mut writer_rx) = mpsc::channel::<Vec<u8>>(128);
// Broadcast for streaming PTY output to readers: subscribers receive from subscription time.
let (output_tx, _) = tokio::sync::broadcast::channel::<Vec<u8>>(256);

// Reader task: drain PTY and forward chunks to output channel.
let mut reader = pair.master.try_clone_reader()?;
let output_tx_clone = output_tx.clone();
@@ -342,7 +341,7 @@ async fn create_exec_command_session(
});

// Create and store the session with channels.
let (session, initial_output_rx) = ExecCommandSession::new(
let session = ExecCommandSession::new(
writer_tx,
output_tx,
killer,
@@ -351,7 +350,7 @@ async fn create_exec_command_session(
wait_handle,
exit_status,
);
Ok((session, initial_output_rx, exit_rx))
Ok((session, exit_rx))
}

#[cfg(test)]

@@ -802,7 +802,7 @@ mod tests {
async fn resolve_root_git_project_for_trust_regular_repo_returns_repo_root() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let repo_path = create_test_git_repo(&temp_dir).await;
let expected = std::fs::canonicalize(&repo_path).unwrap();
let expected = std::fs::canonicalize(&repo_path).unwrap().to_path_buf();

assert_eq!(
resolve_root_git_project_for_trust(&repo_path),
@@ -810,7 +810,10 @@ mod tests {
);
let nested = repo_path.join("sub/dir");
std::fs::create_dir_all(&nested).unwrap();
assert_eq!(resolve_root_git_project_for_trust(&nested), Some(expected));
assert_eq!(
resolve_root_git_project_for_trust(&nested),
Some(expected.clone())
);
}

#[tokio::test]

@@ -1,76 +0,0 @@
use anyhow::Context;
use serde::Deserialize;
use serde::Serialize;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;

pub(crate) const INTERNAL_STORAGE_FILE: &str = "internal_storage.json";

#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct InternalStorage {
#[serde(skip)]
storage_path: PathBuf,
#[serde(default)]
pub gpt_5_codex_model_prompt_seen: bool,
}

// TODO(jif) generalise all the file writers and build proper async channel inserters.
impl InternalStorage {
pub fn load(codex_home: &Path) -> Self {
let storage_path = codex_home.join(INTERNAL_STORAGE_FILE);

match std::fs::read_to_string(&storage_path) {
Ok(serialized) => match serde_json::from_str::<Self>(&serialized) {
Ok(mut storage) => {
storage.storage_path = storage_path;
storage
}
Err(error) => {
tracing::warn!("failed to parse internal storage: {error:?}");
Self::empty(storage_path)
}
},
Err(error) => {
if error.kind() == ErrorKind::NotFound {
tracing::debug!(
"internal storage not found at {}; initializing defaults",
storage_path.display()
);
} else {
tracing::warn!("failed to read internal storage: {error:?}");
}
Self::empty(storage_path)
}
}
}

fn empty(storage_path: PathBuf) -> Self {
Self {
storage_path,
..Default::default()
}
}

pub async fn persist(&self) -> anyhow::Result<()> {
let serialized = serde_json::to_string_pretty(self)?;

if let Some(parent) = self.storage_path.parent() {
tokio::fs::create_dir_all(parent).await.with_context(|| {
format!(
"failed to create internal storage directory at {}",
parent.display()
)
})?;
}

tokio::fs::write(&self.storage_path, serialized)
.await
.with_context(|| {
format!(
"failed to persist internal storage at {}",
self.storage_path.display()
)
})
}
}
@@ -16,22 +16,21 @@ use tokio::process::Child;
pub async fn spawn_command_under_linux_sandbox<P>(
codex_linux_sandbox_exe: P,
command: Vec<String>,
command_cwd: PathBuf,
sandbox_policy: &SandboxPolicy,
sandbox_policy_cwd: &Path,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child>
where
P: AsRef<Path>,
{
let args = create_linux_sandbox_command_args(command, sandbox_policy, sandbox_policy_cwd);
let args = create_linux_sandbox_command_args(command, sandbox_policy, &cwd);
let arg0 = Some("codex-linux-sandbox");
spawn_child_async(
codex_linux_sandbox_exe.as_ref().to_path_buf(),
args,
arg0,
command_cwd,
cwd,
sandbox_policy,
stdio_policy,
env,
@@ -43,13 +42,10 @@ where
fn create_linux_sandbox_command_args(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
sandbox_policy_cwd: &Path,
cwd: &Path,
) -> Vec<String> {
#[expect(clippy::expect_used)]
let sandbox_policy_cwd = sandbox_policy_cwd
.to_str()
.expect("cwd must be valid UTF-8")
.to_string();
let sandbox_policy_cwd = cwd.to_str().expect("cwd must be valid UTF-8").to_string();

#[expect(clippy::expect_used)]
let sandbox_policy_json =

@@ -28,7 +28,6 @@ mod exec_command;
pub mod exec_env;
mod flags;
pub mod git_info;
pub mod internal_storage;
mod is_safe_command;
pub mod landlock;
mod mcp_connection_manager;
@@ -46,7 +45,6 @@ pub use model_provider_info::built_in_model_providers;
pub use model_provider_info::create_oss_provider_with_base_url;
mod conversation_manager;
mod event_mapping;
pub mod review_format;
pub use codex_protocol::protocol::InitialHistory;
pub use conversation_manager::ConversationManager;
pub use conversation_manager::NewConversation;
@@ -71,13 +69,11 @@ pub use rollout::ARCHIVED_SESSIONS_SUBDIR;
pub use rollout::RolloutRecorder;
pub use rollout::SESSIONS_SUBDIR;
pub use rollout::SessionMeta;
pub use rollout::find_conversation_path_by_id_str;
pub use rollout::list::ConversationItem;
pub use rollout::list::ConversationsPage;
pub use rollout::list::Cursor;
mod user_notification;
pub mod util;

pub use apply_patch::CODEX_APPLY_PATCH_ARG1;
pub use safety::get_platform_sandbox;
// Re-export the protocol types from the standalone `codex-protocol` crate so existing
@@ -89,16 +85,11 @@ pub use codex_protocol::config_types as protocol_config_types;

pub use client::ModelClient;
pub use client_common::Prompt;
pub use client_common::REVIEW_PROMPT;
pub use client_common::ResponseEvent;
pub use client_common::ResponseStream;
pub use codex::compact::content_items_to_text;
pub use codex::compact::is_session_prefix_message;
pub use codex_protocol::models::ContentItem;
pub use codex_protocol::models::LocalShellAction;
pub use codex_protocol::models::LocalShellExecAction;
pub use codex_protocol::models::LocalShellStatus;
pub use codex_protocol::models::ReasoningItemContent;
pub use codex_protocol::models::ResponseItem;

pub mod otel_init;

@@ -1,11 +1,6 @@
use crate::config_types::ReasoningSummaryFormat;
use crate::tool_apply_patch::ApplyPatchToolType;

/// The `instructions` field in the payload sent to a model should always start
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");
const GPT_5_CODEX_INSTRUCTIONS: &str = include_str!("../gpt_5_codex_prompt.md");

/// A model family is a group of models that share certain characteristics.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ModelFamily {
@@ -38,9 +33,6 @@ pub struct ModelFamily {
/// Present if the model performs better when `apply_patch` is provided as
/// a tool call instead of just a bash command
pub apply_patch_tool_type: Option<ApplyPatchToolType>,

// Instructions to use for querying the model
pub base_instructions: String,
}

macro_rules! model_family {
@@ -56,7 +48,6 @@ macro_rules! model_family {
reasoning_summary_format: ReasoningSummaryFormat::None,
uses_local_shell_tool: false,
apply_patch_tool_type: None,
base_instructions: BASE_INSTRUCTIONS.to_string(),
};
// apply overrides
$(
@@ -66,6 +57,22 @@ macro_rules! model_family {
}};
}

macro_rules! simple_model_family {
(
$slug:expr, $family:expr
) => {{
Some(ModelFamily {
slug: $slug.to_string(),
family: $family.to_string(),
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries: false,
reasoning_summary_format: ReasoningSummaryFormat::None,
uses_local_shell_tool: false,
apply_patch_tool_type: None,
})
}};
}

/// Returns a `ModelFamily` for the given model slug, or `None` if the slug
/// does not match any known model family.
pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
@@ -73,20 +80,23 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
model_family!(
slug, "o3",
supports_reasoning_summaries: true,
needs_special_apply_patch_instructions: true,
)
} else if slug.starts_with("o4-mini") {
model_family!(
slug, "o4-mini",
supports_reasoning_summaries: true,
needs_special_apply_patch_instructions: true,
)
} else if slug.starts_with("codex-mini-latest") {
model_family!(
slug, "codex-mini-latest",
supports_reasoning_summaries: true,
uses_local_shell_tool: true,
needs_special_apply_patch_instructions: true,
)
} else if slug.starts_with("codex-") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
)
} else if slug.starts_with("gpt-4.1") {
model_family!(
@@ -96,36 +106,15 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
} else if slug.starts_with("gpt-oss") || slug.starts_with("openai/gpt-oss") {
model_family!(slug, "gpt-oss", apply_patch_tool_type: Some(ApplyPatchToolType::Function))
} else if slug.starts_with("gpt-4o") {
model_family!(slug, "gpt-4o", needs_special_apply_patch_instructions: true)
simple_model_family!(slug, "gpt-4o")
} else if slug.starts_with("gpt-3.5") {
model_family!(slug, "gpt-3.5", needs_special_apply_patch_instructions: true)
} else if slug.starts_with("codex-") || slug.starts_with("gpt-5-codex") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
)
simple_model_family!(slug, "gpt-3.5")
} else if slug.starts_with("gpt-5") {
model_family!(
slug, "gpt-5",
supports_reasoning_summaries: true,
needs_special_apply_patch_instructions: true,
)
} else {
None
}
}

pub fn derive_default_model_family(model: &str) -> ModelFamily {
ModelFamily {
slug: model.to_string(),
family: model.to_string(),
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries: false,
reasoning_summary_format: ReasoningSummaryFormat::None,
uses_local_shell_tool: false,
apply_patch_tool_type: None,
base_instructions: BASE_INSTRUCTIONS.to_string(),
}
}

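Aside: a hedged usage sketch of the slug matching above (crate-internal API; the asserted values follow directly from the hunks):

#[test]
fn slug_matching_examples() {
    // Prefix match: any "o3*" slug resolves to the "o3" family with
    // reasoning summaries enabled.
    let fam = find_family_for_model("o3-2025-04-16").expect("o3 prefix matches");
    assert_eq!(fam.family, "o3");
    assert!(fam.supports_reasoning_summaries);

    // Unrecognized slugs can fall back to a default family at call sites
    // that use derive_default_model_family instead of None.
    let default = derive_default_model_family("my-custom-model");
    assert_eq!(default.family, "my-custom-model");
    assert!(!default.supports_reasoning_summaries);
}
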
@@ -80,10 +80,7 @@ pub struct ModelProviderInfo {
/// the connection as lost.
pub stream_idle_timeout_ms: Option<u64>,

/// Does this provider require an OpenAI API Key or ChatGPT login token? If true,
/// user is presented with login screen on first run, and login preference and token/key
/// are stored in auth.json. If false (which is the default), login screen is skipped,
/// and API key (if needed) comes from the "env_key" environment variable.
/// Whether this provider requires some form of standard authentication (API key, ChatGPT token).
#[serde(default)]
pub requires_openai_auth: bool,
}
@@ -162,21 +159,6 @@ impl ModelProviderInfo {
}
}

pub(crate) fn is_azure_responses_endpoint(&self) -> bool {
if self.wire_api != WireApi::Responses {
return false;
}

if self.name.eq_ignore_ascii_case("azure") {
return true;
}

self.base_url
.as_ref()
.map(|base| matches_azure_responses_base_url(base))
.unwrap_or(false)
}

/// Apply provider-specific HTTP headers (both static and environment-based)
/// onto an existing `reqwest::RequestBuilder` and return the updated
/// builder.
@@ -344,18 +326,6 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
}
}

fn matches_azure_responses_base_url(base_url: &str) -> bool {
let base = base_url.to_ascii_lowercase();
const AZURE_MARKERS: [&str; 5] = [
"openai.azure.",
"cognitiveservices.azure.",
"aoai.azure.",
"azure-api.",
"azurefd.",
];
AZURE_MARKERS.iter().any(|marker| base.contains(marker))
}

#[cfg(test)]
mod tests {
use super::*;
@@ -446,69 +416,4 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
assert_eq!(expected_provider, provider);
}

#[test]
fn detects_azure_responses_base_urls() {
fn provider_for(base_url: &str) -> ModelProviderInfo {
ModelProviderInfo {
name: "test".into(),
base_url: Some(base_url.into()),
env_key: None,
env_key_instructions: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
}
}

let positive_cases = [
"https://foo.openai.azure.com/openai",
"https://foo.openai.azure.us/openai/deployments/bar",
"https://foo.cognitiveservices.azure.cn/openai",
"https://foo.aoai.azure.com/openai",
"https://foo.openai.azure-api.net/openai",
"https://foo.z01.azurefd.net/",
];
for base_url in positive_cases {
let provider = provider_for(base_url);
assert!(
provider.is_azure_responses_endpoint(),
"expected {base_url} to be detected as Azure"
);
}

let named_provider = ModelProviderInfo {
name: "Azure".into(),
base_url: Some("https://example.com".into()),
env_key: None,
env_key_instructions: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
};
assert!(named_provider.is_azure_responses_endpoint());

let negative_cases = [
"https://api.openai.com/v1",
"https://example.com/openai",
"https://myproxy.azurewebsites.net/openai",
];
for base_url in negative_cases {
let provider = provider_for(base_url);
assert!(
!provider.is_azure_responses_endpoint(),
"expected {base_url} not to be detected as Azure"
);
}
}
}

@@ -12,19 +12,6 @@ pub(crate) struct ModelInfo {

/// Maximum number of output tokens that can be generated for the model.
pub(crate) max_output_tokens: u64,

/// Token threshold where we should automatically compact conversation history.
pub(crate) auto_compact_token_limit: Option<i64>,
}

impl ModelInfo {
const fn new(context_window: u64, max_output_tokens: u64) -> Self {
Self {
context_window,
max_output_tokens,
auto_compact_token_limit: None,
}
}
}

pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
@@ -33,37 +20,73 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
// OSS models have a 128k shared token pool.
// Arbitrarily splitting it: 3/4 input context, 1/4 output.
// https://openai.com/index/gpt-oss-model-card/
"gpt-oss-20b" => Some(ModelInfo::new(96_000, 32_000)),
"gpt-oss-120b" => Some(ModelInfo::new(96_000, 32_000)),
"gpt-oss-20b" => Some(ModelInfo {
context_window: 96_000,
max_output_tokens: 32_000,
}),
"gpt-oss-120b" => Some(ModelInfo {
context_window: 96_000,
max_output_tokens: 32_000,
}),
// https://platform.openai.com/docs/models/o3
"o3" => Some(ModelInfo::new(200_000, 100_000)),
"o3" => Some(ModelInfo {
context_window: 200_000,
max_output_tokens: 100_000,
}),

// https://platform.openai.com/docs/models/o4-mini
"o4-mini" => Some(ModelInfo::new(200_000, 100_000)),
"o4-mini" => Some(ModelInfo {
context_window: 200_000,
max_output_tokens: 100_000,
}),

// https://platform.openai.com/docs/models/codex-mini-latest
"codex-mini-latest" => Some(ModelInfo::new(200_000, 100_000)),
"codex-mini-latest" => Some(ModelInfo {
context_window: 200_000,
max_output_tokens: 100_000,
}),

// As of Jun 25, 2025, gpt-4.1 defaults to gpt-4.1-2025-04-14.
// https://platform.openai.com/docs/models/gpt-4.1
"gpt-4.1" | "gpt-4.1-2025-04-14" => Some(ModelInfo::new(1_047_576, 32_768)),
"gpt-4.1" | "gpt-4.1-2025-04-14" => Some(ModelInfo {
context_window: 1_047_576,
max_output_tokens: 32_768,
}),

// As of Jun 25, 2025, gpt-4o defaults to gpt-4o-2024-08-06.
// https://platform.openai.com/docs/models/gpt-4o
"gpt-4o" | "gpt-4o-2024-08-06" => Some(ModelInfo::new(128_000, 16_384)),
"gpt-4o" | "gpt-4o-2024-08-06" => Some(ModelInfo {
context_window: 128_000,
max_output_tokens: 16_384,
}),

// https://platform.openai.com/docs/models/gpt-4o?snapshot=gpt-4o-2024-05-13
"gpt-4o-2024-05-13" => Some(ModelInfo::new(128_000, 4_096)),
"gpt-4o-2024-05-13" => Some(ModelInfo {
context_window: 128_000,
max_output_tokens: 4_096,
}),

// https://platform.openai.com/docs/models/gpt-4o?snapshot=gpt-4o-2024-11-20
"gpt-4o-2024-11-20" => Some(ModelInfo::new(128_000, 16_384)),
"gpt-4o-2024-11-20" => Some(ModelInfo {
context_window: 128_000,
max_output_tokens: 16_384,
}),

// https://platform.openai.com/docs/models/gpt-3.5-turbo
"gpt-3.5-turbo" => Some(ModelInfo::new(16_385, 4_096)),
"gpt-3.5-turbo" => Some(ModelInfo {
context_window: 16_385,
max_output_tokens: 4_096,
}),

_ if slug.starts_with("gpt-5") => Some(ModelInfo::new(272_000, 128_000)),
"gpt-5" => Some(ModelInfo {
context_window: 272_000,
max_output_tokens: 128_000,
}),

_ if slug.starts_with("codex-") => Some(ModelInfo::new(272_000, 128_000)),
_ if slug.starts_with("codex-") => Some(ModelInfo {
context_window: 272_000,
max_output_tokens: 128_000,
}),

_ => None,
}

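Aside: a hedged, crate-internal sketch of why the `ModelInfo::new` helper above exists — the match arms stay one-liners while `auto_compact_token_limit` defaults to `None`:

#[test]
fn model_info_new_defaults() {
    let o3 = ModelInfo::new(200_000, 100_000);
    assert_eq!(o3.context_window, 200_000);
    assert_eq!(o3.max_output_tokens, 100_000);
    // The compaction threshold is opt-in; new() leaves it unset.
    assert_eq!(o3.auto_compact_token_limit, None);
}
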
@@ -7,6 +7,7 @@ use std::collections::HashMap;

use crate::model_family::ModelFamily;
use crate::plan_tool::PLAN_TOOL;
use crate::protocol::AskForApproval;
use crate::tool_apply_patch::ApplyPatchToolType;
use crate::tool_apply_patch::create_apply_patch_freeform_tool;
use crate::tool_apply_patch::create_apply_patch_json_tool;
@@ -55,9 +56,10 @@ pub(crate) enum OpenAiTool {

#[derive(Debug, Clone)]
pub enum ConfigShellToolType {
Default,
Local,
Streamable,
DefaultShell,
ShellWithRequest,
LocalShell,
StreamableShell,
}

#[derive(Debug, Clone)]
@@ -72,6 +74,7 @@ pub(crate) struct ToolsConfig {

pub(crate) struct ToolsConfigParams<'a> {
pub(crate) model_family: &'a ModelFamily,
pub(crate) approval_policy: AskForApproval,
pub(crate) include_plan_tool: bool,
pub(crate) include_apply_patch_tool: bool,
pub(crate) include_web_search_request: bool,
@@ -84,6 +87,7 @@ impl ToolsConfig {
pub fn new(params: &ToolsConfigParams) -> Self {
let ToolsConfigParams {
model_family,
approval_policy,
include_plan_tool,
include_apply_patch_tool,
include_web_search_request,
@@ -91,13 +95,16 @@ impl ToolsConfig {
include_view_image_tool,
experimental_unified_exec_tool,
} = params;
let shell_type = if *use_streamable_shell_tool {
ConfigShellToolType::Streamable
let mut shell_type = if *use_streamable_shell_tool {
ConfigShellToolType::StreamableShell
} else if model_family.uses_local_shell_tool {
ConfigShellToolType::Local
ConfigShellToolType::LocalShell
} else {
ConfigShellToolType::Default
ConfigShellToolType::DefaultShell
};
if matches!(approval_policy, AskForApproval::OnRequest) && !use_streamable_shell_tool {
shell_type = ConfigShellToolType::ShellWithRequest;
}

let apply_patch_tool_type = match model_family.apply_patch_tool_type {
Some(ApplyPatchToolType::Freeform) => Some(ApplyPatchToolType::Freeform),
@@ -158,6 +165,40 @@ pub(crate) enum JsonSchema {
},
}

fn create_shell_tool() -> OpenAiTool {
let mut properties = BTreeMap::new();
properties.insert(
"command".to_string(),
JsonSchema::Array {
items: Box::new(JsonSchema::String { description: None }),
description: Some("The command to execute".to_string()),
},
);
properties.insert(
"workdir".to_string(),
JsonSchema::String {
description: Some("The working directory to execute the command in".to_string()),
},
);
properties.insert(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some("The timeout for the command in milliseconds".to_string()),
},
);

OpenAiTool::Function(ResponsesApiTool {
name: "shell".to_string(),
description: "Runs a shell command and returns its output".to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["command".to_string()]),
additional_properties: Some(false),
},
})
}

fn create_unified_exec_tool() -> OpenAiTool {
let mut properties = BTreeMap::new();
properties.insert(
@@ -205,7 +246,9 @@ fn create_unified_exec_tool() -> OpenAiTool {
})
}

fn create_shell_tool() -> OpenAiTool {
const SHELL_TOOL_DESCRIPTION: &str = r#"Runs a shell command and returns its output"#;

fn create_shell_tool_for_sandbox() -> OpenAiTool {
let mut properties = BTreeMap::new();
properties.insert(
"command".to_string(),
@@ -217,32 +260,33 @@ fn create_shell_tool() -> OpenAiTool {
properties.insert(
"workdir".to_string(),
JsonSchema::String {
description: Some("The working directory to execute the command in".to_string()),
description: Some("Working directory to execute the command in.".to_string()),
},
);
properties.insert(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some("The timeout for the command in milliseconds".to_string()),
description: Some("Timeout for the command in milliseconds.".to_string()),
},
);

properties.insert(
"with_escalated_permissions".to_string(),
JsonSchema::Boolean {
description: Some("Whether to request escalated permissions. Set to true if command needs to be run without sandbox restrictions".to_string()),
description: Some("Request escalated permissions, only for when a command would otherwise be blocked by the sandbox.".to_string()),
},
);
properties.insert(
"justification".to_string(),
JsonSchema::String {
description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
description: Some("Required if and only if with_escalated_permissions == true. One sentence explaining why escalation is needed (e.g., write outside CWD, network fetch, git commit).".to_string()),
},
);

let description = SHELL_TOOL_DESCRIPTION.to_string();

OpenAiTool::Function(ResponsesApiTool {
name: "shell".to_string(),
description: "Runs a shell command and returns its output.".to_string(),
description,
strict: false,
parameters: JsonSchema::Object {
properties,
@@ -251,7 +295,6 @@ fn create_shell_tool() -> OpenAiTool {
},
})
}

fn create_view_image_tool() -> OpenAiTool {
// Support only local filesystem path.
let mut properties = BTreeMap::new();
@@ -285,7 +328,7 @@ pub(crate) struct ApplyPatchToolArgs {
/// Responses API:
/// https://platform.openai.com/docs/guides/function-calling?api-mode=responses
pub fn create_tools_json_for_responses_api(
tools: &[OpenAiTool],
tools: &Vec<OpenAiTool>,
) -> crate::error::Result<Vec<serde_json::Value>> {
let mut tools_json = Vec::new();

@@ -300,7 +343,7 @@ pub fn create_tools_json_for_responses_api(
/// Chat Completions API:
/// https://platform.openai.com/docs/guides/function-calling?api-mode=chat
pub(crate) fn create_tools_json_for_chat_completions_api(
tools: &[OpenAiTool],
tools: &Vec<OpenAiTool>,
) -> crate::error::Result<Vec<serde_json::Value>> {
// We start with the JSON for the Responses API and then rewrite it to match
// the chat completions tool call format.
@@ -489,13 +532,16 @@ pub(crate) fn get_openai_tools(
tools.push(create_unified_exec_tool());
} else {
match &config.shell_type {
ConfigShellToolType::Default => {
ConfigShellToolType::DefaultShell => {
tools.push(create_shell_tool());
}
ConfigShellToolType::Local => {
ConfigShellToolType::ShellWithRequest => {
tools.push(create_shell_tool_for_sandbox());
}
ConfigShellToolType::LocalShell => {
tools.push(OpenAiTool::LocalShell {});
}
ConfigShellToolType::Streamable => {
ConfigShellToolType::StreamableShell => {
tools.push(OpenAiTool::Function(
crate::exec_command::create_exec_command_tool_for_responses_api(),
));
@@ -585,6 +631,7 @@ mod tests {
.expect("codex-mini-latest should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -605,6 +652,7 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -625,6 +673,7 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -729,6 +778,7 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: false,
@@ -805,6 +855,7 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -866,6 +917,7 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -922,6 +974,7 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -981,6 +1034,7 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -1033,8 +1087,8 @@ mod tests {
}

#[test]
fn test_shell_tool() {
let tool = super::create_shell_tool();
fn test_shell_tool_for_sandbox_workspace_write() {
let tool = super::create_shell_tool_for_sandbox();
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
@@ -1043,7 +1097,37 @@ mod tests {
};
assert_eq!(name, "shell");

let expected = "Runs a shell command and returns its output.";
let expected = super::SHELL_TOOL_DESCRIPTION;
assert_eq!(description, expected);
}

#[test]
fn test_shell_tool_for_sandbox_readonly() {
let tool = super::create_shell_tool_for_sandbox();
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
else {
panic!("expected function tool");
};
assert_eq!(name, "shell");

let expected = super::SHELL_TOOL_DESCRIPTION;
assert_eq!(description, expected);
}

#[test]
fn test_shell_tool_for_sandbox_danger_full_access() {
let tool = super::create_shell_tool_for_sandbox();
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
else {
panic!("expected function tool");
};
assert_eq!(name, "shell");

let expected = super::SHELL_TOOL_DESCRIPTION;
assert_eq!(description, expected);
}
}

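Aside: a minimal standalone sketch (hypothetical helper, not part of this diff) of the shell-tool selection order that `ToolsConfig::new` implements above — note that `OnRequest` approval overrides any non-streamable choice:

fn pick_shell(use_streamable: bool, uses_local_shell: bool, on_request: bool) -> &'static str {
    let mut shell = if use_streamable {
        "StreamableShell"
    } else if uses_local_shell {
        "LocalShell"
    } else {
        "DefaultShell"
    };
    // OnRequest approval upgrades any non-streamable choice to the
    // sandbox-aware shell that carries the escalation fields.
    if on_request && !use_streamable {
        shell = "ShellWithRequest";
    }
    shell
}

fn main() {
    assert_eq!(pick_shell(false, false, true), "ShellWithRequest");
    assert_eq!(pick_shell(false, true, true), "ShellWithRequest");
    assert_eq!(pick_shell(true, false, true), "StreamableShell");
    assert_eq!(pick_shell(false, false, false), "DefaultShell");
}
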
@@ -1,61 +0,0 @@
use crate::config::Config;
use crate::config_types::OtelExporterKind as Kind;
use crate::config_types::OtelHttpProtocol as Protocol;
use crate::default_client::ORIGINATOR;
use codex_otel::config::OtelExporter;
use codex_otel::config::OtelHttpProtocol;
use codex_otel::config::OtelSettings;
use codex_otel::otel_provider::OtelProvider;
use std::error::Error;

/// Build an OpenTelemetry provider from the app Config.
///
/// Returns `None` when OTEL export is disabled.
pub fn build_provider(
    config: &Config,
    service_version: &str,
) -> Result<Option<OtelProvider>, Box<dyn Error>> {
    let exporter = match &config.otel.exporter {
        Kind::None => OtelExporter::None,
        Kind::OtlpHttp {
            endpoint,
            headers,
            protocol,
        } => {
            let protocol = match protocol {
                Protocol::Json => OtelHttpProtocol::Json,
                Protocol::Binary => OtelHttpProtocol::Binary,
            };

            OtelExporter::OtlpHttp {
                endpoint: endpoint.clone(),
                headers: headers
                    .iter()
                    .map(|(k, v)| (k.clone(), v.clone()))
                    .collect(),
                protocol,
            }
        }
        Kind::OtlpGrpc { endpoint, headers } => OtelExporter::OtlpGrpc {
            endpoint: endpoint.clone(),
            headers: headers
                .iter()
                .map(|(k, v)| (k.clone(), v.clone()))
                .collect(),
        },
    };

    OtelProvider::from(&OtelSettings {
        service_name: ORIGINATOR.value.to_owned(),
        service_version: service_version.to_string(),
        codex_home: config.codex_home.clone(),
        environment: config.otel.environment.to_string(),
        exporter,
    })
}

/// Filter predicate for exporting only Codex-owned events via OTEL.
/// Keeps events that originated from the codex_otel module.
pub fn codex_export_filter(meta: &tracing::Metadata<'_>) -> bool {
    meta.target().starts_with("codex_otel")
}
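
For orientation, `codex_export_filter` is shaped to plug into `tracing_subscriber`'s `filter_fn`. A minimal wiring sketch, with a plain `fmt` layer standing in for the real OTEL export layer (an assumption for illustration only):

use tracing_subscriber::filter::filter_fn;
use tracing_subscriber::prelude::*;

fn install_filtered_layer() {
    // Stand-in layer; the real code would attach the OTEL bridge here.
    let layer = tracing_subscriber::fmt::layer()
        .with_filter(filter_fn(|meta| meta.target().starts_with("codex_otel")));
    tracing_subscriber::registry().with(layer).init();
}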
@@ -868,7 +868,7 @@ pub fn parse_command_impl(command: &[String]) -> Vec<ParsedCommand> {
    let parts = if contains_connectors(&normalized) {
        split_on_connectors(&normalized)
    } else {
        vec![normalized]
        vec![normalized.clone()]
    };

    // Preserve left-to-right execution order for all commands, including bash -c/-lc
@@ -1156,8 +1156,10 @@ fn parse_bash_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
    // bias toward the primary command when pipelines are present.
    // First, drop obvious small formatting helpers (e.g., wc/awk/etc).
    let had_multiple_commands = all_commands.len() > 1;
    // Commands arrive in source order; drop formatting helpers while preserving it.
    let filtered_commands = drop_small_formatting_commands(all_commands);
    // The bash AST walker yields commands in right-to-left order for
    // connector/pipeline sequences. Reverse to reflect actual execution order.
    let mut filtered_commands = drop_small_formatting_commands(all_commands);
    filtered_commands.reverse();
    if filtered_commands.is_empty() {
        return Some(vec![ParsedCommand::Unknown {
            cmd: script.clone(),
@@ -1199,7 +1201,10 @@ fn parse_bash_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
                name,
            }
        } else {
            ParsedCommand::Read { cmd, name }
            ParsedCommand::Read {
                cmd: cmd.clone(),
                name,
            }
        }
    } else {
        ParsedCommand::Read {
@@ -1210,7 +1215,10 @@ fn parse_bash_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
    }
    ParsedCommand::ListFiles { path, cmd, .. } => {
        if had_connectors {
            ParsedCommand::ListFiles { cmd, path }
            ParsedCommand::ListFiles {
                cmd: cmd.clone(),
                path,
            }
        } else {
            ParsedCommand::ListFiles {
                cmd: shlex_join(&script_tokens),
@@ -1222,7 +1230,11 @@ fn parse_bash_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
        query, path, cmd, ..
    } => {
        if had_connectors {
            ParsedCommand::Search { cmd, query, path }
            ParsedCommand::Search {
                cmd: cmd.clone(),
                query,
                path,
            }
        } else {
            ParsedCommand::Search {
                cmd: shlex_join(&script_tokens),

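The ordering comment above is the crux of this hunk: the bash AST walker yields connector/pipeline commands right to left, so one reverse restores execution order. A standalone sketch of that invariant:

fn main() {
    // Walker output for `cat a.txt | wc -l`, right-to-left.
    let mut commands = vec!["wc -l".to_string(), "cat a.txt".to_string()];
    commands.reverse();
    assert_eq!(commands, ["cat a.txt", "wc -l"]);
}
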
@@ -115,7 +115,7 @@ pub fn discover_project_doc_paths(config: &Config) -> std::io::Result<Vec<PathBu
    // Build chain from cwd upwards and detect git root.
    let mut chain: Vec<PathBuf> = vec![dir.clone()];
    let mut git_root: Option<PathBuf> = None;
    let mut cursor = dir;
    let mut cursor = dir.clone();
    while let Some(parent) = cursor.parent() {
        let git_marker = cursor.join(".git");
        let git_exists = match std::fs::metadata(&git_marker) {

21
codex-rs/core/src/prompt_for_compact_command.md
Normal file
@@ -0,0 +1,21 @@
You are a summarization assistant. A conversation follows between a user and a coding-focused AI (Codex). Your task is to generate a clear summary capturing:

• High-level objective or problem being solved
• Key instructions or design decisions given by the user
• Main code actions or behaviors from the AI
• Important variables, functions, modules, or outputs discussed
• Any unresolved questions or next steps

Produce the summary in a structured format like:

**Objective:** …

**User instructions:** … (bulleted)

**AI actions / code behavior:** … (bulleted)

**Important entities:** … (e.g. function names, variables, files)

**Open issues / next steps:** … (if any)

**Summary (concise):** (one or two sentences)
@@ -1,55 +0,0 @@
use crate::protocol::ReviewFinding;

// Note: We keep this module UI-agnostic. It returns plain strings that
// higher layers (e.g., TUI) may style as needed.

fn format_location(item: &ReviewFinding) -> String {
    let path = item.code_location.absolute_file_path.display();
    let start = item.code_location.line_range.start;
    let end = item.code_location.line_range.end;
    format!("{path}:{start}-{end}")
}

/// Format a full review findings block as plain text lines.
///
/// - When `selection` is `Some`, each item line includes a checkbox marker:
///   "[x]" for selected items and "[ ]" for unselected. Missing indices
///   default to selected.
/// - When `selection` is `None`, the marker is omitted and a simple bullet is
///   rendered ("- Title — path:start-end").
pub fn format_review_findings_block(
    findings: &[ReviewFinding],
    selection: Option<&[bool]>,
) -> String {
    let mut lines: Vec<String> = Vec::new();
    lines.push(String::new());

    // Header
    if findings.len() > 1 {
        lines.push("Full review comments:".to_string());
    } else {
        lines.push("Review comment:".to_string());
    }

    for (idx, item) in findings.iter().enumerate() {
        lines.push(String::new());

        let title = &item.title;
        let location = format_location(item);

        if let Some(flags) = selection {
            // Default to selected if index is out of bounds.
            let checked = flags.get(idx).copied().unwrap_or(true);
            let marker = if checked { "[x]" } else { "[ ]" };
            lines.push(format!("- {marker} {title} — {location}"));
        } else {
            lines.push(format!("- {title} — {location}"));
        }

        for body_line in item.body.lines() {
            lines.push(format!("  {body_line}"));
        }
    }

    lines.join("\n")
}
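
For reference, the rendered shape this helper produced when a selection slice was supplied; the finding below is hypothetical:

Review comment:

- [x] Avoid unwrap in parser — /repo/core/src/parse.rs:42-48
  The unwrap can panic on malformed input.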
@@ -3,10 +3,6 @@ use std::io::{self};
use std::path::Path;
use std::path::PathBuf;

use codex_file_search as file_search;
use std::num::NonZero;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use time::OffsetDateTime;
use time::PrimitiveDateTime;
use time::format_description::FormatItem;
@@ -322,12 +318,6 @@ async fn read_head_and_flags(
                head.push(val);
            }
        }
        RolloutItem::TurnContext(_) => {
            // Not included in `head`; skip.
        }
        RolloutItem::Compacted(_) => {
            // Not included in `head`; skip.
        }
        RolloutItem::EventMsg(ev) => {
            if matches!(ev, EventMsg::UserMessage(_)) {
                saw_user_event = true;
@@ -338,48 +328,3 @@ async fn read_head_and_flags(

    Ok((head, saw_session_meta, saw_user_event))
}

/// Locate a recorded conversation rollout file by its UUID string using the existing
/// paginated listing implementation. Returns `Ok(Some(path))` if found, `Ok(None)` if not present
/// or the id is invalid.
pub async fn find_conversation_path_by_id_str(
    codex_home: &Path,
    id_str: &str,
) -> io::Result<Option<PathBuf>> {
    // Validate UUID format early.
    if Uuid::parse_str(id_str).is_err() {
        return Ok(None);
    }

    let mut root = codex_home.to_path_buf();
    root.push(SESSIONS_SUBDIR);
    if !root.exists() {
        return Ok(None);
    }
    // This is safe because we know the values are valid.
    #[allow(clippy::unwrap_used)]
    let limit = NonZero::new(1).unwrap();
    // This is safe because we know the values are valid.
    #[allow(clippy::unwrap_used)]
    let threads = NonZero::new(2).unwrap();
    let cancel = Arc::new(AtomicBool::new(false));
    let exclude: Vec<String> = Vec::new();
    let compute_indices = false;

    let results = file_search::run(
        id_str,
        limit,
        &root,
        exclude,
        threads,
        cancel,
        compute_indices,
    )
    .map_err(|e| io::Error::other(format!("file search failed: {e}")))?;

    Ok(results
        .matches
        .into_iter()
        .next()
        .map(|m| root.join(m.path)))
}

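A minimal call-site sketch for the lookup above, assuming an async context and a real CODEX_HOME; the UUID is a placeholder:

// Resolve a rollout file from a conversation id, or report that none exists.
let codex_home = std::path::Path::new("/home/user/.codex");
match find_conversation_path_by_id_str(codex_home, "123e4567-e89b-12d3-a456-426614174000").await? {
    Some(path) => println!("rollout file: {}", path.display()),
    None => println!("no recorded conversation with that id"),
}
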
@@ -8,7 +8,6 @@ pub(crate) mod policy;
pub mod recorder;

pub use codex_protocol::protocol::SessionMeta;
pub use list::find_conversation_path_by_id_str;
pub use recorder::RolloutRecorder;
pub use recorder::RolloutRecorderParams;


@@ -8,10 +8,8 @@ pub(crate) fn is_persisted_response_item(item: &RolloutItem) -> bool {
    match item {
        RolloutItem::ResponseItem(item) => should_persist_response_item(item),
        RolloutItem::EventMsg(ev) => should_persist_event_msg(ev),
        // Persist Codex executive markers so we can analyze flows (e.g., compaction, API turns).
        RolloutItem::Compacted(_) | RolloutItem::TurnContext(_) | RolloutItem::SessionMeta(_) => {
            true
        }
        // Always persist session meta
        RolloutItem::SessionMeta(_) => true,
    }
}

@@ -25,9 +23,8 @@ pub(crate) fn should_persist_response_item(item: &ResponseItem) -> bool {
        | ResponseItem::FunctionCall { .. }
        | ResponseItem::FunctionCallOutput { .. }
        | ResponseItem::CustomToolCall { .. }
        | ResponseItem::CustomToolCallOutput { .. }
        | ResponseItem::WebSearchCall { .. } => true,
        ResponseItem::Other => false,
        | ResponseItem::CustomToolCallOutput { .. } => true,
        ResponseItem::WebSearchCall { .. } | ResponseItem::Other => false,
    }
}

@@ -39,10 +36,7 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
        | EventMsg::AgentMessage(_)
        | EventMsg::AgentReasoning(_)
        | EventMsg::AgentReasoningRawContent(_)
        | EventMsg::TokenCount(_)
        | EventMsg::EnteredReviewMode(_)
        | EventMsg::ExitedReviewMode(_)
        | EventMsg::TurnAborted(_) => true,
        | EventMsg::TokenCount(_) => true,
        EventMsg::Error(_)
        | EventMsg::TaskStarted(_)
        | EventMsg::TaskComplete(_)
@@ -69,6 +63,7 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
        | EventMsg::McpListToolsResponse(_)
        | EventMsg::ListCustomPromptsResponse(_)
        | EventMsg::PlanUpdate(_)
        | EventMsg::TurnAborted(_)
        | EventMsg::ShutdownComplete
        | EventMsg::ConversationPath(_) => false,
    }

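A sketch of how the predicate composes at a call site; `items` is a hypothetical Vec<RolloutItem>:

// Keep only the items the rollout file should record.
let persisted: Vec<RolloutItem> = items
    .into_iter()
    .filter(is_persisted_response_item)
    .collect();
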
@@ -204,6 +204,7 @@ impl RolloutRecorder {

    pub(crate) async fn get_rollout_history(path: &Path) -> std::io::Result<InitialHistory> {
        info!("Resuming rollout from {path:?}");
        tracing::error!("Resuming rollout from {path:?}");
        let text = tokio::fs::read_to_string(path).await?;
        if text.trim().is_empty() {
            return Err(IoError::other("empty session file"));
@@ -237,12 +238,6 @@ impl RolloutRecorder {
                RolloutItem::ResponseItem(item) => {
                    items.push(RolloutItem::ResponseItem(item));
                }
                RolloutItem::Compacted(item) => {
                    items.push(RolloutItem::Compacted(item));
                }
                RolloutItem::TurnContext(item) => {
                    items.push(RolloutItem::TurnContext(item));
                }
                RolloutItem::EventMsg(_ev) => {
                    items.push(RolloutItem::EventMsg(_ev));
                }
@@ -253,7 +248,7 @@ impl RolloutRecorder {
            }
        }

        info!(
        tracing::error!(
            "Resumed rollout with {} items, conversation ID: {:?}",
            items.len(),
            conversation_id

@@ -305,7 +305,7 @@ async fn test_pagination_cursor() {
            path: p1,
            head: head_1,
        }],
        next_cursor: Some(expected_cursor3),
        next_cursor: Some(expected_cursor3.clone()),
        num_scanned_files: 5, // scanned 05, 04 (anchor), 03, 02 (anchor), 01
        reached_scan_cap: false,
    };
@@ -344,7 +344,7 @@ async fn test_get_conversation_contents() {
    let expected_cursor: Cursor = serde_json::from_str(&format!("\"{ts}|{uuid}\"")).unwrap();
    let expected_page = ConversationsPage {
        items: vec![ConversationItem {
            path: expected_path,
            path: expected_path.clone(),
            head: expected_head,
        }],
        next_cursor: Some(expected_cursor),
@@ -437,7 +437,7 @@ async fn test_stable_ordering_same_second_pagination() {
            path: p1,
            head: head(u1),
        }],
        next_cursor: Some(expected_cursor2),
        next_cursor: Some(expected_cursor2.clone()),
        num_scanned_files: 3, // scanned u3, u2 (anchor), u1
        reached_scan_cap: false,
    };

@@ -13,14 +13,9 @@ use crate::protocol::SandboxPolicy;

#[derive(Debug, PartialEq)]
pub enum SafetyCheck {
    AutoApprove {
        sandbox_type: SandboxType,
        user_explicitly_approved: bool,
    },
    AutoApprove { sandbox_type: SandboxType },
    AskUser,
    Reject {
        reason: String,
    },
    Reject { reason: String },
}

pub fn assess_patch_safety(
@@ -57,16 +52,12 @@ pub fn assess_patch_safety(
    // fall back to asking the user because the patch may touch arbitrary
    // paths outside the project.
    match get_platform_sandbox() {
        Some(sandbox_type) => SafetyCheck::AutoApprove {
            sandbox_type,
            user_explicitly_approved: false,
        },
        Some(sandbox_type) => SafetyCheck::AutoApprove { sandbox_type },
        None if sandbox_policy == &SandboxPolicy::DangerFullAccess => {
            // If the user has explicitly requested DangerFullAccess, then
            // we can auto-approve even without a sandbox.
            SafetyCheck::AutoApprove {
                sandbox_type: SandboxType::None,
                user_explicitly_approved: false,
            }
        }
        None => SafetyCheck::AskUser,
@@ -110,7 +101,6 @@ pub fn assess_command_safety(
    if is_known_safe_command(command) || approved.contains(command) {
        return SafetyCheck::AutoApprove {
            sandbox_type: SandboxType::None,
            user_explicitly_approved: false,
        };
    }

@@ -136,17 +126,13 @@ pub(crate) fn assess_safety_for_untrusted_command(
        | (Never, DangerFullAccess)
        | (OnRequest, DangerFullAccess) => SafetyCheck::AutoApprove {
            sandbox_type: SandboxType::None,
            user_explicitly_approved: false,
        },
        (OnRequest, ReadOnly) | (OnRequest, WorkspaceWrite { .. }) => {
            if with_escalated_permissions {
                SafetyCheck::AskUser
            } else {
                match get_platform_sandbox() {
                    Some(sandbox_type) => SafetyCheck::AutoApprove {
                        sandbox_type,
                        user_explicitly_approved: false,
                    },
                    Some(sandbox_type) => SafetyCheck::AutoApprove { sandbox_type },
                    // Fall back to asking since the command is untrusted and
                    // we do not have a sandbox available
                    None => SafetyCheck::AskUser,
@@ -158,10 +144,7 @@ pub(crate) fn assess_safety_for_untrusted_command(
        | (OnFailure, ReadOnly)
        | (OnFailure, WorkspaceWrite { .. }) => {
            match get_platform_sandbox() {
                Some(sandbox_type) => SafetyCheck::AutoApprove {
                    sandbox_type,
                    user_explicitly_approved: false,
                },
                Some(sandbox_type) => SafetyCheck::AutoApprove { sandbox_type },
                None => {
                    if matches!(approval_policy, OnFailure) {
                        // Since the command is not trusted, even though the
@@ -310,7 +293,7 @@ mod tests {
        // With the parent dir explicitly added as a writable root, the
        // outside write should be permitted.
        let policy_with_parent = SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![parent],
            writable_roots: vec![parent.clone()],
            network_access: false,
            exclude_tmpdir_env_var: true,
            exclude_slash_tmp: true,
@@ -359,10 +342,7 @@ mod tests {
        );

        let expected = match get_platform_sandbox() {
            Some(sandbox_type) => SafetyCheck::AutoApprove {
                sandbox_type,
                user_explicitly_approved: false,
            },
            Some(sandbox_type) => SafetyCheck::AutoApprove { sandbox_type },
            None => SafetyCheck::AskUser,
        };
        assert_eq!(safety_check, expected);

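A minimal sketch of consuming the simplified enum (the right-hand shape in this diff); `check` stands in for a value returned by one of the assess_* functions above:

match check {
    SafetyCheck::AutoApprove { sandbox_type } => {
        println!("auto-approved; run under sandbox {sandbox_type:?}");
    }
    SafetyCheck::AskUser => println!("escalate to the user"),
    SafetyCheck::Reject { reason } => println!("rejected: {reason}"),
}
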
@@ -18,20 +18,19 @@ const MACOS_PATH_TO_SEATBELT_EXECUTABLE: &str = "/usr/bin/sandbox-exec";

pub async fn spawn_command_under_seatbelt(
    command: Vec<String>,
    command_cwd: PathBuf,
    sandbox_policy: &SandboxPolicy,
    sandbox_policy_cwd: &Path,
    cwd: PathBuf,
    stdio_policy: StdioPolicy,
    mut env: HashMap<String, String>,
) -> std::io::Result<Child> {
    let args = create_seatbelt_command_args(command, sandbox_policy, sandbox_policy_cwd);
    let args = create_seatbelt_command_args(command, sandbox_policy, &cwd);
    let arg0 = None;
    env.insert(CODEX_SANDBOX_ENV_VAR.to_string(), "seatbelt".to_string());
    spawn_child_async(
        PathBuf::from(MACOS_PATH_TO_SEATBELT_EXECUTABLE),
        args,
        arg0,
        command_cwd,
        cwd,
        sandbox_policy,
        stdio_policy,
        env,
@@ -42,7 +41,7 @@ pub async fn spawn_command_under_seatbelt(
fn create_seatbelt_command_args(
    command: Vec<String>,
    sandbox_policy: &SandboxPolicy,
    sandbox_policy_cwd: &Path,
    cwd: &Path,
) -> Vec<String> {
    let (file_write_policy, extra_cli_args) = {
        if sandbox_policy.has_full_disk_write_access() {
@@ -52,7 +51,7 @@ fn create_seatbelt_command_args(
            Vec::<String>::new(),
        )
    } else {
        let writable_roots = sandbox_policy.get_writable_roots_with_cwd(sandbox_policy_cwd);
        let writable_roots = sandbox_policy.get_writable_roots_with_cwd(cwd);

        let mut writable_folder_policies: Vec<String> = Vec::new();
        let mut cli_args: Vec<String> = Vec::new();
@@ -154,7 +153,7 @@ mod tests {
    // Build a policy that only includes the two test roots as writable and
    // does not automatically include the default TMPDIR or /tmp.
    let policy = SandboxPolicy::WorkspaceWrite {
        writable_roots: vec![root_with_git, root_without_git],
        writable_roots: vec![root_with_git.clone(), root_without_git.clone()],
        network_access: false,
        exclude_tmpdir_env_var: true,
        exclude_slash_tmp: true,

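A hedged call-site fragment for the single-`cwd` variant of the signature above; `policy` and `stdio_policy` are assumed to come from the caller, and the path is a placeholder:

// Illustrative only: spawns `ls -la` under the macOS Seatbelt sandbox.
let child = spawn_command_under_seatbelt(
    vec!["ls".to_string(), "-la".to_string()],
    &policy,
    PathBuf::from("/Users/dev/project"),
    stdio_policy,
    HashMap::new(),
)
.await?;
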
@@ -2,7 +2,6 @@

; inspired by Chrome's sandbox policy:
; https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/common.sb;l=273-319;drc=7b3962fe2e5fc9e2ee58000dc8fbf3429d84d3bd
; https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/renderer.sb;l=64;drc=7b3962fe2e5fc9e2ee58000dc8fbf3429d84d3bd

; start with closed-by-default
(deny default)
@@ -10,13 +9,7 @@
; child processes inherit the policy of their parent
(allow process-exec)
(allow process-fork)
(allow signal (target same-sandbox))

; Allow cf prefs to work.
(allow user-preference-read)

; process-info
(allow process-info* (target same-sandbox))
(allow signal (target self))

(allow file-write-data
  (require-all
@@ -39,22 +32,28 @@
    (sysctl-name "hw.l3cachesize_compat")
    (sysctl-name "hw.logicalcpu_max")
    (sysctl-name "hw.machine")
    (sysctl-name "hw.memsize")
    (sysctl-name "hw.ncpu")
    (sysctl-name "hw.nperflevels")
    ; Chrome locks these CPU feature detections down a bit more tightly,
    ; but mostly for fingerprinting concerns, which aren't an issue for codex.
    (sysctl-name-prefix "hw.optional.arm.")
    (sysctl-name-prefix "hw.optional.armv8_")
    (sysctl-name "hw.optional.arm.FEAT_BF16")
    (sysctl-name "hw.optional.arm.FEAT_DotProd")
    (sysctl-name "hw.optional.arm.FEAT_FCMA")
    (sysctl-name "hw.optional.arm.FEAT_FHM")
    (sysctl-name "hw.optional.arm.FEAT_FP16")
    (sysctl-name "hw.optional.arm.FEAT_I8MM")
    (sysctl-name "hw.optional.arm.FEAT_JSCVT")
    (sysctl-name "hw.optional.arm.FEAT_LSE")
    (sysctl-name "hw.optional.arm.FEAT_RDM")
    (sysctl-name "hw.optional.arm.FEAT_SHA512")
    (sysctl-name "hw.optional.armv8_2_sha512")
    (sysctl-name "hw.memsize")
    (sysctl-name "hw.pagesize")
    (sysctl-name "hw.packages")
    (sysctl-name "hw.pagesize_compat")
    (sysctl-name "hw.pagesize")
    (sysctl-name "hw.physicalcpu_max")
    (sysctl-name "hw.tbfrequency_compat")
    (sysctl-name "hw.vectorunit")
    (sysctl-name "kern.hostname")
    (sysctl-name "kern.maxfilesperproc")
    (sysctl-name "kern.maxproc")
    (sysctl-name "kern.osproductversion")
    (sysctl-name "kern.osrelease")
    (sysctl-name "kern.ostype")
@@ -64,27 +63,14 @@
    (sysctl-name "kern.usrstack64")
    (sysctl-name "kern.version")
    (sysctl-name "sysctl.proc_cputype")
    (sysctl-name "vm.loadavg")
    (sysctl-name-prefix "hw.perflevel")
    (sysctl-name-prefix "kern.proc.pgrp.")
    (sysctl-name-prefix "kern.proc.pid.")
    (sysctl-name-prefix "net.routetable.")
)

; IOKit
(allow iokit-open
    (iokit-registry-entry-class "RootDomainUserClient")
)

; needed to look up user info, see https://crbug.com/792228
(allow mach-lookup
    (global-name "com.apple.system.opendirectoryd.libinfo")
)

; Added on top of Chrome profile
; Needed for python multiprocessing on MacOS for the SemLock
(allow ipc-posix-sem)

; needed to look up user info, see https://crbug.com/792228
(allow mach-lookup
    (global-name "com.apple.PowerManagement.control")
    (global-name "com.apple.system.opendirectoryd.libinfo")
)

@@ -5,20 +5,20 @@ use std::path::PathBuf;

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct ZshShell {
    pub(crate) shell_path: String,
    pub(crate) zshrc_path: String,
    shell_path: String,
    zshrc_path: String,
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct BashShell {
    pub(crate) shell_path: String,
    pub(crate) bashrc_path: String,
    shell_path: String,
    bashrc_path: String,
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct PowerShellConfig {
    pub(crate) exe: String, // Executable name or path, e.g. "pwsh" or "powershell.exe".
    pub(crate) bash_exe_fallback: Option<PathBuf>, // In case the model generates a bash command.
    exe: String, // Executable name or path, e.g. "pwsh" or "powershell.exe".
    bash_exe_fallback: Option<PathBuf>, // In case the model generates a bash command.
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
@@ -32,19 +32,15 @@ pub enum Shell {
impl Shell {
    pub fn format_default_shell_invocation(&self, command: Vec<String>) -> Option<Vec<String>> {
        match self {
            Shell::Zsh(zsh) => format_shell_invocation_with_rc(
                command.as_slice(),
                &zsh.shell_path,
                &zsh.zshrc_path,
            ),
            Shell::Bash(bash) => format_shell_invocation_with_rc(
                command.as_slice(),
                &bash.shell_path,
                &bash.bashrc_path,
            ),
            Shell::Zsh(zsh) => {
                format_shell_invocation_with_rc(&command, &zsh.shell_path, &zsh.zshrc_path)
            }
            Shell::Bash(bash) => {
                format_shell_invocation_with_rc(&command, &bash.shell_path, &bash.bashrc_path)
            }
            Shell::PowerShell(ps) => {
                // If model generated a bash command, prefer a detected bash fallback
                if let Some(script) = strip_bash_lc(command.as_slice()) {
                if let Some(script) = strip_bash_lc(&command) {
                    return match &ps.bash_exe_fallback {
                        Some(bash) => Some(vec![
                            bash.to_string_lossy().to_string(),
@@ -106,7 +102,7 @@ impl Shell {
}

fn format_shell_invocation_with_rc(
    command: &[String],
    command: &Vec<String>,
    shell_path: &str,
    rc_path: &str,
) -> Option<Vec<String>> {
@@ -122,8 +118,8 @@ fn format_shell_invocation_with_rc(
    Some(vec![shell_path.to_string(), "-lc".to_string(), rc_command])
}

fn strip_bash_lc(command: &[String]) -> Option<String> {
    match command {
fn strip_bash_lc(command: &Vec<String>) -> Option<String> {
    match command.as_slice() {
        // exactly three items
        [first, second, third]
            // first two must be "bash", "-lc"
@@ -330,7 +326,10 @@ mod tests {
            .format_default_shell_invocation(input.iter().map(|s| s.to_string()).collect());
        let expected_cmd = expected_cmd
            .iter()
            .map(|s| s.replace("BASHRC_PATH", bashrc_path.to_str().unwrap()))
            .map(|s| {
                s.replace("BASHRC_PATH", bashrc_path.to_str().unwrap())
                    .to_string()
            })
            .collect();

        assert_eq!(actual_cmd, Some(expected_cmd));
@@ -349,7 +348,6 @@ mod tests {
            },
            SandboxType::None,
            &SandboxPolicy::DangerFullAccess,
            temp_home.path(),
            &None,
            None,
        )
@@ -437,7 +435,10 @@ mod macos_tests {
            .format_default_shell_invocation(input.iter().map(|s| s.to_string()).collect());
        let expected_cmd = expected_cmd
            .iter()
            .map(|s| s.replace("ZSHRC_PATH", zshrc_path.to_str().unwrap()))
            .map(|s| {
                s.replace("ZSHRC_PATH", zshrc_path.to_str().unwrap())
                    .to_string()
            })
            .collect();

        assert_eq!(actual_cmd, Some(expected_cmd));
@@ -456,7 +457,6 @@ mod macos_tests {
            },
            SandboxType::None,
            &SandboxPolicy::DangerFullAccess,
            temp_home.path(),
            &None,
            None,
        )

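The `strip_bash_lc` match is the interesting bit; a simplified, runnable reimplementation for illustration (the real guard clauses are elided in the hunk above):

fn strip_bash_lc(command: &[String]) -> Option<String> {
    match command {
        // Exactly `bash -lc <script>`: return the script, else nothing.
        [first, second, third] if first == "bash" && second == "-lc" => Some(third.clone()),
        _ => None,
    }
}

fn main() {
    let cmd = vec!["bash".to_string(), "-lc".to_string(), "echo hi".to_string()];
    assert_eq!(strip_bash_lc(&cmd), Some("echo hi".to_string()));
    assert_eq!(strip_bash_lc(&["ls".to_string()]), None);
}
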
@@ -3,6 +3,8 @@ use serde::Deserialize;
use serde::Serialize;
use thiserror::Error;

use codex_protocol::mcp_protocol::AuthMode;

#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Default)]
pub struct TokenData {
    /// Flat info parsed from the JWT in auth.json.
@@ -20,6 +22,36 @@ pub struct TokenData {
    pub account_id: Option<String>,
}

impl TokenData {
    /// Returns true if this is a plan that should use the traditional
    /// "metered" billing via an API key.
    pub(crate) fn should_use_api_key(
        &self,
        preferred_auth_method: AuthMode,
        is_openai_email: bool,
    ) -> bool {
        if preferred_auth_method == AuthMode::ApiKey {
            return true;
        }
        // If the email is an OpenAI email, use AuthMode::ChatGPT unless preferred_auth_method is AuthMode::ApiKey.
        if is_openai_email {
            return false;
        }

        self.id_token
            .chatgpt_plan_type
            .as_ref()
            .is_none_or(|plan| plan.is_plan_that_should_use_api_key())
    }

    pub fn is_openai_email(&self) -> bool {
        self.id_token
            .email
            .as_deref()
            .is_some_and(|email| email.trim().to_ascii_lowercase().ends_with("@openai.com"))
    }
}

/// Flat subset of useful claims in id_token from auth.json.
#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
pub struct IdTokenInfo {
@@ -47,6 +79,28 @@ pub(crate) enum PlanType {
    Unknown(String),
}

impl PlanType {
    fn is_plan_that_should_use_api_key(&self) -> bool {
        match self {
            Self::Known(known) => {
                use KnownPlan::*;
                !matches!(known, Free | Plus | Pro | Team)
            }
            Self::Unknown(_) => {
                // Unknown plans should use the API key.
                true
            }
        }
    }

    pub fn as_string(&self) -> String {
        match self {
            Self::Known(known) => format!("{known:?}").to_lowercase(),
            Self::Unknown(s) => s.clone(),
        }
    }
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub(crate) enum KnownPlan {

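The precedence in `should_use_api_key` is worth restating. Hypothetical assertions for a `token` (a TokenData on a Plus plan with a non-OpenAI email):

assert!(token.should_use_api_key(AuthMode::ApiKey, false)); // 1: explicit API-key preference always wins
assert!(!token.should_use_api_key(AuthMode::ChatGPT, true)); // 2: an @openai.com email forces ChatGPT auth
assert!(!token.should_use_api_key(AuthMode::ChatGPT, false)); // 3: Free/Plus/Pro/Team plans stay on ChatGPT
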
@@ -678,7 +678,7 @@ index {left_oid}..{right_oid}
        let dest = dir.path().join("dest.txt");
        let mut acc = TurnDiffTracker::new();
        let mv = HashMap::from([(
            src,
            src.clone(),
            FileChange::Update {
                unified_diff: "".into(),
                move_path: Some(dest.clone()),

@@ -100,13 +100,10 @@ type OutputBuffer = Arc<Mutex<OutputBufferState>>;
type OutputHandles = (OutputBuffer, Arc<Notify>);

impl ManagedUnifiedExecSession {
    fn new(
        session: ExecCommandSession,
        initial_output_rx: tokio::sync::broadcast::Receiver<Vec<u8>>,
    ) -> Self {
    fn new(session: ExecCommandSession) -> Self {
        let output_buffer = Arc::new(Mutex::new(OutputBufferState::default()));
        let output_notify = Arc::new(Notify::new());
        let mut receiver = initial_output_rx;
        let mut receiver = session.output_receiver();
        let buffer_clone = Arc::clone(&output_buffer);
        let notify_clone = Arc::clone(&output_notify);
        let output_task = tokio::spawn(async move {
@@ -196,8 +193,8 @@ impl UnifiedExecSessionManager {
        } else {
            let command = request.input_chunks.to_vec();
            let new_id = self.next_session_id.fetch_add(1, Ordering::SeqCst);
            let (session, initial_output_rx) = create_unified_exec_session(&command).await?;
            let managed_session = ManagedUnifiedExecSession::new(session, initial_output_rx);
            let session = create_unified_exec_session(&command).await?;
            let managed_session = ManagedUnifiedExecSession::new(session);
            let (buffer, notify) = managed_session.output_handles();
            writer_tx = managed_session.writer_sender();
            output_buffer = buffer;
@@ -300,13 +297,7 @@ impl UnifiedExecSessionManager {

async fn create_unified_exec_session(
    command: &[String],
) -> Result<
    (
        ExecCommandSession,
        tokio::sync::broadcast::Receiver<Vec<u8>>,
    ),
    UnifiedExecError,
> {
) -> Result<ExecCommandSession, UnifiedExecError> {
    if command.is_empty() {
        return Err(UnifiedExecError::MissingCommandLine);
    }
@@ -389,7 +380,7 @@ async fn create_unified_exec_session(
        wait_exit_status.store(true, Ordering::SeqCst);
    });

    let (session, initial_output_rx) = ExecCommandSession::new(
    Ok(ExecCommandSession::new(
        writer_tx,
        output_tx,
        killer,
@@ -397,8 +388,7 @@ async fn create_unified_exec_session(
        writer_handle,
        wait_handle,
        exit_status,
    );
    Ok((session, initial_output_rx))
    ))
}

#[cfg(test)]
@@ -431,7 +421,7 @@ mod tests {
        .handle_request(UnifiedExecRequest {
            session_id: None,
            input_chunks: &["bash".to_string(), "-i".to_string()],
            timeout_ms: Some(2_500),
            timeout_ms: Some(1_500),
        })
        .await?;
    let session_id = open_shell.session_id.expect("expected session_id");
@@ -451,7 +441,7 @@ mod tests {
        .handle_request(UnifiedExecRequest {
            session_id: Some(session_id),
            input_chunks: &["echo $CODEX_INTERACTIVE_SHELL_VAR\n".to_string()],
            timeout_ms: Some(2_500),
            timeout_ms: Some(1_500),
        })
        .await?;
    assert!(out_2.output.contains("codex"));
@@ -468,7 +458,7 @@ mod tests {
        .handle_request(UnifiedExecRequest {
            session_id: None,
            input_chunks: &["/bin/bash".to_string(), "-i".to_string()],
            timeout_ms: Some(2_500),
            timeout_ms: Some(1_500),
        })
        .await?;
    let session_a = shell_a.session_id.expect("expected session id");
@@ -477,7 +467,7 @@ mod tests {
        .handle_request(UnifiedExecRequest {
            session_id: Some(session_a),
            input_chunks: &["export CODEX_INTERACTIVE_SHELL_VAR=codex\n".to_string()],
            timeout_ms: Some(2_500),
            timeout_ms: Some(1_500),
        })
        .await?;

@@ -488,7 +478,7 @@ mod tests {
            "echo".to_string(),
            "$CODEX_INTERACTIVE_SHELL_VAR\n".to_string(),
        ],
        timeout_ms: Some(2_500),
        timeout_ms: Some(1_500),
    })
    .await?;
    assert!(!out_2.output.contains("codex"));
@@ -497,7 +487,7 @@ mod tests {
        .handle_request(UnifiedExecRequest {
            session_id: Some(session_a),
            input_chunks: &["echo $CODEX_INTERACTIVE_SHELL_VAR\n".to_string()],
            timeout_ms: Some(2_500),
            timeout_ms: Some(1_500),
        })
        .await?;
    assert!(out_3.output.contains("codex"));
@@ -514,7 +504,7 @@ mod tests {
        .handle_request(UnifiedExecRequest {
            session_id: None,
            input_chunks: &["bash".to_string(), "-i".to_string()],
            timeout_ms: Some(2_500),
            timeout_ms: Some(1_500),
        })
        .await?;
    let session_id = open_shell.session_id.expect("expected session id");
@@ -526,7 +516,7 @@ mod tests {
            "export".to_string(),
            "CODEX_INTERACTIVE_SHELL_VAR=codex\n".to_string(),
        ],
        timeout_ms: Some(2_500),
        timeout_ms: Some(1_500),
    })
    .await?;

@@ -557,7 +547,6 @@ mod tests {

    #[cfg(unix)]
    #[tokio::test]
    #[ignore] // Ignored until we have a better way to test this.
    async fn requests_with_large_timeout_are_capped() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();

@@ -579,14 +568,33 @@ mod tests {

    #[cfg(unix)]
    #[tokio::test]
    #[ignore] // Ignored until we have a better way to test this.
    async fn completed_commands_do_not_persist_sessions() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();
        let result = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &["/bin/echo".to_string(), "codex".to_string()],
                timeout_ms: Some(2_500),
                timeout_ms: Some(1_500),
            })
            .await?;

        assert!(result.session_id.is_none());
        assert!(result.output.contains("codex"));

        assert!(manager.sessions.lock().await.is_empty());

        Ok(())
    }

    #[cfg(unix)]
    #[tokio::test]
    async fn correct_path_resolution() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();
        let result = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &["echo".to_string(), "codex".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;

@@ -607,7 +615,7 @@ mod tests {
        .handle_request(UnifiedExecRequest {
            session_id: None,
            input_chunks: &["/bin/bash".to_string(), "-i".to_string()],
            timeout_ms: Some(2_500),
            timeout_ms: Some(1_500),
        })
        .await?;
    let session_id = open_shell.session_id.expect("expected session id");
@@ -616,7 +624,7 @@ mod tests {
        .handle_request(UnifiedExecRequest {
            session_id: Some(session_id),
            input_chunks: &["exit\n".to_string()],
            timeout_ms: Some(2_500),
            timeout_ms: Some(1_500),
        })
        .await?;


@@ -1,7 +0,0 @@
You were originally given instructions from a user over one or more turns. Here were the user messages:

{{ user_messages_text }}

Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model; use the information in this summary to assist with your own analysis:

{{ summary_text }}
@@ -1,5 +0,0 @@
You have exceeded the maximum number of tokens; please stop coding and instead write a short memento message for the next agent. Your note should:
- Summarize what you finished and what still needs work. If there was a recent update_plan call, repeat its steps verbatim.
- List outstanding TODOs with file paths / line numbers so they're easy to find.
- Flag code that needs more tests (edge cases, performance, integration, etc.).
- Record any open bugs, quirks, or setup steps that will make it easier for the next agent to pick up where you left off.
@@ -11,8 +11,6 @@ use codex_core::ReasoningItemContent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::mcp_protocol::AuthMode;
use codex_protocol::mcp_protocol::ConversationId;
use core_test_support::load_default_config_for_test;
use futures::StreamExt;
@@ -72,26 +70,13 @@ async fn run_request(input: Vec<ResponseItem>) -> Value {
    let summary = config.model_reasoning_summary;
    let config = Arc::new(config);

    let conversation_id = ConversationId::new();

    let otel_event_manager = OtelEventManager::new(
        conversation_id,
        config.model.as_str(),
        config.model_family.slug.as_str(),
        None,
        Some(AuthMode::ChatGPT),
        false,
        "test".to_string(),
    );

    let client = ModelClient::new(
        Arc::clone(&config),
        None,
        otel_event_manager,
        provider,
        effort,
        summary,
        conversation_id,
        ConversationId::new(),
    );

    let mut prompt = Prompt::default();

@@ -1,5 +1,4 @@
use std::sync::Arc;
use tracing_test::traced_test;

use codex_core::ContentItem;
use codex_core::ModelClient;
@@ -9,8 +8,6 @@ use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::mcp_protocol::AuthMode;
use codex_protocol::mcp_protocol::ConversationId;
use core_test_support::load_default_config_for_test;
use futures::StreamExt;
@@ -26,15 +23,11 @@ fn network_disabled() -> bool {
}

async fn run_stream(sse_body: &str) -> Vec<ResponseEvent> {
    run_stream_with_bytes(sse_body.as_bytes()).await
}

async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> {
    let server = MockServer::start().await;

    let template = ResponseTemplate::new(200)
        .insert_header("content-type", "text/event-stream")
        .set_body_bytes(sse_body.to_vec());
        .set_body_raw(sse_body.to_string(), "text/event-stream");

    Mock::given(method("POST"))
        .and(path("/v1/chat/completions"))
@@ -70,26 +63,13 @@ async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> {
    let summary = config.model_reasoning_summary;
    let config = Arc::new(config);

    let conversation_id = ConversationId::new();

    let otel_event_manager = OtelEventManager::new(
        conversation_id,
        config.model.as_str(),
        config.model_family.slug.as_str(),
        None,
        Some(AuthMode::ChatGPT),
        false,
        "test".to_string(),
    );

    let client = ModelClient::new(
        Arc::clone(&config),
        None,
        otel_event_manager,
        provider,
        effort,
        summary,
        conversation_id,
        ConversationId::new(),
    );

    let mut prompt = Prompt::default();
@@ -109,8 +89,7 @@ async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> {
    while let Some(event) = stream.next().await {
        match event {
            Ok(ev) => events.push(ev),
            // We still collect the error to exercise telemetry and complete the task.
            Err(_e) => break,
            Err(e) => panic!("stream event error: {e}"),
        }
    }
    events
@@ -339,88 +318,3 @@ async fn streams_reasoning_before_tool_call() {

    assert!(matches!(events[3], ResponseEvent::Completed { .. }));
}

#[tokio::test]
#[traced_test]
async fn chat_sse_emits_failed_on_parse_error() {
    if network_disabled() {
        println!(
            "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
        );
        return;
    }

    let sse_body = concat!("data: not-json\n\n", "data: [DONE]\n\n");

    let _ = run_stream(sse_body).await;

    logs_assert(|lines: &[&str]| {
        lines
            .iter()
            .find(|line| {
                line.contains("codex.api_request") && line.contains("http.response.status_code=200")
            })
            .map(|_| Ok(()))
            .unwrap_or(Err("cannot find codex.api_request event".to_string()))
    });

    logs_assert(|lines: &[&str]| {
        lines
            .iter()
            .find(|line| {
                line.contains("codex.sse_event")
                    && line.contains("error.message")
                    && line.contains("Failed to parse SSE event")
            })
            .map(|_| Ok(()))
            .unwrap_or(Err("cannot find SSE event".to_string()))
    });
}

#[tokio::test]
#[traced_test]
async fn chat_sse_done_chunk_emits_event() {
    if network_disabled() {
        println!(
            "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
        );
        return;
    }

    let sse_body = concat!("data: [DONE]\n\n",);

    let _ = run_stream(sse_body).await;

    logs_assert(|lines: &[&str]| {
        lines
            .iter()
            .find(|line| line.contains("codex.sse_event") && line.contains("event.kind=message"))
            .map(|_| Ok(()))
            .unwrap_or(Err("cannot find SSE event".to_string()))
    });
}

#[tokio::test]
#[traced_test]
async fn chat_sse_emits_error_on_invalid_utf8() {
    if network_disabled() {
        println!(
            "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
        );
        return;
    }

    let _ = run_stream_with_bytes(b"data: \x80\x80\n\n").await;

    logs_assert(|lines: &[&str]| {
        lines
            .iter()
            .find(|line| {
                line.contains("codex.sse_event")
                    && line.contains("error.message")
                    && line.contains("UTF8 error: invalid utf-8 sequence of 1 bytes from index 0")
            })
            .map(|_| Ok(()))
            .unwrap_or(Err("cannot find SSE event".to_string()))
    });
}

@@ -11,4 +11,3 @@ codex-core = { path = "../.." }
serde_json = "1"
tempfile = "3"
tokio = { version = "1", features = ["time"] }
wiremock = "0.6"

@@ -7,8 +7,6 @@ use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;

pub mod responses;

/// Returns a default `Config` whose on-disk state is confined to the provided
/// temporary directory. Using a per-test directory keeps tests hermetic and
/// avoids clobbering a developer’s real `~/.codex`.

@@ -1,133 +0,0 @@
use serde_json::Value;
use wiremock::BodyPrintLimit;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

/// Build an SSE stream body from a list of JSON events.
pub fn sse(events: Vec<Value>) -> String {
    use std::fmt::Write as _;
    let mut out = String::new();
    for ev in events {
        let kind = ev.get("type").and_then(|v| v.as_str()).unwrap();
        writeln!(&mut out, "event: {kind}").unwrap();
        if !ev.as_object().map(|o| o.len() == 1).unwrap_or(false) {
            write!(&mut out, "data: {ev}\n\n").unwrap();
        } else {
            out.push('\n');
        }
    }
    out
}

/// Convenience: SSE event for a completed response with a specific id.
pub fn ev_completed(id: &str) -> Value {
    serde_json::json!({
        "type": "response.completed",
        "response": {
            "id": id,
            "usage": {"input_tokens":0,"input_tokens_details":null,"output_tokens":0,"output_tokens_details":null,"total_tokens":0}
        }
    })
}

pub fn ev_completed_with_tokens(id: &str, total_tokens: u64) -> Value {
    serde_json::json!({
        "type": "response.completed",
        "response": {
            "id": id,
            "usage": {
                "input_tokens": total_tokens,
                "input_tokens_details": null,
                "output_tokens": 0,
                "output_tokens_details": null,
                "total_tokens": total_tokens
            }
        }
    })
}

/// Convenience: SSE event for a single assistant message output item.
pub fn ev_assistant_message(id: &str, text: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "message",
            "role": "assistant",
            "id": id,
            "content": [{"type": "output_text", "text": text}]
        }
    })
}

pub fn ev_function_call(call_id: &str, name: &str, arguments: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "function_call",
            "call_id": call_id,
            "name": name,
            "arguments": arguments
        }
    })
}

/// Convenience: SSE event for an `apply_patch` custom tool call with raw patch
/// text. This mirrors the payload produced by the Responses API when the model
/// invokes `apply_patch` directly (before we convert it to a function call).
pub fn ev_apply_patch_custom_tool_call(call_id: &str, patch: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "custom_tool_call",
            "name": "apply_patch",
            "input": patch,
            "call_id": call_id
        }
    })
}

/// Convenience: SSE event for an `apply_patch` function call. The Responses API
/// wraps the patch content in a JSON string under the `input` key; we recreate
/// the same structure so downstream code exercises the full parsing path.
pub fn ev_apply_patch_function_call(call_id: &str, patch: &str) -> Value {
    let arguments = serde_json::json!({ "input": patch });
    let arguments = serde_json::to_string(&arguments).expect("serialize apply_patch arguments");

    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "function_call",
            "name": "apply_patch",
            "arguments": arguments,
            "call_id": call_id
        }
    })
}

pub fn sse_response(body: String) -> ResponseTemplate {
    ResponseTemplate::new(200)
        .insert_header("content-type", "text/event-stream")
        .set_body_raw(body, "text/event-stream")
}

pub async fn mount_sse_once<M>(server: &MockServer, matcher: M, body: String)
where
    M: wiremock::Match + Send + Sync + 'static,
{
    Mock::given(method("POST"))
        .and(path("/v1/responses"))
        .and(matcher)
        .respond_with(sse_response(body))
        .mount(server)
        .await;
}

pub async fn start_mock_server() -> MockServer {
    MockServer::builder()
        .body_print_limit(BodyPrintLimit::Limited(80_000))
        .start()
        .await
}
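
A sketch of how a test combined these helpers; the marker text is arbitrary:

// Builds a two-event SSE body and mounts it on a mock server.
let server = start_mock_server().await;
let body = sse(vec![
    ev_assistant_message("msg_1", "hello from the mock"),
    ev_completed("resp_1"),
]);
mount_sse_once(&server, wiremock::matchers::any(), body).await;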
@@ -420,6 +420,12 @@ async fn integration_creates_and_checks_session_file() {
    // Second run: resume should update the existing file.
    let marker2 = format!("integration-resume-{}", Uuid::new_v4());
    let prompt2 = format!("echo {marker2}");
    // Cross-platform safe resume override. On Windows, backslashes in a TOML string must be escaped
    // or the parse will fail and the raw literal (including quotes) may be preserved all the way down
    // to Config, which in turn breaks resume because the path is invalid. Normalize to forward slashes
    // to sidestep the issue.
    let resume_path_str = path.to_string_lossy().replace('\\', "/");
    let resume_override = format!("experimental_resume=\"{resume_path_str}\"");
    let mut cmd2 = AssertCommand::new("cargo");
    cmd2.arg("run")
        .arg("-p")
@@ -428,11 +434,11 @@ async fn integration_creates_and_checks_session_file() {
        .arg("--")
        .arg("exec")
        .arg("--skip-git-repo-check")
        .arg("-c")
        .arg(&resume_override)
        .arg("-C")
        .arg(env!("CARGO_MANIFEST_DIR"))
        .arg(&prompt2)
        .arg("resume")
        .arg("--last");
        .arg(&prompt2);
    cmd2.env("CODEX_HOME", home.path())
        .env("OPENAI_API_KEY", "dummy")
        .env("CODEX_RS_SSE_FIXTURE", &fixture)

@@ -1,36 +1,20 @@
|
||||
use codex_core::CodexAuth;
|
||||
use codex_core::ContentItem;
|
||||
use codex_core::ConversationManager;
|
||||
use codex_core::LocalShellAction;
|
||||
use codex_core::LocalShellExecAction;
|
||||
use codex_core::LocalShellStatus;
|
||||
use codex_core::ModelClient;
|
||||
use codex_core::ModelProviderInfo;
|
||||
use codex_core::NewConversation;
|
||||
use codex_core::Prompt;
|
||||
use codex_core::ReasoningItemContent;
|
||||
use codex_core::ResponseEvent;
|
||||
use codex_core::ResponseItem;
|
||||
use codex_core::WireApi;
|
||||
use codex_core::built_in_model_providers;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::InputItem;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
|
||||
use codex_otel::otel_event_manager::OtelEventManager;
|
||||
use codex_protocol::mcp_protocol::AuthMode;
|
||||
use codex_protocol::mcp_protocol::ConversationId;
|
||||
use codex_protocol::models::ReasoningItemReasoningSummary;
|
||||
use codex_protocol::models::WebSearchAction;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
use core_test_support::load_sse_fixture_with_id;
|
||||
use core_test_support::wait_for_event;
|
||||
use futures::StreamExt;
|
||||
use serde_json::json;
|
||||
use std::io::Write;
|
||||
use std::sync::Arc;
|
||||
use tempfile::TempDir;
|
||||
use tracing_test::traced_test;
|
||||
use uuid::Uuid;
|
||||
use wiremock::Mock;
|
||||
use wiremock::MockServer;
|
||||
@@ -239,21 +223,20 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let mut config = load_default_config_for_test(&codex_home);
|
||||
config.model_provider = model_provider;
|
||||
config.experimental_resume = Some(session_path.clone());
|
||||
// Also configure user instructions to ensure they are NOT delivered on resume.
|
||||
config.user_instructions = Some("be nice".to_string());
|
||||
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let auth_manager =
|
||||
codex_core::AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
|
||||
let NewConversation {
|
||||
conversation: codex,
|
||||
session_configured,
|
||||
..
|
||||
} = conversation_manager
|
||||
.resume_conversation_from_rollout(config, session_path.clone(), auth_manager)
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("resume conversation");
|
||||
.expect("create new conversation");
|
||||
|
||||
// 1) Assert initial_messages only includes existing EventMsg entries; response items are not converted
let initial_msgs = session_configured
@@ -294,7 +277,25 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
"content": [{ "type": "input_text", "text": "hello" }]
}
]);
assert_eq!(request_body["input"], expected_input);
let input_array = request_body
.get("input")
.and_then(|v| v.as_array())
.cloned()
.expect("input array in request body");
let filtered: Vec<serde_json::Value> = input_array
.into_iter()
.filter(|item| {
let text = item
.get("content")
.and_then(|c| c.as_array())
.and_then(|a| a.first())
.and_then(|o| o.get("text"))
.and_then(|t| t.as_str())
.unwrap_or("");
!text.contains("<environment_context>")
})
.collect();
assert_eq!(serde_json::json!(filtered), expected_input);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -506,6 +507,79 @@ async fn chatgpt_auth_sends_correct_request() {
);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefers_chatgpt_token_when_config_prefers_chatgpt() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}

// Mock server
let server = MockServer::start().await;

let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");

// Expect ChatGPT base path and correct headers
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(header_regex("Authorization", r"Bearer Access-123"))
.and(header_regex("chatgpt-account-id", r"acc-123"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;

let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};

// Init session
let codex_home = TempDir::new().unwrap();
// Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT.
let _jwt = write_auth_json(
&codex_home,
Some("sk-test-key"),
"pro",
"Access-123",
Some("acc-123"),
);

let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.preferred_auth_method = AuthMode::ChatGPT;

let auth_manager =
match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
Ok(None) => panic!("No CodexAuth found in codex_home"),
Err(e) => panic!("Failed to load CodexAuth: {e}"),
};
let conversation_manager = ConversationManager::new(auth_manager);
let NewConversation {
conversation: codex,
..
} = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation");

codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();

wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
@@ -550,12 +624,14 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {

let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.preferred_auth_method = AuthMode::ApiKey;

let auth_manager = match CodexAuth::from_codex_home(codex_home.path()) {
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
Ok(None) => panic!("No CodexAuth found in codex_home"),
Err(e) => panic!("Failed to load CodexAuth: {e}"),
};
let auth_manager =
match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
Ok(None) => panic!("No CodexAuth found in codex_home"),
Err(e) => panic!("Failed to load CodexAuth: {e}"),
};
let conversation_manager = ConversationManager::new(auth_manager);
let NewConversation {
conversation: codex,
@@ -638,160 +714,6 @@ async fn includes_user_instructions_message_in_request() {
assert_message_ends_with(&request_body["input"][1], "</environment_context>");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn azure_responses_request_includes_store_and_reasoning_ids() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}

let server = MockServer::start().await;

let sse_body = concat!(
"data: {\"type\":\"response.created\",\"response\":{}}\n\n",
"data: {\"type\":\"response.completed\",\"response\":{\"id\":\"resp_1\"}}\n\n",
);

let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_body, "text/event-stream");

Mock::given(method("POST"))
.and(path("/openai/responses"))
.respond_with(template)
.expect(1)
.mount(&server)
.await;

let provider = ModelProviderInfo {
name: "azure".into(),
base_url: Some(format!("{}/openai", server.uri())),
env_key: None,
env_key_instructions: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
requires_openai_auth: false,
};

let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider_id = provider.name.clone();
config.model_provider = provider.clone();
let effort = config.model_reasoning_effort;
let summary = config.model_reasoning_summary;
let config = Arc::new(config);

let conversation_id = ConversationId::new();

let otel_event_manager = OtelEventManager::new(
conversation_id,
config.model.as_str(),
config.model_family.slug.as_str(),
None,
Some(AuthMode::ChatGPT),
false,
"test".to_string(),
);

let client = ModelClient::new(
Arc::clone(&config),
None,
otel_event_manager,
provider,
effort,
summary,
conversation_id,
);

let mut prompt = Prompt::default();
prompt.input.push(ResponseItem::Reasoning {
id: "reasoning-id".into(),
summary: vec![ReasoningItemReasoningSummary::SummaryText {
text: "summary".into(),
}],
content: Some(vec![ReasoningItemContent::ReasoningText {
text: "content".into(),
}]),
encrypted_content: None,
});
prompt.input.push(ResponseItem::Message {
id: Some("message-id".into()),
role: "assistant".into(),
content: vec![ContentItem::OutputText {
text: "message".into(),
}],
});
prompt.input.push(ResponseItem::WebSearchCall {
id: Some("web-search-id".into()),
status: Some("completed".into()),
action: WebSearchAction::Search {
query: "weather".into(),
},
});
prompt.input.push(ResponseItem::FunctionCall {
id: Some("function-id".into()),
name: "do_thing".into(),
arguments: "{}".into(),
call_id: "function-call-id".into(),
});
prompt.input.push(ResponseItem::LocalShellCall {
id: Some("local-shell-id".into()),
call_id: Some("local-shell-call-id".into()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".into(), "hello".into()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
});
prompt.input.push(ResponseItem::CustomToolCall {
id: Some("custom-tool-id".into()),
status: Some("completed".into()),
call_id: "custom-tool-call-id".into(),
name: "custom_tool".into(),
input: "{}".into(),
});

let mut stream = client
.stream(&prompt)
.await
.expect("responses stream to start");

while let Some(event) = stream.next().await {
if let Ok(ResponseEvent::Completed { .. }) = event {
break;
}
}

let requests = server
.received_requests()
.await
.expect("mock server collected requests");
assert_eq!(requests.len(), 1, "expected a single request");
let body: serde_json::Value = requests[0]
.body_json()
.expect("request body to be valid JSON");

assert_eq!(body["store"], serde_json::Value::Bool(true));
assert_eq!(body["stream"], serde_json::Value::Bool(true));
assert_eq!(body["input"].as_array().map(Vec::len), Some(6));
assert_eq!(body["input"][0]["id"].as_str(), Some("reasoning-id"));
assert_eq!(body["input"][1]["id"].as_str(), Some("message-id"));
assert_eq!(body["input"][2]["id"].as_str(), Some("web-search-id"));
assert_eq!(body["input"][3]["id"].as_str(), Some("function-id"));
assert_eq!(body["input"][4]["id"].as_str(), Some("local-shell-id"));
assert_eq!(body["input"][5]["id"].as_str(), Some("custom-tool-id"));
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn azure_overrides_assign_properties_used_for_responses_url() {
let existing_env_var_with_random_value = if cfg!(windows) { "USERNAME" } else { "USER" };
@@ -1046,34 +968,6 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
assert_eq!(requests.len(), 3, "expected 3 requests (one per turn)");

// Replace full-array compare with tail-only raw JSON compare using a single hard-coded value.
let r3_tail_expected = json!([
{
"type": "message",
"role": "user",
"content": [{"type":"input_text","text":"U1"}]
},
{
"type": "message",
"role": "assistant",
"content": [{"type":"output_text","text":"Hey there!\n"}]
},
{
"type": "message",
"role": "user",
"content": [{"type":"input_text","text":"U2"}]
},
{
"type": "message",
"role": "assistant",
"content": [{"type":"output_text","text":"Hey there!\n"}]
},
{
"type": "message",
"role": "user",
"content": [{"type":"input_text","text":"U3"}]
}
]);

let r3_input_array = requests[2]
.body_json::<serde_json::Value>()
.unwrap()
@@ -1081,66 +975,60 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
.and_then(|v| v.as_array())
.cloned()
.expect("r3 missing input array");
// skipping earlier context and developer messages
let tail_len = r3_tail_expected.as_array().unwrap().len();
let actual_tail = &r3_input_array[r3_input_array.len() - tail_len..];
// We only assert on the last 5 items of the input history for request 3.
// With per-turn environment context injected, the last 5 should be:
// [env_ctx, U2, assistant("Hey there!\n"), env_ctx, U3]
let actual_tail = &r3_input_array[r3_input_array.len() - 5..];

// env_ctx 1
assert_eq!(actual_tail[0]["type"], serde_json::json!("message"));
assert_eq!(actual_tail[0]["role"], serde_json::json!("user"));
let env_text_1 = &actual_tail[0]["content"][0]["text"];
assert!(
env_text_1
.as_str()
.expect("env text should be string")
.contains("<environment_context>")
);

// U2
assert_eq!(
serde_json::Value::Array(actual_tail.to_vec()),
r3_tail_expected,
"request 3 tail mismatch",
actual_tail[1],
serde_json::json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": "U2" } ]
})
);

// assistant response
assert_eq!(
actual_tail[2],
serde_json::json!({
"type": "message",
"role": "assistant",
"content": [ { "type": "output_text", "text": "Hey there!\n" } ]
})
);

// env_ctx 2
assert_eq!(actual_tail[3]["type"], serde_json::json!("message"));
assert_eq!(actual_tail[3]["role"], serde_json::json!("user"));
let env_text_2 = &actual_tail[3]["content"][0]["text"];
assert!(
env_text_2
.as_str()
.expect("env text should be string")
.contains("<environment_context>")
);

// U3
assert_eq!(
actual_tail[4],
serde_json::json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": "U3" } ]
})
);
}

#[tokio::test]
#[traced_test]
async fn responses_api_emits_api_request_event() {
let server = MockServer::start().await;

let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");

Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;

let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};

let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.user_instructions = Some("be nice".to_string());

let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;

codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();

wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

logs_assert(|lines: &[&str]| {
lines
.iter()
.find(|line| line.contains("codex.api_request"))
.map(|_| Ok(()))
.unwrap_or_else(|| Err("expected codex.api_request event".to_string()))
});
}

@@ -1,52 +1,90 @@
#![expect(clippy::unwrap_used)]

use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::protocol::ErrorEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::RolloutItem;
use codex_core::protocol::RolloutLine;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use core_test_support::load_default_config_for_test;
use core_test_support::responses;
use core_test_support::wait_for_event;
use serde_json::Value;
use tempfile::TempDir;
use wiremock::Mock;
use wiremock::Request;
use wiremock::Respond;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

use pretty_assertions::assert_eq;
use responses::ev_assistant_message;
use responses::ev_completed;
use responses::sse;
use responses::start_mock_server;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;

// --- Test helpers -----------------------------------------------------------

pub(super) const FIRST_REPLY: &str = "FIRST_REPLY";
pub(super) const SUMMARY_TEXT: &str = "SUMMARY_ONLY_CONTEXT";
pub(super) const SUMMARIZE_TRIGGER: &str = "Start Summarization";
/// Build an SSE stream body from a list of JSON events.
fn sse(events: Vec<Value>) -> String {
use std::fmt::Write as _;
let mut out = String::new();
for ev in events {
let kind = ev.get("type").and_then(|v| v.as_str()).unwrap();
writeln!(&mut out, "event: {kind}").unwrap();
if !ev.as_object().map(|o| o.len() == 1).unwrap_or(false) {
write!(&mut out, "data: {ev}\n\n").unwrap();
} else {
out.push('\n');
}
}
out
}
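
// Illustrative sketch only, not part of the patch: for a multi-key event such
// as the one built by `ev_completed("r1")` below, `sse` emits something along
// the lines of
//
//   event: response.completed
//   data: {"response":{"id":"r1","usage":{...}},"type":"response.completed"}
//
// followed by a blank line; a single-key event emits only the `event:` line.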

/// Convenience: SSE event for a completed response with a specific id.
fn ev_completed(id: &str) -> Value {
serde_json::json!({
"type": "response.completed",
"response": {
"id": id,
"usage": {"input_tokens":0,"input_tokens_details":null,"output_tokens":0,"output_tokens_details":null,"total_tokens":0}
}
})
}

/// Convenience: SSE event for a single assistant message output item.
fn ev_assistant_message(id: &str, text: &str) -> Value {
serde_json::json!({
"type": "response.output_item.done",
"item": {
"type": "message",
"role": "assistant",
"id": id,
"content": [{"type": "output_text", "text": text}]
}
})
}
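
// Sketch of how the two helpers above compose (assumes the constants in this
// file; not part of the patch):
//
//   let body = sse(vec![
//       ev_assistant_message("m1", FIRST_REPLY),
//       ev_completed("r1"),
//   ]);
//   assert!(body.contains("event: response.output_item.done"));
//   assert!(body.contains("event: response.completed"));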

fn sse_response(body: String) -> ResponseTemplate {
ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(body, "text/event-stream")
}

async fn mount_sse_once<M>(server: &MockServer, matcher: M, body: String)
where
M: wiremock::Match + Send + Sync + 'static,
{
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(matcher)
.respond_with(sse_response(body))
.expect(1)
.mount(server)
.await;
}
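
// Hypothetical usage (not part of the patch): mount a one-shot reply that
// only fires for requests whose body mentions a given marker.
//
//   let matcher = |req: &wiremock::Request| {
//       std::str::from_utf8(&req.body).unwrap_or("").contains("hello world")
//   };
//   mount_sse_once(&server, matcher, sse(vec![ev_completed("r0")])).await;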

const FIRST_REPLY: &str = "FIRST_REPLY";
const SUMMARY_TEXT: &str = "SUMMARY_ONLY_CONTEXT";
const SUMMARIZE_TRIGGER: &str = "Start Summarization";
const THIRD_USER_MSG: &str = "next turn";
const AUTO_SUMMARY_TEXT: &str = "AUTO_SUMMARY";
const FIRST_AUTO_MSG: &str = "token limit start";
const SECOND_AUTO_MSG: &str = "token limit push";
const STILL_TOO_BIG_REPLY: &str = "STILL_TOO_BIG";
const MULTI_AUTO_MSG: &str = "multi auto";
const SECOND_LARGE_REPLY: &str = "SECOND_LARGE_REPLY";
const FIRST_AUTO_SUMMARY: &str = "FIRST_AUTO_SUMMARY";
const SECOND_AUTO_SUMMARY: &str = "SECOND_AUTO_SUMMARY";
const FINAL_REPLY: &str = "FINAL_REPLY";
const DUMMY_FUNCTION_NAME: &str = "unsupported_tool";
const DUMMY_CALL_ID: &str = "call-multi-auto";

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn summarize_context_three_requests_and_instructions() {
@@ -58,7 +96,7 @@ async fn summarize_context_three_requests_and_instructions() {
}

// Set up a mock server that we can inspect after the run.
let server = start_mock_server().await;
let server = MockServer::start().await;

// SSE 1: assistant replies normally so it is recorded in history.
let sse1 = sse(vec![
@@ -81,19 +119,19 @@ async fn summarize_context_three_requests_and_instructions() {
body.contains("\"text\":\"hello world\"")
&& !body.contains(&format!("\"text\":\"{SUMMARIZE_TRIGGER}\""))
};
responses::mount_sse_once(&server, first_matcher, sse1).await;
mount_sse_once(&server, first_matcher, sse1).await;

let second_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(&format!("\"text\":\"{SUMMARIZE_TRIGGER}\""))
};
responses::mount_sse_once(&server, second_matcher, sse2).await;
mount_sse_once(&server, second_matcher, sse2).await;

let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(&format!("\"text\":\"{THIRD_USER_MSG}\""))
};
responses::mount_sse_once(&server, third_matcher, sse3).await;
mount_sse_once(&server, third_matcher, sse3).await;

// Build config pointing to the mock server and spawn Codex.
let model_provider = ModelProviderInfo {
@@ -103,14 +141,12 @@ async fn summarize_context_three_requests_and_instructions() {
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.model_auto_compact_token_limit = Some(200_000);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let NewConversation {
conversation: codex,
session_configured,
..
} = conversation_manager.new_conversation(config).await.unwrap();
let rollout_path = session_configured.rollout_path;
let codex = conversation_manager
.new_conversation(config)
.await
.unwrap()
.conversation;

// 1) Normal user input – should hit server once.
codex
@@ -158,7 +194,7 @@ async fn summarize_context_three_requests_and_instructions() {
"summarization should override base instructions"
);
assert!(
instr2.contains("You have exceeded the maximum number of tokens"),
instr2.contains("You are a summarization assistant"),
"summarization instructions not applied"
);

@@ -169,17 +205,14 @@ async fn summarize_context_three_requests_and_instructions() {
assert_eq!(last2.get("type").unwrap().as_str().unwrap(), "message");
assert_eq!(last2.get("role").unwrap().as_str().unwrap(), "user");
let text2 = last2["content"][0]["text"].as_str().unwrap();
assert!(
text2.contains(SUMMARIZE_TRIGGER),
"expected summarize trigger, got `{text2}`"
);
assert!(text2.contains(SUMMARIZE_TRIGGER));

// Third request must contain the refreshed instructions, bridge summary message and new user msg.
// Third request must contain only the summary from step 2 as prior history plus new user msg.
let input3 = body3.get("input").and_then(|v| v.as_array()).unwrap();
println!("third request body: {body3}");
assert!(
input3.len() >= 3,
"expected refreshed context and new user message in third request"
input3.len() >= 2,
"expected summary + new user message in third request"
);

// Collect all (role, text) message tuples.
@@ -195,626 +228,24 @@ async fn summarize_context_three_requests_and_instructions() {
}
}

// No previous assistant messages should remain and the new user message is present.
// Exactly one assistant message should remain after compaction and the new user message is present.
let assistant_count = messages.iter().filter(|(r, _)| r == "assistant").count();
assert_eq!(assistant_count, 0, "assistant history should be cleared");
assert_eq!(
assistant_count, 1,
"exactly one assistant message should remain after compaction"
);
assert!(
messages
.iter()
.any(|(r, t)| r == "user" && t == THIRD_USER_MSG),
"third request should include the new user message"
);
let Some((_, bridge_text)) = messages.iter().find(|(role, text)| {
role == "user"
&& (text.contains("Here were the user messages")
|| text.contains("Here are all the user messages"))
&& text.contains(SUMMARY_TEXT)
}) else {
panic!("expected a bridge message containing the summary");
};
assert!(
bridge_text.contains("hello world"),
"bridge should capture earlier user messages"
!messages.iter().any(|(_, t)| t.contains("hello world")),
"third request should not include the original user input"
);
assert!(
!bridge_text.contains(SUMMARIZE_TRIGGER),
"bridge text should not echo the summarize trigger"
);
assert!(
!messages
.iter()
.any(|(_, text)| text.contains(SUMMARIZE_TRIGGER)),
!messages.iter().any(|(_, t)| t.contains(SUMMARIZE_TRIGGER)),
"third request should not include the summarize trigger"
);

// Shut down Codex to flush rollout entries before inspecting the file.
codex.submit(Op::Shutdown).await.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;

// Verify rollout contains APITurn entries for each API call and a Compacted entry.
println!("rollout path: {}", rollout_path.display());
let text = std::fs::read_to_string(&rollout_path).unwrap_or_else(|e| {
panic!(
"failed to read rollout file {}: {e}",
rollout_path.display()
)
});
let mut api_turn_count = 0usize;
let mut saw_compacted_summary = false;
for line in text.lines() {
let trimmed = line.trim();
if trimmed.is_empty() {
continue;
}
let Ok(entry): Result<RolloutLine, _> = serde_json::from_str(trimmed) else {
continue;
};
match entry.item {
RolloutItem::TurnContext(_) => {
api_turn_count += 1;
}
RolloutItem::Compacted(ci) => {
if ci.message == SUMMARY_TEXT {
saw_compacted_summary = true;
}
}
_ => {}
}
}

assert!(
api_turn_count == 3,
"expected three APITurn entries in rollout"
);
assert!(
saw_compacted_summary,
"expected a Compacted entry containing the summarizer output"
);
}

// Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts.
#[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))]
#[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))]
async fn auto_compact_runs_after_token_limit_hit() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}

let server = start_mock_server().await;

let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
responses::ev_completed_with_tokens("r1", 70_000),
]);

let sse2 = sse(vec![
ev_assistant_message("m2", "SECOND_REPLY"),
responses::ev_completed_with_tokens("r2", 330_000),
]);

let sse3 = sse(vec![
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
responses::ev_completed_with_tokens("r3", 200),
]);

let first_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(FIRST_AUTO_MSG)
&& !body.contains(SECOND_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(first_matcher)
.respond_with(responses::sse_response(sse1))
.mount(&server)
.await;

let second_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(SECOND_AUTO_MSG)
&& body.contains(FIRST_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(second_matcher)
.respond_with(responses::sse_response(sse2))
.mount(&server)
.await;

let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(third_matcher)
.respond_with(responses::sse_response(sse3))
.mount(&server)
.await;

let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};

let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.model_auto_compact_token_limit = Some(200_000);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
.new_conversation(config)
.await
.unwrap()
.conversation;

codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: FIRST_AUTO_MSG.into(),
}],
})
.await
.unwrap();

wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: SECOND_AUTO_MSG.into(),
}],
})
.await
.unwrap();

wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

let requests = server.received_requests().await.unwrap();
assert!(
requests.len() >= 3,
"auto compact should add at least a third request, got {}",
requests.len()
);
let is_auto_compact = |req: &wiremock::Request| {
std::str::from_utf8(&req.body)
.unwrap_or("")
.contains("You have exceeded the maximum number of tokens")
};
let auto_compact_count = requests.iter().filter(|req| is_auto_compact(req)).count();
assert_eq!(
auto_compact_count, 1,
"expected exactly one auto compact request"
);
let auto_compact_index = requests
.iter()
.enumerate()
.find_map(|(idx, req)| is_auto_compact(req).then_some(idx))
.expect("auto compact request missing");
assert_eq!(
auto_compact_index, 2,
"auto compact should add a third request"
);

let body3 = requests[auto_compact_index]
.body_json::<serde_json::Value>()
.unwrap();
let instructions = body3
.get("instructions")
.and_then(|v| v.as_str())
.unwrap_or_default();
assert!(
instructions.contains("You have exceeded the maximum number of tokens"),
"auto compact should reuse summarization instructions"
);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compact_persists_rollout_entries() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}

let server = start_mock_server().await;

let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
responses::ev_completed_with_tokens("r1", 70_000),
]);

let sse2 = sse(vec![
ev_assistant_message("m2", "SECOND_REPLY"),
responses::ev_completed_with_tokens("r2", 330_000),
]);

let sse3 = sse(vec![
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
responses::ev_completed_with_tokens("r3", 200),
]);

let first_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(FIRST_AUTO_MSG)
&& !body.contains(SECOND_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(first_matcher)
.respond_with(responses::sse_response(sse1))
.mount(&server)
.await;

let second_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(SECOND_AUTO_MSG)
&& body.contains(FIRST_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(second_matcher)
.respond_with(responses::sse_response(sse2))
.mount(&server)
.await;

let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(third_matcher)
.respond_with(responses::sse_response(sse3))
.mount(&server)
.await;

let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};

let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let NewConversation {
conversation: codex,
session_configured,
..
} = conversation_manager.new_conversation(config).await.unwrap();

codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: FIRST_AUTO_MSG.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: SECOND_AUTO_MSG.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

codex.submit(Op::Shutdown).await.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;

let rollout_path = session_configured.rollout_path;
let text = std::fs::read_to_string(&rollout_path).unwrap_or_else(|e| {
panic!(
"failed to read rollout file {}: {e}",
rollout_path.display()
)
});

let mut turn_context_count = 0usize;
for line in text.lines() {
let trimmed = line.trim();
if trimmed.is_empty() {
continue;
}
let Ok(entry): Result<RolloutLine, _> = serde_json::from_str(trimmed) else {
continue;
};
match entry.item {
RolloutItem::TurnContext(_) => {
turn_context_count += 1;
}
RolloutItem::Compacted(_) => {}
_ => {}
}
}

assert!(
turn_context_count >= 2,
"expected at least two turn context entries, got {turn_context_count}"
);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compact_stops_after_failed_attempt() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}

let server = start_mock_server().await;

let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
responses::ev_completed_with_tokens("r1", 500),
]);

let sse2 = sse(vec![
ev_assistant_message("m2", SUMMARY_TEXT),
responses::ev_completed_with_tokens("r2", 50),
]);

let sse3 = sse(vec![
ev_assistant_message("m3", STILL_TOO_BIG_REPLY),
responses::ev_completed_with_tokens("r3", 500),
]);

let first_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains(FIRST_AUTO_MSG)
&& !body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(first_matcher)
.respond_with(responses::sse_response(sse1.clone()))
.mount(&server)
.await;

let second_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
body.contains("You have exceeded the maximum number of tokens")
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(second_matcher)
.respond_with(responses::sse_response(sse2.clone()))
.mount(&server)
.await;

let third_matcher = |req: &wiremock::Request| {
let body = std::str::from_utf8(&req.body).unwrap_or("");
!body.contains("You have exceeded the maximum number of tokens")
&& body.contains(SUMMARY_TEXT)
};
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(third_matcher)
.respond_with(responses::sse_response(sse3.clone()))
.mount(&server)
.await;

let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};

let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.model_auto_compact_token_limit = Some(200);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
.new_conversation(config)
.await
.unwrap()
.conversation;

codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: FIRST_AUTO_MSG.into(),
}],
})
.await
.unwrap();

let error_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Error(_))).await;
let EventMsg::Error(ErrorEvent { message }) = error_event else {
panic!("expected error event");
};
assert!(
message.contains("limit"),
"error message should include limit information: {message}"
);
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

let requests = server.received_requests().await.unwrap();
assert_eq!(
requests.len(),
3,
"auto compact should attempt at most one summarization before erroring"
);

let last_body = requests[2].body_json::<serde_json::Value>().unwrap();
let instructions = last_body
.get("instructions")
.and_then(|v| v.as_str())
.unwrap_or_default();
assert!(
!instructions.contains("You have exceeded the maximum number of tokens"),
"third request should be the follow-up turn, not another summarization"
);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_events() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}

let server = start_mock_server().await;

let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
responses::ev_completed_with_tokens("r1", 500),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", FIRST_AUTO_SUMMARY),
responses::ev_completed_with_tokens("r2", 50),
]);
let sse3 = sse(vec![
responses::ev_function_call(DUMMY_CALL_ID, DUMMY_FUNCTION_NAME, "{}"),
responses::ev_completed_with_tokens("r3", 150),
]);
let sse4 = sse(vec![
ev_assistant_message("m4", SECOND_LARGE_REPLY),
responses::ev_completed_with_tokens("r4", 450),
]);
let sse5 = sse(vec![
ev_assistant_message("m5", SECOND_AUTO_SUMMARY),
responses::ev_completed_with_tokens("r5", 60),
]);
let sse6 = sse(vec![
ev_assistant_message("m6", FINAL_REPLY),
responses::ev_completed_with_tokens("r6", 120),
]);

#[derive(Clone)]
struct SeqResponder {
bodies: Arc<Vec<String>>,
calls: Arc<AtomicUsize>,
requests: Arc<Mutex<Vec<Vec<u8>>>>,
}

impl SeqResponder {
fn new(bodies: Vec<String>) -> Self {
Self {
bodies: Arc::new(bodies),
calls: Arc::new(AtomicUsize::new(0)),
requests: Arc::new(Mutex::new(Vec::new())),
}
}

fn recorded_requests(&self) -> Vec<Vec<u8>> {
self.requests.lock().unwrap().clone()
}
}

impl Respond for SeqResponder {
fn respond(&self, req: &Request) -> ResponseTemplate {
let idx = self.calls.fetch_add(1, Ordering::SeqCst);
self.requests.lock().unwrap().push(req.body.clone());
let body = self
.bodies
.get(idx)
.unwrap_or_else(|| panic!("unexpected request index {idx}"))
.clone();
ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(body, "text/event-stream")
}
}
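
// SeqResponder serves the queued SSE bodies strictly in arrival order and
// records each raw request body, so the assertions below can inspect the
// exact request sequence without a separate wiremock matcher per request.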

let responder = SeqResponder::new(vec![sse1, sse2, sse3, sse4, sse5, sse6]);
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(responder.clone())
.expect(6)
.mount(&server)
.await;

let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};

let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.model_auto_compact_token_limit = Some(200);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
.new_conversation(config)
.await
.unwrap()
.conversation;

codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: MULTI_AUTO_MSG.into(),
}],
})
.await
.unwrap();

let mut auto_compact_lifecycle_events = Vec::new();
loop {
let event = codex.next_event().await.unwrap();
if event.id.starts_with("auto-compact-")
&& matches!(
event.msg,
EventMsg::TaskStarted(_) | EventMsg::TaskComplete(_)
)
{
auto_compact_lifecycle_events.push(event);
continue;
}
if let EventMsg::TaskComplete(_) = &event.msg
&& !event.id.starts_with("auto-compact-")
{
break;
}
}

assert!(
auto_compact_lifecycle_events.is_empty(),
"auto compact should not emit task lifecycle events"
);

let request_bodies: Vec<String> = responder
.recorded_requests()
.into_iter()
.map(|body| String::from_utf8(body).unwrap_or_default())
.collect();
assert_eq!(
request_bodies.len(),
6,
"expected six requests including two auto compactions"
);
assert!(
request_bodies[0].contains(MULTI_AUTO_MSG),
"first request should contain the user input"
);
assert!(
request_bodies[1].contains("You have exceeded the maximum number of tokens"),
"first auto compact request should use summarization instructions"
);
assert!(
request_bodies[3].contains(&format!("unsupported call: {DUMMY_FUNCTION_NAME}")),
"function call output should be sent before the second auto compact"
);
assert!(
request_bodies[4].contains("You have exceeded the maximum number of tokens"),
"second auto compact request should reuse summarization instructions"
);
}

@@ -1,837 +0,0 @@
#![allow(clippy::expect_used)]

//! Integration tests that cover compacting, resuming, and forking conversations.
//!
//! Each test sets up a mocked SSE conversation and drives the conversation through
//! a specific sequence of operations. After every operation we capture the
//! request payload that Codex would send to the model and assert that the
//! model-visible history matches the expected sequence of messages.

use super::compact::FIRST_REPLY;
use super::compact::SUMMARIZE_TRIGGER;
use super::compact::SUMMARY_TEXT;
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use core_test_support::load_default_config_for_test;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::sse;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use std::sync::Arc;
use tempfile::TempDir;
use wiremock::MockServer;

const AFTER_SECOND_RESUME: &str = "AFTER_SECOND_RESUME";

fn network_disabled() -> bool {
std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok()
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
/// Scenario: compact an initial conversation, resume it, fork one turn back, and
/// ensure the model-visible history matches expectations at each request.
async fn compact_resume_and_fork_preserve_model_history_view() {
if network_disabled() {
println!("Skipping test because network is disabled in this sandbox");
return;
}

// 1. Arrange mocked SSE responses for the initial compact/resume/fork flow.
let server = MockServer::start().await;
mount_initial_flow(&server).await;

// 2. Start a new conversation and drive it through the compact/resume/fork steps.
let (_home, config, manager, base) = start_test_conversation(&server).await;

user_turn(&base, "hello world").await;
compact_conversation(&base).await;
user_turn(&base, "AFTER_COMPACT").await;
let base_path = fetch_conversation_path(&base, "base conversation").await;
assert!(
base_path.exists(),
"compact+resume test expects base path {base_path:?} to exist",
);

let resumed = resume_conversation(&manager, &config, base_path).await;
user_turn(&resumed, "AFTER_RESUME").await;
let resumed_path = fetch_conversation_path(&resumed, "resumed conversation").await;
assert!(
resumed_path.exists(),
"compact+resume test expects resumed path {resumed_path:?} to exist",
);

let forked = fork_conversation(&manager, &config, resumed_path, 2).await;
user_turn(&forked, "AFTER_FORK").await;

// 3. Capture the requests to the model and validate the history slices.
let requests = gather_request_bodies(&server).await;

// input after compact is a prefix of input after resume/fork
let input_after_compact = json!(requests[requests.len() - 3]["input"]);
let input_after_resume = json!(requests[requests.len() - 2]["input"]);
let input_after_fork = json!(requests[requests.len() - 1]["input"]);

let compact_arr = input_after_compact
.as_array()
.expect("input after compact should be an array");
let resume_arr = input_after_resume
.as_array()
.expect("input after resume should be an array");
let fork_arr = input_after_fork
.as_array()
.expect("input after fork should be an array");

assert!(
compact_arr.len() <= resume_arr.len(),
"after-resume input should have at least as many items as after-compact",
);
assert_eq!(compact_arr.as_slice(), &resume_arr[..compact_arr.len()]);

assert!(
compact_arr.len() <= fork_arr.len(),
"after-fork input should have at least as many items as after-compact",
);
assert_eq!(
&compact_arr.as_slice()[..compact_arr.len()],
&fork_arr[..compact_arr.len()]
);

let prompt = requests[0]["instructions"]
.as_str()
.unwrap_or_default()
.to_string();
let user_instructions = requests[0]["input"][0]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
let environment_context = requests[0]["input"][1]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
let tool_calls = json!(requests[0]["tools"].as_array());
let prompt_cache_key = requests[0]["prompt_cache_key"]
.as_str()
.unwrap_or_default()
.to_string();
let fork_prompt_cache_key = requests[requests.len() - 1]["prompt_cache_key"]
.as_str()
.unwrap_or_default()
.to_string();
let user_turn_1 = json!(
{
"model": "gpt-5",
"instructions": prompt,
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "hello world"
}
]
}
],
"tools": tool_calls,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
},
"store": false,
"stream": true,
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
});
let compact_1 = json!(
{
"model": "gpt-5",
"instructions": "You have exceeded the maximum number of tokens, please stop coding and instead write a short memento message for the next agent. Your note should:
- Summarize what you finished and what still needs work. If there was a recent update_plan call, repeat its steps verbatim.
- List outstanding TODOs with file paths / line numbers so they're easy to find.
- Flag code that needs more tests (edge cases, performance, integration, etc.).
- Record any open bugs, quirks, or setup steps that will make it easier for the next agent to pick up where you left off.",
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "hello world"
}
]
},
{
"type": "message",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "FIRST_REPLY"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "Start Summarization"
}
]
}
],
"tools": [],
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
},
"store": false,
"stream": true,
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
});
let user_turn_2_after_compact = json!(
{
"model": "gpt-5",
"instructions": prompt,
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:

hello world

Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:

SUMMARY_ONLY_CONTEXT"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_COMPACT"
}
]
}
],
"tools": tool_calls,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
},
"store": false,
"stream": true,
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
});
let usert_turn_3_after_resume = json!(
{
"model": "gpt-5",
"instructions": prompt,
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:

hello world

Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:

SUMMARY_ONLY_CONTEXT"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_COMPACT"
}
]
},
{
"type": "message",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "AFTER_COMPACT_REPLY"
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "AFTER_RESUME"
}
]
}
],
"tools": tool_calls,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
},
"store": false,
"stream": true,
"include": [
"reasoning.encrypted_content"
],
"prompt_cache_key": prompt_cache_key
});
|
||||
let user_turn_3_after_fork = json!(
|
||||
{
|
||||
"model": "gpt-5",
|
||||
"instructions": prompt,
|
||||
"input": [
|
||||
{
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "input_text",
|
||||
"text": user_instructions
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "input_text",
|
||||
"text": environment_context
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "input_text",
|
||||
"text": "You were originally given instructions from a user over one or more turns. Here were the user messages:
|
||||
|
||||
hello world
|
||||
|
||||
Another language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:
|
||||
|
||||
SUMMARY_ONLY_CONTEXT"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "input_text",
|
||||
"text": "AFTER_COMPACT"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"content": [
|
||||
{
|
||||
"type": "output_text",
|
||||
"text": "AFTER_COMPACT_REPLY"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "input_text",
|
||||
"text": "AFTER_FORK"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"tools": tool_calls,
|
||||
"tool_choice": "auto",
|
||||
"parallel_tool_calls": false,
|
||||
"reasoning": {
|
||||
"summary": "auto"
|
||||
},
|
||||
"store": false,
|
||||
"stream": true,
|
||||
"include": [
|
||||
"reasoning.encrypted_content"
|
||||
],
|
||||
"prompt_cache_key": fork_prompt_cache_key
|
||||
});
|
||||
let expected = json!([
|
||||
user_turn_1,
|
||||
compact_1,
|
||||
user_turn_2_after_compact,
|
||||
usert_turn_3_after_resume,
|
||||
user_turn_3_after_fork
|
||||
]);
|
||||
assert_eq!(requests.len(), 5);
|
||||
assert_eq!(json!(requests), expected);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
/// Scenario: after the forked branch is compacted, resuming again should reuse
/// the compacted history and only append the new user message.
async fn compact_resume_after_second_compaction_preserves_history() {
    if network_disabled() {
        println!("Skipping test because network is disabled in this sandbox");
        return;
    }

    // 1. Arrange mocked SSE responses for the initial flow plus the second compact.
    let server = MockServer::start().await;
    mount_initial_flow(&server).await;
    mount_second_compact_flow(&server).await;

    // 2. Drive the conversation through compact -> resume -> fork -> compact -> resume.
    let (_home, config, manager, base) = start_test_conversation(&server).await;

    user_turn(&base, "hello world").await;
    compact_conversation(&base).await;
    user_turn(&base, "AFTER_COMPACT").await;
    let base_path = fetch_conversation_path(&base, "base conversation").await;
    assert!(
        base_path.exists(),
        "second compact test expects base path {base_path:?} to exist",
    );

    let resumed = resume_conversation(&manager, &config, base_path).await;
    user_turn(&resumed, "AFTER_RESUME").await;
    let resumed_path = fetch_conversation_path(&resumed, "resumed conversation").await;
    assert!(
        resumed_path.exists(),
        "second compact test expects resumed path {resumed_path:?} to exist",
    );

    let forked = fork_conversation(&manager, &config, resumed_path, 3).await;
    user_turn(&forked, "AFTER_FORK").await;

    compact_conversation(&forked).await;
    user_turn(&forked, "AFTER_COMPACT_2").await;
    let forked_path = fetch_conversation_path(&forked, "forked conversation").await;
    assert!(
        forked_path.exists(),
        "second compact test expects forked path {forked_path:?} to exist",
    );

    let resumed_again = resume_conversation(&manager, &config, forked_path).await;
    user_turn(&resumed_again, AFTER_SECOND_RESUME).await;

    let requests = gather_request_bodies(&server).await;
    let input_after_compact = json!(requests[requests.len() - 2]["input"]);
    let input_after_resume = json!(requests[requests.len() - 1]["input"]);

    // The input sent right after the second compact (before resuming) must be a
    // prefix of the input sent after the resume.
    let compact_input_array = input_after_compact
        .as_array()
        .expect("input after compact should be an array");
    let resume_input_array = input_after_resume
        .as_array()
        .expect("input after resume should be an array");
    assert!(
        compact_input_array.len() <= resume_input_array.len(),
        "after-resume input should have at least as many items as after-compact"
    );
    assert_eq!(
        compact_input_array.as_slice(),
        &resume_input_array[..compact_input_array.len()]
    );
    // Hard-coded check of the final request body.
    let prompt = requests[0]["instructions"]
        .as_str()
        .unwrap_or_default()
        .to_string();
    let user_instructions = requests[0]["input"][0]["content"][0]["text"]
        .as_str()
        .unwrap_or_default()
        .to_string();
    let environment_instructions = requests[0]["input"][1]["content"][0]["text"]
        .as_str()
        .unwrap_or_default()
        .to_string();

    let expected = json!([
        {
            "instructions": prompt,
            "input": [
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        {
                            "type": "input_text",
                            "text": user_instructions
                        }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        {
                            "type": "input_text",
                            "text": environment_instructions
                        }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        {
                            "type": "input_text",
                            "text": "You were originally given instructions from a user over one or more turns. Here were the user messages:\n\nAFTER_FORK\n\nAnother language model started to solve this problem and produced a summary of its thinking process. You also have access to the state of the tools that were used by that language model. Use this to build on the work that has already been done and avoid duplicating work. Here is the summary produced by the other language model, use the information in this summary to assist with your own analysis:\n\nSUMMARY_ONLY_CONTEXT"
                        }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        {
                            "type": "input_text",
                            "text": "AFTER_COMPACT_2"
                        }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        {
                            "type": "input_text",
                            "text": "AFTER_SECOND_RESUME"
                        }
                    ]
                }
            ],
        }
    ]);
    let last_request_after_2_compacts = json!([{
        "instructions": requests[requests.len() - 1]["instructions"],
        "input": requests[requests.len() - 1]["input"],
    }]);
    assert_eq!(expected, last_request_after_2_compacts);
}

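/// Recursively rewrites CRLF/CR to LF in every string of a JSON value so that
/// request-body comparisons stay stable across platforms (notably Windows).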
fn normalize_line_endings(value: &mut Value) {
    match value {
        Value::String(text) => {
            if text.contains('\r') {
                *text = text.replace("\r\n", "\n").replace('\r', "\n");
            }
        }
        Value::Array(items) => {
            for item in items {
                normalize_line_endings(item);
            }
        }
        Value::Object(map) => {
            for item in map.values_mut() {
                normalize_line_endings(item);
            }
        }
        _ => {}
    }
}

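/// Collects every request body the mock server has received, with line endings
/// normalized so the JSON comparisons above are platform-independent.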
async fn gather_request_bodies(server: &MockServer) -> Vec<Value> {
    server
        .received_requests()
        .await
        .expect("mock server should not fail")
        .into_iter()
        .map(|req| {
            let mut value = req.body_json::<Value>().expect("valid JSON body");
            normalize_line_endings(&mut value);
            value
        })
        .collect()
}

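/// Mounts the five SSE responses for the initial flow: the first user turn,
/// the first compact, and the turns submitted after compact, resume, and fork.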
async fn mount_initial_flow(server: &MockServer) {
    let sse1 = sse(vec![
        ev_assistant_message("m1", FIRST_REPLY),
        ev_completed("r1"),
    ]);
    let sse2 = sse(vec![
        ev_assistant_message("m2", SUMMARY_TEXT),
        ev_completed("r2"),
    ]);
    let sse3 = sse(vec![
        ev_assistant_message("m3", "AFTER_COMPACT_REPLY"),
        ev_completed("r3"),
    ]);
    let sse4 = sse(vec![ev_completed("r4")]);
    let sse5 = sse(vec![ev_completed("r5")]);

    let match_first = |req: &wiremock::Request| {
        let body = std::str::from_utf8(&req.body).unwrap_or("");
        body.contains("\"text\":\"hello world\"")
            && !body.contains(&format!("\"text\":\"{SUMMARIZE_TRIGGER}\""))
            && !body.contains("\"text\":\"AFTER_COMPACT\"")
            && !body.contains("\"text\":\"AFTER_RESUME\"")
            && !body.contains("\"text\":\"AFTER_FORK\"")
    };
    mount_sse_once(server, match_first, sse1).await;

    let match_compact = |req: &wiremock::Request| {
        let body = std::str::from_utf8(&req.body).unwrap_or("");
        body.contains(&format!("\"text\":\"{SUMMARIZE_TRIGGER}\""))
    };
    mount_sse_once(server, match_compact, sse2).await;

    let match_after_compact = |req: &wiremock::Request| {
        let body = std::str::from_utf8(&req.body).unwrap_or("");
        body.contains("\"text\":\"AFTER_COMPACT\"")
            && !body.contains("\"text\":\"AFTER_RESUME\"")
            && !body.contains("\"text\":\"AFTER_FORK\"")
    };
    mount_sse_once(server, match_after_compact, sse3).await;

    let match_after_resume = |req: &wiremock::Request| {
        let body = std::str::from_utf8(&req.body).unwrap_or("");
        body.contains("\"text\":\"AFTER_RESUME\"")
    };
    mount_sse_once(server, match_after_resume, sse4).await;

    let match_after_fork = |req: &wiremock::Request| {
        let body = std::str::from_utf8(&req.body).unwrap_or("");
        body.contains("\"text\":\"AFTER_FORK\"")
    };
    mount_sse_once(server, match_after_fork, sse5).await;
}

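/// Mounts the SSE responses for the second compact (on the forked branch) and
/// for the turn submitted after resuming from it.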
async fn mount_second_compact_flow(server: &MockServer) {
    let sse6 = sse(vec![
        ev_assistant_message("m4", SUMMARY_TEXT),
        ev_completed("r6"),
    ]);
    let sse7 = sse(vec![ev_completed("r7")]);

    let match_second_compact = |req: &wiremock::Request| {
        let body = std::str::from_utf8(&req.body).unwrap_or("");
        body.contains(&format!("\"text\":\"{SUMMARIZE_TRIGGER}\"")) && body.contains("AFTER_FORK")
    };
    mount_sse_once(server, match_second_compact, sse6).await;

    let match_after_second_resume = |req: &wiremock::Request| {
        let body = std::str::from_utf8(&req.body).unwrap_or("");
        body.contains(&format!("\"text\":\"{AFTER_SECOND_RESUME}\""))
    };
    mount_sse_once(server, match_after_second_resume, sse7).await;
}

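/// Spins up a fresh conversation wired to the mock server. The returned TempDir
/// must be kept alive for the duration of the test so the config home persists.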
async fn start_test_conversation(
    server: &MockServer,
) -> (TempDir, Config, ConversationManager, Arc<CodexConversation>) {
    let model_provider = ModelProviderInfo {
        base_url: Some(format!("{}/v1", server.uri())),
        ..built_in_model_providers()["openai"].clone()
    };
    let home = TempDir::new().expect("create temp dir");
    let mut config = load_default_config_for_test(&home);
    config.model_provider = model_provider;

    let manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
    let NewConversation { conversation, .. } = manager
        .new_conversation(config.clone())
        .await
        .expect("create conversation");

    (home, config, manager, conversation)
}

async fn user_turn(conversation: &Arc<CodexConversation>, text: &str) {
    conversation
        .submit(Op::UserInput {
            items: vec![InputItem::Text { text: text.into() }],
        })
        .await
        .expect("submit user turn");
    wait_for_event(conversation, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}

async fn compact_conversation(conversation: &Arc<CodexConversation>) {
    conversation
        .submit(Op::Compact)
        .await
        .expect("compact conversation");
    wait_for_event(conversation, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}

async fn fetch_conversation_path(
    conversation: &Arc<CodexConversation>,
    context: &str,
) -> std::path::PathBuf {
    conversation
        .submit(Op::GetPath)
        .await
        .expect("request conversation path");
    match wait_for_event(conversation, |ev| {
        matches!(ev, EventMsg::ConversationPath(_))
    })
    .await
    {
        EventMsg::ConversationPath(ConversationPathResponseEvent { path, .. }) => path,
        _ => panic!("expected ConversationPath event for {context}"),
    }
}

async fn resume_conversation(
    manager: &ConversationManager,
    config: &Config,
    path: std::path::PathBuf,
) -> Arc<CodexConversation> {
    let auth_manager =
        codex_core::AuthManager::from_auth_for_testing(CodexAuth::from_api_key("dummy"));
    let NewConversation { conversation, .. } = manager
        .resume_conversation_from_rollout(config.clone(), path, auth_manager)
        .await
        .expect("resume conversation");
    conversation
}

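/// Forks the rollout at the given user-message index; the new conversation's
/// history is truncated strictly before that message.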
#[cfg(test)]
async fn fork_conversation(
    manager: &ConversationManager,
    config: &Config,
    path: std::path::PathBuf,
    nth_user_message: usize,
) -> Arc<CodexConversation> {
    let NewConversation { conversation, .. } = manager
        .fork_conversation(nth_user_message, config.clone(), path)
        .await
        .expect("fork conversation");
    conversation
}

@@ -39,7 +39,7 @@ async fn run_test_cmd(tmp: TempDir, cmd: Vec<&str>) -> Result<ExecToolCallOutput

    let policy = SandboxPolicy::new_read_only_policy();

    process_exec_tool_call(params, sandbox_type, &policy, tmp.path(), &None, None).await
    process_exec_tool_call(params, sandbox_type, &policy, &None, None).await
}

/// Command succeeds with exit code 0 normally

@@ -2,11 +2,8 @@

use std::collections::HashMap;
use std::path::PathBuf;
use std::time::Duration;

use async_channel::Receiver;
use codex_core::error::CodexErr;
use codex_core::error::SandboxErr;
use codex_core::exec::ExecParams;
use codex_core::exec::SandboxType;
use codex_core::exec::StdoutStream;
@@ -49,10 +46,9 @@ async fn test_exec_stdout_stream_events_echo() {
        "printf 'hello-world\n'".to_string(),
    ];

    let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
    let params = ExecParams {
        command: cmd,
        cwd: cwd.clone(),
        cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
        timeout_ms: Some(5_000),
        env: HashMap::new(),
        with_escalated_permissions: None,
@@ -65,7 +61,6 @@ async fn test_exec_stdout_stream_events_echo() {
        params,
        SandboxType::None,
        &policy,
        cwd.as_path(),
        &None,
        Some(stdout_stream),
    )
@@ -101,10 +96,9 @@ async fn test_exec_stderr_stream_events_echo() {
        "printf 'oops\n' 1>&2".to_string(),
    ];

    let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
    let params = ExecParams {
        command: cmd,
        cwd: cwd.clone(),
        cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
        timeout_ms: Some(5_000),
        env: HashMap::new(),
        with_escalated_permissions: None,
@@ -117,7 +111,6 @@ async fn test_exec_stderr_stream_events_echo() {
        params,
        SandboxType::None,
        &policy,
        cwd.as_path(),
        &None,
        Some(stdout_stream),
    )
@@ -156,10 +149,9 @@ async fn test_aggregated_output_interleaves_in_order() {
        "printf 'O1\\n'; sleep 0.01; printf 'E1\\n' 1>&2; sleep 0.01; printf 'O2\\n'; sleep 0.01; printf 'E2\\n' 1>&2".to_string(),
    ];

    let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
    let params = ExecParams {
        command: cmd,
        cwd: cwd.clone(),
        cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
        timeout_ms: Some(5_000),
        env: HashMap::new(),
        with_escalated_permissions: None,
@@ -168,16 +160,9 @@ async fn test_aggregated_output_interleaves_in_order() {

    let policy = SandboxPolicy::new_read_only_policy();

    let result = process_exec_tool_call(
        params,
        SandboxType::None,
        &policy,
        cwd.as_path(),
        &None,
        None,
    )
    .await
    .expect("process_exec_tool_call");
    let result = process_exec_tool_call(params, SandboxType::None, &policy, &None, None)
        .await
        .expect("process_exec_tool_call");

    assert_eq!(result.exit_code, 0);
    assert_eq!(result.stdout.text, "O1\nO2\n");
@@ -185,45 +170,3 @@ async fn test_aggregated_output_interleaves_in_order() {
    assert_eq!(result.aggregated_output.text, "O1\nE1\nO2\nE2\n");
    assert_eq!(result.aggregated_output.truncated_after_lines, None);
}

#[tokio::test]
async fn test_exec_timeout_returns_partial_output() {
    let cmd = vec![
        "/bin/sh".to_string(),
        "-c".to_string(),
        "printf 'before\\n'; sleep 2; printf 'after\\n'".to_string(),
    ];

    let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
    let params = ExecParams {
        command: cmd,
        cwd: cwd.clone(),
        timeout_ms: Some(200),
        env: HashMap::new(),
        with_escalated_permissions: None,
        justification: None,
    };

    let policy = SandboxPolicy::new_read_only_policy();

    let result = process_exec_tool_call(
        params,
        SandboxType::None,
        &policy,
        cwd.as_path(),
        &None,
        None,
    )
    .await;

    let Err(CodexErr::Sandbox(SandboxErr::Timeout { output })) = result else {
        panic!("expected timeout error");
    };

    assert_eq!(output.exit_code, 124);
    assert_eq!(output.stdout.text, "before\n");
    assert!(output.stderr.text.is_empty());
    assert_eq!(output.aggregated_output.text, "before\n");
    assert!(output.duration >= Duration::from_millis(200));
    assert!(output.timed_out);
}

@@ -5,8 +5,6 @@ use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::ResponseItem;
use codex_core::built_in_model_providers;
use codex_core::content_items_to_text;
use codex_core::is_session_prefix_message;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
@@ -106,16 +104,13 @@ async fn fork_conversation_twice_drops_to_first_message() {
        items
    };

    // Compute expected prefixes after each fork by truncating base rollout
    // strictly before the nth user input (0-based).
    // Compute expected prefixes after each fork by truncating base rollout at nth-from-last user input.
    let base_items = read_items(&base_path);
    let find_user_input_positions = |items: &[RolloutItem]| -> Vec<usize> {
        let mut pos = Vec::new();
        for (i, it) in items.iter().enumerate() {
            if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. }) = it
                && role == "user"
                && content_items_to_text(content)
                    .is_some_and(|text| !is_session_prefix_message(&text))
            {
                // Consider any user message as an input boundary; recorder stores both EventMsg and ResponseItem.
                // We specifically look for input items, which are represented as ContentItem::InputText.
@@ -131,8 +126,11 @@ async fn fork_conversation_twice_drops_to_first_message() {
    };
    let user_inputs = find_user_input_positions(&base_items);

    // After cutting at nth user input (n=1 → second user message), cut strictly before that input.
    let cut1 = user_inputs.get(1).copied().unwrap_or(0);
    // After dropping last user input (n=1), cut strictly before that input if present, else empty.
    let cut1 = user_inputs
        .get(user_inputs.len().saturating_sub(1))
        .copied()
        .unwrap_or(0);
    let expected_after_first: Vec<RolloutItem> = base_items[..cut1].to_vec();

    // After dropping again (n=1 on fork1), compute expected relative to fork1's rollout.
@@ -163,12 +161,12 @@ async fn fork_conversation_twice_drops_to_first_message() {
        serde_json::to_value(&expected_after_first).unwrap()
    );

    // Fork again with n=0 → drops the (new) last user message, leaving only the first.
    // Fork again with n=1 → drops the (new) last user message, leaving only the first.
    let NewConversation {
        conversation: codex_fork2,
        ..
    } = conversation_manager
        .fork_conversation(0, config_for_fork.clone(), fork1_path.clone())
        .fork_conversation(1, config_for_fork.clone(), fork1_path.clone())
        .await
        .expect("fork 2");

@@ -3,15 +3,11 @@
mod cli_stream;
mod client;
mod compact;
mod compact_resume_fork;
mod exec;
mod exec_stream_events;
mod fork_conversation;
mod live_cli;
mod model_overrides;
mod prompt_caching;
mod review;
mod rollout_list_find;
mod seatbelt;
mod stream_error_allows_next_turn;
mod stream_no_completed;

@@ -1,92 +0,0 @@
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol_config_types::ReasoningEffort;
use core_test_support::load_default_config_for_test;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use tempfile::TempDir;

const CONFIG_TOML: &str = "config.toml";

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn override_turn_context_does_not_persist_when_config_exists() {
    let codex_home = TempDir::new().unwrap();
    let config_path = codex_home.path().join(CONFIG_TOML);
    let initial_contents = "model = \"gpt-4o\"\n";
    tokio::fs::write(&config_path, initial_contents)
        .await
        .expect("seed config.toml");

    let mut config = load_default_config_for_test(&codex_home);
    config.model = "gpt-4o".to_string();

    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    let codex = conversation_manager
        .new_conversation(config)
        .await
        .expect("create conversation")
        .conversation;

    codex
        .submit(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: None,
            sandbox_policy: None,
            model: Some("o3".to_string()),
            effort: Some(Some(ReasoningEffort::High)),
            summary: None,
        })
        .await
        .expect("submit override");

    codex.submit(Op::Shutdown).await.expect("request shutdown");
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;

    let contents = tokio::fs::read_to_string(&config_path)
        .await
        .expect("read config.toml after override");
    assert_eq!(contents, initial_contents);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn override_turn_context_does_not_create_config_file() {
    let codex_home = TempDir::new().unwrap();
    let config_path = codex_home.path().join(CONFIG_TOML);
    assert!(
        !config_path.exists(),
        "test setup should start without config"
    );

    let config = load_default_config_for_test(&codex_home);

    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    let codex = conversation_manager
        .new_conversation(config)
        .await
        .expect("create conversation")
        .conversation;

    codex
        .submit(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: None,
            sandbox_policy: None,
            model: Some("o3".to_string()),
            effort: Some(Some(ReasoningEffort::Medium)),
            summary: None,
        })
        .await
        .expect("submit override");

    codex.submit(Op::Shutdown).await.expect("request shutdown");
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;

    assert!(
        !config_path.exists(),
        "override should not create config.toml"
    );
}
@@ -12,7 +12,6 @@ use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_core::shell::Shell;
use codex_core::shell::default_user_shell;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
@@ -24,30 +23,6 @@ use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

fn text_user_input(text: String) -> serde_json::Value {
    serde_json::json!({
        "type": "message",
        "role": "user",
        "content": [ { "type": "input_text", "text": text } ]
    })
}

fn default_env_context_str(cwd: &str, shell: &Shell) -> String {
    format!(
        r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>on-request</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
{}</environment_context>"#,
        cwd,
        match shell.name() {
            Some(name) => format!(" <shell>{name}</shell>\n"),
            None => String::new(),
        }
    )
}

/// Build minimal SSE stream with completed marker using the JSON fixture.
fn sse_completed(id: &str) -> String {
    load_sse_fixture_with_id("tests/fixtures/completed_template.json", id)
@@ -216,7 +191,8 @@ async fn prompt_tools_are_consistent_across_requests() {
    let expected_instructions: &str = include_str!("../../prompt.md");
    // our internal implementation is responsible for keeping tools in sync
    // with the OpenAI schema, so we just verify the tool presence here
    let expected_tools_names: &[&str] = &["shell", "update_plan", "apply_patch", "view_image"];
    let expected_tools_names: &[&str] =
        &["unified_exec", "update_plan", "apply_patch", "view_image"];
    let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
    assert_eq!(
        body0["instructions"],
@@ -296,7 +272,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests

    let shell = default_user_shell().await;

    let expected_env_text = format!(
    let expected_env_text_init = format!(
        r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>on-request</approval_policy>
@@ -309,13 +285,28 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
            None => String::new(),
        }
    );
    // Per-turn environment context omits the shell tag.
    let expected_env_text_turn = format!(
        r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>on-request</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
</environment_context>"#,
        cwd.path().to_string_lossy(),
    );
    let expected_ui_text =
        "<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";

    let expected_env_msg = serde_json::json!({
    let expected_env_msg_init = serde_json::json!({
        "type": "message",
        "role": "user",
        "content": [ { "type": "input_text", "text": expected_env_text } ]
        "content": [ { "type": "input_text", "text": expected_env_text_init } ]
    });
    let expected_env_msg_turn = serde_json::json!({
        "type": "message",
        "role": "user",
        "content": [ { "type": "input_text", "text": expected_env_text_turn } ]
    });
    let expected_ui_msg = serde_json::json!({
        "type": "message",
@@ -331,7 +322,12 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
    let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
    assert_eq!(
        body1["input"],
        serde_json::json!([expected_ui_msg, expected_env_msg, expected_user_message_1])
        serde_json::json!([
            expected_ui_msg,
            expected_env_msg_init,
            expected_env_msg_turn,
            expected_user_message_1
        ])
    );

    let expected_user_message_2 = serde_json::json!({
@@ -343,7 +339,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
    let expected_body2 = serde_json::json!(
        [
            body1["input"].as_array().unwrap().as_slice(),
            [expected_user_message_2].as_slice(),
            [expected_env_msg_turn, expected_user_message_2].as_slice(),
        ]
        .concat()
    );
@@ -412,7 +408,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
                exclude_slash_tmp: true,
            }),
            model: Some("o3".to_string()),
            effort: Some(Some(ReasoningEffort::High)),
            effort: Some(ReasoningEffort::High),
            summary: Some(ReasoningSummary::Detailed),
        })
        .await
@@ -544,7 +540,7 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
                exclude_slash_tmp: true,
            },
            model: "o3".to_string(),
            effort: Some(ReasoningEffort::High),
            effort: ReasoningEffort::High,
            summary: ReasoningSummary::Detailed,
        })
        .await
@@ -577,12 +573,8 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
<approval_policy>never</approval_policy>
<sandbox_mode>workspace-write</sandbox_mode>
<network_access>enabled</network_access>
<writable_roots>
<root>{}</root>
</writable_roots>
</environment_context>"#,
        new_cwd.path().to_string_lossy(),
        writable.path().to_string_lossy(),
        new_cwd.path().to_string_lossy()
    );
    let expected_env_msg_2 = serde_json::json!({
        "type": "message",
@@ -598,235 +590,3 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
    );
    assert_eq!(body2["input"], expected_body2);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
    use pretty_assertions::assert_eq;

    let server = MockServer::start().await;

    let sse = sse_completed("resp");
    let template = ResponseTemplate::new(200)
        .insert_header("content-type", "text/event-stream")
        .set_body_raw(sse, "text/event-stream");

    Mock::given(method("POST"))
        .and(path("/v1/responses"))
        .respond_with(template)
        .expect(2)
        .mount(&server)
        .await;

    let model_provider = ModelProviderInfo {
        base_url: Some(format!("{}/v1", server.uri())),
        ..built_in_model_providers()["openai"].clone()
    };

    let cwd = TempDir::new().unwrap();
    let codex_home = TempDir::new().unwrap();
    let mut config = load_default_config_for_test(&codex_home);
    config.cwd = cwd.path().to_path_buf();
    config.model_provider = model_provider;
    config.user_instructions = Some("be consistent and helpful".to_string());

    let default_cwd = config.cwd.clone();
    let default_approval_policy = config.approval_policy;
    let default_sandbox_policy = config.sandbox_policy.clone();
    let default_model = config.model.clone();
    let default_effort = config.model_reasoning_effort;
    let default_summary = config.model_reasoning_summary;

    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    let codex = conversation_manager
        .new_conversation(config)
        .await
        .expect("create new conversation")
        .conversation;

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "hello 1".into(),
            }],
            cwd: default_cwd.clone(),
            approval_policy: default_approval_policy,
            sandbox_policy: default_sandbox_policy.clone(),
            model: default_model.clone(),
            effort: default_effort,
            summary: default_summary,
        })
        .await
        .unwrap();
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "hello 2".into(),
            }],
            cwd: default_cwd.clone(),
            approval_policy: default_approval_policy,
            sandbox_policy: default_sandbox_policy.clone(),
            model: default_model.clone(),
            effort: default_effort,
            summary: default_summary,
        })
        .await
        .unwrap();
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    let requests = server.received_requests().await.unwrap();
    assert_eq!(requests.len(), 2, "expected two POST requests");

    let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
    let body2 = requests[1].body_json::<serde_json::Value>().unwrap();

    let shell = default_user_shell().await;
    let expected_ui_text =
        "<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
    let expected_ui_msg = text_user_input(expected_ui_text.to_string());

    let expected_env_msg_1 = text_user_input(default_env_context_str(
        &cwd.path().to_string_lossy(),
        &shell,
    ));
    let expected_user_message_1 = text_user_input("hello 1".to_string());

    let expected_input_1 = serde_json::Value::Array(vec![
        expected_ui_msg.clone(),
        expected_env_msg_1.clone(),
        expected_user_message_1.clone(),
    ]);
    assert_eq!(body1["input"], expected_input_1);

    let expected_user_message_2 = text_user_input("hello 2".to_string());
    let expected_input_2 = serde_json::Value::Array(vec![
        expected_ui_msg,
        expected_env_msg_1,
        expected_user_message_1,
        expected_user_message_2,
    ]);
    assert_eq!(body2["input"], expected_input_2);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn send_user_turn_with_changes_sends_environment_context() {
    use pretty_assertions::assert_eq;

    let server = MockServer::start().await;

    let sse = sse_completed("resp");
    let template = ResponseTemplate::new(200)
        .insert_header("content-type", "text/event-stream")
        .set_body_raw(sse, "text/event-stream");

    Mock::given(method("POST"))
        .and(path("/v1/responses"))
        .respond_with(template)
        .expect(2)
        .mount(&server)
        .await;

    let model_provider = ModelProviderInfo {
        base_url: Some(format!("{}/v1", server.uri())),
        ..built_in_model_providers()["openai"].clone()
    };

    let cwd = TempDir::new().unwrap();
    let codex_home = TempDir::new().unwrap();
    let mut config = load_default_config_for_test(&codex_home);
    config.cwd = cwd.path().to_path_buf();
    config.model_provider = model_provider;
    config.user_instructions = Some("be consistent and helpful".to_string());

    let default_cwd = config.cwd.clone();
    let default_approval_policy = config.approval_policy;
    let default_sandbox_policy = config.sandbox_policy.clone();
    let default_model = config.model.clone();
    let default_effort = config.model_reasoning_effort;
    let default_summary = config.model_reasoning_summary;

    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    let codex = conversation_manager
        .new_conversation(config.clone())
        .await
        .expect("create new conversation")
        .conversation;

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "hello 1".into(),
            }],
            cwd: default_cwd.clone(),
            approval_policy: default_approval_policy,
            sandbox_policy: default_sandbox_policy.clone(),
            model: default_model,
            effort: default_effort,
            summary: default_summary,
        })
        .await
        .unwrap();
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "hello 2".into(),
            }],
            cwd: default_cwd.clone(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: "o3".to_string(),
            effort: Some(ReasoningEffort::High),
            summary: ReasoningSummary::Detailed,
        })
        .await
        .unwrap();
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    let requests = server.received_requests().await.unwrap();
    assert_eq!(requests.len(), 2, "expected two POST requests");

    let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
    let body2 = requests[1].body_json::<serde_json::Value>().unwrap();

    let shell = default_user_shell().await;
    let expected_ui_text =
        "<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
    let expected_ui_msg = serde_json::json!({
        "type": "message",
        "role": "user",
        "content": [ { "type": "input_text", "text": expected_ui_text } ]
    });
    let expected_env_text_1 = default_env_context_str(&default_cwd.to_string_lossy(), &shell);
    let expected_env_msg_1 = text_user_input(expected_env_text_1);
    let expected_user_message_1 = text_user_input("hello 1".to_string());
    let expected_input_1 = serde_json::Value::Array(vec![
        expected_ui_msg.clone(),
        expected_env_msg_1.clone(),
        expected_user_message_1.clone(),
    ]);
    assert_eq!(body1["input"], expected_input_1);

    let expected_env_msg_2 = text_user_input(format!(
        r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>never</approval_policy>
<sandbox_mode>danger-full-access</sandbox_mode>
<network_access>enabled</network_access>
</environment_context>"#,
        default_cwd.to_string_lossy()
    ));
    let expected_user_message_2 = text_user_input("hello 2".to_string());
    let expected_input_2 = serde_json::Value::Array(vec![
        expected_ui_msg,
        expected_env_msg_1,
        expected_user_message_1,
        expected_env_msg_2,
        expected_user_message_2,
    ]);
    assert_eq!(body2["input"], expected_input_2);
}

@@ -1,701 +0,0 @@
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ContentItem;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::REVIEW_PROMPT;
use codex_core::ResponseItem;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_core::protocol::ENVIRONMENT_CONTEXT_OPEN_TAG;
use codex_core::protocol::EventMsg;
use codex_core::protocol::ExitedReviewModeEvent;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::ReviewCodeLocation;
use codex_core::protocol::ReviewFinding;
use codex_core::protocol::ReviewLineRange;
use codex_core::protocol::ReviewOutputEvent;
use codex_core::protocol::ReviewRequest;
use codex_core::protocol::RolloutItem;
use codex_core::protocol::RolloutLine;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id_from_str;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use std::path::PathBuf;
use std::sync::Arc;
use tempfile::TempDir;
use tokio::io::AsyncWriteExt as _;
use uuid::Uuid;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

/// Verify that submitting `Op::Review` spawns a child task and emits
/// EnteredReviewMode -> ExitedReviewMode(Some(..)) -> TaskComplete
/// in that order when the model returns a structured review JSON payload.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn review_op_emits_lifecycle_and_review_output() {
|
||||
// Skip under Codex sandbox network restrictions.
|
||||
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||
println!(
|
||||
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Start mock Responses API server. Return a single assistant message whose
|
||||
// text is a JSON-encoded ReviewOutputEvent.
|
||||
let review_json = serde_json::json!({
|
||||
"findings": [
|
||||
{
|
||||
"title": "Prefer Stylize helpers",
|
||||
"body": "Use .dim()/.bold() chaining instead of manual Style where possible.",
|
||||
"confidence_score": 0.9,
|
||||
"priority": 1,
|
||||
"code_location": {
|
||||
"absolute_file_path": "/tmp/file.rs",
|
||||
"line_range": {"start": 10, "end": 20}
|
||||
}
|
||||
}
|
||||
],
|
||||
"overall_correctness": "good",
|
||||
"overall_explanation": "All good with some improvements suggested.",
|
||||
"overall_confidence_score": 0.8
|
||||
})
|
||||
.to_string();
|
||||
let sse_template = r#"[
|
||||
{"type":"response.output_item.done", "item":{
|
||||
"type":"message", "role":"assistant",
|
||||
"content":[{"type":"output_text","text":__REVIEW__}]
|
||||
}},
|
||||
{"type":"response.completed", "response": {"id": "__ID__"}}
|
||||
]"#;
|
||||
let review_json_escaped = serde_json::to_string(&review_json).unwrap();
|
||||
let sse_raw = sse_template.replace("__REVIEW__", &review_json_escaped);
|
||||
let server = start_responses_server_with_sse(&sse_raw, 1).await;
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await;
|
||||
|
||||
// Submit review request.
|
||||
codex
|
||||
.submit(Op::Review {
|
||||
review_request: ReviewRequest {
|
||||
prompt: "Please review my changes".to_string(),
|
||||
user_facing_hint: "my changes".to_string(),
|
||||
},
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Verify lifecycle: Entered -> Exited(Some(review)) -> TaskComplete.
|
||||
let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
|
||||
let closed = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExitedReviewMode(_))).await;
|
||||
let review = match closed {
|
||||
EventMsg::ExitedReviewMode(ev) => ev
|
||||
.review_output
|
||||
.expect("expected ExitedReviewMode with Some(review_output)"),
|
||||
other => panic!("expected ExitedReviewMode(..), got {other:?}"),
|
||||
};
|
||||
|
||||
// Deep compare full structure using PartialEq (floats are f32 on both sides).
|
||||
let expected = ReviewOutputEvent {
|
||||
findings: vec![ReviewFinding {
|
||||
title: "Prefer Stylize helpers".to_string(),
|
||||
body: "Use .dim()/.bold() chaining instead of manual Style where possible.".to_string(),
|
||||
confidence_score: 0.9,
|
||||
priority: 1,
|
||||
code_location: ReviewCodeLocation {
|
||||
absolute_file_path: PathBuf::from("/tmp/file.rs"),
|
||||
line_range: ReviewLineRange { start: 10, end: 20 },
|
||||
},
|
||||
}],
|
||||
overall_correctness: "good".to_string(),
|
||||
overall_explanation: "All good with some improvements suggested.".to_string(),
|
||||
overall_confidence_score: 0.8,
|
||||
};
|
||||
assert_eq!(expected, review);
|
||||
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
// Also verify that a user message with the header and a formatted finding
|
||||
// was recorded back in the parent session's rollout.
|
||||
codex.submit(Op::GetPath).await.unwrap();
|
||||
let history_event =
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ConversationPath(_))).await;
|
||||
let path = match history_event {
|
||||
EventMsg::ConversationPath(ConversationPathResponseEvent { path, .. }) => path,
|
||||
other => panic!("expected ConversationPath event, got {other:?}"),
|
||||
};
|
||||
let text = std::fs::read_to_string(&path).expect("read rollout file");
|
||||
|
||||
let mut saw_header = false;
|
||||
let mut saw_finding_line = false;
|
||||
for line in text.lines() {
|
||||
if line.trim().is_empty() {
|
||||
continue;
|
||||
}
|
||||
let v: serde_json::Value = serde_json::from_str(line).expect("jsonl line");
|
||||
let rl: RolloutLine = serde_json::from_value(v).expect("rollout line");
|
||||
if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. }) = rl.item
|
||||
&& role == "user"
|
||||
{
|
||||
for c in content {
|
||||
if let ContentItem::InputText { text } = c {
|
||||
if text.contains("full review output from reviewer model") {
|
||||
saw_header = true;
|
||||
}
|
||||
if text.contains("- Prefer Stylize helpers — /tmp/file.rs:10-20") {
|
||||
saw_finding_line = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
assert!(saw_header, "user header missing from rollout");
|
||||
assert!(
|
||||
saw_finding_line,
|
||||
"formatted finding line missing from rollout"
|
||||
);
|
||||
|
||||
server.verify().await;
|
||||
}
|
||||
|
||||
/// When the model returns plain text that is not JSON, ensure the child
|
||||
/// lifecycle still occurs and the plain text is surfaced via
|
||||
/// ExitedReviewMode(Some(..)) as the overall_explanation.
|
||||
// Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts.
|
||||
#[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))]
|
||||
#[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))]
|
||||
async fn review_op_with_plain_text_emits_review_fallback() {
|
||||
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||
println!(
|
||||
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let sse_raw = r#"[
|
||||
{"type":"response.output_item.done", "item":{
|
||||
"type":"message", "role":"assistant",
|
||||
"content":[{"type":"output_text","text":"just plain text"}]
|
||||
}},
|
||||
{"type":"response.completed", "response": {"id": "__ID__"}}
|
||||
]"#;
|
||||
let server = start_responses_server_with_sse(sse_raw, 1).await;
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await;
|
||||
|
||||
codex
|
||||
.submit(Op::Review {
|
||||
review_request: ReviewRequest {
|
||||
prompt: "Plain text review".to_string(),
|
||||
user_facing_hint: "plain text review".to_string(),
|
||||
},
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
|
||||
let closed = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExitedReviewMode(_))).await;
|
||||
let review = match closed {
|
||||
EventMsg::ExitedReviewMode(ev) => ev
|
||||
.review_output
|
||||
.expect("expected ExitedReviewMode with Some(review_output)"),
|
||||
other => panic!("expected ExitedReviewMode(..), got {other:?}"),
|
||||
};
|
||||
|
||||
// Expect a structured fallback carrying the plain text.
|
||||
let expected = ReviewOutputEvent {
|
||||
overall_explanation: "just plain text".to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
assert_eq!(expected, review);
|
||||
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
server.verify().await;
|
||||
}
|
||||
|
||||
/// When the model returns structured JSON in a review, ensure no AgentMessage
|
||||
/// is emitted; the UI consumes the structured result via ExitedReviewMode.
|
||||
// Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts.
|
||||
#[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))]
|
||||
#[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))]
|
||||
async fn review_does_not_emit_agent_message_on_structured_output() {
|
||||
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||
println!(
|
||||
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let review_json = serde_json::json!({
|
||||
"findings": [
|
||||
{
|
||||
"title": "Example",
|
||||
"body": "Structured review output.",
|
||||
"confidence_score": 0.5,
|
||||
"priority": 1,
|
||||
"code_location": {
|
||||
"absolute_file_path": "/tmp/file.rs",
|
||||
"line_range": {"start": 1, "end": 2}
|
||||
}
|
||||
}
|
||||
],
|
||||
"overall_correctness": "ok",
|
||||
"overall_explanation": "ok",
|
||||
"overall_confidence_score": 0.5
|
||||
})
|
||||
.to_string();
|
||||
let sse_template = r#"[
|
||||
{"type":"response.output_item.done", "item":{
|
||||
"type":"message", "role":"assistant",
|
||||
"content":[{"type":"output_text","text":__REVIEW__}]
|
||||
}},
|
||||
{"type":"response.completed", "response": {"id": "__ID__"}}
|
||||
]"#;
|
||||
let review_json_escaped = serde_json::to_string(&review_json).unwrap();
|
||||
let sse_raw = sse_template.replace("__REVIEW__", &review_json_escaped);
|
||||
let server = start_responses_server_with_sse(&sse_raw, 1).await;
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await;
|
||||
|
||||
codex
|
||||
.submit(Op::Review {
|
||||
review_request: ReviewRequest {
|
||||
prompt: "check structured".to_string(),
|
||||
user_facing_hint: "check structured".to_string(),
|
||||
},
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Drain events until TaskComplete; ensure none are AgentMessage.
|
||||
use tokio::time::Duration;
|
||||
use tokio::time::timeout;
|
||||
let mut saw_entered = false;
|
||||
let mut saw_exited = false;
|
||||
loop {
|
||||
let ev = timeout(Duration::from_secs(5), codex.next_event())
|
||||
.await
|
||||
.expect("timeout waiting for event")
|
||||
.expect("stream ended unexpectedly");
|
||||
match ev.msg {
|
||||
EventMsg::TaskComplete(_) => break,
|
||||
EventMsg::AgentMessage(_) => {
|
||||
panic!("unexpected AgentMessage during review with structured output")
|
||||
}
|
||||
EventMsg::EnteredReviewMode(_) => saw_entered = true,
|
||||
EventMsg::ExitedReviewMode(_) => saw_exited = true,
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
assert!(saw_entered && saw_exited, "missing review lifecycle events");
|
||||
|
||||
server.verify().await;
|
||||
}
|
||||
|
||||
/// Ensure that when a custom `review_model` is set in the config, the review
|
||||
/// request uses that model (and not the main chat model).
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn review_uses_custom_review_model_from_config() {
|
||||
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||
println!(
|
||||
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Minimal stream: just a completed event
|
||||
let sse_raw = r#"[
|
||||
{"type":"response.completed", "response": {"id": "__ID__"}}
|
||||
]"#;
|
||||
let server = start_responses_server_with_sse(sse_raw, 1).await;
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
// Choose a review model different from the main model; ensure it is used.
|
||||
let codex = new_conversation_for_server(&server, &codex_home, |cfg| {
|
||||
cfg.model = "gpt-4.1".to_string();
|
||||
cfg.review_model = "gpt-5".to_string();
|
||||
})
|
||||
.await;
|
||||
|
||||
codex
|
||||
.submit(Op::Review {
|
||||
review_request: ReviewRequest {
|
||||
prompt: "use custom model".to_string(),
|
||||
user_facing_hint: "use custom model".to_string(),
|
||||
},
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Wait for completion
|
||||
let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
|
||||
let _closed = wait_for_event(&codex, |ev| {
|
||||
matches!(
|
||||
ev,
|
||||
EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
|
||||
review_output: None
|
||||
})
|
||||
)
|
||||
})
|
||||
.await;
|
||||
let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
// Assert the request body model equals the configured review model
|
||||
let request = &server.received_requests().await.unwrap()[0];
|
||||
let body = request.body_json::<serde_json::Value>().unwrap();
|
||||
assert_eq!(body["model"].as_str().unwrap(), "gpt-5");
|
||||
|
||||
server.verify().await;
|
||||
}
|
||||
|
||||
/// When a review session begins, it must not prepend prior chat history from
|
||||
/// the parent session. The request `input` should contain only the review
|
||||
/// prompt from the user.
|
||||
// Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts.
|
||||
#[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))]
|
||||
#[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))]
|
||||
async fn review_input_isolated_from_parent_history() {
|
||||
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||
println!(
|
||||
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Mock server for the single review request
|
||||
let sse_raw = r#"[
|
||||
{"type":"response.completed", "response": {"id": "__ID__"}}
|
||||
]"#;
|
||||
    let server = start_responses_server_with_sse(sse_raw, 1).await;

    // Seed a parent session history via resume file with both user + assistant items.
    let codex_home = TempDir::new().unwrap();
    let mut config = load_default_config_for_test(&codex_home);
    config.model_provider = ModelProviderInfo {
        base_url: Some(format!("{}/v1", server.uri())),
        ..built_in_model_providers()["openai"].clone()
    };

    let session_file = codex_home.path().join("resume.jsonl");
    {
        let mut f = tokio::fs::File::create(&session_file).await.unwrap();
        let convo_id = Uuid::new_v4();
        // Proper session_meta line (enveloped) with a conversation id
        let meta_line = serde_json::json!({
            "timestamp": "2024-01-01T00:00:00.000Z",
            "type": "session_meta",
            "payload": {
                "id": convo_id,
                "timestamp": "2024-01-01T00:00:00Z",
                "instructions": null,
                "cwd": ".",
                "originator": "test_originator",
                "cli_version": "test_version"
            }
        });
        f.write_all(format!("{meta_line}\n").as_bytes())
            .await
            .unwrap();

        // Prior user message (enveloped response_item)
        let user = codex_protocol::models::ResponseItem::Message {
            id: None,
            role: "user".to_string(),
            content: vec![codex_protocol::models::ContentItem::InputText {
                text: "parent: earlier user message".to_string(),
            }],
        };
        let user_json = serde_json::to_value(&user).unwrap();
        let user_line = serde_json::json!({
            "timestamp": "2024-01-01T00:00:01.000Z",
            "type": "response_item",
            "payload": user_json
        });
        f.write_all(format!("{user_line}\n").as_bytes())
            .await
            .unwrap();

        // Prior assistant message (enveloped response_item)
        let assistant = codex_protocol::models::ResponseItem::Message {
            id: None,
            role: "assistant".to_string(),
            content: vec![codex_protocol::models::ContentItem::OutputText {
                text: "parent: assistant reply".to_string(),
            }],
        };
        let assistant_json = serde_json::to_value(&assistant).unwrap();
        let assistant_line = serde_json::json!({
            "timestamp": "2024-01-01T00:00:02.000Z",
            "type": "response_item",
            "payload": assistant_json
        });
        f.write_all(format!("{assistant_line}\n").as_bytes())
            .await
            .unwrap();
    }
    let codex =
        resume_conversation_for_server(&server, &codex_home, session_file.clone(), |_| {}).await;

    // Submit review request; it must start fresh (no parent history in `input`).
    let review_prompt = "Please review only this".to_string();
    codex
        .submit(Op::Review {
            review_request: ReviewRequest {
                prompt: review_prompt.clone(),
                user_facing_hint: review_prompt.clone(),
            },
        })
        .await
        .unwrap();

    let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
    let _closed = wait_for_event(&codex, |ev| {
        matches!(
            ev,
            EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
                review_output: None
            })
        )
    })
    .await;
    let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    // Assert the request `input` contains the environment context followed by the review prompt.
    let request = &server.received_requests().await.unwrap()[0];
    let body = request.body_json::<serde_json::Value>().unwrap();
    let input = body["input"].as_array().expect("input array");
    assert_eq!(
        input.len(),
        2,
        "expected environment context and review prompt"
    );

    let env_msg = &input[0];
    assert_eq!(env_msg["type"].as_str().unwrap(), "message");
    assert_eq!(env_msg["role"].as_str().unwrap(), "user");
    let env_text = env_msg["content"][0]["text"].as_str().expect("env text");
    assert!(
        env_text.starts_with(ENVIRONMENT_CONTEXT_OPEN_TAG),
        "environment context must be the first item"
    );
    assert!(
        env_text.contains("<cwd>"),
        "environment context should include cwd"
    );

    let review_msg = &input[1];
    assert_eq!(review_msg["type"].as_str().unwrap(), "message");
    assert_eq!(review_msg["role"].as_str().unwrap(), "user");
    assert_eq!(
        review_msg["content"][0]["text"].as_str().unwrap(),
        format!("{REVIEW_PROMPT}\n\n---\n\nNow, here's your task: Please review only this",)
    );

    // Also verify that a user interruption note was recorded in the rollout.
    codex.submit(Op::GetPath).await.unwrap();
    let history_event =
        wait_for_event(&codex, |ev| matches!(ev, EventMsg::ConversationPath(_))).await;
    let path = match history_event {
        EventMsg::ConversationPath(ConversationPathResponseEvent { path, .. }) => path,
        other => panic!("expected ConversationPath event, got {other:?}"),
    };
    let text = std::fs::read_to_string(&path).expect("read rollout file");
    let mut saw_interruption_message = false;
    for line in text.lines() {
        if line.trim().is_empty() {
            continue;
        }
        let v: serde_json::Value = serde_json::from_str(line).expect("jsonl line");
        let rl: RolloutLine = serde_json::from_value(v).expect("rollout line");
        if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. }) = rl.item
            && role == "user"
        {
            for c in content {
                if let ContentItem::InputText { text } = c
                    && text.contains("User initiated a review task, but was interrupted.")
                {
                    saw_interruption_message = true;
                    break;
                }
            }
        }
        if saw_interruption_message {
            break;
        }
    }
    assert!(
        saw_interruption_message,
        "expected user interruption message in rollout"
    );

    server.verify().await;
}

/// After a review thread finishes, its conversation should not leak into the
/// parent session. A subsequent parent turn must not include any review
/// messages in its request `input`.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_history_does_not_leak_into_parent_session() {
    if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
        println!(
            "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
        );
        return;
    }

    // Respond to both the review request and the subsequent parent request.
    let sse_raw = r#"[
        {"type":"response.output_item.done", "item":{
            "type":"message", "role":"assistant",
            "content":[{"type":"output_text","text":"review assistant output"}]
        }},
        {"type":"response.completed", "response": {"id": "__ID__"}}
    ]"#;
    let server = start_responses_server_with_sse(sse_raw, 2).await;
    let codex_home = TempDir::new().unwrap();
    let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await;

    // 1) Run a review turn that produces an assistant message (isolated in child).
    codex
        .submit(Op::Review {
            review_request: ReviewRequest {
                prompt: "Start a review".to_string(),
                user_facing_hint: "Start a review".to_string(),
            },
        })
        .await
        .unwrap();
    let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
    let _closed = wait_for_event(&codex, |ev| {
        matches!(
            ev,
            EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
                review_output: Some(_)
            })
        )
    })
    .await;
    let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    // 2) Continue in the parent session; request input must not include any review items.
    let followup = "back to parent".to_string();
    codex
        .submit(Op::UserInput {
            items: vec![InputItem::Text {
                text: followup.clone(),
            }],
        })
        .await
        .unwrap();
    let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    // Inspect the second request (parent turn) input contents.
    // Parent turns include session initial messages (user_instructions, environment_context).
    // Critically, no messages from the review thread should appear.
    let requests = server.received_requests().await.unwrap();
    assert_eq!(requests.len(), 2);
    let body = requests[1].body_json::<serde_json::Value>().unwrap();
    let input = body["input"].as_array().expect("input array");

    // Must include the followup as the last item for this turn
    let last = input.last().expect("at least one item in input");
    assert_eq!(last["role"].as_str().unwrap(), "user");
    let last_text = last["content"][0]["text"].as_str().unwrap();
    assert_eq!(last_text, followup);

    // Ensure no review-thread content leaked into the parent request
    let contains_review_prompt = input
        .iter()
        .any(|msg| msg["content"][0]["text"].as_str().unwrap_or_default() == "Start a review");
    let contains_review_assistant = input.iter().any(|msg| {
        msg["content"][0]["text"].as_str().unwrap_or_default() == "review assistant output"
    });
    assert!(
        !contains_review_prompt,
        "review prompt leaked into parent turn input"
    );
    assert!(
        !contains_review_assistant,
        "review assistant output leaked into parent turn input"
    );

    server.verify().await;
}

/// Start a mock Responses API server and mount the given SSE stream body.
async fn start_responses_server_with_sse(sse_raw: &str, expected_requests: usize) -> MockServer {
    let server = MockServer::start().await;
    let sse = load_sse_fixture_with_id_from_str(sse_raw, &Uuid::new_v4().to_string());
    Mock::given(method("POST"))
        .and(path("/v1/responses"))
        .respond_with(
            ResponseTemplate::new(200)
                .insert_header("content-type", "text/event-stream")
                .set_body_raw(sse.clone(), "text/event-stream"),
        )
        .expect(expected_requests as u64)
        .mount(&server)
        .await;
    server
}

/// Create a conversation configured to talk to the provided mock server.
#[expect(clippy::expect_used)]
async fn new_conversation_for_server<F>(
    server: &MockServer,
    codex_home: &TempDir,
    mutator: F,
) -> Arc<CodexConversation>
where
    F: FnOnce(&mut Config),
{
    let model_provider = ModelProviderInfo {
        base_url: Some(format!("{}/v1", server.uri())),
        ..built_in_model_providers()["openai"].clone()
    };
    let mut config = load_default_config_for_test(codex_home);
    config.model_provider = model_provider;
    mutator(&mut config);
    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    conversation_manager
        .new_conversation(config)
        .await
        .expect("create conversation")
        .conversation
}

/// Create a conversation resuming from a rollout file, configured to talk to the provided mock server.
#[expect(clippy::expect_used)]
async fn resume_conversation_for_server<F>(
    server: &MockServer,
    codex_home: &TempDir,
    resume_path: std::path::PathBuf,
    mutator: F,
) -> Arc<CodexConversation>
where
    F: FnOnce(&mut Config),
{
    let model_provider = ModelProviderInfo {
        base_url: Some(format!("{}/v1", server.uri())),
        ..built_in_model_providers()["openai"].clone()
    };
    let mut config = load_default_config_for_test(codex_home);
    config.model_provider = model_provider;
    mutator(&mut config);
    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    let auth_manager =
        codex_core::AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
    conversation_manager
        .resume_conversation_from_rollout(config, resume_path, auth_manager)
        .await
        .expect("resume conversation")
        .conversation
}

@@ -1,50 +0,0 @@
#![allow(clippy::unwrap_used, clippy::expect_used)]
use std::io::Write;
use std::path::PathBuf;

use codex_core::find_conversation_path_by_id_str;
use tempfile::TempDir;
use uuid::Uuid;

/// Create sessions/YYYY/MM/DD and write a minimal rollout file containing the
/// provided conversation id in the SessionMeta line. Returns the absolute path.
fn write_minimal_rollout_with_id(codex_home: &TempDir, id: Uuid) -> PathBuf {
    let sessions = codex_home.path().join("sessions/2024/01/01");
    std::fs::create_dir_all(&sessions).unwrap();

    let file = sessions.join(format!("rollout-2024-01-01T00-00-00-{id}.jsonl"));
    let mut f = std::fs::File::create(&file).unwrap();
    // Minimal first line: session_meta with the id so content search can find it
    writeln!(
        f,
        "{}",
        serde_json::json!({
            "timestamp": "2024-01-01T00:00:00.000Z",
            "type": "session_meta",
            "payload": {
                "id": id,
                "timestamp": "2024-01-01T00:00:00Z",
                "instructions": null,
                "cwd": ".",
                "originator": "test",
                "cli_version": "test"
            }
        })
    )
    .unwrap();

    file
}

#[tokio::test]
async fn find_locates_rollout_file_by_id() {
    let home = TempDir::new().unwrap();
    let id = Uuid::new_v4();
    let expected = write_minimal_rollout_with_id(&home, id);

    let found = find_conversation_path_by_id_str(home.path(), &id.to_string())
        .await
        .unwrap();

    assert_eq!(found.unwrap(), expected);
}

@@ -171,8 +171,6 @@ async fn python_getpwuid_works_under_seatbelt() {

    // ReadOnly is sufficient here since we are only exercising user lookup.
    let policy = SandboxPolicy::ReadOnly;
    let command_cwd = std::env::current_dir().expect("getcwd");
    let sandbox_cwd = command_cwd.clone();

    let mut child = spawn_command_under_seatbelt(
        vec![
@@ -181,9 +179,8 @@ async fn python_getpwuid_works_under_seatbelt() {
            // Print the passwd struct; success implies lookup worked.
            "import pwd, os; print(pwd.getpwuid(os.getuid()))".to_string(),
        ],
        command_cwd,
        &policy,
        sandbox_cwd.as_path(),
        std::env::current_dir().expect("should be able to get current dir"),
        StdioPolicy::RedirectForShellTool,
        HashMap::new(),
    )
@@ -219,16 +216,13 @@ fn create_test_scenario(tmp: &TempDir) -> TestScenario {
/// Note that `path` must be absolute.
async fn touch(path: &Path, policy: &SandboxPolicy) -> bool {
    assert!(path.is_absolute(), "Path must be absolute: {path:?}");
    let command_cwd = std::env::current_dir().expect("getcwd");
    let sandbox_cwd = command_cwd.clone();
    let mut child = spawn_command_under_seatbelt(
        vec![
            "/usr/bin/touch".to_string(),
            path.to_string_lossy().to_string(),
        ],
        command_cwd,
        policy,
        sandbox_cwd.as_path(),
        std::env::current_dir().expect("should be able to get current dir"),
        StdioPolicy::RedirectForShellTool,
        HashMap::new(),
    )

@@ -1,123 +0,0 @@
# Codex MCP Interface [experimental]

This document describes Codex’s experimental MCP interface: a JSON‑RPC API that runs over the Model Context Protocol (MCP) transport to control a local Codex engine.

- Status: experimental and subject to change without notice
- Server binary: `codex mcp` (or `codex-mcp-server`)
- Transport: standard MCP over stdio (JSON‑RPC 2.0, line‑delimited)

## Overview

Codex exposes a small set of MCP‑compatible methods to create and manage conversations, send user input, receive live events, and handle approval prompts. The types are defined in `protocol/src/mcp_protocol.rs` and re‑used by the MCP server implementation in `mcp-server/`.

At a glance:

- Conversations
  - `newConversation` → start a Codex session
  - `sendUserMessage` / `sendUserTurn` → send user input into a conversation
  - `interruptConversation` → stop the current turn
  - `listConversations`, `resumeConversation`, `archiveConversation`
- Configuration and info
  - `getUserSavedConfig`, `setDefaultModel`, `getUserAgent`, `userInfo`
- Auth
  - `loginApiKey`, `loginChatGpt`, `cancelLoginChatGpt`, `logoutChatGpt`, `getAuthStatus`
- Utilities
  - `gitDiffToRemote`, `execOneOffCommand`
- Approvals (server → client requests)
  - `applyPatchApproval`, `execCommandApproval`
- Notifications (server → client)
  - `loginChatGptComplete`, `authStatusChange`
  - `codex/event` stream with agent events

See code for full type definitions and exact shapes: `protocol/src/mcp_protocol.rs`.

## Starting the server

Run Codex as an MCP server and connect an MCP client:

```bash
codex mcp | your_mcp_client
```

For a simple inspection UI, you can also try:

```bash
npx @modelcontextprotocol/inspector codex mcp
```

## Conversations

Start a new session with optional overrides:

Request `newConversation` params (subset):

- `model`: string model id (e.g. "o3", "gpt-5")
- `profile`: optional named profile
- `cwd`: optional working directory
- `approvalPolicy`: `untrusted` | `on-request` | `on-failure` | `never`
- `sandbox`: `read-only` | `workspace-write` | `danger-full-access`
- `config`: map of additional config overrides
- `baseInstructions`: optional instruction override
- `includePlanTool` / `includeApplyPatchTool`: booleans

Response: `{ conversationId, model, reasoningEffort?, rolloutPath }`

Send input to the active turn:

- `sendUserMessage` → enqueue items to the conversation
- `sendUserTurn` → structured turn with explicit `cwd`, `approvalPolicy`, `sandboxPolicy`, `model`, optional `effort`, and `summary`
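
For instance, a `sendUserTurn` request might look like the following. This is an illustrative sketch, not a normative example: the field names mirror the list above, but the exact value shapes (notably `sandboxPolicy` and `summary`) are assumptions; consult `protocol/src/mcp_protocol.rs` for the authoritative schema.

```json
{
  "jsonrpc": "2.0",
  "id": 3,
  "method": "sendUserTurn",
  "params": {
    "conversationId": "c7b0…",
    "items": [{ "type": "text", "text": "Run the tests and fix any failures" }],
    "cwd": "/path/to/project",
    "approvalPolicy": "on-request",
    "sandboxPolicy": { "mode": "workspace-write" },
    "model": "gpt-5",
    "summary": "auto"
  }
}
```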

Interrupt a running turn: `interruptConversation`.

List/resume/archive: `listConversations`, `resumeConversation`, `archiveConversation`.

## Event stream

While a conversation runs, the server sends notifications:

- `codex/event` with the serialized Codex event payload. The shape matches `core/src/protocol.rs`’s `Event` and `EventMsg` types. Some notifications include a `_meta.requestId` to correlate with the originating request.
- Auth notifications via method names `loginChatGptComplete` and `authStatusChange`.

Clients should render events and, when present, surface approval requests (see next section).
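
For example, a `codex/event` notification for an agent message might look like this (an illustrative sketch; the concrete `msg` variants and field names are defined by `EventMsg` and may differ):

```json
{
  "jsonrpc": "2.0",
  "method": "codex/event",
  "params": {
    "id": "evt_42",
    "msg": { "type": "agent_message", "message": "Hello from Codex" },
    "_meta": { "requestId": 2 }
  }
}
```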

## Approvals (server → client)

When Codex needs approval to apply changes or run commands, the server issues JSON‑RPC requests to the client:

- `applyPatchApproval { conversationId, callId, fileChanges, reason?, grantRoot? }`
- `execCommandApproval { conversationId, callId, command, cwd, reason? }`

The client must reply with `{ decision: "allow" | "deny" }` for each request.
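
For example, an `execCommandApproval` exchange could look like this (shapes abbreviated for illustration; the `command` encoding shown here is an assumption):

```json
{ "jsonrpc": "2.0", "id": 7, "method": "execCommandApproval", "params": { "conversationId": "c7b0…", "callId": "call_1", "command": ["git", "status"], "cwd": "/path/to/project" } }
```

The client inspects the request and answers with its decision:

```json
{ "jsonrpc": "2.0", "id": 7, "result": { "decision": "allow" } }
```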

## Auth helpers

For ChatGPT or API‑key based auth flows, the server exposes helpers:

- `loginApiKey { apiKey }`
- `loginChatGpt` → returns `{ loginId, authUrl }`; browser completes flow; then `loginChatGptComplete` notification follows
- `cancelLoginChatGpt { loginId }`, `logoutChatGpt`, `getAuthStatus { includeToken?, refreshToken? }`
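
As an illustration, a ChatGPT login might proceed like this (response fields abbreviated; exact shapes live in `protocol/src/mcp_protocol.rs`):

```json
{ "jsonrpc": "2.0", "id": 4, "method": "loginChatGpt" }
```

```json
{ "jsonrpc": "2.0", "id": 4, "result": { "loginId": "login_1", "authUrl": "https://…" } }
```

The user completes the flow in the browser; the server then emits a `loginChatGptComplete` notification.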

## Example: start and send a message

```json
{ "jsonrpc": "2.0", "id": 1, "method": "newConversation", "params": { "model": "gpt-5", "approvalPolicy": "on-request" } }
```

Server responds:

```json
{ "jsonrpc": "2.0", "id": 1, "result": { "conversationId": "c7b0…", "model": "gpt-5", "rolloutPath": "/path/to/rollout.jsonl" } }
```

Then send input:

```json
{ "jsonrpc": "2.0", "id": 2, "method": "sendUserMessage", "params": { "conversationId": "c7b0…", "items": [{ "type": "text", "text": "Hello Codex" }] } }
```

While processing, the server emits `codex/event` notifications containing agent output, approvals, and status updates.

## Compatibility and stability

This interface is experimental. Method names, fields, and event shapes may evolve. For the authoritative schema, consult `protocol/src/mcp_protocol.rs` and the corresponding server wiring in `mcp-server/`.
@@ -38,8 +38,7 @@ tokio = { version = "1", features = [
    "signal",
] }
tracing = { version = "0.1.41", features = ["log"] }
tracing-subscriber = { version = "0.3.20", features = ["env-filter"] }
opentelemetry-appender-tracing = "0.30.1"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }

[dev-dependencies]
assert_cmd = "2"
@@ -47,6 +46,4 @@ core_test_support = { path = "../core/tests/common" }
libc = "0.2"
predicates = "3"
tempfile = "3.13.0"
uuid = "1"
walkdir = "2"
wiremock = "0.6"
@@ -6,10 +6,6 @@ use std::path::PathBuf;
#[derive(Parser, Debug)]
#[command(version)]
pub struct Cli {
    /// Action to perform. If omitted, runs a new non-interactive session.
    #[command(subcommand)]
    pub command: Option<Command>,

    /// Optional image(s) to attach to the initial prompt.
    #[arg(long = "image", short = 'i', value_name = "FILE", value_delimiter = ',', num_args = 1..)]
    pub images: Vec<PathBuf>,
@@ -73,28 +69,6 @@ pub struct Cli {
    pub prompt: Option<String>,
}

#[derive(Debug, clap::Subcommand)]
pub enum Command {
    /// Resume a previous session by id or pick the most recent with --last.
    Resume(ResumeArgs),
}

#[derive(Parser, Debug)]
pub struct ResumeArgs {
    /// Conversation/session id (UUID). When provided, resumes this session.
    /// If omitted, use --last to pick the most recent recorded session.
    #[arg(value_name = "SESSION_ID")]
    pub session_id: Option<String>,

    /// Resume the most recent recorded session (newest) without specifying an id.
    #[arg(long = "last", default_value_t = false, conflicts_with = "session_id")]
    pub last: bool,

    /// Prompt to send after resuming the session. If `-` is used, read from stdin.
    #[arg(value_name = "PROMPT")]
    pub prompt: Option<String>,
}

#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, ValueEnum)]
#[value(rename_all = "kebab-case")]
pub enum Color {
@@ -280,7 +280,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                parsed_cmd: _,
            }) => {
                self.call_id_to_command.insert(
                    call_id,
                    call_id.clone(),
                    ExecCommandBegin {
                        command: command.clone(),
                    },
@@ -382,7 +382,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                // Store metadata so we can calculate duration later when we
                // receive the corresponding PatchApplyEnd event.
                self.call_id_to_patch.insert(
                    call_id,
                    call_id.clone(),
                    PatchApplyBegin {
                        start_time: Instant::now(),
                        auto_approved,
@@ -520,7 +520,6 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                let SessionConfiguredEvent {
                    session_id: conversation_id,
                    model,
                    reasoning_effort: _,
                    history_log_id: _,
                    history_entry_count: _,
                    initial_messages: _,
@@ -558,15 +557,10 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                TurnAbortReason::Replaced => {
                    ts_println!(self, "task aborted: replaced by a new task");
                }
                TurnAbortReason::ReviewEnded => {
                    ts_println!(self, "task aborted: review ended");
                }
            },
            EventMsg::ShutdownComplete => return CodexStatus::Shutdown,
            EventMsg::ConversationPath(_) => {}
            EventMsg::UserMessage(_) => {}
            EventMsg::EnteredReviewMode(_) => {}
            EventMsg::ExitedReviewMode(_) => {}
        }
        CodexStatus::Running
    }

@@ -3,6 +3,10 @@ mod event_processor;
mod event_processor_with_human_output;
mod event_processor_with_json_output;

use std::io::IsTerminal;
use std::io::Read;
use std::path::PathBuf;

pub use cli::Cli;
use codex_core::AuthManager;
use codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID;
@@ -21,24 +25,16 @@ use codex_ollama::DEFAULT_OSS_MODEL;
use codex_protocol::config_types::SandboxMode;
use event_processor_with_human_output::EventProcessorWithHumanOutput;
use event_processor_with_json_output::EventProcessorWithJsonOutput;
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use std::io::IsTerminal;
use std::io::Read;
use std::path::PathBuf;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::prelude::*;

use crate::cli::Command as ExecCommand;
use crate::event_processor::CodexStatus;
use crate::event_processor::EventProcessor;
use codex_core::find_conversation_path_by_id_str;

pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> {
    let Cli {
        command,
        images,
        model: model_cli_arg,
        oss,
@@ -55,15 +51,8 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
        config_overrides,
    } = cli;

    // Determine the prompt source (parent or subcommand) and read from stdin if needed.
    let prompt_arg = match &command {
        // Allow prompt before the subcommand by falling back to the parent-level prompt
        // when the Resume subcommand did not provide its own prompt.
        Some(ExecCommand::Resume(args)) => args.prompt.clone().or(prompt),
        None => prompt,
    };

    let prompt = match prompt_arg {
    // Determine the prompt based on CLI arg and/or stdin.
    let prompt = match prompt {
        Some(p) if p != "-" => p,
        // Either `-` was passed or no positional arg.
        maybe_dash => {
@@ -106,18 +95,19 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
        ),
    };

    // Build fmt layer (existing logging) to compose with OTEL layer.
    // TODO(mbolin): Take a more thoughtful approach to logging.
    let default_level = "error";

    // Build env_filter separately and attach via with_filter.
    let env_filter = EnvFilter::try_from_default_env()
        .or_else(|_| EnvFilter::try_new(default_level))
        .unwrap_or_else(|_| EnvFilter::new(default_level));

    let fmt_layer = tracing_subscriber::fmt::layer()
    let _ = tracing_subscriber::fmt()
        // Fallback to the `default_level` log filter if the environment
        // variable is not set _or_ contains an invalid value
        .with_env_filter(
            EnvFilter::try_from_default_env()
                .or_else(|_| EnvFilter::try_new(default_level))
                .unwrap_or_else(|_| EnvFilter::new(default_level)),
        )
        .with_ansi(stderr_with_ansi)
        .with_writer(std::io::stderr)
        .with_filter(env_filter);
        .try_init();

    let sandbox_mode = if full_auto {
        Some(SandboxMode::WorkspaceWrite)
@@ -147,7 +137,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
    // Load configuration and determine approval policy
    let overrides = ConfigOverrides {
        model,
        review_model: None,
        config_profile,
        // This CLI is intended to be headless and has no affordances for asking
        // the user for approval.
@@ -173,31 +162,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
    };

    let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides)?;

    let otel = codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION"));

    #[allow(clippy::print_stderr)]
    let otel = match otel {
        Ok(otel) => otel,
        Err(e) => {
            eprintln!("Could not create otel exporter: {e}");
            std::process::exit(1);
        }
    };

    if let Some(provider) = otel {
        let otel_layer = OpenTelemetryTracingBridge::new(&provider.logger).with_filter(
            tracing_subscriber::filter::filter_fn(codex_core::otel_init::codex_export_filter),
        );

        let _ = tracing_subscriber::registry()
            .with(fmt_layer)
            .with(otel_layer)
            .try_init();
    } else {
        let _ = tracing_subscriber::registry().with(fmt_layer).try_init();
    }

    let mut event_processor: Box<dyn EventProcessor> = if json_mode {
        Box::new(EventProcessorWithJsonOutput::new(last_message_file.clone()))
    } else {
@@ -223,31 +187,15 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
        std::process::exit(1);
    }

    let conversation_manager =
        ConversationManager::new(AuthManager::shared(config.codex_home.clone()));

    // Handle resume subcommand by resolving a rollout path and using explicit resume API.
    let conversation_manager = ConversationManager::new(AuthManager::shared(
        config.codex_home.clone(),
        config.preferred_auth_method,
    ));
    let NewConversation {
        conversation_id: _,
        conversation,
        session_configured,
    } = if let Some(ExecCommand::Resume(args)) = command {
        let resume_path = resolve_resume_path(&config, &args).await?;

        if let Some(path) = resume_path {
            conversation_manager
                .resume_conversation_from_rollout(
                    config.clone(),
                    path,
                    AuthManager::shared(config.codex_home.clone()),
                )
                .await?
        } else {
            conversation_manager.new_conversation(config).await?
        }
    } else {
        conversation_manager.new_conversation(config).await?
    };
    } = conversation_manager.new_conversation(config).await?;
    info!("Codex initialized with event: {session_configured:?}");

    let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<Event>();
@@ -332,23 +280,3 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any

    Ok(())
}

async fn resolve_resume_path(
    config: &Config,
    args: &crate::cli::ResumeArgs,
) -> anyhow::Result<Option<PathBuf>> {
    if args.last {
        match codex_core::RolloutRecorder::list_conversations(&config.codex_home, 1, None).await {
            Ok(page) => Ok(page.items.first().map(|it| it.path.clone())),
            Err(e) => {
                error!("Error listing conversations: {e}");
                Ok(None)
            }
        }
    } else if let Some(id_str) = args.session_id.as_deref() {
        let path = find_conversation_path_by_id_str(&config.codex_home, id_str).await?;
        Ok(path)
    } else {
        Ok(None)
    }
}
@@ -1,10 +0,0 @@
event: response.created
data: {"type":"response.created","response":{"id":"resp1"}}

event: response.output_item.done
data: {"type":"response.output_item.done","item":{"type":"message","role":"assistant","content":[{"type":"output_text","text":"fixture hello"}]}}

event: response.completed
data: {"type":"response.completed","response":{"id":"resp1","output":[]}}
25 codex-rs/exec/tests/fixtures/sse_apply_patch_add.json vendored Normal file
@@ -0,0 +1,25 @@
[
  {
    "type": "response.output_item.done",
    "item": {
      "type": "custom_tool_call",
      "name": "apply_patch",
      "input": "*** Begin Patch\n*** Add File: test.md\n+Hello world\n*** End Patch",
      "call_id": "__ID__"
    }
  },
  {
    "type": "response.completed",
    "response": {
      "id": "__ID__",
      "usage": {
        "input_tokens": 0,
        "input_tokens_details": null,
        "output_tokens": 0,
        "output_tokens_details": null,
        "total_tokens": 0
      },
      "output": []
    }
  }
]
25 codex-rs/exec/tests/fixtures/sse_apply_patch_freeform_add.json vendored Normal file
@@ -0,0 +1,25 @@
[
  {
    "type": "response.output_item.done",
    "item": {
      "type": "custom_tool_call",
      "name": "apply_patch",
      "input": "*** Begin Patch\n*** Add File: app.py\n+class BaseClass:\n+ def method():\n+ return False\n*** End Patch",
      "call_id": "__ID__"
    }
  },
  {
    "type": "response.completed",
    "response": {
      "id": "__ID__",
      "usage": {
        "input_tokens": 0,
        "input_tokens_details": null,
        "output_tokens": 0,
        "output_tokens_details": null,
        "total_tokens": 0
      },
      "output": []
    }
  }
]
25 codex-rs/exec/tests/fixtures/sse_apply_patch_freeform_update.json vendored Normal file
@@ -0,0 +1,25 @@
[
  {
    "type": "response.output_item.done",
    "item": {
      "type": "custom_tool_call",
      "name": "apply_patch",
      "input": "*** Begin Patch\n*** Update File: app.py\n@@ def method():\n- return False\n+\n+ return True\n*** End Patch",
      "call_id": "__ID__"
    }
  },
  {
    "type": "response.completed",
    "response": {
      "id": "__ID__",
      "usage": {
        "input_tokens": 0,
        "input_tokens_details": null,
        "output_tokens": 0,
        "output_tokens_details": null,
        "total_tokens": 0
      },
      "output": []
    }
  }
]