Mirror of https://github.com/openai/codex.git, synced 2026-02-02 15:03:38 +00:00.
Compare commits: easong/rea... to patch-squa... (branch names truncated), 35 commits.
| Author | SHA1 | Date |
|---|---|---|

The author and date columns were empty in the listing; the 35 commit SHA1s are:
8aa5e7770c, 162e1235a8, c09ed74a16, 65f3528cad, 44262d8fd8, 95a9938d3a, f69f07b028, 8d766088e6, 87654ec0b7, 51d9e05de7, 8068cc75f8, acb28bf914, 97338de578, 5200b7a95d, 64e6c4afbb, 39db113cc9, 45bd5ca4b9, c13c3dadbf, 8636bff46d, 43809a454e, 5c48600bb3, de6559f2ab, 5bcc9d8b77, 5eab4c7ab4, f656e192bf, ee5ecae7c0, 58bb2048ac, ac8a3155d6, ace14e8d36, 2a76a08a9e, 16309d6b68, 62bd0e3d9d, a9c68ea270, ac58749bd3, 79cbd2ab1b
.github/workflows/rust-ci.yml (vendored): 33 lines changed
@@ -63,6 +63,24 @@ jobs:
       - name: cargo fmt
         run: cargo fmt -- --config imports_granularity=Item --check
 
+  cargo_shear:
+    name: cargo shear
+    runs-on: ubuntu-24.04
+    needs: changed
+    if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
+    defaults:
+      run:
+        working-directory: codex-rs
+    steps:
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
+      - uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
+        with:
+          tool: cargo-shear
+          version: 1.5.1
+      - name: cargo shear
+        run: cargo shear
+
   # --- CI to validate on different os/targets --------------------------------
   lint_build_test:
     name: ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
@@ -160,12 +178,17 @@ jobs:
           find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 \
             | xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo check --profile ${{ matrix.profile }}'
 
-      - name: cargo test
+      - uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
+        with:
+          tool: nextest
+          version: 0.9.103
+
+      - name: tests
         id: test
-        # `cargo test` takes too long for release builds to run them on every PR
+        # Tests take too long for release builds to run them on every PR.
         if: ${{ matrix.profile != 'release' }}
         continue-on-error: true
-        run: cargo test --all-features --target ${{ matrix.target }} --profile ${{ matrix.profile }}
+        run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }}
         env:
           RUST_BACKTRACE: 1
 
@@ -182,7 +205,7 @@ jobs:
   # --- Gatherer job that you mark as the ONLY required status -----------------
   results:
     name: CI results (required)
-    needs: [changed, general, lint_build_test]
+    needs: [changed, general, cargo_shear, lint_build_test]
     if: always()
     runs-on: ubuntu-24.04
     steps:
@@ -190,6 +213,7 @@ jobs:
         shell: bash
         run: |
           echo "general: ${{ needs.general.result }}"
+          echo "shear : ${{ needs.cargo_shear.result }}"
           echo "matrix : ${{ needs.lint_build_test.result }}"
 
           # If nothing relevant changed (PR touching only root README, etc.),
@@ -201,4 +225,5 @@ jobs:
 
           # Otherwise require the jobs to have succeeded
           [[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
+          [[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
           [[ '${{ needs.lint_build_test.result }}' == 'success' ]] || { echo 'matrix failed'; exit 1; }
codex-rs/Cargo.lock (generated): 815 lines changed. File diff suppressed because it is too large.
@@ -35,7 +35,7 @@ npx @modelcontextprotocol/inspector codex mcp
 
 You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS.
 
-### `codex exec` to run Codex programmatially/non-interactively
+### `codex exec` to run Codex programmatically/non-interactively
 
 To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the prompt via `stdin`) and Codex will work on your task until it decides that it is done and exits. Output is printed to the terminal directly. You can set the `RUST_LOG` environment variable to see more about what's going on.
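Since the docs above note that `codex exec` also accepts the prompt on `stdin`, the non-interactive mode can be driven from another program. A minimal sketch, assuming a `codex` binary on `PATH` (the prompt text is made up):

```rust
use std::io::Write;
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    // Spawn `codex exec` and feed the prompt via stdin instead of argv.
    let mut child = Command::new("codex")
        .arg("exec")
        .stdin(Stdio::piped())
        .spawn()?;
    child
        .stdin
        .take()
        .expect("stdin was requested above")
        .write_all(b"explain the failing test in this repo")?;
    // Dropping the stdin handle closes the pipe; then wait for Codex to finish.
    let status = child.wait()?;
    println!("codex exited with {status}");
    Ok(())
}
```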
@@ -726,9 +726,9 @@ fn compute_replacements(
             line_index = start_idx + pattern.len();
         } else {
             return Err(ApplyPatchError::ComputeReplacements(format!(
-                "Failed to find expected lines {:?} in {}",
-                chunk.old_lines,
-                path.display()
+                "Failed to find expected lines in {}:\n{}",
+                path.display(),
+                chunk.old_lines.join("\n"),
             )));
         }
     }
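The reworked error prints the missing lines verbatim rather than as a `{:?}` debug list. A standalone sketch of the new formatting, with hypothetical `path` and `old_lines` values:

```rust
use std::path::Path;

fn main() {
    // Hypothetical inputs standing in for `path` and `chunk.old_lines`.
    let old_lines = vec!["fn main() {".to_string(), "}".to_string()];
    let path = Path::new("src/main.rs");
    // Mirrors the new message shape: path first, expected lines verbatim after.
    let msg = format!(
        "Failed to find expected lines in {}:\n{}",
        path.display(),
        old_lines.join("\n"),
    );
    println!("{msg}");
}
```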
@@ -12,7 +12,6 @@ clap = { version = "4", features = ["derive"] }
 codex-common = { path = "../common", features = ["cli"] }
 codex-core = { path = "../core" }
 codex-protocol = { path = "../protocol" }
 reqwest = { version = "0.12", features = ["json", "stream"] }
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
 tokio = { version = "1", features = ["full"] }
@@ -31,7 +31,7 @@ pub async fn run_apply_command(
         ConfigOverrides::default(),
     )?;
 
-    init_chatgpt_token_from_auth(&config.codex_home, &config.responses_originator_header).await?;
+    init_chatgpt_token_from_auth(&config.codex_home).await?;
 
     let task_response = get_task(&config, apply_cli.task_id).await?;
     apply_diff_from_task(task_response, cwd).await
@@ -13,10 +13,10 @@ pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
     path: String,
 ) -> anyhow::Result<T> {
     let chatgpt_base_url = &config.chatgpt_base_url;
-    init_chatgpt_token_from_auth(&config.codex_home, &config.responses_originator_header).await?;
+    init_chatgpt_token_from_auth(&config.codex_home).await?;
 
     // Make direct HTTP request to ChatGPT backend API with the token
-    let client = create_client(&config.responses_originator_header);
+    let client = create_client();
     let url = format!("{chatgpt_base_url}{path}");
 
     let token =
@@ -19,11 +19,8 @@ pub fn set_chatgpt_token_data(value: TokenData) {
 }
 
 /// Initialize the ChatGPT token from auth.json file
-pub async fn init_chatgpt_token_from_auth(
-    codex_home: &Path,
-    originator: &str,
-) -> std::io::Result<()> {
-    let auth = CodexAuth::from_codex_home(codex_home, AuthMode::ChatGPT, originator)?;
+pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
+    let auth = CodexAuth::from_codex_home(codex_home, AuthMode::ChatGPT)?;
     if let Some(auth) = auth {
         let token_data = auth.get_token_data().await?;
         set_chatgpt_token_data(token_data);
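Callers now pass only the Codex home; a minimal sketch of the new one-argument call shape (the module path in the `use` is an assumption for illustration, not confirmed by the diff):

```rust
use std::path::Path;

// Hypothetical caller. Per the diff, the `originator` argument is gone and
// only the Codex home directory is required.
async fn ensure_chatgpt_token(codex_home: &Path) -> std::io::Result<()> {
    // Crate/module path assumed for this sketch.
    codex_chatgpt::chatgpt_token::init_chatgpt_token_from_auth(codex_home).await
}
```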
@@ -12,8 +12,8 @@ use codex_protocol::mcp_protocol::AuthMode;
 use std::env;
 use std::path::PathBuf;
 
-pub async fn login_with_chatgpt(codex_home: PathBuf, originator: String) -> std::io::Result<()> {
-    let opts = ServerOptions::new(codex_home, CLIENT_ID.to_string(), originator);
+pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
+    let opts = ServerOptions::new(codex_home, CLIENT_ID.to_string());
     let server = run_login_server(opts)?;
 
     eprintln!(
@@ -27,12 +27,7 @@ pub async fn login_with_chatgpt(codex_home: PathBuf, originator: String) -> std:
 pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! {
     let config = load_config_or_exit(cli_config_overrides);
 
-    match login_with_chatgpt(
-        config.codex_home,
-        config.responses_originator_header.clone(),
-    )
-    .await
-    {
+    match login_with_chatgpt(config.codex_home).await {
         Ok(_) => {
             eprintln!("Successfully logged in");
             std::process::exit(0);
@@ -65,11 +60,7 @@ pub async fn run_login_with_api_key(
 pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
     let config = load_config_or_exit(cli_config_overrides);
 
-    match CodexAuth::from_codex_home(
-        &config.codex_home,
-        config.preferred_auth_method,
-        &config.responses_originator_header,
-    ) {
+    match CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method) {
         Ok(Some(auth)) => match auth.mode {
             AuthMode::ApiKey => match auth.get_token().await {
                 Ok(api_key) => {
@@ -40,18 +40,6 @@ struct MultitoolCli {
     #[clap(flatten)]
     interactive: TuiCli,
 
-    /// Reasoning effort for the model (maps to model_reasoning_effort).
-    #[arg(long = "reasoning", value_parser = ["minimal", "low", "medium", "high"])]
-    reasoning: Option<String>,
-
-    /// Reasoning summary verbosity (maps to model_reasoning_summary).
-    #[arg(long = "reasoning-summary", value_parser = ["auto", "concise", "detailed", "none"])]
-    reasoning_summary: Option<String>,
-
-    /// Text verbosity for GPT‑5 models (maps to model_verbosity).
-    #[arg(long = "verbosity", value_parser = ["low", "medium", "high"])]
-    verbosity: Option<String>,
-
     #[clap(subcommand)]
     subcommand: Option<Subcommand>,
 }
@@ -155,25 +143,7 @@ fn main() -> anyhow::Result<()> {
 }
 
 async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> {
-    let mut cli = MultitoolCli::parse();
-
-    // Synthesize -c overrides from root-level reasoning flags so they flow to
-    // both interactive and subcommand CLIs via prepend_config_flags.
-    if let Some(ref v) = cli.reasoning {
-        cli.config_overrides
-            .raw_overrides
-            .push(format!("model_reasoning_effort=\"{v}\""));
-    }
-    if let Some(ref v) = cli.reasoning_summary {
-        cli.config_overrides
-            .raw_overrides
-            .push(format!("model_reasoning_summary=\"{v}\""));
-    }
-    if let Some(ref v) = cli.verbosity {
-        cli.config_overrides
-            .raw_overrides
-            .push(format!("model_verbosity=\"{v}\""));
-    }
+    let cli = MultitoolCli::parse();
 
     match cli.subcommand {
         None => {
@@ -40,7 +40,6 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
     let conversation_manager = ConversationManager::new(AuthManager::shared(
         config.codex_home.clone(),
         config.preferred_auth_method,
-        config.responses_originator_header.clone(),
     ));
     let NewConversation {
         conversation_id: _,
@@ -26,14 +26,12 @@ eventsource-stream = "0.2.3"
 futures = "0.3"
 libc = "0.2.175"
 mcp-types = { path = "../mcp-types" }
 mime_guess = "2.0"
 os_info = "3.12.0"
 portable-pty = "0.9.0"
 rand = "0.9"
 regex-lite = "0.1.7"
 reqwest = { version = "0.12", features = ["json", "stream"] }
 serde = { version = "1", features = ["derive"] }
 serde_bytes = "0.11"
 serde_json = "1"
 sha1 = "0.10.6"
 shlex = "1.3.0"
@@ -56,7 +54,7 @@ tracing = { version = "0.1.41", features = ["log"] }
 tree-sitter = "0.25.9"
 tree-sitter-bash = "0.25.0"
 uuid = { version = "1", features = ["serde", "v4"] }
 whoami = "1.6.1"
 which = "6"
 wildmatch = "2.4.0"
 
@@ -72,9 +70,6 @@ openssl-sys = { version = "*", features = ["vendored"] }
 [target.aarch64-unknown-linux-musl.dependencies]
 openssl-sys = { version = "*", features = ["vendored"] }
 
-[target.'cfg(target_os = "windows")'.dependencies]
-which = "6"
-
 [dev-dependencies]
 assert_cmd = "2"
 core_test_support = { path = "tests/common" }
@@ -85,3 +80,6 @@ tempfile = "3"
 tokio-test = "0.4"
 walkdir = "2.5.0"
 wiremock = "0.6"
+
+[package.metadata.cargo-shear]
+ignored = ["openssl-sys"]
@@ -75,9 +75,8 @@ impl CodexAuth {
     pub fn from_codex_home(
         codex_home: &Path,
         preferred_auth_method: AuthMode,
-        originator: &str,
     ) -> std::io::Result<Option<CodexAuth>> {
-        load_auth(codex_home, true, preferred_auth_method, originator)
+        load_auth(codex_home, true, preferred_auth_method)
     }
 
     pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> {
@@ -173,7 +172,7 @@ impl CodexAuth {
             mode: AuthMode::ChatGPT,
             auth_file: PathBuf::new(),
             auth_dot_json,
-            client: crate::default_client::create_client("codex_cli_rs"),
+            client: crate::default_client::create_client(),
         }
     }
 
@@ -188,10 +187,7 @@ impl CodexAuth {
     }
 
     pub fn from_api_key(api_key: &str) -> Self {
-        Self::from_api_key_with_client(
-            api_key,
-            crate::default_client::create_client(crate::default_client::DEFAULT_ORIGINATOR),
-        )
+        Self::from_api_key_with_client(api_key, crate::default_client::create_client())
     }
 }
 
@@ -232,13 +228,12 @@ fn load_auth(
     codex_home: &Path,
     include_env_var: bool,
     preferred_auth_method: AuthMode,
-    originator: &str,
 ) -> std::io::Result<Option<CodexAuth>> {
     // First, check to see if there is a valid auth.json file. If not, we fall
     // back to AuthMode::ApiKey using the OPENAI_API_KEY environment variable
     // (if it is set).
     let auth_file = get_auth_file(codex_home);
-    let client = crate::default_client::create_client(originator);
+    let client = crate::default_client::create_client();
     let auth_dot_json = match try_read_auth_json(&auth_file) {
         Ok(auth) => auth,
         // If auth.json does not exist, try to read the OPENAI_API_KEY from the
@@ -473,7 +468,7 @@ mod tests {
             auth_dot_json,
             auth_file: _,
             ..
-        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT, "codex_cli_rs")
+        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
         .unwrap()
         .unwrap();
         assert_eq!(None, api_key);
@@ -525,7 +520,7 @@ mod tests {
             auth_dot_json,
             auth_file: _,
             ..
-        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT, "codex_cli_rs")
+        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
         .unwrap()
         .unwrap();
         assert_eq!(None, api_key);
@@ -576,7 +571,7 @@ mod tests {
             auth_dot_json,
             auth_file: _,
             ..
-        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT, "codex_cli_rs")
+        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
         .unwrap()
         .unwrap();
         assert_eq!(Some("sk-test-key".to_string()), api_key);
@@ -596,7 +591,7 @@ mod tests {
         )
         .unwrap();
 
-        let auth = super::load_auth(dir.path(), false, AuthMode::ChatGPT, "codex_cli_rs")
+        let auth = super::load_auth(dir.path(), false, AuthMode::ChatGPT)
             .unwrap()
             .unwrap();
         assert_eq!(auth.mode, AuthMode::ApiKey);
@@ -680,7 +675,6 @@ mod tests {
 #[derive(Debug)]
 pub struct AuthManager {
     codex_home: PathBuf,
-    originator: String,
     inner: RwLock<CachedAuth>,
 }
 
@@ -689,13 +683,12 @@ impl AuthManager {
     /// preferred auth method. Errors loading auth are swallowed; `auth()` will
     /// simply return `None` in that case so callers can treat it as an
     /// unauthenticated state.
-    pub fn new(codex_home: PathBuf, preferred_auth_mode: AuthMode, originator: String) -> Self {
-        let auth = CodexAuth::from_codex_home(&codex_home, preferred_auth_mode, &originator)
+    pub fn new(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Self {
+        let auth = CodexAuth::from_codex_home(&codex_home, preferred_auth_mode)
            .ok()
            .flatten();
         Self {
             codex_home,
-            originator,
             inner: RwLock::new(CachedAuth {
                 preferred_auth_mode,
                 auth,
@@ -712,7 +705,6 @@ impl AuthManager {
         };
         Arc::new(Self {
             codex_home: PathBuf::new(),
-            originator: "codex_cli_rs".to_string(),
             inner: RwLock::new(cached),
         })
     }
@@ -734,7 +726,7 @@ impl AuthManager {
     /// whether the auth value changed.
     pub fn reload(&self) -> bool {
         let preferred = self.preferred_auth_method();
-        let new_auth = CodexAuth::from_codex_home(&self.codex_home, preferred, &self.originator)
+        let new_auth = CodexAuth::from_codex_home(&self.codex_home, preferred)
             .ok()
             .flatten();
         if let Ok(mut guard) = self.inner.write() {
@@ -755,12 +747,8 @@ impl AuthManager {
     }
 
     /// Convenience constructor returning an `Arc` wrapper.
-    pub fn shared(
-        codex_home: PathBuf,
-        preferred_auth_mode: AuthMode,
-        originator: String,
-    ) -> Arc<Self> {
-        Arc::new(Self::new(codex_home, preferred_auth_mode, originator))
+    pub fn shared(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Arc<Self> {
+        Arc::new(Self::new(codex_home, preferred_auth_mode))
     }
 
     /// Attempt to refresh the current auth token (if any). On success, reload
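Call sites construct the manager without the originator now; a minimal sketch of the updated two-argument constructor (the `codex_core` path is assumed; `AuthMode` is the type imported in the diff above):

```rust
use std::path::PathBuf;
use std::sync::Arc;

use codex_core::AuthManager; // crate path assumed for this sketch
use codex_protocol::mcp_protocol::AuthMode;

fn make_auth_manager() -> Arc<AuthManager> {
    // Two-argument form after the `originator` parameter was dropped.
    AuthManager::shared(PathBuf::from("/home/user/.codex"), AuthMode::ChatGPT)
}
```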
@@ -84,7 +84,7 @@ impl ModelClient {
         summary: ReasoningSummaryConfig,
         conversation_id: ConversationId,
     ) -> Self {
-        let client = create_client(&config.responses_originator_header);
+        let client = create_client();
 
         Self {
             config,
@@ -9,6 +9,9 @@ use std::sync::atomic::AtomicU64;
 use std::time::Duration;
 
 use crate::AuthManager;
+use crate::config_edit::CONFIG_KEY_EFFORT;
+use crate::config_edit::CONFIG_KEY_MODEL;
+use crate::config_edit::persist_non_null_overrides;
 use crate::event_mapping::map_response_item_to_event_messages;
 use async_channel::Receiver;
 use async_channel::Sender;
@@ -16,12 +19,14 @@ use codex_apply_patch::ApplyPatchAction;
 use codex_apply_patch::MaybeApplyPatchVerified;
 use codex_apply_patch::maybe_parse_apply_patch_verified;
 use codex_protocol::mcp_protocol::ConversationId;
-use codex_protocol::protocol::ConversationHistoryResponseEvent;
+use codex_protocol::protocol::ConversationPathResponseEvent;
+use codex_protocol::protocol::RolloutItem;
 use codex_protocol::protocol::TaskStartedEvent;
 use codex_protocol::protocol::TurnAbortReason;
 use codex_protocol::protocol::TurnAbortedEvent;
 use futures::prelude::*;
 use mcp_types::CallToolResult;
 use serde::Deserialize;
+use serde::Serialize;
 use serde_json;
 use tokio::sync::oneshot;
@@ -44,7 +49,6 @@ use crate::client_common::ResponseEvent;
 use crate::config::Config;
 use crate::config_types::ShellEnvironmentPolicy;
 use crate::conversation_history::ConversationHistory;
-use crate::conversation_manager::InitialHistory;
 use crate::environment_context::EnvironmentContext;
 use crate::error::CodexErr;
 use crate::error::Result as CodexResult;
@@ -109,6 +113,7 @@ use crate::safety::assess_command_safety;
 use crate::safety::assess_safety_for_untrusted_command;
 use crate::shell;
 use crate::turn_diff_tracker::TurnDiffTracker;
+use crate::unified_exec::UnifiedExecSessionManager;
 use crate::user_instructions::UserInstructions;
 use crate::user_notification::UserNotification;
 use crate::util::backoff;
@@ -121,6 +126,7 @@ use codex_protocol::models::LocalShellAction;
 use codex_protocol::models::ResponseInputItem;
 use codex_protocol::models::ResponseItem;
 use codex_protocol::models::ShellToolCallParams;
+use codex_protocol::protocol::InitialHistory;
 
 // A convenience extension trait for acquiring mutex locks where poisoning is
 // unrecoverable and should abort the program. This avoids scattered `.unwrap()`
@@ -203,9 +209,6 @@ impl Codex {
             error!("Failed to create session: {e:#}");
             CodexErr::InternalAgentDied
         })?;
-        session
-            .record_initial_history(&turn_context, conversation_history)
-            .await;
         let conversation_id = session.conversation_id;
 
         // This task will run until Op::Shutdown is received.
@@ -279,6 +282,7 @@ pub(crate) struct Session {
     /// Manager for external MCP servers/tools.
     mcp_connection_manager: McpConnectionManager,
     session_manager: ExecSessionManager,
+    unified_exec_manager: UnifiedExecSessionManager,
 
     /// External notifier command (will be passed as args to exec()). When
     /// `None` this feature is disabled.
@@ -416,6 +420,7 @@ impl Session {
             error!("failed to initialize rollout recorder: {e:#}");
             anyhow::anyhow!("failed to initialize rollout recorder: {e:#}")
         })?;
+        let rollout_path = rollout_recorder.rollout_path.clone();
         // Create the mutable state for the Session.
         let state = State {
             history: ConversationHistory::new(),
@@ -463,12 +468,12 @@ impl Session {
             tools_config: ToolsConfig::new(&ToolsConfigParams {
                 model_family: &config.model_family,
                 approval_policy,
                 sandbox_policy: sandbox_policy.clone(),
                 include_plan_tool: config.include_plan_tool,
                 include_apply_patch_tool: config.include_apply_patch_tool,
                 include_web_search_request: config.tools_web_search_request,
                 use_streamable_shell_tool: config.use_experimental_streamable_shell_tool,
                 include_view_image_tool: config.include_view_image_tool,
+                experimental_unified_exec_tool: config.use_experimental_unified_exec_tool,
             }),
             user_instructions,
             base_instructions,
@@ -482,6 +487,7 @@ impl Session {
             tx_event: tx_event.clone(),
             mcp_connection_manager,
             session_manager: ExecSessionManager::default(),
+            unified_exec_manager: UnifiedExecSessionManager::default(),
             notify,
             state: Mutex::new(state),
             rollout: Mutex::new(Some(rollout_recorder)),
@@ -492,13 +498,9 @@ impl Session {
 
         // Dispatch the SessionConfiguredEvent first and then report any errors.
         // If resuming, include converted initial messages in the payload so UIs can render them immediately.
-        let initial_messages = match &initial_history {
-            InitialHistory::New => None,
-            InitialHistory::Forked(items) => Some(sess.build_initial_messages(items)),
-            InitialHistory::Resumed(resumed_history) => {
-                Some(sess.build_initial_messages(&resumed_history.history))
-            }
-        };
+        let initial_messages = initial_history.get_event_msgs();
+        sess.record_initial_history(&turn_context, initial_history)
+            .await;
 
         let events = std::iter::once(Event {
             id: INITIAL_SUBMIT_ID.to_owned(),
@@ -508,13 +510,12 @@ impl Session {
                 history_log_id,
                 history_entry_count,
                 initial_messages,
+                rollout_path,
             }),
         })
         .chain(post_session_configured_error_events.into_iter());
         for event in events {
-            if let Err(e) = tx_event.send(event).await {
-                error!("failed to send event: {e:?}");
-            }
+            sess.send_event(event).await;
         }
 
         Ok((sess, turn_context))
@@ -544,53 +545,33 @@ impl Session {
     ) {
         match conversation_history {
             InitialHistory::New => {
-                self.record_initial_history_new(turn_context).await;
+                // Build and record initial items (user instructions + environment context)
+                let items = self.build_initial_context(turn_context);
+                self.record_conversation_items(&items).await;
             }
-            InitialHistory::Forked(items) => {
-                self.record_initial_history_from_items(items).await;
-            }
-            InitialHistory::Resumed(resumed_history) => {
-                self.record_initial_history_from_items(resumed_history.history)
-                    .await;
+            InitialHistory::Resumed(_) | InitialHistory::Forked(_) => {
+                let rollout_items = conversation_history.get_rollout_items();
+                let persist = matches!(conversation_history, InitialHistory::Forked(_));
+
+                // Always add response items to conversation history
+                let response_items = conversation_history.get_response_items();
+                if !response_items.is_empty() {
+                    self.record_into_history(&response_items);
+                }
+
+                // If persisting, persist all rollout items as-is (recorder filters)
+                if persist && !rollout_items.is_empty() {
+                    self.persist_rollout_items(&rollout_items).await;
+                }
             }
         }
     }
 
-    async fn record_initial_history_new(&self, turn_context: &TurnContext) {
-        // record the initial user instructions and environment context,
-        // regardless of whether we restored items.
-        // TODO: Those items shouldn't be "user messages" IMO. Maybe developer messages.
-        let mut conversation_items = Vec::<ResponseItem>::with_capacity(2);
-        if let Some(user_instructions) = turn_context.user_instructions.as_deref() {
-            conversation_items.push(UserInstructions::new(user_instructions.to_string()).into());
-        }
-        conversation_items.push(ResponseItem::from(EnvironmentContext::new(
-            Some(turn_context.cwd.clone()),
-            Some(turn_context.approval_policy),
-            Some(turn_context.sandbox_policy.clone()),
-            Some(self.user_shell.clone()),
-        )));
-        self.record_conversation_items(&conversation_items).await;
-    }
-
-    async fn record_initial_history_from_items(&self, items: Vec<ResponseItem>) {
-        self.record_conversation_items_internal(&items, false).await;
-    }
-
-    /// build the initial messages vector for SessionConfigured by converting
-    /// ResponseItems into EventMsg.
-    fn build_initial_messages(&self, items: &[ResponseItem]) -> Vec<EventMsg> {
-        items
-            .iter()
-            .flat_map(|item| {
-                map_response_item_to_event_messages(item, self.show_raw_agent_reasoning)
-            })
-            .collect()
-    }
-
-    /// Sends the given event to the client and swallows the send event, if
-    /// any, logging it as an error.
+    /// Persist the event to rollout and send it to clients.
     pub(crate) async fn send_event(&self, event: Event) {
+        // Persist the event into rollout (recorder filters as needed)
+        let rollout_items = vec![RolloutItem::EventMsg(event.msg.clone())];
+        self.persist_rollout_items(&rollout_items).await;
         if let Err(e) = self.tx_event.send(event).await {
            error!("failed to send tool call event: {e}");
        }
@@ -624,7 +605,7 @@ impl Session {
             reason,
         }),
     };
-        let _ = self.tx_event.send(event).await;
+        self.send_event(event).await;
         rx_approve
     }
 
@@ -656,7 +637,7 @@ impl Session {
             grant_root,
         }),
     };
-        let _ = self.tx_event.send(event).await;
+        self.send_event(event).await;
         rx_approve
     }
@@ -680,36 +661,76 @@ impl Session {
         state.approved_commands.insert(cmd);
     }
 
-    /// Records items to both the rollout and the chat completions/ZDR
-    /// transcript, if enabled.
+    /// Records input items: always append to conversation history and
+    /// persist these response items to rollout.
     async fn record_conversation_items(&self, items: &[ResponseItem]) {
-        self.record_conversation_items_internal(items, true).await;
+        self.record_into_history(items);
+        self.persist_rollout_response_items(items).await;
     }
 
-    async fn record_conversation_items_internal(&self, items: &[ResponseItem], persist: bool) {
-        debug!("Recording items for conversation: {items:?}");
-        if persist {
-            self.record_state_snapshot(items).await;
-        }
-
-        self.state.lock_unchecked().history.record_items(items);
-    }
-
-    async fn record_state_snapshot(&self, items: &[ResponseItem]) {
-        let snapshot = { crate::rollout::SessionStateSnapshot {} };
-
-        let recorder = {
-            let guard = self.rollout.lock_unchecked();
-            guard.as_ref().cloned()
-        };
-
-        if let Some(rec) = recorder {
-            if let Err(e) = rec.record_state(snapshot).await {
-                error!("failed to record rollout state: {e:#}");
-            }
-            if let Err(e) = rec.record_items(items).await {
-                error!("failed to record rollout items: {e:#}");
-            }
-        }
-    }
+    /// Append ResponseItems to the in-memory conversation history only.
+    fn record_into_history(&self, items: &[ResponseItem]) {
+        self.state
+            .lock_unchecked()
+            .history
+            .record_items(items.iter());
+    }
+
+    async fn persist_rollout_response_items(&self, items: &[ResponseItem]) {
+        let rollout_items: Vec<RolloutItem> = items
+            .iter()
+            .cloned()
+            .map(RolloutItem::ResponseItem)
+            .collect();
+        self.persist_rollout_items(&rollout_items).await;
+    }
+
+    fn build_initial_context(&self, turn_context: &TurnContext) -> Vec<ResponseItem> {
+        let mut items = Vec::<ResponseItem>::with_capacity(2);
+        if let Some(user_instructions) = turn_context.user_instructions.as_deref() {
+            items.push(UserInstructions::new(user_instructions.to_string()).into());
+        }
+        items.push(ResponseItem::from(EnvironmentContext::new(
+            Some(turn_context.cwd.clone()),
+            Some(turn_context.approval_policy),
+            Some(turn_context.sandbox_policy.clone()),
+            Some(self.user_shell.clone()),
+        )));
+        items
+    }
+
+    async fn persist_rollout_items(&self, items: &[RolloutItem]) {
+        let recorder = {
+            let guard = self.rollout.lock_unchecked();
+            guard.as_ref().cloned()
+        };
+        if let Some(rec) = recorder
+            && let Err(e) = rec.record_items(items).await
+        {
+            error!("failed to record rollout items: {e:#}");
+        }
+    }
+
+    /// Record a user input item to conversation history and also persist a
+    /// corresponding UserMessage EventMsg to rollout.
+    async fn record_input_and_rollout_usermsg(&self, response_input: &ResponseInputItem) {
+        let response_item: ResponseItem = response_input.clone().into();
+        // Add to conversation history and persist response item to rollout
+        self.record_conversation_items(std::slice::from_ref(&response_item))
+            .await;
+
+        // Derive user message events and persist only UserMessage to rollout
+        let msgs =
+            map_response_item_to_event_messages(&response_item, self.show_raw_agent_reasoning);
+        let user_msgs: Vec<RolloutItem> = msgs
+            .into_iter()
+            .filter_map(|m| match m {
+                EventMsg::UserMessage(ev) => Some(RolloutItem::EventMsg(EventMsg::UserMessage(ev))),
+                _ => None,
+            })
+            .collect();
+        if !user_msgs.is_empty() {
+            self.persist_rollout_items(&user_msgs).await;
+        }
+    }
@@ -752,7 +773,7 @@ impl Session {
             id: sub_id.to_string(),
             msg,
         };
-        let _ = self.tx_event.send(event).await;
+        self.send_event(event).await;
     }
 
     async fn on_exec_command_end(
@@ -799,7 +820,7 @@ impl Session {
             id: sub_id.to_string(),
             msg,
         };
-        let _ = self.tx_event.send(event).await;
+        self.send_event(event).await;
 
         // If this is an apply_patch, after we emit the end patch, emit a second event
         // with the full turn diff if there is one.
@@ -811,7 +832,7 @@ impl Session {
                 id: sub_id.into(),
                 msg,
             };
-            let _ = self.tx_event.send(event).await;
+            self.send_event(event).await;
         }
     }
 }
@@ -877,7 +898,7 @@ impl Session {
                 message: message.into(),
             }),
         };
-        let _ = self.tx_event.send(event).await;
+        self.send_event(event).await;
     }
 
     async fn notify_stream_error(&self, sub_id: &str, message: impl Into<String>) {
@@ -887,7 +908,7 @@ impl Session {
                 message: message.into(),
            }),
        };
-        let _ = self.tx_event.send(event).await;
+        self.send_event(event).await;
    }
 
    /// Build the full turn input by concatenating the current conversation
@@ -1050,9 +1071,9 @@ impl AgentTask {
             id: self.sub_id,
             msg: EventMsg::TurnAborted(TurnAbortedEvent { reason }),
         };
-        let tx_event = self.sess.tx_event.clone();
+        let sess = self.sess.clone();
         tokio::spawn(async move {
-            tx_event.send(event).await.ok();
+            sess.send_event(event).await;
         });
     }
 }
@@ -1086,10 +1107,10 @@ async fn submission_loop(
         let provider = prev.client.get_provider();
 
         // Effective model + family
-        let (effective_model, effective_family) = if let Some(m) = model {
+        let (effective_model, effective_family) = if let Some(ref m) = model {
             let fam =
-                find_family_for_model(&m).unwrap_or_else(|| config.model_family.clone());
-            (m, fam)
+                find_family_for_model(m).unwrap_or_else(|| config.model_family.clone());
+            (m.clone(), fam)
         } else {
             (prev.client.get_model(), prev.client.get_model_family())
         };
@@ -1126,12 +1147,12 @@ async fn submission_loop(
         let tools_config = ToolsConfig::new(&ToolsConfigParams {
             model_family: &effective_family,
             approval_policy: new_approval_policy,
             sandbox_policy: new_sandbox_policy.clone(),
             include_plan_tool: config.include_plan_tool,
             include_apply_patch_tool: config.include_apply_patch_tool,
             include_web_search_request: config.tools_web_search_request,
             use_streamable_shell_tool: config.use_experimental_streamable_shell_tool,
             include_view_image_tool: config.include_view_image_tool,
+            experimental_unified_exec_tool: config.use_experimental_unified_exec_tool,
         });
 
         let new_turn_context = TurnContext {
@@ -1147,25 +1168,34 @@ async fn submission_loop(
 
         // Install the new persistent context for subsequent tasks/turns.
         turn_context = Arc::new(new_turn_context);
-        if cwd.is_some() || approval_policy.is_some() || sandbox_policy.is_some() {
-            sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new(
-                cwd,
-                approval_policy,
-                sandbox_policy,
-                // Shell is not configurable from turn to turn
-                None,
-            ))])
-            .await;
+
+        // Optionally persist changes to model / effort
+        let effort_str = effort.map(|_| effective_effort.to_string());
+
+        if let Err(e) = persist_non_null_overrides(
+            &config.codex_home,
+            config.active_profile.as_deref(),
+            &[
+                (&[CONFIG_KEY_MODEL], model.as_deref()),
+                (&[CONFIG_KEY_EFFORT], effort_str.as_deref()),
+            ],
+        )
+        .await
+        {
+            warn!("failed to persist overrides: {e:#}");
         }
     }
     Op::UserInput { items } => {
-        // attempt to inject input into current task
-        if let Err(items) = sess.inject_input(items) {
-            // no current task, spawn a new one
-            let task =
-                AgentTask::spawn(sess.clone(), Arc::clone(&turn_context), sub.id, items);
-            sess.set_task(task);
-        }
+        submit_user_input(
+            turn_context.cwd.clone(),
+            turn_context.approval_policy,
+            turn_context.sandbox_policy.clone(),
+            &sess,
+            &turn_context,
+            sub.id.clone(),
+            items,
+        )
+        .await;
    }
    Op::UserTurn {
        items,
@@ -1210,13 +1240,14 @@ async fn submission_loop(
         tools_config: ToolsConfig::new(&ToolsConfigParams {
             model_family: &model_family,
             approval_policy,
             sandbox_policy: sandbox_policy.clone(),
             include_plan_tool: config.include_plan_tool,
             include_apply_patch_tool: config.include_apply_patch_tool,
             include_web_search_request: config.tools_web_search_request,
             use_streamable_shell_tool: config
                 .use_experimental_streamable_shell_tool,
             include_view_image_tool: config.include_view_image_tool,
+            experimental_unified_exec_tool: config
+                .use_experimental_unified_exec_tool,
         }),
         user_instructions: turn_context.user_instructions.clone(),
         base_instructions: turn_context.base_instructions.clone(),
@@ -1225,11 +1256,16 @@ async fn submission_loop(
         shell_environment_policy: turn_context.shell_environment_policy.clone(),
         cwd,
     };
-    // TODO: record the new environment context in the conversation history
-    // no current task, spawn a new one with the per‑turn context
-    let task =
-        AgentTask::spawn(sess.clone(), Arc::new(fresh_turn_context), sub.id, items);
-    sess.set_task(task);
+    submit_user_input(
+        fresh_turn_context.cwd.clone(),
+        fresh_turn_context.approval_policy,
+        fresh_turn_context.sandbox_policy.clone(),
+        &sess,
+        &Arc::new(fresh_turn_context),
+        sub.id.clone(),
+        items,
+    )
+    .await;
    }
 }
 Op::ExecApproval { id, decision } => match decision {
@@ -1257,7 +1293,7 @@ async fn submission_loop(
 
 Op::GetHistoryEntryRequest { offset, log_id } => {
     let config = config.clone();
-    let tx_event = sess.tx_event.clone();
+    let sess_clone = sess.clone();
     let sub_id = sub.id.clone();
 
     tokio::spawn(async move {
@@ -1285,13 +1321,10 @@ async fn submission_loop(
             ),
         };
 
-        if let Err(e) = tx_event.send(event).await {
-            warn!("failed to send GetHistoryEntryResponse event: {e}");
-        }
+        sess_clone.send_event(event).await;
     });
 }
 Op::ListMcpTools => {
     let tx_event = sess.tx_event.clone();
     let sub_id = sub.id.clone();
 
     // This is a cheap lookup from the connection manager's cache.
@@ -1302,12 +1335,9 @@ async fn submission_loop(
             crate::protocol::McpListToolsResponseEvent { tools },
         ),
     };
-    if let Err(e) = tx_event.send(event).await {
-        warn!("failed to send McpListToolsResponse event: {e}");
-    }
+    sess.send_event(event).await;
 }
 Op::ListCustomPrompts => {
     let tx_event = sess.tx_event.clone();
     let sub_id = sub.id.clone();
 
     let custom_prompts: Vec<CustomPrompt> =
@@ -1323,9 +1353,7 @@ async fn submission_loop(
             custom_prompts,
         }),
    };
-    if let Err(e) = tx_event.send(event).await {
-        warn!("failed to send ListCustomPromptsResponse event: {e}");
-    }
+    sess.send_event(event).await;
 }
 Op::Compact => {
     // Create a summarization request as user input
@@ -1361,34 +1389,42 @@ async fn submission_loop(
             message: "Failed to shutdown rollout recorder".to_string(),
         }),
     };
-    if let Err(e) = sess.tx_event.send(event).await {
-        warn!("failed to send error message: {e:?}");
-    }
+    sess.send_event(event).await;
 }
 
 let event = Event {
     id: sub.id.clone(),
     msg: EventMsg::ShutdownComplete,
 };
-if let Err(e) = sess.tx_event.send(event).await {
-    warn!("failed to send Shutdown event: {e}");
-}
+sess.send_event(event).await;
 break;
 }
-Op::GetHistory => {
-    let tx_event = sess.tx_event.clone();
+Op::GetPath => {
     let sub_id = sub.id.clone();
 
+    // Flush rollout writes before returning the path so readers observe a consistent file.
+    let (path, rec_opt) = {
+        let guard = sess.rollout.lock_unchecked();
+        match guard.as_ref() {
+            Some(rec) => (rec.get_rollout_path(), Some(rec.clone())),
+            None => {
+                error!("rollout recorder not found");
+                continue;
+            }
+        }
+    };
+    if let Some(rec) = rec_opt
+        && let Err(e) = rec.flush().await
+    {
+        warn!("failed to flush rollout recorder before GetHistory: {e}");
+    }
     let event = Event {
         id: sub_id.clone(),
-        msg: EventMsg::ConversationHistory(ConversationHistoryResponseEvent {
+        msg: EventMsg::ConversationPath(ConversationPathResponseEvent {
             conversation_id: sess.conversation_id,
-            entries: sess.state.lock_unchecked().history.contents(),
+            path,
        }),
    };
-    if let Err(e) = tx_event.send(event).await {
-        warn!("failed to send ConversationHistory event: {e}");
-    }
+    sess.send_event(event).await;
 }
 _ => {
     // Ignore unknown ops; enum is non_exhaustive to allow extensions.
@@ -1426,12 +1462,10 @@ async fn run_task(
             model_context_window: turn_context.client.get_model_context_window(),
         }),
     };
-    if sess.tx_event.send(event).await.is_err() {
-        return;
-    }
+    sess.send_event(event).await;
 
     let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
-    sess.record_conversation_items(&[initial_input_for_turn.clone().into()])
+    sess.record_input_and_rollout_usermsg(&initial_input_for_turn)
         .await;
 
     let mut last_agent_message: Option<String> = None;
@@ -1600,7 +1634,7 @@ async fn run_task(
                     message: e.to_string(),
                 }),
             };
-            sess.tx_event.send(event).await.ok();
+            sess.send_event(event).await;
             // let the user continue the conversation
             break;
         }
@@ -1611,7 +1645,7 @@ async fn run_task(
         id: sub_id,
         msg: EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }),
     };
-    sess.tx_event.send(event).await.ok();
+    sess.send_event(event).await;
 }
 
 async fn run_turn(
@@ -1809,13 +1843,12 @@ async fn try_run_turn(
             st.token_info = info.clone();
             info
         };
-        sess.tx_event
-            .send(Event {
+        let _ = sess
+            .send_event(Event {
                 id: sub_id.to_string(),
                 msg: EventMsg::TokenCount(crate::protocol::TokenCountEvent { info }),
             })
-            .await
-            .ok();
+            .await;
 
         let unified_diff = turn_diff_tracker.get_unified_diff();
         if let Ok(Some(unified_diff)) = unified_diff {
@@ -1824,7 +1857,7 @@ async fn try_run_turn(
                 id: sub_id.to_string(),
                 msg,
             };
-            let _ = sess.tx_event.send(event).await;
+            sess.send_event(event).await;
         }
 
         return Ok(output);
@@ -1834,21 +1867,21 @@ async fn try_run_turn(
                     id: sub_id.to_string(),
                     msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }),
                 };
-                sess.tx_event.send(event).await.ok();
+                sess.send_event(event).await;
             }
             ResponseEvent::ReasoningSummaryDelta(delta) => {
                 let event = Event {
                     id: sub_id.to_string(),
                     msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }),
                 };
-                sess.tx_event.send(event).await.ok();
+                sess.send_event(event).await;
             }
             ResponseEvent::ReasoningSummaryPartAdded => {
                 let event = Event {
                     id: sub_id.to_string(),
                     msg: EventMsg::AgentReasoningSectionBreak(AgentReasoningSectionBreakEvent {}),
                 };
-                sess.tx_event.send(event).await.ok();
+                sess.send_event(event).await;
             }
             ResponseEvent::ReasoningContentDelta(delta) => {
                 if sess.show_raw_agent_reasoning {
@@ -1858,7 +1891,7 @@ async fn try_run_turn(
                         AgentReasoningRawContentDeltaEvent { delta },
                     ),
                 };
-                sess.tx_event.send(event).await.ok();
+                sess.send_event(event).await;
                 }
             }
         }
@@ -1879,9 +1912,7 @@ async fn run_compact_task(
             model_context_window,
         }),
     };
-    if sess.tx_event.send(start_event).await.is_err() {
-        return;
-    }
+    sess.send_event(start_event).await;
 
     let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
     let turn_input: Vec<ResponseItem> =
@@ -2059,7 +2090,7 @@ async fn handle_response_item(
             id: sub_id.to_string(),
             msg,
        };
-        sess.tx_event.send(event).await.ok();
+        sess.send_event(event).await;
    }
    None
 }
@@ -2068,6 +2099,72 @@ async fn handle_response_item(
 
     Ok(output)
 }
 
+async fn handle_unified_exec_tool_call(
+    sess: &Session,
+    call_id: String,
+    session_id: Option<String>,
+    arguments: Vec<String>,
+    timeout_ms: Option<u64>,
+) -> ResponseInputItem {
+    let parsed_session_id = if let Some(session_id) = session_id {
+        match session_id.parse::<i32>() {
+            Ok(parsed) => Some(parsed),
+            Err(output) => {
+                return ResponseInputItem::FunctionCallOutput {
+                    call_id: call_id.to_string(),
+                    output: FunctionCallOutputPayload {
+                        content: format!("invalid session_id: {session_id} due to error {output}"),
+                        success: Some(false),
+                    },
+                };
+            }
+        }
+    } else {
+        None
+    };
+
+    let request = crate::unified_exec::UnifiedExecRequest {
+        session_id: parsed_session_id,
+        input_chunks: &arguments,
+        timeout_ms,
+    };
+
+    let result = sess.unified_exec_manager.handle_request(request).await;
+
+    let output_payload = match result {
+        Ok(value) => {
+            #[derive(Serialize)]
+            struct SerializedUnifiedExecResult<'a> {
+                session_id: Option<String>,
+                output: &'a str,
+            }
+
+            match serde_json::to_string(&SerializedUnifiedExecResult {
+                session_id: value.session_id.map(|id| id.to_string()),
+                output: &value.output,
+            }) {
+                Ok(serialized) => FunctionCallOutputPayload {
+                    content: serialized,
+                    success: Some(true),
+                },
+                Err(err) => FunctionCallOutputPayload {
+                    content: format!("failed to serialize unified exec output: {err}"),
+                    success: Some(false),
+                },
+            }
+        }
+        Err(err) => FunctionCallOutputPayload {
+            content: format!("unified exec failed: {err}"),
+            success: Some(false),
+        },
+    };
+
+    ResponseInputItem::FunctionCallOutput {
+        call_id,
+        output: output_payload,
+    }
+}
+
 async fn handle_function_call(
     sess: &Session,
     turn_context: &TurnContext,
@@ -2095,6 +2192,38 @@ async fn handle_function_call(
             )
             .await
         }
+        "unified_exec" => {
+            #[derive(Deserialize)]
+            struct UnifiedExecArgs {
+                input: Vec<String>,
+                #[serde(default)]
+                session_id: Option<String>,
+                #[serde(default)]
+                timeout_ms: Option<u64>,
+            }
+
+            let args = match serde_json::from_str::<UnifiedExecArgs>(&arguments) {
+                Ok(args) => args,
+                Err(err) => {
+                    return ResponseInputItem::FunctionCallOutput {
+                        call_id,
+                        output: FunctionCallOutputPayload {
+                            content: format!("failed to parse function arguments: {err}"),
+                            success: Some(false),
+                        },
+                    };
+                }
+            };
+
+            handle_unified_exec_tool_call(
+                sess,
+                call_id,
+                args.session_id,
+                args.input,
+                args.timeout_ms,
+            )
+            .await
+        }
         "view_image" => {
             #[derive(serde::Deserialize)]
             struct SeeImageArgs {
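For reference, the arguments the new `unified_exec` arm deserializes have the shape of the `UnifiedExecArgs` struct above. A hedged sketch building such a payload (field values are illustrative; assumes the `serde_json` crate shown in the diff):

```rust
use serde_json::json;

fn main() {
    // Shape accepted by the unified_exec tool arm; values are made up.
    let arguments = json!({
        "input": ["echo", "hello"],   // required: input chunks for the session
        "session_id": "3",            // optional: reuse an existing exec session
        "timeout_ms": 5_000u64        // optional: cap how long to wait for output
    })
    .to_string();
    println!("{arguments}");
}
```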
@@ -2698,6 +2827,30 @@ async fn handle_sandbox_error(
     }
 }
 
+async fn submit_user_input(
+    cwd: PathBuf,
+    approval_policy: AskForApproval,
+    sandbox_policy: SandboxPolicy,
+    sess: &Arc<Session>,
+    turn_context: &Arc<TurnContext>,
+    sub_id: String,
+    items: Vec<InputItem>,
+) {
+    sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new(
+        Some(cwd),
+        Some(approval_policy),
+        Some(sandbox_policy),
+        // Shell is not configurable from turn to turn
+        None,
+    ))])
+    .await;
+    if let Err(items) = sess.inject_input(items) {
+        // no current task, spawn a new one
+        let task = AgentTask::spawn(Arc::clone(sess), Arc::clone(turn_context), sub_id, items);
+        sess.set_task(task);
+    }
+}
+
 fn format_exec_output_str(exec_output: &ExecToolCallOutput) -> String {
     let ExecToolCallOutput {
         aggregated_output, ..
@@ -38,9 +38,7 @@ const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
 /// the context window.
 pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB
 
-const CONFIG_TOML_FILE: &str = "config.toml";
-
-const DEFAULT_RESPONSES_ORIGINATOR_HEADER: &str = "codex_cli_rs";
+pub(crate) const CONFIG_TOML_FILE: &str = "config.toml";
 
 /// Application configuration loaded from disk and merged with overrides.
 #[derive(Debug, Clone, PartialEq)]
@@ -169,16 +167,20 @@ pub struct Config {
 
     pub tools_web_search_request: bool,
 
-    /// The value for the `originator` header included with Responses API requests.
-    pub responses_originator_header: String,
-
     /// If set to `true`, the API key will be signed with the `originator` header.
     pub preferred_auth_method: AuthMode,
 
     pub use_experimental_streamable_shell_tool: bool,
 
+    /// If set to `true`, used only the experimental unified exec tool.
+    pub use_experimental_unified_exec_tool: bool,
+
     /// Include the `view_image` tool that lets the agent attach a local image path to context.
     pub include_view_image_tool: bool,
 
+    /// The active profile name used to derive this `Config` (if any).
+    pub active_profile: Option<String>,
+
     /// When true, disables burst-paste detection for typed input entirely.
     /// All characters are inserted as they are received, and no buffering
     /// or placeholder replacement will occur for fast keypress bursts.
@@ -262,17 +264,7 @@ pub fn load_config_as_toml(codex_home: &Path) -> std::io::Result<TomlValue> {
     }
 }
 
-/// Patch `CODEX_HOME/config.toml` project state.
-/// Use with caution.
-pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Result<()> {
-    let config_path = codex_home.join(CONFIG_TOML_FILE);
-    // Parse existing config if present; otherwise start a new document.
-    let mut doc = match std::fs::read_to_string(config_path.clone()) {
-        Ok(s) => s.parse::<DocumentMut>()?,
-        Err(e) if e.kind() == std::io::ErrorKind::NotFound => DocumentMut::new(),
-        Err(e) => return Err(e.into()),
-    };
-
+fn set_project_trusted_inner(doc: &mut DocumentMut, project_path: &Path) -> anyhow::Result<()> {
     // Ensure we render a human-friendly structure:
     //
     // [projects]
@@ -288,14 +280,26 @@ pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Re
     // Ensure top-level `projects` exists as a non-inline, explicit table. If it
     // exists but was previously represented as a non-table (e.g., inline),
     // replace it with an explicit table.
-    let mut created_projects_table = false;
     {
         let root = doc.as_table_mut();
-        let needs_table = !root.contains_key("projects")
-            || root.get("projects").and_then(|i| i.as_table()).is_none();
-        if needs_table {
-            root.insert("projects", toml_edit::table());
-            created_projects_table = true;
+        // If `projects` exists but isn't a standard table (e.g., it's an inline table),
+        // convert it to an explicit table while preserving existing entries.
+        let existing_projects = root.get("projects").cloned();
+        if existing_projects.as_ref().is_none_or(|i| !i.is_table()) {
+            let mut projects_tbl = toml_edit::Table::new();
+            projects_tbl.set_implicit(true);
+
+            // If there was an existing inline table, migrate its entries to explicit tables.
+            if let Some(inline_tbl) = existing_projects.as_ref().and_then(|i| i.as_inline_table()) {
+                for (k, v) in inline_tbl.iter() {
+                    if let Some(inner_tbl) = v.as_inline_table() {
+                        let new_tbl = inner_tbl.clone().into_table();
+                        projects_tbl.insert(k, toml_edit::Item::Table(new_tbl));
+                    }
+                }
+            }
+
+            root.insert("projects", toml_edit::Item::Table(projects_tbl));
         }
     }
     let Some(projects_tbl) = doc["projects"].as_table_mut() else {
@@ -304,12 +308,6 @@ pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Re
         ));
     };
 
-    // If we created the `projects` table ourselves, keep it implicit so we
-    // don't render a standalone `[projects]` header.
-    if created_projects_table {
-        projects_tbl.set_implicit(true);
-    }
-
     // Ensure the per-project entry is its own explicit table. If it exists but
     // is not a table (e.g., an inline table), replace it with an explicit table.
     let needs_proj_table = !projects_tbl.contains_key(project_key.as_str())
@@ -328,6 +326,21 @@ pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Re
     };
     proj_tbl.set_implicit(false);
     proj_tbl["trust_level"] = toml_edit::value("trusted");
+    Ok(())
+}
+
+/// Patch `CODEX_HOME/config.toml` project state.
+/// Use with caution.
+pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Result<()> {
+    let config_path = codex_home.join(CONFIG_TOML_FILE);
+    // Parse existing config if present; otherwise start a new document.
+    let mut doc = match std::fs::read_to_string(config_path.clone()) {
+        Ok(s) => s.parse::<DocumentMut>()?,
+        Err(e) if e.kind() == std::io::ErrorKind::NotFound => DocumentMut::new(),
+        Err(e) => return Err(e.into()),
+    };
+
+    set_project_trusted_inner(&mut doc, project_path)?;
+
+    // ensure codex_home exists
+    std::fs::create_dir_all(codex_home)?;
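The helper leaves rendering to `toml_edit`; a minimal sketch of the explicit-table output the tests below expect (the path key is illustrative):

```rust
use toml_edit::DocumentMut;

fn main() {
    // Build the same shape set_project_trusted_inner produces for a trusted
    // project; "/some/path" stands in for a real project directory.
    let mut doc = DocumentMut::new();
    doc["projects"]["/some/path"]["trust_level"] = toml_edit::value("trusted");
    // Expected rendering:
    // [projects."/some/path"]
    // trust_level = "trusted"
    println!("{doc}");
}
```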
@@ -477,9 +490,7 @@ pub struct ConfigToml {
     pub experimental_instructions_file: Option<PathBuf>,
 
     pub experimental_use_exec_command_tool: Option<bool>,
 
-    /// The value for the `originator` header included with Responses API requests.
-    pub responses_originator_header_internal_override: Option<String>,
+    pub experimental_use_unified_exec_tool: Option<bool>,
 
     pub projects: Option<HashMap<String, ProjectConfig>>,
 
@@ -661,7 +672,11 @@ impl Config {
             tools_web_search_request: override_tools_web_search_request,
         } = overrides;
 
-        let config_profile = match config_profile_key.as_ref().or(cfg.profile.as_ref()) {
+        let active_profile_name = config_profile_key
+            .as_ref()
+            .or(cfg.profile.as_ref())
+            .cloned();
+        let config_profile = match active_profile_name.as_ref() {
             Some(key) => cfg
                 .profiles
                 .get(key)
@@ -773,10 +788,6 @@ impl Config {
             Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
         let base_instructions = base_instructions.or(file_base_instructions);
 
-        let responses_originator_header: String = cfg
-            .responses_originator_header_internal_override
-            .unwrap_or(DEFAULT_RESPONSES_ORIGINATOR_HEADER.to_owned());
-
         let config = Self {
             model,
             model_family,
@@ -826,12 +837,15 @@ impl Config {
             include_plan_tool: include_plan_tool.unwrap_or(false),
             include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
             tools_web_search_request,
-            responses_originator_header,
             preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
             use_experimental_streamable_shell_tool: cfg
                 .experimental_use_exec_command_tool
                 .unwrap_or(false),
+            use_experimental_unified_exec_tool: cfg
+                .experimental_use_unified_exec_tool
+                .unwrap_or(true),
             include_view_image_tool,
+            active_profile: active_profile_name,
             disable_paste_burst: cfg.disable_paste_burst.unwrap_or(false),
         };
         Ok(config)
@@ -1203,10 +1217,11 @@ model_verbosity = "high"
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
responses_originator_header: "codex_cli_rs".to_string(),
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: true,
|
||||
include_view_image_tool: true,
|
||||
active_profile: Some("o3".to_string()),
|
||||
disable_paste_burst: false,
|
||||
},
|
||||
o3_profile_config
|
||||
@@ -1260,10 +1275,11 @@ model_verbosity = "high"
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
responses_originator_header: "codex_cli_rs".to_string(),
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: true,
|
||||
include_view_image_tool: true,
|
||||
active_profile: Some("gpt3".to_string()),
|
||||
disable_paste_burst: false,
|
||||
};
|
||||
|
||||
@@ -1332,10 +1348,11 @@ model_verbosity = "high"
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
responses_originator_header: "codex_cli_rs".to_string(),
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: true,
|
||||
include_view_image_tool: true,
|
||||
active_profile: Some("zdr".to_string()),
|
||||
disable_paste_burst: false,
|
||||
};
|
||||
|
||||
@@ -1390,10 +1407,11 @@ model_verbosity = "high"
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
responses_originator_header: "codex_cli_rs".to_string(),
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: true,
|
||||
include_view_image_tool: true,
|
||||
active_profile: Some("gpt5".to_string()),
|
||||
disable_paste_burst: false,
|
||||
};
|
||||
|
||||
@@ -1404,17 +1422,14 @@ model_verbosity = "high"
|
||||
|
||||
#[test]
|
||||
fn test_set_project_trusted_writes_explicit_tables() -> anyhow::Result<()> {
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let project_dir = TempDir::new().unwrap();
|
||||
let project_dir = Path::new("/some/path");
|
||||
let mut doc = DocumentMut::new();
|
||||
|
||||
// Call the function under test
|
||||
set_project_trusted(codex_home.path(), project_dir.path())?;
|
||||
set_project_trusted_inner(&mut doc, project_dir)?;
|
||||
|
||||
// Read back the generated config.toml and assert exact contents
|
||||
let config_path = codex_home.path().join(CONFIG_TOML_FILE);
|
||||
let contents = std::fs::read_to_string(&config_path)?;
|
||||
let contents = doc.to_string();
|
||||
|
||||
let raw_path = project_dir.path().to_string_lossy();
|
||||
let raw_path = project_dir.to_string_lossy();
|
||||
let path_str = if raw_path.contains('\\') {
|
||||
format!("'{raw_path}'")
|
||||
} else {
|
||||
@@ -1432,12 +1447,10 @@ trust_level = "trusted"
|
||||
|
||||
#[test]
|
||||
fn test_set_project_trusted_converts_inline_to_explicit() -> anyhow::Result<()> {
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let project_dir = TempDir::new().unwrap();
|
||||
let project_dir = Path::new("/some/path");
|
||||
|
||||
// Seed config.toml with an inline project entry under [projects]
|
||||
let config_path = codex_home.path().join(CONFIG_TOML_FILE);
|
||||
let raw_path = project_dir.path().to_string_lossy();
|
||||
let raw_path = project_dir.to_string_lossy();
|
||||
let path_str = if raw_path.contains('\\') {
|
||||
format!("'{raw_path}'")
|
||||
} else {
|
||||
@@ -1449,13 +1462,12 @@ trust_level = "trusted"
|
||||
{path_str} = {{ trust_level = "untrusted" }}
|
||||
"#
|
||||
);
|
||||
std::fs::create_dir_all(codex_home.path())?;
|
||||
std::fs::write(&config_path, initial)?;
|
||||
let mut doc = initial.parse::<DocumentMut>()?;
|
||||
|
||||
// Run the function; it should convert to explicit tables and set trusted
|
||||
set_project_trusted(codex_home.path(), project_dir.path())?;
|
||||
set_project_trusted_inner(&mut doc, project_dir)?;
|
||||
|
||||
let contents = std::fs::read_to_string(&config_path)?;
|
||||
let contents = doc.to_string();
|
||||
|
||||
// Assert exact output after conversion to explicit table
|
||||
let expected = format!(
|
||||
@@ -1470,5 +1482,37 @@ trust_level = "trusted"
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// No test enforcing the presence of a standalone [projects] header.
|
||||
#[test]
|
||||
fn test_set_project_trusted_migrates_top_level_inline_projects_preserving_entries()
|
||||
-> anyhow::Result<()> {
|
||||
let initial = r#"toplevel = "baz"
|
||||
projects = { "/Users/mbolin/code/codex4" = { trust_level = "trusted", foo = "bar" } , "/Users/mbolin/code/codex3" = { trust_level = "trusted" } }
|
||||
model = "foo""#;
|
||||
let mut doc = initial.parse::<DocumentMut>()?;
|
||||
|
||||
// Approve a new directory
|
||||
let new_project = Path::new("/Users/mbolin/code/codex2");
|
||||
set_project_trusted_inner(&mut doc, new_project)?;
|
||||
|
||||
let contents = doc.to_string();
|
||||
|
||||
// Since we created the [projects] table as part of migration, it is kept implicit.
|
||||
// Expect explicit per-project tables, preserving prior entries and appending the new one.
|
||||
let expected = r#"toplevel = "baz"
|
||||
model = "foo"
|
||||
|
||||
[projects."/Users/mbolin/code/codex4"]
|
||||
trust_level = "trusted"
|
||||
foo = "bar"
|
||||
|
||||
[projects."/Users/mbolin/code/codex3"]
|
||||
trust_level = "trusted"
|
||||
|
||||
[projects."/Users/mbolin/code/codex2"]
|
||||
trust_level = "trusted"
|
||||
"#;
|
||||
assert_eq!(contents, expected);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
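Editor's note: the refactor above separates the pure TOML mutation (`set_project_trusted_inner`) from file I/O, which is why the updated tests can assert on `doc.to_string()` directly. A minimal sketch of the same explicit-table pattern with `toml_edit` follows; the `mark_trusted` helper is illustrative, not part of the crate.

use toml_edit::{DocumentMut, Item, Table};

// Illustrative helper mirroring the explicit-table output asserted above.
fn mark_trusted(doc: &mut DocumentMut, project_key: &str) {
    if !doc.contains_key("projects") {
        let mut projects = Table::new();
        // Implicit tables render no standalone [projects] header.
        projects.set_implicit(true);
        doc["projects"] = Item::Table(projects);
    }
    let projects = doc["projects"].as_table_mut().expect("projects is a table");
    let mut proj = Table::new();
    proj.set_implicit(false); // force an explicit [projects."<key>"] header
    projects[project_key] = Item::Table(proj);
    projects[project_key]["trust_level"] = toml_edit::value("trusted");
}

fn main() {
    let mut doc = DocumentMut::new();
    mark_trusted(&mut doc, "/some/path");
    // Renders:
    // [projects."/some/path"]
    // trust_level = "trusted"
    print!("{doc}");
}
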
582
codex-rs/core/src/config_edit.rs
Normal file
@@ -0,0 +1,582 @@
use crate::config::CONFIG_TOML_FILE;
use anyhow::Result;
use std::path::Path;
use tempfile::NamedTempFile;
use toml_edit::DocumentMut;

pub const CONFIG_KEY_MODEL: &str = "model";
pub const CONFIG_KEY_EFFORT: &str = "model_reasoning_effort";

/// Persist overrides into `config.toml` using explicit key segments per
/// override. This avoids ambiguity with keys that contain dots or spaces.
pub async fn persist_overrides(
codex_home: &Path,
profile: Option<&str>,
overrides: &[(&[&str], &str)],
) -> Result<()> {
let config_path = codex_home.join(CONFIG_TOML_FILE);

let mut doc = match tokio::fs::read_to_string(&config_path).await {
Ok(s) => s.parse::<DocumentMut>()?,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
tokio::fs::create_dir_all(codex_home).await?;
DocumentMut::new()
}
Err(e) => return Err(e.into()),
};

let effective_profile = if let Some(p) = profile {
Some(p.to_owned())
} else {
doc.get("profile")
.and_then(|i| i.as_str())
.map(|s| s.to_string())
};

for (segments, val) in overrides.iter().copied() {
let value = toml_edit::value(val);
if let Some(ref name) = effective_profile {
if segments.first().copied() == Some("profiles") {
apply_toml_edit_override_segments(&mut doc, segments, value);
} else {
let mut seg_buf: Vec<&str> = Vec::with_capacity(2 + segments.len());
seg_buf.push("profiles");
seg_buf.push(name.as_str());
seg_buf.extend_from_slice(segments);
apply_toml_edit_override_segments(&mut doc, &seg_buf, value);
}
} else {
apply_toml_edit_override_segments(&mut doc, segments, value);
}
}

let tmp_file = NamedTempFile::new_in(codex_home)?;
tokio::fs::write(tmp_file.path(), doc.to_string()).await?;
tmp_file.persist(config_path)?;

Ok(())
}

/// Persist overrides where values may be optional. Any entries with `None`
/// values are skipped. If all values are `None`, this becomes a no-op and
/// returns `Ok(())` without touching the file.
pub async fn persist_non_null_overrides(
codex_home: &Path,
profile: Option<&str>,
overrides: &[(&[&str], Option<&str>)],
) -> Result<()> {
let filtered: Vec<(&[&str], &str)> = overrides
.iter()
.filter_map(|(k, v)| v.map(|vv| (*k, vv)))
.collect();

if filtered.is_empty() {
return Ok(());
}

persist_overrides(codex_home, profile, &filtered).await
}

/// Apply a single override onto a `toml_edit` document while preserving
/// existing formatting/comments.
/// The key is expressed as explicit segments to correctly handle keys that
/// contain dots or spaces.
fn apply_toml_edit_override_segments(
doc: &mut DocumentMut,
segments: &[&str],
value: toml_edit::Item,
) {
use toml_edit::Item;

if segments.is_empty() {
return;
}

let mut current = doc.as_table_mut();
for seg in &segments[..segments.len() - 1] {
if !current.contains_key(seg) {
current[*seg] = Item::Table(toml_edit::Table::new());
if let Some(t) = current[*seg].as_table_mut() {
t.set_implicit(true);
}
}

let maybe_item = current.get_mut(seg);
let Some(item) = maybe_item else { return };

if !item.is_table() {
*item = Item::Table(toml_edit::Table::new());
if let Some(t) = item.as_table_mut() {
t.set_implicit(true);
}
}

let Some(tbl) = item.as_table_mut() else {
return;
};
current = tbl;
}

let last = segments[segments.len() - 1];
current[last] = value;
}

#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use tempfile::tempdir;

/// Verifies model and effort are written at top-level when no profile is set.
#[tokio::test]
async fn set_default_model_and_effort_top_level_when_no_profile() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

persist_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], "gpt-5"),
(&[CONFIG_KEY_EFFORT], "high"),
],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"model = "gpt-5"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}

/// Verifies values are written under the active profile when `profile` is set.
#[tokio::test]
async fn set_defaults_update_profile_when_profile_set() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// Seed config with a profile selection but without profiles table
let seed = "profile = \"o3\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");

persist_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], "o3"),
(&[CONFIG_KEY_EFFORT], "minimal"),
],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"profile = "o3"

[profiles.o3]
model = "o3"
model_reasoning_effort = "minimal"
"#;
assert_eq!(contents, expected);
}

/// Verifies profile names with dots/spaces are preserved via explicit segments.
#[tokio::test]
async fn set_defaults_update_profile_with_dot_and_space() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// Seed config with a profile name that contains a dot and a space
let seed = "profile = \"my.team name\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");

persist_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], "o3"),
(&[CONFIG_KEY_EFFORT], "minimal"),
],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"profile = "my.team name"

[profiles."my.team name"]
model = "o3"
model_reasoning_effort = "minimal"
"#;
assert_eq!(contents, expected);
}

/// Verifies explicit profile override writes under that profile even without active profile.
#[tokio::test]
async fn set_defaults_update_when_profile_override_supplied() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// No profile key in config.toml
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), "")
.await
.expect("seed write");

// Persist with an explicit profile override
persist_overrides(
codex_home,
Some("o3"),
&[(&[CONFIG_KEY_MODEL], "o3"), (&[CONFIG_KEY_EFFORT], "high")],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"[profiles.o3]
model = "o3"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}

/// Verifies nested tables are created as needed when applying overrides.
#[tokio::test]
async fn persist_overrides_creates_nested_tables() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

persist_overrides(
codex_home,
None,
&[
(&["a", "b", "c"], "v"),
(&["x"], "y"),
(&["profiles", "p1", CONFIG_KEY_MODEL], "gpt-5"),
],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"x = "y"

[a.b]
c = "v"

[profiles.p1]
model = "gpt-5"
"#;
assert_eq!(contents, expected);
}

/// Verifies a scalar key becomes a table when nested keys are written.
#[tokio::test]
async fn persist_overrides_replaces_scalar_with_table() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
let seed = "foo = \"bar\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");

persist_overrides(codex_home, None, &[(&["foo", "bar", "baz"], "ok")])
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"[foo.bar]
baz = "ok"
"#;
assert_eq!(contents, expected);
}

/// Verifies comments and spacing are preserved when writing under active profile.
#[tokio::test]
async fn set_defaults_preserve_comments() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// Seed a config with comments and spacing we expect to preserve
let seed = r#"# Global comment
# Another line

profile = "o3"

# Profile settings
[profiles.o3]
# keep me
existing = "keep"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");

// Apply defaults; since profile is set, it should write under [profiles.o3]
persist_overrides(
codex_home,
None,
&[(&[CONFIG_KEY_MODEL], "o3"), (&[CONFIG_KEY_EFFORT], "high")],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"# Global comment
# Another line

profile = "o3"

# Profile settings
[profiles.o3]
# keep me
existing = "keep"
model = "o3"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}

/// Verifies comments and spacing are preserved when writing at top level.
#[tokio::test]
async fn set_defaults_preserve_global_comments() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// Seed a config WITHOUT a profile, containing comments and spacing
let seed = r#"# Top-level comments
# should be preserved

existing = "keep"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");

// Since there is no profile, the defaults should be written at top-level
persist_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], "gpt-5"),
(&[CONFIG_KEY_EFFORT], "minimal"),
],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"# Top-level comments
# should be preserved

existing = "keep"
model = "gpt-5"
model_reasoning_effort = "minimal"
"#;
assert_eq!(contents, expected);
}

/// Verifies errors on invalid TOML propagate and file is not clobbered.
#[tokio::test]
async fn persist_overrides_errors_on_parse_failure() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// Write an intentionally invalid TOML file
let invalid = "invalid = [unclosed";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), invalid)
.await
.expect("seed write");

// Attempting to persist should return an error and must not clobber the file.
let res = persist_overrides(codex_home, None, &[(&["x"], "y")]).await;
assert!(res.is_err(), "expected parse error to propagate");

// File should be unchanged
let contents = read_config(codex_home).await;
assert_eq!(contents, invalid);
}

/// Verifies changing model only preserves existing effort at top-level.
#[tokio::test]
async fn changing_only_model_preserves_existing_effort_top_level() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// Seed with an effort value only
let seed = "model_reasoning_effort = \"minimal\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");

// Change only the model
persist_overrides(codex_home, None, &[(&[CONFIG_KEY_MODEL], "o3")])
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"model_reasoning_effort = "minimal"
model = "o3"
"#;
assert_eq!(contents, expected);
}

/// Verifies changing effort only preserves existing model at top-level.
#[tokio::test]
async fn changing_only_effort_preserves_existing_model_top_level() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// Seed with a model value only
let seed = "model = \"gpt-5\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");

// Change only the effort
persist_overrides(codex_home, None, &[(&[CONFIG_KEY_EFFORT], "high")])
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"model = "gpt-5"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}

/// Verifies changing model only preserves existing effort in active profile.
#[tokio::test]
async fn changing_only_model_preserves_effort_in_active_profile() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// Seed with an active profile and an existing effort under that profile
let seed = r#"profile = "p1"

[profiles.p1]
model_reasoning_effort = "low"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");

persist_overrides(codex_home, None, &[(&[CONFIG_KEY_MODEL], "o4-mini")])
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"profile = "p1"

[profiles.p1]
model_reasoning_effort = "low"
model = "o4-mini"
"#;
assert_eq!(contents, expected);
}

/// Verifies changing effort only preserves existing model in a profile override.
#[tokio::test]
async fn changing_only_effort_preserves_model_in_profile_override() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

// No active profile key; we'll target an explicit override
let seed = r#"[profiles.team]
model = "gpt-5"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");

persist_overrides(
codex_home,
Some("team"),
&[(&[CONFIG_KEY_EFFORT], "minimal")],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"[profiles.team]
model = "gpt-5"
model_reasoning_effort = "minimal"
"#;
assert_eq!(contents, expected);
}

/// Verifies `persist_non_null_overrides` skips `None` entries and writes only present values at top-level.
#[tokio::test]
async fn persist_non_null_skips_none_top_level() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

persist_non_null_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], Some("gpt-5")),
(&[CONFIG_KEY_EFFORT], None),
],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = "model = \"gpt-5\"\n";
assert_eq!(contents, expected);
}

/// Verifies no-op behavior when all provided overrides are `None` (no file created/modified).
#[tokio::test]
async fn persist_non_null_noop_when_all_none() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

persist_non_null_overrides(
codex_home,
None,
&[(&["a"], None), (&["profiles", "p", "x"], None)],
)
.await
.expect("persist");

// Should not create config.toml on a pure no-op
assert!(!codex_home.join(CONFIG_TOML_FILE).exists());
}

/// Verifies entries are written under the specified profile and `None` entries are skipped.
#[tokio::test]
async fn persist_non_null_respects_profile_override() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();

persist_non_null_overrides(
codex_home,
Some("team"),
&[
(&[CONFIG_KEY_MODEL], Some("o3")),
(&[CONFIG_KEY_EFFORT], None),
],
)
.await
.expect("persist");

let contents = read_config(codex_home).await;
let expected = r#"[profiles.team]
model = "o3"
"#;
assert_eq!(contents, expected);
}

// Test helper moved to bottom per review guidance.
async fn read_config(codex_home: &Path) -> String {
let p = codex_home.join(CONFIG_TOML_FILE);
tokio::fs::read_to_string(p).await.unwrap_or_default()
}
}
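Editor's note: because overrides are passed to the new module as pre-split key segments, callers never need to escape dotted or spaced profile names. A hedged usage sketch from outside the crate (the `codex_core` crate name is assumed from the workspace layout; the CODEX_HOME path is hypothetical):

use std::path::Path;

use codex_core::config_edit::{persist_overrides, CONFIG_KEY_EFFORT, CONFIG_KEY_MODEL};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Hypothetical CODEX_HOME; persist_overrides creates it if missing.
    let codex_home = Path::new("/tmp/codex-home");

    // With an explicit profile, both keys land under [profiles."my.team name"],
    // even though the profile name contains a dot and a space.
    persist_overrides(
        codex_home,
        Some("my.team name"),
        &[(&[CONFIG_KEY_MODEL], "gpt-5"), (&[CONFIG_KEY_EFFORT], "high")],
    )
    .await
}
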
@@ -13,25 +13,13 @@ use crate::protocol::SessionConfiguredEvent;
use crate::rollout::RolloutRecorder;
use codex_protocol::mcp_protocol::ConversationId;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::RolloutItem;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::RwLock;

#[derive(Debug, Clone, PartialEq)]
pub struct ResumedHistory {
pub conversation_id: ConversationId,
pub history: Vec<ResponseItem>,
pub rollout_path: PathBuf,
}

#[derive(Debug, Clone, PartialEq)]
pub enum InitialHistory {
New,
Resumed(ResumedHistory),
Forked(Vec<ResponseItem>),
}

/// Represents a newly created Codex conversation, including the first event
/// (which is [`EventMsg::SessionConfigured`]).
pub struct NewConversation {
@@ -145,8 +133,15 @@ impl ConversationManager {
self.finalize_spawn(codex, conversation_id).await
}

pub async fn remove_conversation(&self, conversation_id: ConversationId) {
self.conversations.write().await.remove(&conversation_id);
/// Removes the conversation from the manager's internal map, though the
/// conversation is stored as `Arc<CodexConversation>`, it is possible that
/// other references to it exist elsewhere. Returns the conversation if the
/// conversation was found and removed.
pub async fn remove_conversation(
&self,
conversation_id: &ConversationId,
) -> Option<Arc<CodexConversation>> {
self.conversations.write().await.remove(conversation_id)
}

/// Fork an existing conversation by dropping the last `drop_last_messages`
@@ -155,13 +150,13 @@ impl ConversationManager {
/// caller's `config`). The new conversation will have a fresh id.
pub async fn fork_conversation(
&self,
conversation_history: Vec<ResponseItem>,
num_messages_to_drop: usize,
config: Config,
path: PathBuf,
) -> CodexResult<NewConversation> {
// Compute the prefix up to the cut point.
let history =
truncate_after_dropping_last_messages(conversation_history, num_messages_to_drop);
let history = RolloutRecorder::get_rollout_history(&path).await?;
let history = truncate_after_dropping_last_messages(history, num_messages_to_drop);

// Spawn a new conversation with the computed initial history.
let auth_manager = self.auth_manager.clone();
@@ -176,31 +171,37 @@ impl ConversationManager {

/// Return a prefix of `items` obtained by dropping the last `n` user messages
/// and all items that follow them.
fn truncate_after_dropping_last_messages(items: Vec<ResponseItem>, n: usize) -> InitialHistory {
fn truncate_after_dropping_last_messages(history: InitialHistory, n: usize) -> InitialHistory {
if n == 0 {
return InitialHistory::Forked(items);
return InitialHistory::Forked(history.get_rollout_items());
}

// Walk backwards counting only `user` Message items, find cut index.
let mut count = 0usize;
let mut cut_index = 0usize;
for (idx, item) in items.iter().enumerate().rev() {
if let ResponseItem::Message { role, .. } = item
// Work directly on rollout items, and cut the vector at the nth-from-last user message input.
let items: Vec<RolloutItem> = history.get_rollout_items();

// Find indices of user message inputs in rollout order.
let mut user_positions: Vec<usize> = Vec::new();
for (idx, item) in items.iter().enumerate() {
if let RolloutItem::ResponseItem(ResponseItem::Message { role, .. }) = item
&& role == "user"
{
count += 1;
if count == n {
// Cut everything from this user message to the end.
cut_index = idx;
break;
}
user_positions.push(idx);
}
}
if cut_index == 0 {
// No prefix remains after dropping; start a new conversation.

// If fewer than n user messages exist, treat as empty.
if user_positions.len() < n {
return InitialHistory::New;
}

// Cut strictly before the nth-from-last user message (do not keep the nth itself).
let cut_idx = user_positions[user_positions.len() - n];
let rolled: Vec<RolloutItem> = items.into_iter().take(cut_idx).collect();

if rolled.is_empty() {
InitialHistory::New
} else {
InitialHistory::Forked(items.into_iter().take(cut_index).collect())
InitialHistory::Forked(rolled)
}
}

@@ -255,13 +256,30 @@ mod tests {
assistant_msg("a4"),
];

let truncated = truncate_after_dropping_last_messages(items.clone(), 1);
// Wrap as InitialHistory::Forked with response items only.
let initial: Vec<RolloutItem> = items
.iter()
.cloned()
.map(RolloutItem::ResponseItem)
.collect();
let truncated = truncate_after_dropping_last_messages(InitialHistory::Forked(initial), 1);
let got_items = truncated.get_rollout_items();
let expected_items = vec![
RolloutItem::ResponseItem(items[0].clone()),
RolloutItem::ResponseItem(items[1].clone()),
RolloutItem::ResponseItem(items[2].clone()),
];
assert_eq!(
truncated,
InitialHistory::Forked(vec![items[0].clone(), items[1].clone(), items[2].clone(),])
serde_json::to_value(&got_items).unwrap(),
serde_json::to_value(&expected_items).unwrap()
);

let truncated2 = truncate_after_dropping_last_messages(items, 2);
assert_eq!(truncated2, InitialHistory::New);
let initial2: Vec<RolloutItem> = items
.iter()
.cloned()
.map(RolloutItem::ResponseItem)
.collect();
let truncated2 = truncate_after_dropping_last_messages(InitialHistory::Forked(initial2), 2);
assert!(matches!(truncated2, InitialHistory::New));
}
}

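Editor's note: to make the cut semantics above concrete, dropping the last `n` user messages removes the nth-from-last user message and everything after it, and an empty remainder degrades to `InitialHistory::New`. A standalone sketch of the same index arithmetic on plain `(role, text)` pairs, not the real `RolloutItem` types:

// Sketch of the nth-from-last cut; roles stand in for rollout response items.
fn cut_last_user_messages<'a>(items: &[(&'a str, &'a str)], n: usize) -> Vec<(&'a str, &'a str)> {
    if n == 0 {
        return items.to_vec();
    }
    // Positions of user messages in order.
    let user_positions: Vec<usize> = items
        .iter()
        .enumerate()
        .filter(|(_, (role, _))| *role == "user")
        .map(|(idx, _)| idx)
        .collect();
    if user_positions.len() < n {
        return Vec::new(); // fewer than n user messages: nothing remains
    }
    let cut_idx = user_positions[user_positions.len() - n];
    items[..cut_idx].to_vec() // cut strictly before the nth-from-last user message
}

fn main() {
    let items = [("user", "u1"), ("assistant", "a1"), ("user", "u2"), ("assistant", "a2")];
    // Dropping 1 user message keeps everything before "u2".
    assert_eq!(cut_last_user_messages(&items, 1), vec![("user", "u1"), ("assistant", "a1")]);
    // Dropping 2 leaves nothing: the caller maps this to InitialHistory::New.
    assert!(cut_last_user_messages(&items, 2).is_empty());
}
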
@@ -1,38 +1,123 @@
pub const DEFAULT_ORIGINATOR: &str = "codex_cli_rs";
use reqwest::header::HeaderValue;
use std::sync::LazyLock;
use std::sync::Mutex;

pub fn get_codex_user_agent(originator: Option<&str>) -> String {
/// Set this to add a suffix to the User-Agent string.
///
/// It is not ideal that we're using a global singleton for this.
/// This is primarily designed to differentiate MCP clients from each other.
/// Because there can only be one MCP server per process, it should be safe for this to be a global static.
/// However, future users of this should use this with caution as a result.
/// In addition, we want to be confident that this value is used for ALL clients and doing that requires a
/// lot of wiring and it's easy to miss code paths by doing so.
/// See https://github.com/openai/codex/pull/3388/files for an example of what that would look like.
/// Finally, we want to make sure this is set for ALL mcp clients without needing to know a special env var
/// or having to set data that they already specified in the mcp initialize request somewhere else.
///
/// A space is automatically added between the suffix and the rest of the User-Agent string.
/// The full user agent string is returned from the mcp initialize response.
/// Parenthesis will be added by Codex. This should only specify what goes inside of the parenthesis.
pub static USER_AGENT_SUFFIX: LazyLock<Mutex<Option<String>>> = LazyLock::new(|| Mutex::new(None));

pub const CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR: &str = "CODEX_INTERNAL_ORIGINATOR_OVERRIDE";

#[derive(Debug, Clone)]
pub struct Originator {
pub value: String,
pub header_value: HeaderValue,
}

pub static ORIGINATOR: LazyLock<Originator> = LazyLock::new(|| {
let default = "codex_cli_rs";
let value = std::env::var(CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR)
.unwrap_or_else(|_| default.to_string());

match HeaderValue::from_str(&value) {
Ok(header_value) => Originator {
value,
header_value,
},
Err(e) => {
tracing::error!("Unable to turn originator override {value} into header value: {e}");
Originator {
value: default.to_string(),
header_value: HeaderValue::from_static(default),
}
}
}
});

pub fn get_codex_user_agent() -> String {
let build_version = env!("CARGO_PKG_VERSION");
let os_info = os_info::get();
format!(
let prefix = format!(
"{}/{build_version} ({} {}; {}) {}",
originator.unwrap_or(DEFAULT_ORIGINATOR),
ORIGINATOR.value.as_str(),
os_info.os_type(),
os_info.version(),
os_info.architecture().unwrap_or("unknown"),
crate::terminal::user_agent()
)
);
let suffix = USER_AGENT_SUFFIX
.lock()
.ok()
.and_then(|guard| guard.clone());
let suffix = suffix
.as_deref()
.map(str::trim)
.filter(|value| !value.is_empty())
.map_or_else(String::new, |value| format!(" ({value})"));

let candidate = format!("{prefix}{suffix}");
sanitize_user_agent(candidate, &prefix)
}

/// Sanitize the user agent string.
///
/// Invalid characters are replaced with an underscore.
///
/// If the user agent fails to parse, it falls back to fallback and then to ORIGINATOR.
fn sanitize_user_agent(candidate: String, fallback: &str) -> String {
if HeaderValue::from_str(candidate.as_str()).is_ok() {
return candidate;
}

let sanitized: String = candidate
.chars()
.map(|ch| if matches!(ch, ' '..='~') { ch } else { '_' })
.collect();
if !sanitized.is_empty() && HeaderValue::from_str(sanitized.as_str()).is_ok() {
tracing::warn!(
"Sanitized Codex user agent because provided suffix contained invalid header characters"
);
sanitized
} else if HeaderValue::from_str(fallback).is_ok() {
tracing::warn!(
"Falling back to base Codex user agent because provided suffix could not be sanitized"
);
fallback.to_string()
} else {
tracing::warn!(
"Falling back to default Codex originator because base user agent string is invalid"
);
ORIGINATOR.value.clone()
}
}

/// Create a reqwest client with default `originator` and `User-Agent` headers set.
pub fn create_client(originator: &str) -> reqwest::Client {
pub fn create_client() -> reqwest::Client {
use reqwest::header::HeaderMap;
use reqwest::header::HeaderValue;

let mut headers = HeaderMap::new();
let originator_value = HeaderValue::from_str(originator)
.unwrap_or_else(|_| HeaderValue::from_static(DEFAULT_ORIGINATOR));
headers.insert("originator", originator_value);
let ua = get_codex_user_agent(Some(originator));
headers.insert("originator", ORIGINATOR.header_value.clone());
let ua = get_codex_user_agent();

match reqwest::Client::builder()
reqwest::Client::builder()
// Set UA via dedicated helper to avoid header validation pitfalls
.user_agent(ua)
.default_headers(headers)
.build()
{
Ok(client) => client,
Err(_) => reqwest::Client::new(),
}
.unwrap_or_else(|_| reqwest::Client::new())
}

#[cfg(test)]
@@ -41,7 +126,7 @@ mod tests {

#[test]
fn test_get_codex_user_agent() {
let user_agent = get_codex_user_agent(None);
let user_agent = get_codex_user_agent();
assert!(user_agent.starts_with("codex_cli_rs/"));
}

@@ -53,8 +138,7 @@ mod tests {
use wiremock::matchers::method;
use wiremock::matchers::path;

let originator = "test_originator";
let client = create_client(originator);
let client = create_client();

// Spin up a local mock server and capture a request.
let server = MockServer::start().await;
@@ -82,21 +166,43 @@ mod tests {
let originator_header = headers
.get("originator")
.expect("originator header missing");
assert_eq!(originator_header.to_str().unwrap(), originator);
assert_eq!(originator_header.to_str().unwrap(), "codex_cli_rs");

// User-Agent matches the computed Codex UA for that originator
let expected_ua = get_codex_user_agent(Some(originator));
let expected_ua = get_codex_user_agent();
let ua_header = headers
.get("user-agent")
.expect("user-agent header missing");
assert_eq!(ua_header.to_str().unwrap(), expected_ua);
}

#[test]
fn test_invalid_suffix_is_sanitized() {
let prefix = "codex_cli_rs/0.0.0";
let suffix = "bad\rsuffix";

assert_eq!(
sanitize_user_agent(format!("{prefix} ({suffix})"), prefix),
"codex_cli_rs/0.0.0 (bad_suffix)"
);
}

#[test]
fn test_invalid_suffix_is_sanitized2() {
let prefix = "codex_cli_rs/0.0.0";
let suffix = "bad\0suffix";

assert_eq!(
sanitize_user_agent(format!("{prefix} ({suffix})"), prefix),
"codex_cli_rs/0.0.0 (bad_suffix)"
);
}

#[test]
#[cfg(target_os = "macos")]
fn test_macos() {
use regex_lite::Regex;
let user_agent = get_codex_user_agent(None);
let user_agent = get_codex_user_agent();
let re = Regex::new(
r"^codex_cli_rs/\d+\.\d+\.\d+ \(Mac OS \d+\.\d+\.\d+; (x86_64|arm64)\) (\S+)$",
)

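Editor's note: one consequence of the new globals above is that an embedder sets `USER_AGENT_SUFFIX` once and every client built afterwards reports it. A hedged sketch, assuming this module is exported from the `codex_core` crate under the name `default_client`:

fn main() {
    // Set the suffix once at startup (for example, from an MCP initialize
    // request). Codex adds the surrounding parentheses and a leading space.
    if let Ok(mut guard) = codex_core::default_client::USER_AGENT_SUFFIX.lock() {
        *guard = Some("my-mcp-client/1.2.3".to_string());
    }

    // Every UA computed afterwards carries the suffix, e.g.
    // "codex_cli_rs/0.0.0 (Mac OS 14.0.0; arm64) <terminal> (my-mcp-client/1.2.3)".
    let ua = codex_core::default_client::get_codex_user_agent();
    println!("{ua}");
}
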
@@ -26,6 +26,7 @@ pub(crate) struct EnvironmentContext {
pub approval_policy: Option<AskForApproval>,
pub sandbox_mode: Option<SandboxMode>,
pub network_access: Option<NetworkAccess>,
pub writable_roots: Option<Vec<PathBuf>>,
pub shell: Option<Shell>,
}

@@ -57,6 +58,16 @@ impl EnvironmentContext {
}
None => None,
},
writable_roots: match sandbox_policy {
Some(SandboxPolicy::WorkspaceWrite { writable_roots, .. }) => {
if writable_roots.is_empty() {
None
} else {
Some(writable_roots.clone())
}
}
_ => None,
},
shell,
}
}
@@ -72,6 +83,7 @@ impl EnvironmentContext {
/// <cwd>...</cwd>
/// <approval_policy>...</approval_policy>
/// <sandbox_mode>...</sandbox_mode>
/// <writable_roots>...</writable_roots>
/// <network_access>...</network_access>
/// <shell>...</shell>
/// </environment_context>
@@ -94,6 +106,16 @@ impl EnvironmentContext {
" <network_access>{network_access}</network_access>"
));
}
if let Some(writable_roots) = self.writable_roots {
lines.push(" <writable_roots>".to_string());
for writable_root in writable_roots {
lines.push(format!(
" <root>{}</root>",
writable_root.to_string_lossy()
));
}
lines.push(" </writable_roots>".to_string());
}
if let Some(shell) = self.shell
&& let Some(shell_name) = shell.name()
{
@@ -115,3 +137,77 @@ impl From<EnvironmentContext> for ResponseItem {
}
}
}

#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;

fn workspace_write_policy(writable_roots: Vec<&str>, network_access: bool) -> SandboxPolicy {
SandboxPolicy::WorkspaceWrite {
writable_roots: writable_roots.into_iter().map(PathBuf::from).collect(),
network_access,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
}
}

#[test]
fn serialize_workspace_write_environment_context() {
let context = EnvironmentContext::new(
Some(PathBuf::from("/repo")),
Some(AskForApproval::OnRequest),
Some(workspace_write_policy(vec!["/repo", "/tmp"], false)),
None,
);

let expected = r#"<environment_context>
<cwd>/repo</cwd>
<approval_policy>on-request</approval_policy>
<sandbox_mode>workspace-write</sandbox_mode>
<network_access>restricted</network_access>
<writable_roots>
<root>/repo</root>
<root>/tmp</root>
</writable_roots>
</environment_context>"#;

assert_eq!(context.serialize_to_xml(), expected);
}

#[test]
fn serialize_read_only_environment_context() {
let context = EnvironmentContext::new(
None,
Some(AskForApproval::Never),
Some(SandboxPolicy::ReadOnly),
None,
);

let expected = r#"<environment_context>
<approval_policy>never</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
</environment_context>"#;

assert_eq!(context.serialize_to_xml(), expected);
}

#[test]
fn serialize_full_access_environment_context() {
let context = EnvironmentContext::new(
None,
Some(AskForApproval::OnFailure),
Some(SandboxPolicy::DangerFullAccess),
None,
);

let expected = r#"<environment_context>
<approval_policy>on-failure</approval_policy>
<sandbox_mode>danger-full-access</sandbox_mode>
<network_access>enabled</network_access>
</environment_context>"#;

assert_eq!(context.serialize_to_xml(), expected);
}
}

@@ -25,31 +25,56 @@ pub(crate) fn map_response_item_to_event_messages(
return Vec::new();
}

let events: Vec<EventMsg> = content
.iter()
.filter_map(|content_item| match content_item {
ContentItem::OutputText { text } => {
Some(EventMsg::AgentMessage(AgentMessageEvent {
message: text.clone(),
}))
}
let mut events: Vec<EventMsg> = Vec::new();
let mut message_parts: Vec<String> = Vec::new();
let mut images: Vec<String> = Vec::new();
let mut kind: Option<InputMessageKind> = None;

for content_item in content.iter() {
match content_item {
ContentItem::InputText { text } => {
let trimmed = text.trim_start();
let kind = if trimmed.starts_with("<environment_context>") {
Some(InputMessageKind::EnvironmentContext)
} else if trimmed.starts_with("<user_instructions>") {
Some(InputMessageKind::UserInstructions)
} else {
Some(InputMessageKind::Plain)
};
Some(EventMsg::UserMessage(UserMessageEvent {
message: text.clone(),
kind,
}))
if kind.is_none() {
let trimmed = text.trim_start();
kind = if trimmed.starts_with("<environment_context>") {
Some(InputMessageKind::EnvironmentContext)
} else if trimmed.starts_with("<user_instructions>") {
Some(InputMessageKind::UserInstructions)
} else {
Some(InputMessageKind::Plain)
};
}
message_parts.push(text.clone());
}
_ => None,
})
.collect();
ContentItem::InputImage { image_url } => {
images.push(image_url.clone());
}
ContentItem::OutputText { text } => {
events.push(EventMsg::AgentMessage(AgentMessageEvent {
message: text.clone(),
}));
}
}
}

if !message_parts.is_empty() || !images.is_empty() {
let message = if message_parts.is_empty() {
String::new()
} else {
message_parts.join("")
};
let images = if images.is_empty() {
None
} else {
Some(images)
};

events.push(EventMsg::UserMessage(UserMessageEvent {
message,
kind,
images,
}));
}

events
}

@@ -96,3 +121,47 @@ pub(crate) fn map_response_item_to_event_messages(
| ResponseItem::Other => Vec::new(),
}
}

#[cfg(test)]
mod tests {
use super::map_response_item_to_event_messages;
use crate::protocol::EventMsg;
use crate::protocol::InputMessageKind;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use pretty_assertions::assert_eq;

#[test]
fn maps_user_message_with_text_and_two_images() {
let img1 = "https://example.com/one.png".to_string();
let img2 = "https://example.com/two.jpg".to_string();

let item = ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![
ContentItem::InputText {
text: "Hello world".to_string(),
},
ContentItem::InputImage {
image_url: img1.clone(),
},
ContentItem::InputImage {
image_url: img2.clone(),
},
],
};

let events = map_response_item_to_event_messages(&item, false);
assert_eq!(events.len(), 1, "expected a single user message event");

match &events[0] {
EventMsg::UserMessage(user) => {
assert_eq!(user.message, "Hello world");
assert!(matches!(user.kind, Some(InputMessageKind::Plain)));
assert_eq!(user.images, Some(vec![img1.clone(), img2.clone()]));
}
other => panic!("expected UserMessage, got {other:?}"),
}
}
}

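Editor's note: in the rewritten loop the kind is set by the first `InputText` part only; later parts are appended to the message but never reclassify it. The tag check itself is a plain prefix test, sketched standalone here (the `Kind` enum is illustrative; the real type is `InputMessageKind`):

#[derive(Debug, PartialEq)]
enum Kind { EnvironmentContext, UserInstructions, Plain }

// Sketch of the prefix-based classification used above.
fn classify(text: &str) -> Kind {
    let trimmed = text.trim_start();
    if trimmed.starts_with("<environment_context>") {
        Kind::EnvironmentContext
    } else if trimmed.starts_with("<user_instructions>") {
        Kind::UserInstructions
    } else {
        Kind::Plain
    }
}

fn main() {
    assert_eq!(classify("  <environment_context>..."), Kind::EnvironmentContext);
    assert_eq!(classify("<user_instructions>..."), Kind::UserInstructions);
    assert_eq!(classify("Hello world"), Kind::Plain);
}
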
@@ -24,6 +24,9 @@ pub(crate) struct ExecCommandSession {

/// JoinHandle for the child wait task.
wait_handle: StdMutex<Option<JoinHandle<()>>>,

/// Tracks whether the underlying process has exited.
exit_status: std::sync::Arc<std::sync::atomic::AtomicBool>,
}

impl ExecCommandSession {
@@ -34,6 +37,7 @@ impl ExecCommandSession {
reader_handle: JoinHandle<()>,
writer_handle: JoinHandle<()>,
wait_handle: JoinHandle<()>,
exit_status: std::sync::Arc<std::sync::atomic::AtomicBool>,
) -> Self {
Self {
writer_tx,
@@ -42,6 +46,7 @@ impl ExecCommandSession {
reader_handle: StdMutex::new(Some(reader_handle)),
writer_handle: StdMutex::new(Some(writer_handle)),
wait_handle: StdMutex::new(Some(wait_handle)),
exit_status,
}
}

@@ -52,6 +57,10 @@ impl ExecCommandSession {
pub(crate) fn output_receiver(&self) -> broadcast::Receiver<Vec<u8>> {
self.output_tx.subscribe()
}

pub(crate) fn has_exited(&self) -> bool {
self.exit_status.load(std::sync::atomic::Ordering::SeqCst)
}
}

impl Drop for ExecCommandSession {

@@ -6,6 +6,7 @@ mod session_manager;

pub use exec_command_params::ExecCommandParams;
pub use exec_command_params::WriteStdinParams;
pub(crate) use exec_command_session::ExecCommandSession;
pub use responses_api::EXEC_COMMAND_TOOL_NAME;
pub use responses_api::WRITE_STDIN_TOOL_NAME;
pub use responses_api::create_exec_command_tool_for_responses_api;

@@ -3,6 +3,7 @@ use std::io::ErrorKind;
use std::io::Read;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicU32;

use portable_pty::CommandBuilder;
@@ -19,6 +20,7 @@ use crate::exec_command::exec_command_params::ExecCommandParams;
use crate::exec_command::exec_command_params::WriteStdinParams;
use crate::exec_command::exec_command_session::ExecCommandSession;
use crate::exec_command::session_id::SessionId;
use crate::truncate::truncate_middle;
use codex_protocol::models::FunctionCallOutputPayload;

#[derive(Debug, Default)]
@@ -327,11 +329,14 @@ async fn create_exec_command_session(

// Keep the child alive until it exits, then signal exit code.
let (exit_tx, exit_rx) = oneshot::channel::<i32>();
let exit_status = Arc::new(AtomicBool::new(false));
let wait_exit_status = exit_status.clone();
let wait_handle = tokio::task::spawn_blocking(move || {
let code = match child.wait() {
Ok(status) => status.exit_code() as i32,
Err(_) => -1,
};
wait_exit_status.store(true, std::sync::atomic::Ordering::SeqCst);
let _ = exit_tx.send(code);
});

@@ -343,116 +348,11 @@ async fn create_exec_command_session(
reader_handle,
writer_handle,
wait_handle,
exit_status,
);
Ok((session, exit_rx))
}

/// Truncate the middle of a UTF-8 string to at most `max_bytes` bytes,
/// preserving the beginning and the end. Returns the possibly truncated
/// string and `Some(original_token_count)` (estimated at 4 bytes/token)
/// if truncation occurred; otherwise returns the original string and `None`.
fn truncate_middle(s: &str, max_bytes: usize) -> (String, Option<u64>) {
// No truncation needed
if s.len() <= max_bytes {
return (s.to_string(), None);
}
let est_tokens = (s.len() as u64).div_ceil(4);
if max_bytes == 0 {
// Cannot keep any content; still return a full marker (never truncated).
return (format!("…{est_tokens} tokens truncated…"), Some(est_tokens));
}

// Helper to truncate a string to a given byte length on a char boundary.
fn truncate_on_boundary(input: &str, max_len: usize) -> &str {
if input.len() <= max_len {
return input;
}
let mut end = max_len;
while end > 0 && !input.is_char_boundary(end) {
end -= 1;
}
&input[..end]
}

// Given a left/right budget, prefer newline boundaries; otherwise fall back
// to UTF-8 char boundaries.
fn pick_prefix_end(s: &str, left_budget: usize) -> usize {
if let Some(head) = s.get(..left_budget)
&& let Some(i) = head.rfind('\n')
{
return i + 1; // keep the newline so suffix starts on a fresh line
}
truncate_on_boundary(s, left_budget).len()
}

fn pick_suffix_start(s: &str, right_budget: usize) -> usize {
let start_tail = s.len().saturating_sub(right_budget);
if let Some(tail) = s.get(start_tail..)
&& let Some(i) = tail.find('\n')
{
return start_tail + i + 1; // start after newline
}
// Fall back to a char boundary at or after start_tail.
let mut idx = start_tail.min(s.len());
while idx < s.len() && !s.is_char_boundary(idx) {
idx += 1;
}
idx
}

// Refine marker length and budgets until stable. Marker is never truncated.
let mut guess_tokens = est_tokens; // worst-case: everything truncated
for _ in 0..4 {
let marker = format!("…{guess_tokens} tokens truncated…");
let marker_len = marker.len();
let keep_budget = max_bytes.saturating_sub(marker_len);
if keep_budget == 0 {
// No room for any content within the cap; return a full, untruncated marker
// that reflects the entire truncated content.
return (format!("…{est_tokens} tokens truncated…"), Some(est_tokens));
}

let left_budget = keep_budget / 2;
let right_budget = keep_budget - left_budget;
let prefix_end = pick_prefix_end(s, left_budget);
let mut suffix_start = pick_suffix_start(s, right_budget);
if suffix_start < prefix_end {
suffix_start = prefix_end;
}
let kept_content_bytes = prefix_end + (s.len() - suffix_start);
let truncated_content_bytes = s.len().saturating_sub(kept_content_bytes);
let new_tokens = (truncated_content_bytes as u64).div_ceil(4);
if new_tokens == guess_tokens {
let mut out = String::with_capacity(marker_len + kept_content_bytes + 1);
out.push_str(&s[..prefix_end]);
out.push_str(&marker);
// Place marker on its own line for symmetry when we keep line boundaries.
out.push('\n');
out.push_str(&s[suffix_start..]);
return (out, Some(est_tokens));
}
guess_tokens = new_tokens;
}

// Fallback: use last guess to build output.
let marker = format!("…{guess_tokens} tokens truncated…");
let marker_len = marker.len();
let keep_budget = max_bytes.saturating_sub(marker_len);
if keep_budget == 0 {
return (format!("…{est_tokens} tokens truncated…"), Some(est_tokens));
}
let left_budget = keep_budget / 2;
let right_budget = keep_budget - left_budget;
let prefix_end = pick_prefix_end(s, left_budget);
let suffix_start = pick_suffix_start(s, right_budget);
let mut out = String::with_capacity(marker_len + prefix_end + (s.len() - suffix_start) + 1);
out.push_str(&s[..prefix_end]);
out.push_str(&marker);
out.push('\n');
out.push_str(&s[suffix_start..]);
(out, Some(est_tokens))
}

#[cfg(test)]
mod tests {
use super::*;
@@ -616,50 +516,4 @@ Output:
abc"#;
assert_eq!(expected, text);
}

#[test]
fn truncate_middle_no_newlines_fallback() {
// A long string with no newlines that exceeds the cap.
let s = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
let max_bytes = 16; // force truncation
let (out, original) = truncate_middle(s, max_bytes);
// For very small caps, we return the full, untruncated marker,
// even if it exceeds the cap.
assert_eq!(out, "…16 tokens truncated…");
// Original string length is 62 bytes => ceil(62/4) = 16 tokens.
assert_eq!(original, Some(16));
}

#[test]
fn truncate_middle_prefers_newline_boundaries() {
// Build a multi-line string of 20 numbered lines (each "NNN\n").
let mut s = String::new();
for i in 1..=20 {
s.push_str(&format!("{i:03}\n"));
}
// Total length: 20 lines * 4 bytes per line = 80 bytes.
assert_eq!(s.len(), 80);

// Choose a cap that forces truncation while leaving room for
// a few lines on each side after accounting for the marker.
let max_bytes = 64;
// Expect exact output: first 4 lines, marker, last 4 lines, and correct token estimate (80/4 = 20).
assert_eq!(
truncate_middle(&s, max_bytes),
(
r#"001
002
003
004
…12 tokens truncated…
017
018
019
020
"#
.to_string(),
Some(20)
)
);
}
}

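Editor's note: the large deletion above is a move, not a removal — the new `use crate::truncate::truncate_middle;` import at the top of the hunk points at the shared module that now owns the helper. A hedged, crate-internal sketch of its contract, assuming the signature is unchanged (the `truncate` module is private, so this only compiles inside codex-core):

#[cfg(test)]
mod truncate_contract_sketch {
    use crate::truncate::truncate_middle;

    #[test]
    fn keeps_head_and_tail_within_cap() {
        let mut s = String::new();
        for i in 1..=20 {
            s.push_str(&format!("{i:03}\n")); // 20 lines x 4 bytes = 80 bytes
        }
        // Head and tail lines survive; the middle is replaced by an
        // "…N tokens truncated…" marker (tokens estimated at 4 bytes each).
        let (out, original_tokens) = truncate_middle(&s, 64);
        assert!(out.starts_with("001\n"));
        assert!(out.ends_with("020\n"));
        assert!(out.contains("tokens truncated"));
        assert_eq!(original_tokens, Some(20)); // ceil(80 / 4)
    }
}
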
@@ -3,6 +3,7 @@ use std::path::Path;
use std::path::PathBuf;

use codex_protocol::mcp_protocol::GitSha;
use codex_protocol::protocol::GitInfo;
use futures::future::join_all;
use serde::Deserialize;
use serde::Serialize;
@@ -43,19 +44,6 @@ pub fn get_git_repo_root(base_dir: &Path) -> Option<PathBuf> {
/// Timeout for git commands to prevent freezing on large repositories
const GIT_COMMAND_TIMEOUT: TokioDuration = TokioDuration::from_secs(5);

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct GitInfo {
    /// Current commit hash (SHA)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub commit_hash: Option<String>,
    /// Current branch name
    #[serde(skip_serializing_if = "Option::is_none")]
    pub branch: Option<String>,
    /// Repository URL (if available from remote)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub repository_url: Option<String>,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct GitDiffToRemote {
    pub sha: GitSha,
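An aside on the `skip_serializing_if` attributes in the `GitInfo` struct being removed above (the struct now lives in codex_protocol, but the serde behavior is unchanged): fields that are `None` are omitted from the JSON output entirely rather than serialized as `null`. A minimal self-contained sketch, assuming serde with the derive feature plus serde_json, with a pared-down stand-in struct for illustration:

use serde::Serialize;

#[derive(Serialize)]
struct GitInfo {
    #[serde(skip_serializing_if = "Option::is_none")]
    commit_hash: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    branch: Option<String>,
}

fn main() {
    let info = GitInfo {
        commit_hash: Some("abc123".to_string()),
        branch: None,
    };
    // Prints {"commit_hash":"abc123"}: the None field is dropped, not null.
    println!("{}", serde_json::to_string(&info).unwrap());
}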
@@ -16,6 +16,7 @@ mod codex_conversation;
pub mod token_data;
pub use codex_conversation::CodexConversation;
pub mod config;
pub mod config_edit;
pub mod config_profile;
pub mod config_types;
mod conversation_history;
@@ -34,6 +35,8 @@ mod mcp_tool_call;
mod message_history;
mod model_provider_info;
pub mod parse_command;
mod truncate;
mod unified_exec;
mod user_instructions;
pub use model_provider_info::BUILT_IN_OSS_MODEL_PROVIDER_ID;
pub use model_provider_info::ModelProviderInfo;
@@ -42,6 +45,7 @@ pub use model_provider_info::built_in_model_providers;
pub use model_provider_info::create_oss_provider_with_base_url;
mod conversation_manager;
mod event_mapping;
pub use codex_protocol::protocol::InitialHistory;
pub use conversation_manager::ConversationManager;
pub use conversation_manager::NewConversation;
// Re-export common auth types for workspace consumers
@@ -61,7 +65,9 @@ pub mod spawn;
pub mod terminal;
mod tool_apply_patch;
pub mod turn_diff_tracker;
pub use rollout::ARCHIVED_SESSIONS_SUBDIR;
pub use rollout::RolloutRecorder;
pub use rollout::SESSIONS_SUBDIR;
pub use rollout::SessionMeta;
pub use rollout::list::ConversationItem;
pub use rollout::list::ConversationsPage;
@@ -163,6 +163,10 @@ impl McpConnectionManager {
                name: "codex-mcp-client".to_owned(),
                version: env!("CARGO_PKG_VERSION").to_owned(),
                title: Some("Codex".into()),
                // This field is used by Codex when it is an MCP
                // server: it should not be used when Codex is
                // an MCP client.
                user_agent: None,
            },
            protocol_version: mcp_types::MCP_SCHEMA_VERSION.to_owned(),
        };
@@ -103,7 +103,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
        slug, "gpt-4.1",
        needs_special_apply_patch_instructions: true,
    )
} else if slug.starts_with("gpt-oss") {
} else if slug.starts_with("gpt-oss") || slug.starts_with("openai/gpt-oss") {
    model_family!(slug, "gpt-oss", apply_patch_tool_type: Some(ApplyPatchToolType::Function))
} else if slug.starts_with("gpt-4o") {
    simple_model_family!(slug, "gpt-4o")
@@ -8,7 +8,6 @@ use std::collections::HashMap;
use crate::model_family::ModelFamily;
use crate::plan_tool::PLAN_TOOL;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::tool_apply_patch::ApplyPatchToolType;
use crate::tool_apply_patch::create_apply_patch_freeform_tool;
use crate::tool_apply_patch::create_apply_patch_json_tool;
@@ -58,7 +57,7 @@ pub(crate) enum OpenAiTool {
#[derive(Debug, Clone)]
pub enum ConfigShellToolType {
    DefaultShell,
    ShellWithRequest { sandbox_policy: SandboxPolicy },
    ShellWithRequest,
    LocalShell,
    StreamableShell,
}
@@ -70,17 +69,18 @@ pub(crate) struct ToolsConfig {
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,
    pub web_search_request: bool,
    pub include_view_image_tool: bool,
    pub experimental_unified_exec_tool: bool,
}

pub(crate) struct ToolsConfigParams<'a> {
    pub(crate) model_family: &'a ModelFamily,
    pub(crate) approval_policy: AskForApproval,
    pub(crate) sandbox_policy: SandboxPolicy,
    pub(crate) include_plan_tool: bool,
    pub(crate) include_apply_patch_tool: bool,
    pub(crate) include_web_search_request: bool,
    pub(crate) use_streamable_shell_tool: bool,
    pub(crate) include_view_image_tool: bool,
    pub(crate) experimental_unified_exec_tool: bool,
}

impl ToolsConfig {
@@ -88,12 +88,12 @@ impl ToolsConfig {
        let ToolsConfigParams {
            model_family,
            approval_policy,
            sandbox_policy,
            include_plan_tool,
            include_apply_patch_tool,
            include_web_search_request,
            use_streamable_shell_tool,
            include_view_image_tool,
            experimental_unified_exec_tool,
        } = params;
        let mut shell_type = if *use_streamable_shell_tool {
            ConfigShellToolType::StreamableShell
@@ -103,9 +103,7 @@ impl ToolsConfig {
            ConfigShellToolType::DefaultShell
        };
        if matches!(approval_policy, AskForApproval::OnRequest) && !use_streamable_shell_tool {
            shell_type = ConfigShellToolType::ShellWithRequest {
                sandbox_policy: sandbox_policy.clone(),
            }
            shell_type = ConfigShellToolType::ShellWithRequest;
        }

        let apply_patch_tool_type = match model_family.apply_patch_tool_type {
@@ -126,6 +124,7 @@ impl ToolsConfig {
            apply_patch_tool_type,
            web_search_request: *include_web_search_request,
            include_view_image_tool: *include_view_image_tool,
            experimental_unified_exec_tool: *experimental_unified_exec_tool,
        }
    }
}
@@ -200,7 +199,56 @@ fn create_shell_tool() -> OpenAiTool {
    })
}

fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
fn create_unified_exec_tool() -> OpenAiTool {
    let mut properties = BTreeMap::new();
    properties.insert(
        "input".to_string(),
        JsonSchema::Array {
            items: Box::new(JsonSchema::String { description: None }),
            description: Some(
                "When no session_id is provided, treat the array as the command and arguments \
                 to launch. When session_id is set, concatenate the strings (in order) and write \
                 them to the session's stdin."
                    .to_string(),
            ),
        },
    );
    properties.insert(
        "session_id".to_string(),
        JsonSchema::String {
            description: Some(
                "Identifier for an existing interactive session. If omitted, a new command \
                 is spawned."
                    .to_string(),
            ),
        },
    );
    properties.insert(
        "timeout_ms".to_string(),
        JsonSchema::Number {
            description: Some(
                "Maximum time in milliseconds to wait for output after writing the input."
                    .to_string(),
            ),
        },
    );

    OpenAiTool::Function(ResponsesApiTool {
        name: "unified_exec".to_string(),
        description:
            "Runs a command in a PTY. Provide a session_id to reuse an existing interactive session.".to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["input".to_string()]),
            additional_properties: Some(false),
        },
    })
}

const SHELL_TOOL_DESCRIPTION: &str = r#"Runs a shell command and returns its output"#;
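To make the unified_exec schema above concrete, here is the shape of the arguments a model would send, sketched with serde_json's json! macro (values are illustrative; the session id is whatever an earlier call returned, and the schema declares it as a string):

use serde_json::json;

fn main() {
    // First call: no session_id, so the array is the command line to spawn.
    let start = json!({ "input": ["python3", "-i"], "timeout_ms": 2000 });
    // Follow-up call: session_id set, so the strings are written to the
    // session's stdin in order.
    let follow_up = json!({ "input": ["print(1 + 1)\n"], "session_id": "42", "timeout_ms": 1000 });
    println!("{start}\n{follow_up}");
}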
fn create_shell_tool_for_sandbox() -> OpenAiTool {
    let mut properties = BTreeMap::new();
    properties.insert(
        "command".to_string(),
@@ -212,82 +260,29 @@ fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
    properties.insert(
        "workdir".to_string(),
        JsonSchema::String {
            description: Some("The working directory to execute the command in".to_string()),
            description: Some("Working directory to execute the command in.".to_string()),
        },
    );
    properties.insert(
        "timeout_ms".to_string(),
        JsonSchema::Number {
            description: Some("The timeout for the command in milliseconds".to_string()),
            description: Some("Timeout for the command in milliseconds.".to_string()),
        },
    );

    if matches!(sandbox_policy, SandboxPolicy::WorkspaceWrite { .. }) {
        properties.insert(
    properties.insert(
        "with_escalated_permissions".to_string(),
        JsonSchema::Boolean {
            description: Some("Whether to request escalated permissions. Set to true if command needs to be run without sandbox restrictions".to_string()),
            description: Some("Request escalated permissions, only for when a command would otherwise be blocked by the sandbox.".to_string()),
        },
    );
        properties.insert(
    properties.insert(
        "justification".to_string(),
        JsonSchema::String {
            description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
            description: Some("Required if and only if with_escalated_permissions == true. One sentence explaining why escalation is needed (e.g., write outside CWD, network fetch, git commit).".to_string()),
        },
    );
    }

    let description = match sandbox_policy {
        SandboxPolicy::WorkspaceWrite {
            network_access,
            writable_roots,
            ..
        } => {
            format!(
                r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
- Types of actions that require escalated privileges:
- Writing files other than those in the writable roots
- writable roots:
{}{}
- Examples of commands that require escalated privileges:
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#,
                writable_roots.iter().map(|wr| format!(" - {}", wr.to_string_lossy())).collect::<Vec<String>>().join("\n"),
                if !network_access {
                    "\n - Commands that require network access\n"
                } else {
                    ""
                }
            )
        }
        SandboxPolicy::DangerFullAccess => {
            "Runs a shell command and returns its output.".to_string()
        }
        SandboxPolicy::ReadOnly => {
            r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
- Types of actions that require escalated privileges:
- Writing files
- Applying patches
- Examples of commands that require escalated privileges:
- apply_patch
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#.to_string()
        }
    };
    let description = SHELL_TOOL_DESCRIPTION.to_string();

    OpenAiTool::Function(ResponsesApiTool {
        name: "shell".to_string(),
@@ -300,7 +295,6 @@ The shell tool is used to execute shell commands.
        },
    })
}
fn create_view_image_tool() -> OpenAiTool {
    // Support only local filesystem path.
    let mut properties = BTreeMap::new();
@@ -534,23 +528,27 @@ pub(crate) fn get_openai_tools(
) -> Vec<OpenAiTool> {
    let mut tools: Vec<OpenAiTool> = Vec::new();

    match &config.shell_type {
        ConfigShellToolType::DefaultShell => {
            tools.push(create_shell_tool());
        }
        ConfigShellToolType::ShellWithRequest { sandbox_policy } => {
            tools.push(create_shell_tool_for_sandbox(sandbox_policy));
        }
        ConfigShellToolType::LocalShell => {
            tools.push(OpenAiTool::LocalShell {});
        }
        ConfigShellToolType::StreamableShell => {
            tools.push(OpenAiTool::Function(
                crate::exec_command::create_exec_command_tool_for_responses_api(),
            ));
            tools.push(OpenAiTool::Function(
                crate::exec_command::create_write_stdin_tool_for_responses_api(),
            ));
    if config.experimental_unified_exec_tool {
        tools.push(create_unified_exec_tool());
    } else {
        match &config.shell_type {
            ConfigShellToolType::DefaultShell => {
                tools.push(create_shell_tool());
            }
            ConfigShellToolType::ShellWithRequest => {
                tools.push(create_shell_tool_for_sandbox());
            }
            ConfigShellToolType::LocalShell => {
                tools.push(OpenAiTool::LocalShell {});
            }
            ConfigShellToolType::StreamableShell => {
                tools.push(OpenAiTool::Function(
                    crate::exec_command::create_exec_command_tool_for_responses_api(),
                ));
                tools.push(OpenAiTool::Function(
                    crate::exec_command::create_write_stdin_tool_for_responses_api(),
                ));
            }
        }
    }
@@ -577,10 +575,8 @@ pub(crate) fn get_openai_tools(
    if config.include_view_image_tool {
        tools.push(create_view_image_tool());
    }

    if let Some(mcp_tools) = mcp_tools {
        // Ensure deterministic ordering to maximize prompt cache hits.
        // HashMap iteration order is non-deterministic, so sort by fully-qualified tool name.
        let mut entries: Vec<(String, mcp_types::Tool)> = mcp_tools.into_iter().collect();
        entries.sort_by(|a, b| a.0.cmp(&b.0));
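The determinism comment above is easy to demonstrate in isolation: HashMap iteration order is unspecified, so two identical runs can emit tools in different orders (and therefore produce different prompts) unless the entries are sorted first. A tiny self-contained sketch of the same collect-then-sort pattern, with plain values standing in for the MCP tool type:

use std::collections::HashMap;

fn main() {
    let tools: HashMap<String, u32> =
        HashMap::from([("b/search".to_string(), 1), ("a/fetch".to_string(), 2)]);
    // Collect and sort by fully-qualified name for a stable, cache-friendly order.
    let mut entries: Vec<(String, u32)> = tools.into_iter().collect();
    entries.sort_by(|a, b| a.0.cmp(&b.0));
    assert_eq!(entries[0].0, "a/fetch");
}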
@@ -636,18 +632,18 @@ mod tests {
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: true,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
            include_view_image_tool: true,
            experimental_unified_exec_tool: true,
        });
        let tools = get_openai_tools(&config, Some(HashMap::new()));

        assert_eq_tool_names(
            &tools,
            &["local_shell", "update_plan", "web_search", "view_image"],
            &["unified_exec", "update_plan", "web_search", "view_image"],
        );
    }
@@ -657,18 +653,18 @@ mod tests {
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: true,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
            include_view_image_tool: true,
            experimental_unified_exec_tool: true,
        });
        let tools = get_openai_tools(&config, Some(HashMap::new()));

        assert_eq_tool_names(
            &tools,
            &["shell", "update_plan", "web_search", "view_image"],
            &["unified_exec", "update_plan", "web_search", "view_image"],
        );
    }
@@ -678,12 +674,12 @@ mod tests {
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
            include_view_image_tool: true,
            experimental_unified_exec_tool: true,
        });
        let tools = get_openai_tools(
            &config,
@@ -726,7 +722,7 @@ mod tests {
        assert_eq_tool_names(
            &tools,
            &[
                "shell",
                "unified_exec",
                "web_search",
                "view_image",
                "test_server/do_something_cool",
@@ -783,12 +779,12 @@ mod tests {
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: false,
            use_streamable_shell_tool: false,
            include_view_image_tool: true,
            experimental_unified_exec_tool: true,
        });

        // Intentionally construct a map with keys that would sort alphabetically.
@@ -841,11 +837,11 @@ mod tests {
        ]);

        let tools = get_openai_tools(&config, Some(tools_map));
        // Expect shell first, followed by MCP tools sorted by fully-qualified name.
        // Expect unified_exec first, followed by MCP tools sorted by fully-qualified name.
        assert_eq_tool_names(
            &tools,
            &[
                "shell",
                "unified_exec",
                "view_image",
                "test_server/cool",
                "test_server/do",
@@ -860,12 +856,12 @@ mod tests {
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
            include_view_image_tool: true,
            experimental_unified_exec_tool: true,
        });

        let tools = get_openai_tools(
@@ -893,7 +889,7 @@ mod tests {

        assert_eq_tool_names(
            &tools,
            &["shell", "web_search", "view_image", "dash/search"],
            &["unified_exec", "web_search", "view_image", "dash/search"],
        );

        assert_eq!(
@@ -922,12 +918,12 @@ mod tests {
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
            include_view_image_tool: true,
            experimental_unified_exec_tool: true,
        });

        let tools = get_openai_tools(
@@ -953,7 +949,7 @@ mod tests {

        assert_eq_tool_names(
            &tools,
            &["shell", "web_search", "view_image", "dash/paginate"],
            &["unified_exec", "web_search", "view_image", "dash/paginate"],
        );
        assert_eq!(
            tools[3],
@@ -979,12 +975,12 @@ mod tests {
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
            include_view_image_tool: true,
            experimental_unified_exec_tool: true,
        });

        let tools = get_openai_tools(
@@ -1008,7 +1004,10 @@ mod tests {
            )])),
        );

        assert_eq_tool_names(&tools, &["shell", "web_search", "view_image", "dash/tags"]);
        assert_eq_tool_names(
            &tools,
            &["unified_exec", "web_search", "view_image", "dash/tags"],
        );
        assert_eq!(
            tools[3],
            OpenAiTool::Function(ResponsesApiTool {
@@ -1036,12 +1035,12 @@ mod tests {
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
            include_view_image_tool: true,
            experimental_unified_exec_tool: true,
        });

        let tools = get_openai_tools(
@@ -1065,7 +1064,10 @@ mod tests {
            )])),
        );

        assert_eq_tool_names(&tools, &["shell", "web_search", "view_image", "dash/value"]);
        assert_eq_tool_names(
            &tools,
            &["unified_exec", "web_search", "view_image", "dash/value"],
        );
        assert_eq!(
            tools[3],
            OpenAiTool::Function(ResponsesApiTool {
@@ -1086,13 +1088,7 @@ mod tests {

    #[test]
    fn test_shell_tool_for_sandbox_workspace_write() {
        let sandbox_policy = SandboxPolicy::WorkspaceWrite {
            writable_roots: vec!["workspace".into()],
            network_access: false,
            exclude_tmpdir_env_var: false,
            exclude_slash_tmp: false,
        };
        let tool = super::create_shell_tool_for_sandbox(&sandbox_policy);
        let tool = super::create_shell_tool_for_sandbox();
        let OpenAiTool::Function(ResponsesApiTool {
            description, name, ..
        }) = &tool
@@ -1101,29 +1097,13 @@ mod tests {
        };
        assert_eq!(name, "shell");

        let expected = r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
- Types of actions that require escalated privileges:
- Writing files other than those in the writable roots
- writable roots:
- workspace
- Commands that require network access

- Examples of commands that require escalated privileges:
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#;
        let expected = super::SHELL_TOOL_DESCRIPTION;
        assert_eq!(description, expected);
    }

    #[test]
    fn test_shell_tool_for_sandbox_readonly() {
        let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::ReadOnly);
        let tool = super::create_shell_tool_for_sandbox();
        let OpenAiTool::Function(ResponsesApiTool {
            description, name, ..
        }) = &tool
@@ -1132,27 +1112,13 @@ The shell tool is used to execute shell commands.
        };
        assert_eq!(name, "shell");

        let expected = r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
- Types of actions that require escalated privileges:
- Writing files
- Applying patches
- Examples of commands that require escalated privileges:
- apply_patch
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#;
        let expected = super::SHELL_TOOL_DESCRIPTION;
        assert_eq!(description, expected);
    }

    #[test]
    fn test_shell_tool_for_sandbox_danger_full_access() {
        let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::DangerFullAccess);
        let tool = super::create_shell_tool_for_sandbox();
        let OpenAiTool::Function(ResponsesApiTool {
            description, name, ..
        }) = &tool
@@ -1161,6 +1127,7 @@ The shell tool is used to execute shell commands.
        };
        assert_eq!(name, "shell");

        assert_eq!(description, "Runs a shell command and returns its output.");
        let expected = super::SHELL_TOOL_DESCRIPTION;
        assert_eq!(description, expected);
    }
}
@@ -10,6 +10,9 @@ use time::macros::format_description;
use uuid::Uuid;

use super::SESSIONS_SUBDIR;
use crate::protocol::EventMsg;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;

/// Returned page of conversation summaries.
#[derive(Debug, Default, PartialEq)]
@@ -34,7 +37,7 @@ pub struct ConversationItem {
}

/// Hard cap to bound worst‑case work per request.
const MAX_SCAN_FILES: usize = 10_000;
const MAX_SCAN_FILES: usize = 100;
const HEAD_RECORD_LIMIT: usize = 10;

/// Pagination cursor identifying a file by timestamp and UUID.
@@ -167,10 +170,16 @@ async fn traverse_directories_for_paths(
                if items.len() == page_size {
                    break 'outer;
                }
                let head = read_first_jsonl_records(&path, HEAD_RECORD_LIMIT)
                    .await
                    .unwrap_or_default();
                items.push(ConversationItem { path, head });
                // Read head and simultaneously detect message events within the same
                // first N JSONL records to avoid a second file read.
                let (head, saw_session_meta, saw_user_event) =
                    read_head_and_flags(&path, HEAD_RECORD_LIMIT)
                        .await
                        .unwrap_or((Vec::new(), false, false));
                // Apply filters: must have session meta and at least one user message event
                if saw_session_meta && saw_user_event {
                    items.push(ConversationItem { path, head });
                }
            }
        }
    }
@@ -273,16 +282,19 @@ fn parse_timestamp_uuid_from_filename(name: &str) -> Option<(OffsetDateTime, Uui
    Some((ts, uuid))
}

async fn read_first_jsonl_records(
async fn read_head_and_flags(
    path: &Path,
    max_records: usize,
) -> io::Result<Vec<serde_json::Value>> {
) -> io::Result<(Vec<serde_json::Value>, bool, bool)> {
    use tokio::io::AsyncBufReadExt;

    let file = tokio::fs::File::open(path).await?;
    let reader = tokio::io::BufReader::new(file);
    let mut lines = reader.lines();
    let mut head: Vec<serde_json::Value> = Vec::new();
    let mut saw_session_meta = false;
    let mut saw_user_event = false;

    while head.len() < max_records {
        let line_opt = lines.next_line().await?;
        let Some(line) = line_opt else { break };
@@ -290,9 +302,29 @@ async fn read_first_jsonl_records(
        if trimmed.is_empty() {
            continue;
        }
        if let Ok(v) = serde_json::from_str::<serde_json::Value>(trimmed) {
            head.push(v);

        let parsed: Result<RolloutLine, _> = serde_json::from_str(trimmed);
        let Ok(rollout_line) = parsed else { continue };

        match rollout_line.item {
            RolloutItem::SessionMeta(session_meta_line) => {
                if let Ok(val) = serde_json::to_value(session_meta_line) {
                    head.push(val);
                    saw_session_meta = true;
                }
            }
            RolloutItem::ResponseItem(item) => {
                if let Ok(val) = serde_json::to_value(item) {
                    head.push(val);
                }
            }
            RolloutItem::EventMsg(ev) => {
                if matches!(ev, EventMsg::UserMessage(_)) {
                    saw_user_event = true;
                }
            }
        }
    }
    Ok(head)

    Ok((head, saw_session_meta, saw_user_event))
}
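Concretely, a rollout file that survives the new listing filter starts with a session_meta line and includes at least one user_message event among its first HEAD_RECORD_LIMIT records. Two such JSONL lines, mirroring the test fixtures later in this diff (field values illustrative):

{"timestamp":"2025-01-01T12-00-00","type":"session_meta","payload":{"id":"<uuid>","timestamp":"2025-01-01T12-00-00","instructions":null,"cwd":".","originator":"test_originator","cli_version":"test_version"}}
{"timestamp":"2025-01-01T12-00-00","type":"event_msg","payload":{"type":"user_message","message":"Hello from user","kind":"plain"}}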
@@ -1,15 +1,15 @@
//! Rollout module: persistence and discovery of session rollout files.

pub(crate) const SESSIONS_SUBDIR: &str = "sessions";
pub const SESSIONS_SUBDIR: &str = "sessions";
pub const ARCHIVED_SESSIONS_SUBDIR: &str = "archived_sessions";

pub mod list;
pub(crate) mod policy;
pub mod recorder;

pub use codex_protocol::protocol::SessionMeta;
pub use recorder::RolloutRecorder;
pub use recorder::RolloutRecorderParams;
pub use recorder::SessionMeta;
pub use recorder::SessionStateSnapshot;

#[cfg(test)]
pub mod tests;
@@ -1,8 +1,21 @@
use crate::protocol::EventMsg;
use crate::protocol::RolloutItem;
use codex_protocol::models::ResponseItem;

/// Whether a rollout `item` should be persisted in rollout files.
#[inline]
pub(crate) fn is_persisted_response_item(item: &RolloutItem) -> bool {
    match item {
        RolloutItem::ResponseItem(item) => should_persist_response_item(item),
        RolloutItem::EventMsg(ev) => should_persist_event_msg(ev),
        // Always persist session meta
        RolloutItem::SessionMeta(_) => true,
    }
}

/// Whether a `ResponseItem` should be persisted in rollout files.
#[inline]
pub(crate) fn is_persisted_response_item(item: &ResponseItem) -> bool {
pub(crate) fn should_persist_response_item(item: &ResponseItem) -> bool {
    match item {
        ResponseItem::Message { .. }
        | ResponseItem::Reasoning { .. }
@@ -14,3 +27,44 @@ pub(crate) fn is_persisted_response_item(item: &ResponseItem) -> bool {
        ResponseItem::WebSearchCall { .. } | ResponseItem::Other => false,
    }
}

/// Whether an `EventMsg` should be persisted in rollout files.
#[inline]
pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
    match ev {
        EventMsg::UserMessage(_)
        | EventMsg::AgentMessage(_)
        | EventMsg::AgentReasoning(_)
        | EventMsg::AgentReasoningRawContent(_)
        | EventMsg::TokenCount(_) => true,
        EventMsg::Error(_)
        | EventMsg::TaskStarted(_)
        | EventMsg::TaskComplete(_)
        | EventMsg::AgentMessageDelta(_)
        | EventMsg::AgentReasoningDelta(_)
        | EventMsg::AgentReasoningRawContentDelta(_)
        | EventMsg::AgentReasoningSectionBreak(_)
        | EventMsg::SessionConfigured(_)
        | EventMsg::McpToolCallBegin(_)
        | EventMsg::McpToolCallEnd(_)
        | EventMsg::WebSearchBegin(_)
        | EventMsg::WebSearchEnd(_)
        | EventMsg::ExecCommandBegin(_)
        | EventMsg::ExecCommandOutputDelta(_)
        | EventMsg::ExecCommandEnd(_)
        | EventMsg::ExecApprovalRequest(_)
        | EventMsg::ApplyPatchApprovalRequest(_)
        | EventMsg::BackgroundEvent(_)
        | EventMsg::StreamError(_)
        | EventMsg::PatchApplyBegin(_)
        | EventMsg::PatchApplyEnd(_)
        | EventMsg::TurnDiff(_)
        | EventMsg::GetHistoryEntryResponse(_)
        | EventMsg::McpListToolsResponse(_)
        | EventMsg::ListCustomPromptsResponse(_)
        | EventMsg::PlanUpdate(_)
        | EventMsg::TurnAborted(_)
        | EventMsg::ShutdownComplete
        | EventMsg::ConversationPath(_) => false,
    }
}
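One design point in the policy module above deserves a note: both match statements are exhaustive, with no wildcard arm, so adding a new EventMsg variant forces a conscious persist-or-drop decision at compile time. A self-contained toy showing the same allowlist-plus-retain pattern (the enum and values here are stand-ins, not the crate's own types):

#[derive(Debug)]
enum Event {
    UserMessage(String),
    AgentMessageDelta(String),
    TokenCount(u64),
}

fn should_persist(ev: &Event) -> bool {
    // Exhaustive match: a new variant is a compile error until classified here.
    match ev {
        Event::UserMessage(_) | Event::TokenCount(_) => true,
        Event::AgentMessageDelta(_) => false,
    }
}

fn main() {
    let mut items = vec![
        Event::UserMessage("hi".into()),
        Event::AgentMessageDelta("h".into()),
        Event::TokenCount(3),
    ];
    items.retain(should_persist);
    assert_eq!(items.len(), 2);
}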
@@ -26,26 +26,15 @@ use super::list::Cursor;
use super::list::get_conversations;
use super::policy::is_persisted_response_item;
use crate::config::Config;
use crate::conversation_manager::InitialHistory;
use crate::conversation_manager::ResumedHistory;
use crate::git_info::GitInfo;
use crate::default_client::ORIGINATOR;
use crate::git_info::collect_git_info;
use codex_protocol::models::ResponseItem;

#[derive(Serialize, Deserialize, Clone, Default)]
pub struct SessionMeta {
    pub id: ConversationId,
    pub timestamp: String,
    pub instructions: Option<String>,
}

#[derive(Serialize)]
struct SessionMetaWithGit {
    #[serde(flatten)]
    meta: SessionMeta,
    #[serde(skip_serializing_if = "Option::is_none")]
    git: Option<GitInfo>,
}
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::ResumedHistory;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;
use codex_protocol::protocol::SessionMeta;
use codex_protocol::protocol::SessionMetaLine;

#[derive(Serialize, Deserialize, Default, Clone)]
pub struct SessionStateSnapshot {}
@@ -72,6 +61,7 @@ pub struct SavedSession {
#[derive(Clone)]
pub struct RolloutRecorder {
    tx: Sender<RolloutCmd>,
    pub(crate) rollout_path: PathBuf,
}

#[derive(Clone)]
@@ -86,9 +76,14 @@ pub enum RolloutRecorderParams {
}

enum RolloutCmd {
    AddItems(Vec<ResponseItem>),
    UpdateState(SessionStateSnapshot),
    Shutdown { ack: oneshot::Sender<()> },
    AddItems(Vec<RolloutItem>),
    /// Ensure all prior writes are processed; respond when flushed.
    Flush {
        ack: oneshot::Sender<()>,
    },
    Shutdown {
        ack: oneshot::Sender<()>,
    },
}

impl RolloutRecorderParams {
@@ -105,7 +100,6 @@ impl RolloutRecorderParams {
}

impl RolloutRecorder {
    #[allow(dead_code)]
    /// List conversations (rollout files) under the provided Codex home directory.
    pub async fn list_conversations(
        codex_home: &Path,
@@ -119,13 +113,14 @@ impl RolloutRecorder {
    /// cannot be created or the rollout file cannot be opened we return the
    /// error so the caller can decide whether to disable persistence.
    pub async fn new(config: &Config, params: RolloutRecorderParams) -> std::io::Result<Self> {
        let (file, meta) = match params {
        let (file, rollout_path, meta) = match params {
            RolloutRecorderParams::Create {
                conversation_id,
                instructions,
            } => {
                let LogFileInfo {
                    file,
                    path,
                    conversation_id: session_id,
                    timestamp,
                } = create_log_file(config, conversation_id)?;
@@ -140,9 +135,13 @@ impl RolloutRecorder {

                (
                    tokio::fs::File::from_std(file),
                    path,
                    Some(SessionMeta {
                        timestamp,
                        id: session_id,
                        timestamp,
                        cwd: config.cwd.clone(),
                        originator: ORIGINATOR.value.clone(),
                        cli_version: env!("CARGO_PKG_VERSION").to_string(),
                        instructions,
                    }),
                )
@@ -150,8 +149,9 @@ impl RolloutRecorder {
            RolloutRecorderParams::Resume { path } => (
                tokio::fs::OpenOptions::new()
                    .append(true)
                    .open(path)
                    .open(&path)
                    .await?,
                path,
                None,
            ),
        };
@@ -169,10 +169,10 @@ impl RolloutRecorder {
        // driver instead of blocking the runtime.
        tokio::task::spawn(rollout_writer(file, rx, meta, cwd));

        Ok(Self { tx })
        Ok(Self { tx, rollout_path })
    }

    pub(crate) async fn record_items(&self, items: &[ResponseItem]) -> std::io::Result<()> {
    pub(crate) async fn record_items(&self, items: &[RolloutItem]) -> std::io::Result<()> {
        let mut filtered = Vec::new();
        for item in items {
            // Note that function calls may look a bit strange if they are
@@ -191,60 +191,59 @@ impl RolloutRecorder {
            .map_err(|e| IoError::other(format!("failed to queue rollout items: {e}")))
    }

    pub(crate) async fn record_state(&self, state: SessionStateSnapshot) -> std::io::Result<()> {
    /// Flush all queued writes and wait until they are committed by the writer task.
    pub async fn flush(&self) -> std::io::Result<()> {
        let (tx, rx) = oneshot::channel();
        self.tx
            .send(RolloutCmd::UpdateState(state))
            .send(RolloutCmd::Flush { ack: tx })
            .await
            .map_err(|e| IoError::other(format!("failed to queue rollout state: {e}")))
            .map_err(|e| IoError::other(format!("failed to queue rollout flush: {e}")))?;
        rx.await
            .map_err(|e| IoError::other(format!("failed waiting for rollout flush: {e}")))
    }

    pub async fn get_rollout_history(path: &Path) -> std::io::Result<InitialHistory> {
    pub(crate) async fn get_rollout_history(path: &Path) -> std::io::Result<InitialHistory> {
        info!("Resuming rollout from {path:?}");
        tracing::error!("Resuming rollout from {path:?}");
        let text = tokio::fs::read_to_string(path).await?;
        let mut lines = text.lines();
        let first_line = lines
            .next()
            .ok_or_else(|| IoError::other("empty session file"))?;
        let conversation_id = match serde_json::from_str::<SessionMeta>(first_line) {
            Ok(rollout_session_meta) => {
                tracing::error!(
                    "Parsed conversation ID from rollout file: {:?}",
                    rollout_session_meta.id
                );
                Some(rollout_session_meta.id)
            }
            Err(e) => {
                return Err(IoError::other(format!(
                    "failed to parse first line of rollout file as SessionMeta: {e}"
                )));
            }
        };
        if text.trim().is_empty() {
            return Err(IoError::other("empty session file"));
        }

        let mut items = Vec::new();
        for line in lines {
        let mut items: Vec<RolloutItem> = Vec::new();
        let mut conversation_id: Option<ConversationId> = None;
        for line in text.lines() {
            if line.trim().is_empty() {
                continue;
            }
            let v: Value = match serde_json::from_str(line) {
                Ok(v) => v,
                Err(_) => continue,
            };
            if v.get("record_type")
                .and_then(|rt| rt.as_str())
                .map(|s| s == "state")
                .unwrap_or(false)
            {
                continue;
            }
            match serde_json::from_value::<ResponseItem>(v.clone()) {
                Ok(item) => {
                    if is_persisted_response_item(&item) {
                        items.push(item);
                    }
                }
                Err(e) => {
                    warn!("failed to parse item: {v:?}, error: {e}");
                    warn!("failed to parse line as JSON: {line:?}, error: {e}");
                    continue;
                }
            };

            // Parse the rollout line structure
            match serde_json::from_value::<RolloutLine>(v.clone()) {
                Ok(rollout_line) => match rollout_line.item {
                    RolloutItem::SessionMeta(session_meta_line) => {
                        // Use the FIRST SessionMeta encountered in the file as the canonical
                        // conversation id and main session information. Keep all items intact.
                        if conversation_id.is_none() {
                            conversation_id = Some(session_meta_line.meta.id);
                        }
                        items.push(RolloutItem::SessionMeta(session_meta_line));
                    }
                    RolloutItem::ResponseItem(item) => {
                        items.push(RolloutItem::ResponseItem(item));
                    }
                    RolloutItem::EventMsg(_ev) => {
                        items.push(RolloutItem::EventMsg(_ev));
                    }
                },
                Err(e) => {
                    warn!("failed to parse rollout line: {v:?}, error: {e}");
                }
            }
        }
@@ -269,6 +268,10 @@ impl RolloutRecorder {
        }))
    }

    pub(crate) fn get_rollout_path(&self) -> PathBuf {
        self.rollout_path.clone()
    }

    pub async fn shutdown(&self) -> std::io::Result<()> {
        let (tx_done, rx_done) = oneshot::channel();
        match self.tx.send(RolloutCmd::Shutdown { ack: tx_done }).await {
@@ -289,6 +292,9 @@ struct LogFileInfo {
    /// Opened file handle to the rollout file.
    file: File,

    /// Full path to the rollout file.
    path: PathBuf,

    /// Session ID (also embedded in filename).
    conversation_id: ConversationId,

@@ -328,6 +334,7 @@ fn create_log_file(

    Ok(LogFileInfo {
        file,
        path,
        conversation_id,
        timestamp,
    })
@@ -344,13 +351,15 @@ async fn rollout_writer(
    // If we have a meta, collect git info asynchronously and write meta first
    if let Some(session_meta) = meta.take() {
        let git_info = collect_git_info(&cwd).await;
        let session_meta_with_git = SessionMetaWithGit {
        let session_meta_line = SessionMetaLine {
            meta: session_meta,
            git: git_info,
        };

        // Write the SessionMeta as the first item in the file
        writer.write_line(&session_meta_with_git).await?;
        // Write the SessionMeta as the first item in the file, wrapped in a rollout line
        writer
            .write_rollout_item(RolloutItem::SessionMeta(session_meta_line))
            .await?;
    }

    // Process rollout commands
@@ -359,23 +368,17 @@ async fn rollout_writer(
            RolloutCmd::AddItems(items) => {
                for item in items {
                    if is_persisted_response_item(&item) {
                        writer.write_line(&item).await?;
                        writer.write_rollout_item(item).await?;
                    }
                }
            }
            RolloutCmd::UpdateState(state) => {
                #[derive(Serialize)]
                struct StateLine<'a> {
                    record_type: &'static str,
                    #[serde(flatten)]
                    state: &'a SessionStateSnapshot,
            RolloutCmd::Flush { ack } => {
                // Ensure underlying file is flushed and then ack.
                if let Err(e) = writer.file.flush().await {
                    let _ = ack.send(());
                    return Err(e);
                }
                writer
                    .write_line(&StateLine {
                        record_type: "state",
                        state: &state,
                    })
                    .await?;
                let _ = ack.send(());
            }
            RolloutCmd::Shutdown { ack } => {
                let _ = ack.send(());
@@ -391,6 +394,20 @@ struct JsonlWriter {
}

impl JsonlWriter {
    async fn write_rollout_item(&mut self, rollout_item: RolloutItem) -> std::io::Result<()> {
        let timestamp_format: &[FormatItem] = format_description!(
            "[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:3]Z"
        );
        let timestamp = OffsetDateTime::now_utc()
            .format(timestamp_format)
            .map_err(|e| IoError::other(format!("failed to format timestamp: {e}")))?;

        let line = RolloutLine {
            timestamp,
            item: rollout_item,
        };
        self.write_line(&line).await
    }
    async fn write_line(&mut self, item: &impl serde::Serialize) -> std::io::Result<()> {
        let mut json = serde_json::to_string(item)?;
        json.push('\n');
|
||||
|
||||
let meta = serde_json::json!({
|
||||
"timestamp": ts_str,
|
||||
"id": uuid.to_string()
|
||||
"type": "session_meta",
|
||||
"payload": {
|
||||
"id": uuid,
|
||||
"timestamp": ts_str,
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
}
|
||||
});
|
||||
writeln!(file, "{meta}")?;
|
||||
|
||||
// Include at least one user message event to satisfy listing filters
|
||||
let user_event = serde_json::json!({
|
||||
"timestamp": ts_str,
|
||||
"type": "event_msg",
|
||||
"payload": {
|
||||
"type": "user_message",
|
||||
"message": "Hello from user",
|
||||
"kind": "plain"
|
||||
}
|
||||
});
|
||||
writeln!(file, "{user_event}")?;
|
||||
|
||||
for i in 0..num_records {
|
||||
let rec = serde_json::json!({
|
||||
"record_type": "response",
|
||||
@@ -93,24 +113,30 @@ async fn test_list_conversations_latest_first() {
|
||||
.join("01")
|
||||
.join(format!("rollout-2025-01-01T12-00-00-{u1}.jsonl"));
|
||||
|
||||
let head_3 = vec![
|
||||
serde_json::json!({"timestamp": "2025-01-03T12-00-00", "id": u3.to_string()}),
|
||||
serde_json::json!({"record_type": "response", "index": 0}),
|
||||
serde_json::json!({"record_type": "response", "index": 1}),
|
||||
serde_json::json!({"record_type": "response", "index": 2}),
|
||||
];
|
||||
let head_2 = vec![
|
||||
serde_json::json!({"timestamp": "2025-01-02T12-00-00", "id": u2.to_string()}),
|
||||
serde_json::json!({"record_type": "response", "index": 0}),
|
||||
serde_json::json!({"record_type": "response", "index": 1}),
|
||||
serde_json::json!({"record_type": "response", "index": 2}),
|
||||
];
|
||||
let head_1 = vec![
|
||||
serde_json::json!({"timestamp": "2025-01-01T12-00-00", "id": u1.to_string()}),
|
||||
serde_json::json!({"record_type": "response", "index": 0}),
|
||||
serde_json::json!({"record_type": "response", "index": 1}),
|
||||
serde_json::json!({"record_type": "response", "index": 2}),
|
||||
];
|
||||
let head_3 = vec![serde_json::json!({
|
||||
"id": u3,
|
||||
"timestamp": "2025-01-03T12-00-00",
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})];
|
||||
let head_2 = vec![serde_json::json!({
|
||||
"id": u2,
|
||||
"timestamp": "2025-01-02T12-00-00",
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})];
|
||||
let head_1 = vec![serde_json::json!({
|
||||
"id": u1,
|
||||
"timestamp": "2025-01-01T12-00-00",
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})];
|
||||
|
||||
let expected_cursor: Cursor =
|
||||
serde_json::from_str(&format!("\"2025-01-01T12-00-00|{u1}\"")).unwrap();
|
||||
@@ -170,14 +196,22 @@ async fn test_pagination_cursor() {
|
||||
.join("03")
|
||||
.join("04")
|
||||
.join(format!("rollout-2025-03-04T09-00-00-{u4}.jsonl"));
|
||||
let head_5 = vec![
|
||||
serde_json::json!({"timestamp": "2025-03-05T09-00-00", "id": u5.to_string()}),
|
||||
serde_json::json!({"record_type": "response", "index": 0}),
|
||||
];
|
||||
let head_4 = vec![
|
||||
serde_json::json!({"timestamp": "2025-03-04T09-00-00", "id": u4.to_string()}),
|
||||
serde_json::json!({"record_type": "response", "index": 0}),
|
||||
];
|
||||
let head_5 = vec![serde_json::json!({
|
||||
"id": u5,
|
||||
"timestamp": "2025-03-05T09-00-00",
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})];
|
||||
let head_4 = vec![serde_json::json!({
|
||||
"id": u4,
|
||||
"timestamp": "2025-03-04T09-00-00",
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})];
|
||||
let expected_cursor1: Cursor =
|
||||
serde_json::from_str(&format!("\"2025-03-04T09-00-00|{u4}\"")).unwrap();
|
||||
let expected_page1 = ConversationsPage {
|
||||
@@ -212,14 +246,22 @@ async fn test_pagination_cursor() {
|
||||
.join("03")
|
||||
.join("02")
|
||||
.join(format!("rollout-2025-03-02T09-00-00-{u2}.jsonl"));
|
||||
let head_3 = vec![
|
||||
serde_json::json!({"timestamp": "2025-03-03T09-00-00", "id": u3.to_string()}),
|
||||
serde_json::json!({"record_type": "response", "index": 0}),
|
||||
];
|
||||
let head_2 = vec![
|
||||
serde_json::json!({"timestamp": "2025-03-02T09-00-00", "id": u2.to_string()}),
|
||||
serde_json::json!({"record_type": "response", "index": 0}),
|
||||
];
|
||||
let head_3 = vec![serde_json::json!({
|
||||
"id": u3,
|
||||
"timestamp": "2025-03-03T09-00-00",
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})];
|
||||
let head_2 = vec![serde_json::json!({
|
||||
"id": u2,
|
||||
"timestamp": "2025-03-02T09-00-00",
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})];
|
||||
let expected_cursor2: Cursor =
|
||||
serde_json::from_str(&format!("\"2025-03-02T09-00-00|{u2}\"")).unwrap();
|
||||
let expected_page2 = ConversationsPage {
|
||||
@@ -248,10 +290,14 @@ async fn test_pagination_cursor() {
|
||||
.join("03")
|
||||
.join("01")
|
||||
.join(format!("rollout-2025-03-01T09-00-00-{u1}.jsonl"));
|
||||
let head_1 = vec![
|
||||
serde_json::json!({"timestamp": "2025-03-01T09-00-00", "id": u1.to_string()}),
|
||||
serde_json::json!({"record_type": "response", "index": 0}),
|
||||
];
|
||||
let head_1 = vec![serde_json::json!({
|
||||
"id": u1,
|
||||
"timestamp": "2025-03-01T09-00-00",
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})];
|
||||
let expected_cursor3: Cursor =
|
||||
serde_json::from_str(&format!("\"2025-03-01T09-00-00|{u1}\"")).unwrap();
|
||||
let expected_page3 = ConversationsPage {
|
||||
@@ -287,11 +333,14 @@ async fn test_get_conversation_contents() {
|
||||
.join("04")
|
||||
.join("01")
|
||||
.join(format!("rollout-2025-04-01T10-30-00-{uuid}.jsonl"));
|
||||
let expected_head = vec![
|
||||
serde_json::json!({"timestamp": ts, "id": uuid.to_string()}),
|
||||
serde_json::json!({"record_type": "response", "index": 0}),
|
||||
serde_json::json!({"record_type": "response", "index": 1}),
|
||||
];
|
||||
let expected_head = vec![serde_json::json!({
|
||||
"id": uuid,
|
||||
"timestamp": ts,
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})];
|
||||
let expected_cursor: Cursor = serde_json::from_str(&format!("\"{ts}|{uuid}\"")).unwrap();
|
||||
let expected_page = ConversationsPage {
|
||||
items: vec![ConversationItem {
|
||||
@@ -305,10 +354,15 @@ async fn test_get_conversation_contents() {
|
||||
assert_eq!(page, expected_page);
|
||||
|
||||
// Entire file contents equality
|
||||
let meta = serde_json::json!({"timestamp": ts, "id": uuid.to_string()});
|
||||
let meta = serde_json::json!({"timestamp": ts, "type": "session_meta", "payload": {"id": uuid, "timestamp": ts, "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version"}});
|
||||
let user_event = serde_json::json!({
|
||||
"timestamp": ts,
|
||||
"type": "event_msg",
|
||||
"payload": {"type": "user_message", "message": "Hello from user", "kind": "plain"}
|
||||
});
|
||||
let rec0 = serde_json::json!({"record_type": "response", "index": 0});
|
||||
let rec1 = serde_json::json!({"record_type": "response", "index": 1});
|
||||
let expected_content = format!("{meta}\n{rec0}\n{rec1}\n");
|
||||
let expected_content = format!("{meta}\n{user_event}\n{rec0}\n{rec1}\n");
|
||||
assert_eq!(content, expected_content);
|
||||
}
|
||||
|
||||
@@ -341,7 +395,14 @@ async fn test_stable_ordering_same_second_pagination() {
|
||||
.join("01")
|
||||
.join(format!("rollout-2025-07-01T00-00-00-{u2}.jsonl"));
|
||||
let head = |u: Uuid| -> Vec<serde_json::Value> {
|
||||
vec![serde_json::json!({"timestamp": ts, "id": u.to_string()})]
|
||||
vec![serde_json::json!({
|
||||
"id": u,
|
||||
"timestamp": ts,
|
||||
"instructions": null,
|
||||
"cwd": ".",
|
||||
"originator": "test_originator",
|
||||
"cli_version": "test_version"
|
||||
})]
|
||||
};
|
||||
let expected_cursor1: Cursor = serde_json::from_str(&format!("\"{ts}|{u2}\"")).unwrap();
|
||||
let expected_page1 = ConversationsPage {
|
||||
|
||||
@@ -69,3 +69,8 @@
; Added on top of Chrome profile
; Needed for python multiprocessing on MacOS for the SemLock
(allow ipc-posix-sem)

; needed to look up user info, see https://crbug.com/792228
(allow mach-lookup
    (global-name "com.apple.system.opendirectoryd.libinfo")
)
180
codex-rs/core/src/truncate.rs
Normal file
180
codex-rs/core/src/truncate.rs
Normal file
@@ -0,0 +1,180 @@
|
||||
//! Utilities for truncating large chunks of output while preserving a prefix
|
||||
//! and suffix on UTF-8 boundaries.
|
||||
|
||||
/// Truncate the middle of a UTF-8 string to at most `max_bytes` bytes,
|
||||
/// preserving the beginning and the end. Returns the possibly truncated
|
||||
/// string and `Some(original_token_count)` (estimated at 4 bytes/token)
|
||||
/// if truncation occurred; otherwise returns the original string and `None`.
|
||||
pub(crate) fn truncate_middle(s: &str, max_bytes: usize) -> (String, Option<u64>) {
|
||||
if s.len() <= max_bytes {
|
||||
return (s.to_string(), None);
|
||||
}
|
||||
|
||||
let est_tokens = (s.len() as u64).div_ceil(4);
|
||||
if max_bytes == 0 {
|
||||
return (format!("…{est_tokens} tokens truncated…"), Some(est_tokens));
|
||||
}
|
||||
|
||||
fn truncate_on_boundary(input: &str, max_len: usize) -> &str {
|
||||
if input.len() <= max_len {
|
||||
return input;
|
||||
}
|
||||
let mut end = max_len;
|
||||
while end > 0 && !input.is_char_boundary(end) {
|
||||
end -= 1;
|
||||
}
|
||||
&input[..end]
|
||||
}
|
||||
|
||||
fn pick_prefix_end(s: &str, left_budget: usize) -> usize {
|
||||
if let Some(head) = s.get(..left_budget)
|
||||
&& let Some(i) = head.rfind('\n')
|
||||
{
|
||||
return i + 1;
|
||||
}
|
||||
truncate_on_boundary(s, left_budget).len()
|
||||
}
|
||||
|
||||
fn pick_suffix_start(s: &str, right_budget: usize) -> usize {
|
||||
let start_tail = s.len().saturating_sub(right_budget);
|
||||
if let Some(tail) = s.get(start_tail..)
|
||||
&& let Some(i) = tail.find('\n')
|
||||
{
|
||||
return start_tail + i + 1;
|
||||
}
|
||||
|
||||
let mut idx = start_tail.min(s.len());
|
||||
while idx < s.len() && !s.is_char_boundary(idx) {
|
||||
idx += 1;
|
||||
}
|
||||
idx
|
||||
}
|
||||
|
||||
let mut guess_tokens = est_tokens;
|
||||
for _ in 0..4 {
|
||||
let marker = format!("…{guess_tokens} tokens truncated…");
|
||||
let marker_len = marker.len();
|
||||
let keep_budget = max_bytes.saturating_sub(marker_len);
|
||||
if keep_budget == 0 {
|
||||
return (format!("…{est_tokens} tokens truncated…"), Some(est_tokens));
|
||||
}
|
||||
|
||||
let left_budget = keep_budget / 2;
|
||||
let right_budget = keep_budget - left_budget;
|
||||
let prefix_end = pick_prefix_end(s, left_budget);
|
||||
let mut suffix_start = pick_suffix_start(s, right_budget);
|
||||
if suffix_start < prefix_end {
|
||||
suffix_start = prefix_end;
|
||||
}
|
||||
|
||||
        let kept_content_bytes = prefix_end + (s.len() - suffix_start);
        let truncated_content_bytes = s.len().saturating_sub(kept_content_bytes);
        let new_tokens = (truncated_content_bytes as u64).div_ceil(4);

        if new_tokens == guess_tokens {
            let mut out = String::with_capacity(marker_len + kept_content_bytes + 1);
            out.push_str(&s[..prefix_end]);
            out.push_str(&marker);
            out.push('\n');
            out.push_str(&s[suffix_start..]);
            return (out, Some(est_tokens));
        }

        guess_tokens = new_tokens;
    }

    let marker = format!("…{guess_tokens} tokens truncated…");
    let marker_len = marker.len();
    let keep_budget = max_bytes.saturating_sub(marker_len);
    if keep_budget == 0 {
        return (format!("…{est_tokens} tokens truncated…"), Some(est_tokens));
    }

    let left_budget = keep_budget / 2;
    let right_budget = keep_budget - left_budget;
    let prefix_end = pick_prefix_end(s, left_budget);
    let suffix_start = pick_suffix_start(s, right_budget);

    let mut out = String::with_capacity(marker_len + prefix_end + (s.len() - suffix_start) + 1);
    out.push_str(&s[..prefix_end]);
    out.push_str(&marker);
    out.push('\n');
    out.push_str(&s[suffix_start..]);
    (out, Some(est_tokens))
}

#[cfg(test)]
mod tests {
    use super::truncate_middle;

    #[test]
    fn truncate_middle_no_newlines_fallback() {
        let s = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ*";
        let max_bytes = 32;
        let (out, original) = truncate_middle(s, max_bytes);
        assert!(out.starts_with("abc"));
        assert!(out.contains("tokens truncated"));
        assert!(out.ends_with("XYZ*"));
        assert_eq!(original, Some((s.len() as u64).div_ceil(4)));
    }

    #[test]
    fn truncate_middle_prefers_newline_boundaries() {
        let mut s = String::new();
        for i in 1..=20 {
            s.push_str(&format!("{i:03}\n"));
        }
        assert_eq!(s.len(), 80);

        let max_bytes = 64;
        let (out, tokens) = truncate_middle(&s, max_bytes);
        assert!(out.starts_with("001\n002\n003\n004\n"));
        assert!(out.contains("tokens truncated"));
        assert!(out.ends_with("017\n018\n019\n020\n"));
        assert_eq!(tokens, Some(20));
    }

    #[test]
    fn truncate_middle_handles_utf8_content() {
        let s = "😀😀😀😀😀😀😀😀😀😀\nsecond line with ascii text\n";
        let max_bytes = 32;
        let (out, tokens) = truncate_middle(s, max_bytes);

        assert!(out.contains("tokens truncated"));
        assert!(!out.contains('\u{fffd}'));
        assert_eq!(tokens, Some((s.len() as u64).div_ceil(4)));
    }

    #[test]
    fn truncate_middle_prefers_newline_boundaries_2() {
        // Build a multi-line string of 20 numbered lines (each "NNN\n").
        let mut s = String::new();
        for i in 1..=20 {
            s.push_str(&format!("{i:03}\n"));
        }
        // Total length: 20 lines * 4 bytes per line = 80 bytes.
        assert_eq!(s.len(), 80);

        // Choose a cap that forces truncation while leaving room for
        // a few lines on each side after accounting for the marker.
        let max_bytes = 64;
        // Expect exact output: first 4 lines, marker, last 4 lines, and correct token estimate (80/4 = 20).
        assert_eq!(
            truncate_middle(&s, max_bytes),
            (
                r#"001
002
003
004
…12 tokens truncated…
017
018
019
020
"#
                .to_string(),
                Some(20)
            )
        );
    }
}
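The function above keeps a prefix and a suffix of the input joined by an "…N tokens truncated…" marker, and estimates the elided token count as bytes divided by 4, looping until the marker's count stabilizes. A minimal usage sketch, grounded in the newline-boundary test above (names are illustrative):

    // Truncate a long 80-byte tool output down to ~64 bytes, keeping head and tail.
    let mut long_output = String::new();
    for i in 1..=20 {
        long_output.push_str(&format!("{i:03}\n"));
    }
    let (shown, est_tokens) = truncate_middle(&long_output, 64);
    assert!(shown.contains("tokens truncated"));
    // `est_tokens` is the bytes/4 estimate for the original input (80 bytes -> Some(20)).
    assert_eq!(est_tokens, Some(20));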
22 codex-rs/core/src/unified_exec/errors.rs Normal file
@@ -0,0 +1,22 @@
use thiserror::Error;

#[derive(Debug, Error)]
pub(crate) enum UnifiedExecError {
    #[error("Failed to create unified exec session: {pty_error}")]
    CreateSession {
        #[source]
        pty_error: anyhow::Error,
    },
    #[error("Unknown session id {session_id}")]
    UnknownSessionId { session_id: i32 },
    #[error("failed to write to stdin")]
    WriteToStdin,
    #[error("missing command line for unified exec request")]
    MissingCommandLine,
}

impl UnifiedExecError {
    pub(crate) fn create_session(error: anyhow::Error) -> Self {
        Self::CreateSession { pty_error: error }
    }
}
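The `#[error(...)]` attributes above give each variant a thiserror-derived Display message, and `#[source]` on `pty_error` preserves the underlying cause for error-chain inspection. A small hypothetical caller sketch (not part of the diff):

    fn report(err: &UnifiedExecError) {
        // Uses the derived Display, e.g. "Unknown session id 7".
        eprintln!("unified exec failed: {err}");
    }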
653 codex-rs/core/src/unified_exec/mod.rs Normal file
@@ -0,0 +1,653 @@
use portable_pty::CommandBuilder;
use portable_pty::PtySize;
use portable_pty::native_pty_system;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::io::ErrorKind;
use std::io::Read;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicI32;
use std::sync::atomic::Ordering;
use tokio::sync::Mutex;
use tokio::sync::Notify;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tokio::time::Duration;
use tokio::time::Instant;

use crate::exec_command::ExecCommandSession;
use crate::truncate::truncate_middle;

mod errors;

pub(crate) use errors::UnifiedExecError;

const DEFAULT_TIMEOUT_MS: u64 = 1_000;
const MAX_TIMEOUT_MS: u64 = 60_000;
const UNIFIED_EXEC_OUTPUT_MAX_BYTES: usize = 128 * 1024; // 128 KiB

#[derive(Debug)]
pub(crate) struct UnifiedExecRequest<'a> {
    pub session_id: Option<i32>,
    pub input_chunks: &'a [String],
    pub timeout_ms: Option<u64>,
}

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct UnifiedExecResult {
    pub session_id: Option<i32>,
    pub output: String,
}

#[derive(Debug, Default)]
pub(crate) struct UnifiedExecSessionManager {
    next_session_id: AtomicI32,
    sessions: Mutex<HashMap<i32, ManagedUnifiedExecSession>>,
}

#[derive(Debug)]
struct ManagedUnifiedExecSession {
    session: ExecCommandSession,
    output_buffer: OutputBuffer,
    /// Notifies waiters whenever new output has been appended to
    /// `output_buffer`, allowing clients to poll for fresh data.
    output_notify: Arc<Notify>,
    output_task: JoinHandle<()>,
}

#[derive(Debug, Default)]
struct OutputBufferState {
    chunks: VecDeque<Vec<u8>>,
    total_bytes: usize,
}

impl OutputBufferState {
    fn push_chunk(&mut self, chunk: Vec<u8>) {
        self.total_bytes = self.total_bytes.saturating_add(chunk.len());
        self.chunks.push_back(chunk);

        let mut excess = self
            .total_bytes
            .saturating_sub(UNIFIED_EXEC_OUTPUT_MAX_BYTES);

        while excess > 0 {
            match self.chunks.front_mut() {
                Some(front) if excess >= front.len() => {
                    excess -= front.len();
                    self.total_bytes = self.total_bytes.saturating_sub(front.len());
                    self.chunks.pop_front();
                }
                Some(front) => {
                    front.drain(..excess);
                    self.total_bytes = self.total_bytes.saturating_sub(excess);
                    break;
                }
                None => break,
            }
        }
    }

    fn drain(&mut self) -> Vec<Vec<u8>> {
        let drained: Vec<Vec<u8>> = self.chunks.drain(..).collect();
        self.total_bytes = 0;
        drained
    }
}

type OutputBuffer = Arc<Mutex<OutputBufferState>>;
type OutputHandles = (OutputBuffer, Arc<Notify>);

impl ManagedUnifiedExecSession {
    fn new(session: ExecCommandSession) -> Self {
        let output_buffer = Arc::new(Mutex::new(OutputBufferState::default()));
        let output_notify = Arc::new(Notify::new());
        let mut receiver = session.output_receiver();
        let buffer_clone = Arc::clone(&output_buffer);
        let notify_clone = Arc::clone(&output_notify);
        let output_task = tokio::spawn(async move {
            while let Ok(chunk) = receiver.recv().await {
                let mut guard = buffer_clone.lock().await;
                guard.push_chunk(chunk);
                drop(guard);
                notify_clone.notify_waiters();
            }
        });

        Self {
            session,
            output_buffer,
            output_notify,
            output_task,
        }
    }

    fn writer_sender(&self) -> mpsc::Sender<Vec<u8>> {
        self.session.writer_sender()
    }

    fn output_handles(&self) -> OutputHandles {
        (
            Arc::clone(&self.output_buffer),
            Arc::clone(&self.output_notify),
        )
    }

    fn has_exited(&self) -> bool {
        self.session.has_exited()
    }
}

impl Drop for ManagedUnifiedExecSession {
    fn drop(&mut self) {
        self.output_task.abort();
    }
}

impl UnifiedExecSessionManager {
    pub async fn handle_request(
        &self,
        request: UnifiedExecRequest<'_>,
    ) -> Result<UnifiedExecResult, UnifiedExecError> {
        let (timeout_ms, timeout_warning) = match request.timeout_ms {
            Some(requested) if requested > MAX_TIMEOUT_MS => (
                MAX_TIMEOUT_MS,
                Some(format!(
                    "Warning: requested timeout {requested}ms exceeds maximum of {MAX_TIMEOUT_MS}ms; clamping to {MAX_TIMEOUT_MS}ms.\n"
                )),
            ),
            Some(requested) => (requested, None),
            None => (DEFAULT_TIMEOUT_MS, None),
        };

        let mut new_session: Option<ManagedUnifiedExecSession> = None;
        let session_id;
        let writer_tx;
        let output_buffer;
        let output_notify;

        if let Some(existing_id) = request.session_id {
            let mut sessions = self.sessions.lock().await;
            match sessions.get(&existing_id) {
                Some(session) => {
                    if session.has_exited() {
                        sessions.remove(&existing_id);
                        return Err(UnifiedExecError::UnknownSessionId {
                            session_id: existing_id,
                        });
                    }
                    let (buffer, notify) = session.output_handles();
                    session_id = existing_id;
                    writer_tx = session.writer_sender();
                    output_buffer = buffer;
                    output_notify = notify;
                }
                None => {
                    return Err(UnifiedExecError::UnknownSessionId {
                        session_id: existing_id,
                    });
                }
            }
            drop(sessions);
        } else {
            let command = request.input_chunks.to_vec();
            let new_id = self.next_session_id.fetch_add(1, Ordering::SeqCst);
            let session = create_unified_exec_session(&command).await?;
            let managed_session = ManagedUnifiedExecSession::new(session);
            let (buffer, notify) = managed_session.output_handles();
            writer_tx = managed_session.writer_sender();
            output_buffer = buffer;
            output_notify = notify;
            session_id = new_id;
            new_session = Some(managed_session);
        };

        if request.session_id.is_some() {
            let joined_input = request.input_chunks.join(" ");
            if !joined_input.is_empty() && writer_tx.send(joined_input.into_bytes()).await.is_err()
            {
                return Err(UnifiedExecError::WriteToStdin);
            }
        }

        let mut collected: Vec<u8> = Vec::with_capacity(4096);
        let start = Instant::now();
        let deadline = start + Duration::from_millis(timeout_ms);

        loop {
            let drained_chunks;
            let mut wait_for_output = None;
            {
                let mut guard = output_buffer.lock().await;
                drained_chunks = guard.drain();
                if drained_chunks.is_empty() {
                    wait_for_output = Some(output_notify.notified());
                }
            }

            if drained_chunks.is_empty() {
                let remaining = deadline.saturating_duration_since(Instant::now());
                if remaining == Duration::ZERO {
                    break;
                }

                let notified = wait_for_output.unwrap_or_else(|| output_notify.notified());
                tokio::pin!(notified);
                tokio::select! {
                    _ = &mut notified => {}
                    _ = tokio::time::sleep(remaining) => break,
                }
                continue;
            }

            for chunk in drained_chunks {
                collected.extend_from_slice(&chunk);
            }

            if Instant::now() >= deadline {
                break;
            }
        }

        let (output, _maybe_tokens) = truncate_middle(
            &String::from_utf8_lossy(&collected),
            UNIFIED_EXEC_OUTPUT_MAX_BYTES,
        );
        let output = if let Some(warning) = timeout_warning {
            format!("{warning}{output}")
        } else {
            output
        };

        let should_store_session = if let Some(session) = new_session.as_ref() {
            !session.has_exited()
        } else if request.session_id.is_some() {
            let mut sessions = self.sessions.lock().await;
            if let Some(existing) = sessions.get(&session_id) {
                if existing.has_exited() {
                    sessions.remove(&session_id);
                    false
                } else {
                    true
                }
            } else {
                false
            }
        } else {
            true
        };

        if should_store_session {
            if let Some(session) = new_session {
                self.sessions.lock().await.insert(session_id, session);
            }
            Ok(UnifiedExecResult {
                session_id: Some(session_id),
                output,
            })
        } else {
            Ok(UnifiedExecResult {
                session_id: None,
                output,
            })
        }
    }
}

async fn create_unified_exec_session(
    command: &[String],
) -> Result<ExecCommandSession, UnifiedExecError> {
    if command.is_empty() {
        return Err(UnifiedExecError::MissingCommandLine);
    }

    let pty_system = native_pty_system();

    let pair = pty_system
        .openpty(PtySize {
            rows: 24,
            cols: 80,
            pixel_width: 0,
            pixel_height: 0,
        })
        .map_err(UnifiedExecError::create_session)?;

    // Safe thanks to the check at the top of the function.
    let mut command_builder = CommandBuilder::new(command[0].clone());
    for arg in &command[1..] {
        command_builder.arg(arg);
    }

    let mut child = pair
        .slave
        .spawn_command(command_builder)
        .map_err(UnifiedExecError::create_session)?;
    let killer = child.clone_killer();

    let (writer_tx, mut writer_rx) = mpsc::channel::<Vec<u8>>(128);
    let (output_tx, _) = tokio::sync::broadcast::channel::<Vec<u8>>(256);

    let mut reader = pair
        .master
        .try_clone_reader()
        .map_err(UnifiedExecError::create_session)?;
    let output_tx_clone = output_tx.clone();
    let reader_handle = tokio::task::spawn_blocking(move || {
        let mut buf = [0u8; 8192];
        loop {
            match reader.read(&mut buf) {
                Ok(0) => break,
                Ok(n) => {
                    let _ = output_tx_clone.send(buf[..n].to_vec());
                }
                Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
                Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
                    std::thread::sleep(Duration::from_millis(5));
                    continue;
                }
                Err(_) => break,
            }
        }
    });

    let writer = pair
        .master
        .take_writer()
        .map_err(UnifiedExecError::create_session)?;
    let writer = Arc::new(StdMutex::new(writer));
    let writer_handle = tokio::spawn({
        let writer = writer.clone();
        async move {
            while let Some(bytes) = writer_rx.recv().await {
                let writer = writer.clone();
                let _ = tokio::task::spawn_blocking(move || {
                    if let Ok(mut guard) = writer.lock() {
                        use std::io::Write;
                        let _ = guard.write_all(&bytes);
                        let _ = guard.flush();
                    }
                })
                .await;
            }
        }
    });

    let exit_status = Arc::new(AtomicBool::new(false));
    let wait_exit_status = Arc::clone(&exit_status);
    let wait_handle = tokio::task::spawn_blocking(move || {
        let _ = child.wait();
        wait_exit_status.store(true, Ordering::SeqCst);
    });

    Ok(ExecCommandSession::new(
        writer_tx,
        output_tx,
        killer,
        reader_handle,
        writer_handle,
        wait_handle,
        exit_status,
    ))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn push_chunk_trims_only_excess_bytes() {
        let mut buffer = OutputBufferState::default();
        buffer.push_chunk(vec![b'a'; UNIFIED_EXEC_OUTPUT_MAX_BYTES]);
        buffer.push_chunk(vec![b'b']);
        buffer.push_chunk(vec![b'c']);

        assert_eq!(buffer.total_bytes, UNIFIED_EXEC_OUTPUT_MAX_BYTES);
        assert_eq!(buffer.chunks.len(), 3);
        assert_eq!(
            buffer.chunks.front().unwrap().len(),
            UNIFIED_EXEC_OUTPUT_MAX_BYTES - 2
        );
        assert_eq!(buffer.chunks.pop_back().unwrap(), vec![b'c']);
        assert_eq!(buffer.chunks.pop_back().unwrap(), vec![b'b']);
    }

    #[cfg(unix)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn unified_exec_persists_across_requests_jif() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();

        let open_shell = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &["bash".to_string(), "-i".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;
        let session_id = open_shell.session_id.expect("expected session_id");

        manager
            .handle_request(UnifiedExecRequest {
                session_id: Some(session_id),
                input_chunks: &[
                    "export".to_string(),
                    "CODEX_INTERACTIVE_SHELL_VAR=codex\n".to_string(),
                ],
                timeout_ms: Some(2_500),
            })
            .await?;

        let out_2 = manager
            .handle_request(UnifiedExecRequest {
                session_id: Some(session_id),
                input_chunks: &["echo $CODEX_INTERACTIVE_SHELL_VAR\n".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;
        assert!(out_2.output.contains("codex"));

        Ok(())
    }

    #[cfg(unix)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn multi_unified_exec_sessions() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();

        let shell_a = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &["/bin/bash".to_string(), "-i".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;
        let session_a = shell_a.session_id.expect("expected session id");

        manager
            .handle_request(UnifiedExecRequest {
                session_id: Some(session_a),
                input_chunks: &["export CODEX_INTERACTIVE_SHELL_VAR=codex\n".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;

        let out_2 = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &[
                    "echo".to_string(),
                    "$CODEX_INTERACTIVE_SHELL_VAR\n".to_string(),
                ],
                timeout_ms: Some(1_500),
            })
            .await?;
        assert!(!out_2.output.contains("codex"));

        let out_3 = manager
            .handle_request(UnifiedExecRequest {
                session_id: Some(session_a),
                input_chunks: &["echo $CODEX_INTERACTIVE_SHELL_VAR\n".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;
        assert!(out_3.output.contains("codex"));

        Ok(())
    }

    #[cfg(unix)]
    #[tokio::test]
    async fn unified_exec_timeouts() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();

        let open_shell = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &["bash".to_string(), "-i".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;
        let session_id = open_shell.session_id.expect("expected session id");

        manager
            .handle_request(UnifiedExecRequest {
                session_id: Some(session_id),
                input_chunks: &[
                    "export".to_string(),
                    "CODEX_INTERACTIVE_SHELL_VAR=codex\n".to_string(),
                ],
                timeout_ms: Some(1_500),
            })
            .await?;

        let out_2 = manager
            .handle_request(UnifiedExecRequest {
                session_id: Some(session_id),
                input_chunks: &["sleep 5 && echo $CODEX_INTERACTIVE_SHELL_VAR\n".to_string()],
                timeout_ms: Some(10),
            })
            .await?;
        assert!(!out_2.output.contains("codex"));

        tokio::time::sleep(Duration::from_secs(7)).await;

        let empty = Vec::new();
        let out_3 = manager
            .handle_request(UnifiedExecRequest {
                session_id: Some(session_id),
                input_chunks: &empty,
                timeout_ms: Some(100),
            })
            .await?;

        assert!(out_3.output.contains("codex"));

        Ok(())
    }

    #[cfg(unix)]
    #[tokio::test]
    async fn requests_with_large_timeout_are_capped() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();

        let result = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &["echo".to_string(), "codex".to_string()],
                timeout_ms: Some(120_000),
            })
            .await?;

        assert!(result.output.starts_with(
            "Warning: requested timeout 120000ms exceeds maximum of 60000ms; clamping to 60000ms.\n"
        ));
        assert!(result.output.contains("codex"));

        Ok(())
    }

    #[cfg(unix)]
    #[tokio::test]
    async fn completed_commands_do_not_persist_sessions() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();
        let result = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &["/bin/echo".to_string(), "codex".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;

        assert!(result.session_id.is_none());
        assert!(result.output.contains("codex"));

        assert!(manager.sessions.lock().await.is_empty());

        Ok(())
    }

    #[cfg(unix)]
    #[tokio::test]
    async fn correct_path_resolution() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();
        let result = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &["echo".to_string(), "codex".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;

        assert!(result.session_id.is_none());
        assert!(result.output.contains("codex"));

        assert!(manager.sessions.lock().await.is_empty());

        Ok(())
    }

    #[cfg(unix)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn reusing_completed_session_returns_unknown_session() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();

        let open_shell = manager
            .handle_request(UnifiedExecRequest {
                session_id: None,
                input_chunks: &["/bin/bash".to_string(), "-i".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;
        let session_id = open_shell.session_id.expect("expected session id");

        manager
            .handle_request(UnifiedExecRequest {
                session_id: Some(session_id),
                input_chunks: &["exit\n".to_string()],
                timeout_ms: Some(1_500),
            })
            .await?;

        tokio::time::sleep(Duration::from_millis(200)).await;

        let err = manager
            .handle_request(UnifiedExecRequest {
                session_id: Some(session_id),
                input_chunks: &[],
                timeout_ms: Some(100),
            })
            .await
            .expect_err("expected unknown session error");

        match err {
            UnifiedExecError::UnknownSessionId { session_id: err_id } => {
                assert_eq!(err_id, session_id);
            }
            other => panic!("expected UnknownSessionId, got {other:?}"),
        }

        assert!(!manager.sessions.lock().await.contains_key(&session_id));

        Ok(())
    }
}
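The ring-buffer trimming in `OutputBufferState::push_chunk` above caps retained output at `UNIFIED_EXEC_OUTPUT_MAX_BYTES` by dropping whole chunks from the front and splitting the first chunk that straddles the limit. The same idea as a standalone sketch, with the cap shrunk to 8 bytes for readability (names here are illustrative, not part of the diff):

    use std::collections::VecDeque;

    const CAP: usize = 8; // illustrative stand-in for UNIFIED_EXEC_OUTPUT_MAX_BYTES

    fn push_capped(chunks: &mut VecDeque<Vec<u8>>, total: &mut usize, chunk: Vec<u8>) {
        *total += chunk.len();
        chunks.push_back(chunk);
        let mut excess = total.saturating_sub(CAP);
        while excess > 0 {
            match chunks.front_mut() {
                Some(front) if excess >= front.len() => {
                    // The whole oldest chunk falls outside the cap: drop it.
                    excess -= front.len();
                    *total -= front.len();
                    chunks.pop_front();
                }
                Some(front) => {
                    // The oldest chunk straddles the cap: trim only the excess bytes.
                    front.drain(..excess);
                    *total -= excess;
                    break;
                }
                None => break,
            }
        }
    }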
@@ -1,4 +1,6 @@
use assert_cmd::Command as AssertCommand;
use codex_core::RolloutRecorder;
use codex_core::protocol::GitInfo;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use std::time::Duration;
use std::time::Instant;
@@ -77,6 +79,22 @@ async fn chat_mode_stream_cli() {
    assert_eq!(hi_lines, 1, "Expected exactly one line with 'hi'");

    server.verify().await;

    // Verify a new session rollout was created and is discoverable via list_conversations
    let page = RolloutRecorder::list_conversations(home.path(), 10, None)
        .await
        .expect("list conversations");
    assert!(
        !page.items.is_empty(),
        "expected at least one session to be listed"
    );
    // First line of head must be the SessionMeta payload (id/timestamp)
    let head0 = page.items[0].head.first().expect("missing head record");
    assert!(head0.get("id").is_some(), "head[0] missing id");
    assert!(
        head0.get("timestamp").is_some(),
        "head[0] missing timestamp"
    );
}

/// Verify that passing `-c experimental_instructions_file=...` to the CLI
@@ -297,8 +315,10 @@ async fn integration_creates_and_checks_session_file() {
            Ok(v) => v,
            Err(_) => continue,
        };
        if item.get("type").and_then(|t| t.as_str()) == Some("message")
            && let Some(c) = item.get("content")
        if item.get("type").and_then(|t| t.as_str()) == Some("response_item")
            && let Some(payload) = item.get("payload")
            && payload.get("type").and_then(|t| t.as_str()) == Some("message")
            && let Some(c) = payload.get("content")
            && c.to_string().contains(&marker)
        {
            matching_path = Some(path.to_path_buf());
@@ -361,9 +381,16 @@ async fn integration_creates_and_checks_session_file() {
        .unwrap_or_else(|_| panic!("missing session meta line"));
    let meta: serde_json::Value = serde_json::from_str(meta_line)
        .unwrap_or_else(|_| panic!("Failed to parse session meta line as JSON"));
    assert!(meta.get("id").is_some(), "SessionMeta missing id");
    assert_eq!(
        meta.get("type").and_then(|v| v.as_str()),
        Some("session_meta")
    );
    let payload = meta
        .get("payload")
        .unwrap_or_else(|| panic!("Missing payload in meta line"));
    assert!(payload.get("id").is_some(), "SessionMeta missing id");
    assert!(
        meta.get("timestamp").is_some(),
        payload.get("timestamp").is_some(),
        "SessionMeta missing timestamp"
    );

@@ -375,8 +402,10 @@ async fn integration_creates_and_checks_session_file() {
        let Ok(item) = serde_json::from_str::<serde_json::Value>(line) else {
            continue;
        };
        if item.get("type").and_then(|t| t.as_str()) == Some("message")
            && let Some(c) = item.get("content")
        if item.get("type").and_then(|t| t.as_str()) == Some("response_item")
            && let Some(payload) = item.get("payload")
            && payload.get("type").and_then(|t| t.as_str()) == Some("message")
            && let Some(c) = payload.get("content")
            && c.to_string().contains(&marker)
        {
            found_message = true;
@@ -589,7 +618,7 @@ async fn integration_git_info_unit_test() {

    // 5. Test serialization to ensure it works in SessionMeta
    let serialized = serde_json::to_string(&git_info).unwrap();
    let deserialized: codex_core::git_info::GitInfo = serde_json::from_str(&serialized).unwrap();
    let deserialized: GitInfo = serde_json::from_str(&serialized).unwrap();

    assert_eq!(git_info.commit_hash, deserialized.commit_hash);
    assert_eq!(git_info.branch, deserialized.branch);

@@ -123,10 +123,22 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
    let tmpdir = TempDir::new().unwrap();
    let session_path = tmpdir.path().join("resume-session.jsonl");
    let mut f = std::fs::File::create(&session_path).unwrap();
    let convo_id = Uuid::new_v4();
    writeln!(
        f,
        "{}",
        json!({"meta":"test","instructions":"be nice", "id": Uuid::new_v4(), "timestamp": "2024-01-01T00:00:00Z"})
        json!({
            "timestamp": "2024-01-01T00:00:00.000Z",
            "type": "session_meta",
            "payload": {
                "id": convo_id,
                "timestamp": "2024-01-01T00:00:00Z",
                "instructions": "be nice",
                "cwd": ".",
                "originator": "test_originator",
                "cli_version": "test_version"
            }
        })
    )
    .unwrap();

@@ -138,7 +150,17 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
            text: "resumed user message".to_string(),
        }],
    };
    writeln!(f, "{}", serde_json::to_string(&prior_user).unwrap()).unwrap();
    let prior_user_json = serde_json::to_value(&prior_user).unwrap();
    writeln!(
        f,
        "{}",
        json!({
            "timestamp": "2024-01-01T00:00:01.000Z",
            "type": "response_item",
            "payload": prior_user_json
        })
    )
    .unwrap();

    // Prior item: system message (excluded from API history)
    let prior_system = codex_protocol::models::ResponseItem::Message {
@@ -148,7 +170,17 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
            text: "resumed system instruction".to_string(),
        }],
    };
    writeln!(f, "{}", serde_json::to_string(&prior_system).unwrap()).unwrap();
    let prior_system_json = serde_json::to_value(&prior_system).unwrap();
    writeln!(
        f,
        "{}",
        json!({
            "timestamp": "2024-01-01T00:00:02.000Z",
            "type": "response_item",
            "payload": prior_system_json
        })
    )
    .unwrap();

    // Prior item: assistant message
    let prior_item = codex_protocol::models::ResponseItem::Message {
@@ -158,7 +190,17 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
            text: "resumed assistant message".to_string(),
        }],
    };
    writeln!(f, "{}", serde_json::to_string(&prior_item).unwrap()).unwrap();
    let prior_item_json = serde_json::to_value(&prior_item).unwrap();
    writeln!(
        f,
        "{}",
        json!({
            "timestamp": "2024-01-01T00:00:03.000Z",
            "type": "response_item",
            "payload": prior_item_json
        })
    )
    .unwrap();
    drop(f);

    // Mock server that will receive the resumed request
@@ -196,16 +238,13 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
        .await
        .expect("create new conversation");

    // 1) Assert initial_messages contains the prior user + assistant messages as EventMsg entries
    // 1) Assert initial_messages only includes existing EventMsg entries; response items are not converted
    let initial_msgs = session_configured
        .initial_messages
        .clone()
        .expect("expected initial messages for resumed session");
        .expect("expected initial messages option for resumed session");
    let initial_json = serde_json::to_value(&initial_msgs).unwrap();
    let expected_initial_json = json!([
        { "type": "user_message", "message": "resumed user message", "kind": "plain" },
        { "type": "agent_message", "message": "resumed assistant message" }
    ]);
    let expected_initial_json = json!([]);
    assert_eq!(initial_json, expected_initial_json);

    // 2) Submit new input; the request body must include the prior item followed by the new user input.
@@ -238,7 +277,25 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
            "content": [{ "type": "input_text", "text": "hello" }]
        }
    ]);
    assert_eq!(request_body["input"], expected_input);
    let input_array = request_body
        .get("input")
        .and_then(|v| v.as_array())
        .cloned()
        .expect("input array in request body");
    let filtered: Vec<serde_json::Value> = input_array
        .into_iter()
        .filter(|item| {
            let text = item
                .get("content")
                .and_then(|c| c.as_array())
                .and_then(|a| a.first())
                .and_then(|o| o.get("text"))
                .and_then(|t| t.as_str())
                .unwrap_or("");
            !text.contains("<environment_context>")
        })
        .collect();
    assert_eq!(serde_json::json!(filtered), expected_input);
}
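For reference, the rollout JSONL shape the resumed-session test writes above is one object per line: a `session_meta` record first, then `response_item` records wrapping each prior message (values illustrative, taken from the test):

    {"timestamp":"2024-01-01T00:00:00.000Z","type":"session_meta","payload":{"id":"<uuid>","timestamp":"2024-01-01T00:00:00Z","instructions":"be nice","cwd":".","originator":"test_originator","cli_version":"test_version"}}
    {"timestamp":"2024-01-01T00:00:01.000Z","type":"response_item","payload":{"type":"message","role":"user","content":[{"type":"input_text","text":"resumed user message"}]}}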
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
@@ -371,56 +428,6 @@ async fn includes_base_instructions_override_in_request() {
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn originator_config_override_is_used() {
|
||||
// Mock server
|
||||
let server = MockServer::start().await;
|
||||
|
||||
let first = ResponseTemplate::new(200)
|
||||
.insert_header("content-type", "text/event-stream")
|
||||
.set_body_raw(sse_completed("resp1"), "text/event-stream");
|
||||
|
||||
Mock::given(method("POST"))
|
||||
.and(path("/v1/responses"))
|
||||
.respond_with(first)
|
||||
.expect(1)
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let model_provider = ModelProviderInfo {
|
||||
base_url: Some(format!("{}/v1", server.uri())),
|
||||
..built_in_model_providers()["openai"].clone()
|
||||
};
|
||||
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let mut config = load_default_config_for_test(&codex_home);
|
||||
config.model_provider = model_provider;
|
||||
config.responses_originator_header = "my_override".to_owned();
|
||||
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
|
||||
codex
|
||||
.submit(Op::UserInput {
|
||||
items: vec![InputItem::Text {
|
||||
text: "hello".into(),
|
||||
}],
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
let request = &server.received_requests().await.unwrap()[0];
|
||||
let request_originator = request.headers.get("originator").unwrap();
|
||||
assert_eq!(request_originator.to_str().unwrap(), "my_override");
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn chatgpt_auth_sends_correct_request() {
|
||||
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||
@@ -546,15 +553,12 @@ async fn prefers_chatgpt_token_when_config_prefers_chatgpt() {
|
||||
config.model_provider = model_provider;
|
||||
config.preferred_auth_method = AuthMode::ChatGPT;
|
||||
|
||||
let auth_manager = match CodexAuth::from_codex_home(
|
||||
codex_home.path(),
|
||||
config.preferred_auth_method,
|
||||
&config.responses_originator_header,
|
||||
) {
|
||||
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
|
||||
Ok(None) => panic!("No CodexAuth found in codex_home"),
|
||||
Err(e) => panic!("Failed to load CodexAuth: {e}"),
|
||||
};
|
||||
let auth_manager =
|
||||
match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
|
||||
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
|
||||
Ok(None) => panic!("No CodexAuth found in codex_home"),
|
||||
Err(e) => panic!("Failed to load CodexAuth: {e}"),
|
||||
};
|
||||
let conversation_manager = ConversationManager::new(auth_manager);
|
||||
let NewConversation {
|
||||
conversation: codex,
|
||||
@@ -622,15 +626,12 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
|
||||
config.model_provider = model_provider;
|
||||
config.preferred_auth_method = AuthMode::ApiKey;
|
||||
|
||||
let auth_manager = match CodexAuth::from_codex_home(
|
||||
codex_home.path(),
|
||||
config.preferred_auth_method,
|
||||
&config.responses_originator_header,
|
||||
) {
|
||||
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
|
||||
Ok(None) => panic!("No CodexAuth found in codex_home"),
|
||||
Err(e) => panic!("Failed to load CodexAuth: {e}"),
|
||||
};
|
||||
let auth_manager =
|
||||
match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
|
||||
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
|
||||
Ok(None) => panic!("No CodexAuth found in codex_home"),
|
||||
Err(e) => panic!("Failed to load CodexAuth: {e}"),
|
||||
};
|
||||
let conversation_manager = ConversationManager::new(auth_manager);
|
||||
let NewConversation {
|
||||
conversation: codex,
|
||||
@@ -967,34 +968,6 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
|
||||
assert_eq!(requests.len(), 3, "expected 3 requests (one per turn)");
|
||||
|
||||
// Replace full-array compare with tail-only raw JSON compare using a single hard-coded value.
|
||||
let r3_tail_expected = json!([
|
||||
{
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [{"type":"input_text","text":"U1"}]
|
||||
},
|
||||
{
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"content": [{"type":"output_text","text":"Hey there!\n"}]
|
||||
},
|
||||
{
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [{"type":"input_text","text":"U2"}]
|
||||
},
|
||||
{
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"content": [{"type":"output_text","text":"Hey there!\n"}]
|
||||
},
|
||||
{
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [{"type":"input_text","text":"U3"}]
|
||||
}
|
||||
]);
|
||||
|
||||
let r3_input_array = requests[2]
|
||||
.body_json::<serde_json::Value>()
|
||||
.unwrap()
|
||||
@@ -1002,12 +975,60 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
|
||||
.and_then(|v| v.as_array())
|
||||
.cloned()
|
||||
.expect("r3 missing input array");
|
||||
// skipping earlier context and developer messages
|
||||
let tail_len = r3_tail_expected.as_array().unwrap().len();
|
||||
let actual_tail = &r3_input_array[r3_input_array.len() - tail_len..];
|
||||
// We only assert on the last 5 items of the input history for request 3.
|
||||
// With per-turn environment context injected, the last 5 should be:
|
||||
// [env_ctx, U2, assistant("Hey there!\n"), env_ctx, U3]
|
||||
let actual_tail = &r3_input_array[r3_input_array.len() - 5..];
|
||||
|
||||
// env_ctx 1
|
||||
assert_eq!(actual_tail[0]["type"], serde_json::json!("message"));
|
||||
assert_eq!(actual_tail[0]["role"], serde_json::json!("user"));
|
||||
let env_text_1 = &actual_tail[0]["content"][0]["text"];
|
||||
assert!(
|
||||
env_text_1
|
||||
.as_str()
|
||||
.expect("env text should be string")
|
||||
.contains("<environment_context>")
|
||||
);
|
||||
|
||||
// U2
|
||||
assert_eq!(
|
||||
serde_json::Value::Array(actual_tail.to_vec()),
|
||||
r3_tail_expected,
|
||||
"request 3 tail mismatch",
|
||||
actual_tail[1],
|
||||
serde_json::json!({
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [ { "type": "input_text", "text": "U2" } ]
|
||||
})
|
||||
);
|
||||
|
||||
// assistant response
|
||||
assert_eq!(
|
||||
actual_tail[2],
|
||||
serde_json::json!({
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"content": [ { "type": "output_text", "text": "Hey there!\n" } ]
|
||||
})
|
||||
);
|
||||
|
||||
// env_ctx 2
|
||||
assert_eq!(actual_tail[3]["type"], serde_json::json!("message"));
|
||||
assert_eq!(actual_tail[3]["role"], serde_json::json!("user"));
|
||||
let env_text_2 = &actual_tail[3]["content"][0]["text"];
|
||||
assert!(
|
||||
env_text_2
|
||||
.as_str()
|
||||
.expect("env text should be string")
|
||||
.contains("<environment_context>")
|
||||
);
|
||||
|
||||
// U3
|
||||
assert_eq!(
|
||||
actual_tail[4],
|
||||
serde_json::json!({
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [ { "type": "input_text", "text": "U3" } ]
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,12 +1,16 @@
|
||||
use codex_core::CodexAuth;
|
||||
use codex_core::ContentItem;
|
||||
use codex_core::ConversationManager;
|
||||
use codex_core::ModelProviderInfo;
|
||||
use codex_core::NewConversation;
|
||||
use codex_core::ResponseItem;
|
||||
use codex_core::built_in_model_providers;
|
||||
use codex_core::protocol::ConversationHistoryResponseEvent;
|
||||
use codex_core::protocol::ConversationPathResponseEvent;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::InputItem;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol::RolloutItem;
|
||||
use codex_core::protocol::RolloutLine;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
use core_test_support::wait_for_event;
|
||||
use tempfile::TempDir;
|
||||
@@ -71,84 +75,121 @@ async fn fork_conversation_twice_drops_to_first_message() {
|
||||
let _ = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
}
|
||||
|
||||
// Request history from the base conversation.
|
||||
codex.submit(Op::GetHistory).await.unwrap();
|
||||
// Request history from the base conversation to obtain rollout path.
|
||||
codex.submit(Op::GetPath).await.unwrap();
|
||||
let base_history =
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ConversationHistory(_))).await;
|
||||
|
||||
// Capture entries from the base history and compute expected prefixes after each fork.
|
||||
let entries_after_three = match &base_history {
|
||||
EventMsg::ConversationHistory(ConversationHistoryResponseEvent { entries, .. }) => {
|
||||
entries.clone()
|
||||
}
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ConversationPath(_))).await;
|
||||
let base_path = match &base_history {
|
||||
EventMsg::ConversationPath(ConversationPathResponseEvent { path, .. }) => path.clone(),
|
||||
_ => panic!("expected ConversationHistory event"),
|
||||
};
|
||||
// History layout for this test:
|
||||
// [0] user instructions,
|
||||
// [1] environment context,
|
||||
// [2] "first" user message,
|
||||
// [3] "second" user message,
|
||||
// [4] "third" user message.
|
||||
|
||||
// Fork 1: drops the last user message and everything after.
|
||||
let expected_after_first = vec![
|
||||
entries_after_three[0].clone(),
|
||||
entries_after_three[1].clone(),
|
||||
entries_after_three[2].clone(),
|
||||
entries_after_three[3].clone(),
|
||||
];
|
||||
// GetHistory flushes before returning the path; no wait needed.
|
||||
|
||||
// Fork 2: drops the last user message and everything after.
|
||||
// [0] user instructions,
|
||||
// [1] environment context,
|
||||
// [2] "first" user message,
|
||||
let expected_after_second = vec![
|
||||
entries_after_three[0].clone(),
|
||||
entries_after_three[1].clone(),
|
||||
entries_after_three[2].clone(),
|
||||
];
|
||||
// Helper: read rollout items (excluding SessionMeta) from a JSONL path.
|
||||
let read_items = |p: &std::path::Path| -> Vec<RolloutItem> {
|
||||
let text = std::fs::read_to_string(p).expect("read rollout file");
|
||||
let mut items: Vec<RolloutItem> = Vec::new();
|
||||
for line in text.lines() {
|
||||
if line.trim().is_empty() {
|
||||
continue;
|
||||
}
|
||||
let v: serde_json::Value = serde_json::from_str(line).expect("jsonl line");
|
||||
let rl: RolloutLine = serde_json::from_value(v).expect("rollout line");
|
||||
match rl.item {
|
||||
RolloutItem::SessionMeta(_) => {}
|
||||
other => items.push(other),
|
||||
}
|
||||
}
|
||||
items
|
||||
};
|
||||
|
||||
// Fork once with n=1 → drops the last user message and everything after.
|
||||
// Compute expected prefixes after each fork by truncating base rollout at nth-from-last user input.
|
||||
let base_items = read_items(&base_path);
|
||||
let find_user_input_positions = |items: &[RolloutItem]| -> Vec<usize> {
|
||||
let mut pos = Vec::new();
|
||||
for (i, it) in items.iter().enumerate() {
|
||||
if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. }) = it
|
||||
&& role == "user"
|
||||
{
|
||||
// Consider any user message as an input boundary; recorder stores both EventMsg and ResponseItem.
|
||||
// We specifically look for input items, which are represented as ContentItem::InputText.
|
||||
if content
|
||||
.iter()
|
||||
.any(|c| matches!(c, ContentItem::InputText { .. }))
|
||||
{
|
||||
pos.push(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
pos
|
||||
};
|
||||
let user_inputs = find_user_input_positions(&base_items);
|
||||
|
||||
// After dropping last user input (n=1), cut strictly before that input if present, else empty.
|
||||
let cut1 = user_inputs
|
||||
.get(user_inputs.len().saturating_sub(1))
|
||||
.copied()
|
||||
.unwrap_or(0);
|
||||
let expected_after_first: Vec<RolloutItem> = base_items[..cut1].to_vec();
|
||||
|
||||
// After dropping again (n=1 on fork1), compute expected relative to fork1's rollout.
|
||||
|
||||
// Fork once with n=1 → drops the last user input and everything after.
|
||||
let NewConversation {
|
||||
conversation: codex_fork1,
|
||||
..
|
||||
} = conversation_manager
|
||||
.fork_conversation(entries_after_three.clone(), 1, config_for_fork.clone())
|
||||
.fork_conversation(1, config_for_fork.clone(), base_path.clone())
|
||||
.await
|
||||
.expect("fork 1");
|
||||
|
||||
codex_fork1.submit(Op::GetHistory).await.unwrap();
|
||||
codex_fork1.submit(Op::GetPath).await.unwrap();
|
||||
let fork1_history = wait_for_event(&codex_fork1, |ev| {
|
||||
matches!(ev, EventMsg::ConversationHistory(_))
|
||||
matches!(ev, EventMsg::ConversationPath(_))
|
||||
})
|
||||
.await;
|
||||
let entries_after_first_fork = match &fork1_history {
|
||||
EventMsg::ConversationHistory(ConversationHistoryResponseEvent { entries, .. }) => {
|
||||
assert!(matches!(
|
||||
fork1_history,
|
||||
EventMsg::ConversationHistory(ConversationHistoryResponseEvent { ref entries, .. }) if *entries == expected_after_first
|
||||
));
|
||||
entries.clone()
|
||||
}
|
||||
let fork1_path = match &fork1_history {
|
||||
EventMsg::ConversationPath(ConversationPathResponseEvent { path, .. }) => path.clone(),
|
||||
_ => panic!("expected ConversationHistory event after first fork"),
|
||||
};
|
||||
|
||||
// GetHistory on fork1 flushed; the file is ready.
|
||||
let fork1_items = read_items(&fork1_path);
|
||||
pretty_assertions::assert_eq!(
|
||||
serde_json::to_value(&fork1_items).unwrap(),
|
||||
serde_json::to_value(&expected_after_first).unwrap()
|
||||
);
|
||||
|
||||
// Fork again with n=1 → drops the (new) last user message, leaving only the first.
|
||||
let NewConversation {
|
||||
conversation: codex_fork2,
|
||||
..
|
||||
} = conversation_manager
|
||||
.fork_conversation(entries_after_first_fork.clone(), 1, config_for_fork.clone())
|
||||
.fork_conversation(1, config_for_fork.clone(), fork1_path.clone())
|
||||
.await
|
||||
.expect("fork 2");
|
||||
|
||||
codex_fork2.submit(Op::GetHistory).await.unwrap();
|
||||
codex_fork2.submit(Op::GetPath).await.unwrap();
|
||||
let fork2_history = wait_for_event(&codex_fork2, |ev| {
|
||||
matches!(ev, EventMsg::ConversationHistory(_))
|
||||
matches!(ev, EventMsg::ConversationPath(_))
|
||||
})
|
||||
.await;
|
||||
assert!(matches!(
|
||||
fork2_history,
|
||||
EventMsg::ConversationHistory(ConversationHistoryResponseEvent { ref entries, .. }) if *entries == expected_after_second
|
||||
));
|
||||
let fork2_path = match &fork2_history {
|
||||
EventMsg::ConversationPath(ConversationPathResponseEvent { path, .. }) => path.clone(),
|
||||
_ => panic!("expected ConversationHistory event after second fork"),
|
||||
};
|
||||
// GetHistory on fork2 flushed; the file is ready.
|
||||
let fork1_items = read_items(&fork1_path);
|
||||
let fork1_user_inputs = find_user_input_positions(&fork1_items);
|
||||
let cut_last_on_fork1 = fork1_user_inputs
|
||||
.get(fork1_user_inputs.len().saturating_sub(1))
|
||||
.copied()
|
||||
.unwrap_or(0);
|
||||
let expected_after_second: Vec<RolloutItem> = fork1_items[..cut_last_on_fork1].to_vec();
|
||||
let fork2_items = read_items(&fork2_path);
|
||||
pretty_assertions::assert_eq!(
|
||||
serde_json::to_value(&fork2_items).unwrap(),
|
||||
serde_json::to_value(&expected_after_second).unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
@@ -191,7 +191,8 @@ async fn prompt_tools_are_consistent_across_requests() {
|
||||
let expected_instructions: &str = include_str!("../../prompt.md");
|
||||
// our internal implementation is responsible for keeping tools in sync
|
||||
// with the OpenAI schema, so we just verify the tool presence here
|
||||
let expected_tools_names: &[&str] = &["shell", "update_plan", "apply_patch", "view_image"];
|
||||
let expected_tools_names: &[&str] =
|
||||
&["unified_exec", "update_plan", "apply_patch", "view_image"];
|
||||
let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
|
||||
assert_eq!(
|
||||
body0["instructions"],
|
||||
@@ -271,7 +272,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
|
||||
|
||||
let shell = default_user_shell().await;
|
||||
|
||||
let expected_env_text = format!(
|
||||
let expected_env_text_init = format!(
|
||||
r#"<environment_context>
|
||||
<cwd>{}</cwd>
|
||||
<approval_policy>on-request</approval_policy>
|
||||
@@ -284,13 +285,28 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
|
||||
None => String::new(),
|
||||
}
|
||||
);
|
||||
// Per-turn environment context omits the shell tag.
|
||||
let expected_env_text_turn = format!(
|
||||
r#"<environment_context>
|
||||
<cwd>{}</cwd>
|
||||
<approval_policy>on-request</approval_policy>
|
||||
<sandbox_mode>read-only</sandbox_mode>
|
||||
<network_access>restricted</network_access>
|
||||
</environment_context>"#,
|
||||
cwd.path().to_string_lossy(),
|
||||
);
|
||||
let expected_ui_text =
|
||||
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
|
||||
|
||||
let expected_env_msg = serde_json::json!({
|
||||
let expected_env_msg_init = serde_json::json!({
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [ { "type": "input_text", "text": expected_env_text } ]
|
||||
"content": [ { "type": "input_text", "text": expected_env_text_init } ]
|
||||
});
|
||||
let expected_env_msg_turn = serde_json::json!({
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [ { "type": "input_text", "text": expected_env_text_turn } ]
|
||||
});
|
||||
let expected_ui_msg = serde_json::json!({
|
||||
"type": "message",
|
||||
@@ -306,7 +322,12 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
|
||||
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
|
||||
assert_eq!(
|
||||
body1["input"],
|
||||
serde_json::json!([expected_ui_msg, expected_env_msg, expected_user_message_1])
|
||||
serde_json::json!([
|
||||
expected_ui_msg,
|
||||
expected_env_msg_init,
|
||||
expected_env_msg_turn,
|
||||
expected_user_message_1
|
||||
])
|
||||
);
|
||||
|
||||
let expected_user_message_2 = serde_json::json!({
|
||||
@@ -318,7 +339,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
|
||||
let expected_body2 = serde_json::json!(
|
||||
[
|
||||
body1["input"].as_array().unwrap().as_slice(),
|
||||
[expected_user_message_2].as_slice(),
|
||||
[expected_env_msg_turn, expected_user_message_2].as_slice(),
|
||||
]
|
||||
.concat()
|
||||
);
|
||||
@@ -426,11 +447,17 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
|
||||
// After overriding the turn context, the environment context should be emitted again
|
||||
// reflecting the new approval policy and sandbox settings. Omit cwd because it did
|
||||
// not change.
|
||||
let expected_env_text_2 = r#"<environment_context>
|
||||
let expected_env_text_2 = format!(
|
||||
r#"<environment_context>
|
||||
<approval_policy>never</approval_policy>
|
||||
<sandbox_mode>workspace-write</sandbox_mode>
|
||||
<network_access>enabled</network_access>
|
||||
</environment_context>"#;
|
||||
<writable_roots>
|
||||
<root>{}</root>
|
||||
</writable_roots>
|
||||
</environment_context>"#,
|
||||
writable.path().to_string_lossy()
|
||||
);
|
||||
let expected_env_msg_2 = serde_json::json!({
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
@@ -540,10 +567,24 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
|
||||
"role": "user",
|
||||
"content": [ { "type": "input_text", "text": "hello 2" } ]
|
||||
});
|
||||
let expected_env_text_2 = format!(
|
||||
r#"<environment_context>
|
||||
<cwd>{}</cwd>
|
||||
<approval_policy>never</approval_policy>
|
||||
<sandbox_mode>workspace-write</sandbox_mode>
|
||||
<network_access>enabled</network_access>
|
||||
</environment_context>"#,
|
||||
new_cwd.path().to_string_lossy()
|
||||
);
|
||||
let expected_env_msg_2 = serde_json::json!({
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": [ { "type": "input_text", "text": expected_env_text_2 } ]
|
||||
});
|
||||
let expected_body2 = serde_json::json!(
|
||||
[
|
||||
body1["input"].as_array().unwrap().as_slice(),
|
||||
[expected_user_message_2].as_slice(),
|
||||
[expected_env_msg_2, expected_user_message_2].as_slice(),
|
||||
]
|
||||
.concat()
|
||||
);
|
||||
|
||||
@@ -159,6 +159,41 @@ async fn read_only_forbids_all_writes() {
|
||||
.await;
|
||||
}
|
||||
|
||||
/// Verify that user lookups via `pwd.getpwuid(os.getuid())` work under the
|
||||
/// seatbelt sandbox. Prior to allowing the necessary mach‑lookup for
|
||||
/// OpenDirectory libinfo, this would fail with `KeyError: getpwuid(): uid not found`.
|
||||
#[tokio::test]
|
||||
async fn python_getpwuid_works_under_seatbelt() {
|
||||
if std::env::var(CODEX_SANDBOX_ENV_VAR) == Ok("seatbelt".to_string()) {
|
||||
eprintln!("{CODEX_SANDBOX_ENV_VAR} is set to 'seatbelt', skipping test.");
|
||||
return;
|
||||
}
|
||||
|
||||
// ReadOnly is sufficient here since we are only exercising user lookup.
|
||||
let policy = SandboxPolicy::ReadOnly;
|
||||
|
||||
let mut child = spawn_command_under_seatbelt(
|
||||
vec![
|
||||
"python3".to_string(),
|
||||
"-c".to_string(),
|
||||
// Print the passwd struct; success implies lookup worked.
|
||||
"import pwd, os; print(pwd.getpwuid(os.getuid()))".to_string(),
|
||||
],
|
||||
&policy,
|
||||
std::env::current_dir().expect("should be able to get current dir"),
|
||||
StdioPolicy::RedirectForShellTool,
|
||||
HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.expect("should be able to spawn python under seatbelt");
|
||||
|
||||
let status = child
|
||||
.wait()
|
||||
.await
|
||||
.expect("should be able to wait for child process");
|
||||
assert!(status.success(), "python exited with {status:?}");
|
||||
}
|
||||
|
||||
#[expect(clippy::expect_used)]
|
||||
fn create_test_scenario(tmp: &TempDir) -> TestScenario {
|
||||
let repo_parent = tmp.path().to_path_buf();
|
||||
|
||||
@@ -25,7 +25,6 @@ codex-common = { path = "../common", features = [
|
||||
"sandbox_summary",
|
||||
] }
|
||||
codex-core = { path = "../core" }
|
||||
codex-login = { path = "../login" }
|
||||
codex-ollama = { path = "../ollama" }
|
||||
codex-protocol = { path = "../protocol" }
|
||||
owo-colors = "4.2.0"
|
||||
|
||||
@@ -523,6 +523,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
|
||||
history_log_id: _,
|
||||
history_entry_count: _,
|
||||
initial_messages: _,
|
||||
rollout_path: _,
|
||||
} = session_configured_event;
|
||||
|
||||
ts_println!(
|
||||
@@ -558,7 +559,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
|
||||
}
|
||||
},
|
||||
EventMsg::ShutdownComplete => return CodexStatus::Shutdown,
|
||||
EventMsg::ConversationHistory(_) => {}
|
||||
EventMsg::ConversationPath(_) => {}
|
||||
EventMsg::UserMessage(_) => {}
|
||||
}
|
||||
CodexStatus::Running
|
||||
|
||||
@@ -190,7 +190,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
|
||||
let conversation_manager = ConversationManager::new(AuthManager::shared(
|
||||
config.codex_home.clone(),
|
||||
config.preferred_auth_method,
|
||||
config.responses_originator_header.clone(),
|
||||
));
|
||||
let NewConversation {
|
||||
conversation_id: _,
|
||||
|
||||
@@ -15,9 +15,7 @@ path = "src/lib.rs"
|
||||
workspace = true
|
||||
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
anyhow = "1"
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
codex-common = { path = "../common", features = ["cli"] }
|
||||
codex-core = { path = "../core" }
|
||||
landlock = "0.4.1"
|
||||
libc = "0.2.175"
|
||||
|
||||
@@ -17,7 +17,6 @@ serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
sha2 = "0.10"
|
||||
tempfile = "3"
|
||||
thiserror = "2.0.16"
|
||||
tiny_http = "0.12"
|
||||
tokio = { version = "1", features = [
|
||||
"io-std",
|
||||
@@ -31,5 +30,4 @@ urlencoding = "2.1"
|
||||
webbrowser = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
pretty_assertions = "1.4.1"
|
||||
tempfile = "3"
|
||||
|
||||
@@ -16,6 +16,7 @@ use base64::Engine;
|
||||
use chrono::Utc;
|
||||
use codex_core::auth::AuthDotJson;
|
||||
use codex_core::auth::get_auth_file;
|
||||
use codex_core::default_client::ORIGINATOR;
|
||||
use codex_core::token_data::TokenData;
|
||||
use codex_core::token_data::parse_id_token;
|
||||
use rand::RngCore;
|
||||
@@ -35,11 +36,10 @@ pub struct ServerOptions {
|
||||
pub port: u16,
|
||||
pub open_browser: bool,
|
||||
pub force_state: Option<String>,
|
||||
pub originator: String,
|
||||
}
|
||||
|
||||
impl ServerOptions {
|
||||
pub fn new(codex_home: PathBuf, client_id: String, originator: String) -> Self {
|
||||
pub fn new(codex_home: PathBuf, client_id: String) -> Self {
|
||||
Self {
|
||||
codex_home,
|
||||
client_id: client_id.to_string(),
|
||||
@@ -47,7 +47,6 @@ impl ServerOptions {
|
||||
port: DEFAULT_PORT,
|
||||
open_browser: true,
|
||||
force_state: None,
|
||||
originator,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -103,14 +102,7 @@ pub fn run_login_server(opts: ServerOptions) -> io::Result<LoginServer> {
|
||||
let server = Arc::new(server);
|
||||
|
||||
let redirect_uri = format!("http://localhost:{actual_port}/auth/callback");
|
||||
let auth_url = build_authorize_url(
|
||||
&opts.issuer,
|
||||
&opts.client_id,
|
||||
&redirect_uri,
|
||||
&pkce,
|
||||
&state,
|
||||
&opts.originator,
|
||||
);
|
||||
let auth_url = build_authorize_url(&opts.issuer, &opts.client_id, &redirect_uri, &pkce, &state);
|
||||
|
||||
if opts.open_browser {
|
||||
let _ = webbrowser::open(&auth_url);
|
||||
@@ -311,7 +303,6 @@ fn build_authorize_url(
|
||||
redirect_uri: &str,
|
||||
pkce: &PkceCodes,
|
||||
state: &str,
|
||||
originator: &str,
|
||||
) -> String {
|
||||
let query = vec![
|
||||
("response_type", "code"),
|
||||
@@ -323,7 +314,7 @@ fn build_authorize_url(
|
||||
("id_token_add_organizations", "true"),
|
||||
("codex_cli_simplified_flow", "true"),
|
||||
("state", state),
|
||||
("originator", originator),
|
||||
("originator", ORIGINATOR.value.as_str()),
|
||||
];
|
||||
let qs = query
|
||||
.into_iter()
|
||||
|
||||
@@ -102,7 +102,6 @@ async fn end_to_end_login_flow_persists_auth_json() {
        port: 0,
        open_browser: false,
        force_state: Some(state),
        originator: "test_originator".to_string(),
    };
    let server = run_login_server(opts).unwrap();
    let login_port = server.actual_port;
@@ -161,7 +160,6 @@ async fn creates_missing_codex_home_dir() {
        port: 0,
        open_browser: false,
        force_state: Some(state),
        originator: "test_originator".to_string(),
    };
    let server = run_login_server(opts).unwrap();
    let login_port = server.actual_port;
@@ -202,7 +200,6 @@ async fn cancels_previous_login_server_when_port_is_in_use() {
        port: 0,
        open_browser: false,
        force_state: Some("cancel_state".to_string()),
        originator: "test_originator".to_string(),
    };

    let first_server = run_login_server(first_opts).unwrap();
@@ -221,7 +218,6 @@ async fn cancels_previous_login_server_when_port_is_in_use() {
        port: login_port,
        open_browser: false,
        force_state: Some("cancel_state_2".to_string()),
        originator: "test_originator".to_string(),
    };

    let second_server = run_login_server(second_opts).unwrap();

@@ -64,6 +64,9 @@ async fn main() -> Result<()> {
            name: "codex-mcp-client".to_owned(),
            version: env!("CARGO_PKG_VERSION").to_owned(),
            title: Some("Codex".to_string()),
            // This field is used by Codex when it is an MCP server: it should
            // not be used when Codex is an MCP client.
            user_agent: None,
        },
        protocol_version: MCP_SCHEMA_VERSION.to_owned(),
    };

@@ -26,7 +26,6 @@ schemars = "0.8.22"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
shlex = "1.3.0"
strum_macros = "0.27.2"
tokio = { version = "1", features = [
    "io-std",
    "macros",
@@ -41,8 +40,9 @@ uuid = { version = "1", features = ["serde", "v4"] }

[dev-dependencies]
assert_cmd = "2"
base64 = "0.22"
mcp_test_support = { path = "tests/common" }
os_info = "3.12.0"
pretty_assertions = "1.4.1"
tempfile = "3"
tokio-test = "0.4"
wiremock = "0.6"

@@ -11,6 +11,8 @@ use codex_core::NewConversation;
use codex_core::RolloutRecorder;
use codex_core::SessionMeta;
use codex_core::auth::CLIENT_ID;
use codex_core::auth::get_auth_file;
use codex_core::auth::try_read_auth_json;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
@@ -35,6 +37,8 @@ use codex_protocol::mcp_protocol::AddConversationListenerParams;
use codex_protocol::mcp_protocol::AddConversationSubscriptionResponse;
use codex_protocol::mcp_protocol::ApplyPatchApprovalParams;
use codex_protocol::mcp_protocol::ApplyPatchApprovalResponse;
use codex_protocol::mcp_protocol::ArchiveConversationParams;
use codex_protocol::mcp_protocol::ArchiveConversationResponse;
use codex_protocol::mcp_protocol::AuthMode;
use codex_protocol::mcp_protocol::AuthStatusChangeNotification;
use codex_protocol::mcp_protocol::ClientRequest;
@@ -65,6 +69,7 @@ use codex_protocol::mcp_protocol::SendUserMessageResponse;
use codex_protocol::mcp_protocol::SendUserTurnParams;
use codex_protocol::mcp_protocol::SendUserTurnResponse;
use codex_protocol::mcp_protocol::ServerNotification;
use codex_protocol::mcp_protocol::UserInfoResponse;
use codex_protocol::mcp_protocol::UserSavedConfig;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
@@ -73,12 +78,16 @@ use codex_protocol::protocol::USER_MESSAGE_BEGIN;
use mcp_types::JSONRPCErrorError;
use mcp_types::RequestId;
use std::collections::HashMap;
use std::ffi::OsStr;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::select;
use tokio::sync::Mutex;
use tokio::sync::oneshot;
use tracing::error;
use tracing::info;
use tracing::warn;
use uuid::Uuid;

// Duration before a ChatGPT login attempt is abandoned.
@@ -142,6 +151,9 @@ impl CodexMessageProcessor {
            ClientRequest::ResumeConversation { request_id, params } => {
                self.handle_resume_conversation(request_id, params).await;
            }
            ClientRequest::ArchiveConversation { request_id, params } => {
                self.archive_conversation(request_id, params).await;
            }
            ClientRequest::SendUserMessage { request_id, params } => {
                self.send_user_message(request_id, params).await;
            }
@@ -178,6 +190,9 @@ impl CodexMessageProcessor {
            ClientRequest::GetUserAgent { request_id } => {
                self.get_user_agent(request_id).await;
            }
            ClientRequest::UserInfo { request_id } => {
                self.get_user_info(request_id).await;
            }
            ClientRequest::ExecOneOffCommand { request_id, params } => {
                self.exec_one_off_command(request_id, params).await;
            }
@@ -189,11 +204,7 @@ impl CodexMessageProcessor {

        let opts = LoginServerOptions {
            open_browser: false,
            ..LoginServerOptions::new(
                config.codex_home.clone(),
                CLIENT_ID.to_string(),
                config.responses_originator_header.clone(),
            )
            ..LoginServerOptions::new(config.codex_home.clone(), CLIENT_ID.to_string())
        };

        enum LoginChatGptReply {
@@ -394,7 +405,7 @@ impl CodexMessageProcessor {
    }

    async fn get_user_agent(&self, request_id: RequestId) {
        let user_agent = get_codex_user_agent(Some(&self.config.responses_originator_header));
        let user_agent = get_codex_user_agent();
        let response = GetUserAgentResponse { user_agent };
        self.outgoing.send_response(request_id, response).await;
    }
@@ -434,6 +445,18 @@ impl CodexMessageProcessor {
        self.outgoing.send_response(request_id, response).await;
    }

    async fn get_user_info(&self, request_id: RequestId) {
        // Read alleged user email from auth.json (best-effort; not verified).
        let auth_path = get_auth_file(&self.config.codex_home);
        let alleged_user_email = match try_read_auth_json(&auth_path) {
            Ok(auth) => auth.tokens.and_then(|t| t.id_token.email),
            Err(_) => None,
        };

        let response = UserInfoResponse { alleged_user_email };
        self.outgoing.send_response(request_id, response).await;
    }

    async fn exec_one_off_command(&self, request_id: RequestId, params: ExecOneOffCommandParams) {
        tracing::debug!("ExecOneOffCommand params: {params:?}");

@@ -528,6 +551,7 @@ impl CodexMessageProcessor {
        let response = NewConversationResponse {
            conversation_id,
            model: session_configured.model,
            rollout_path: session_configured.rollout_path,
        };
        self.outgoing.send_response(request_id, response).await;
    }
@@ -669,6 +693,141 @@ impl CodexMessageProcessor {
        }
    }

    async fn archive_conversation(&self, request_id: RequestId, params: ArchiveConversationParams) {
        let ArchiveConversationParams {
            conversation_id,
            rollout_path,
        } = params;

        // Verify that the rollout path is in the sessions directory, or else
        // a malicious client could specify an arbitrary path.
        let rollout_folder = self.config.codex_home.join(codex_core::SESSIONS_SUBDIR);
        let canonical_rollout_path = tokio::fs::canonicalize(&rollout_path).await;
        let canonical_rollout_path = if let Ok(path) = canonical_rollout_path
            && path.starts_with(&rollout_folder)
        {
            path
        } else {
            let error = JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!(
                    "rollout path `{}` must be in sessions directory",
                    rollout_path.display()
                ),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
            return;
        };

        let required_suffix = format!("{}.jsonl", conversation_id.0);
        let Some(file_name) = canonical_rollout_path.file_name().map(OsStr::to_owned) else {
            let error = JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!(
                    "rollout path `{}` missing file name",
                    rollout_path.display()
                ),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
            return;
        };

        if !file_name
            .to_string_lossy()
            .ends_with(required_suffix.as_str())
        {
            let error = JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!(
                    "rollout path `{}` does not match conversation id {conversation_id}",
                    rollout_path.display()
                ),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
            return;
        }

        let removed_conversation = self
            .conversation_manager
            .remove_conversation(&conversation_id)
            .await;
        if let Some(conversation) = removed_conversation {
            info!("conversation {conversation_id} was active; shutting down");
            let conversation_clone = conversation.clone();
            let notify = Arc::new(tokio::sync::Notify::new());
            let notify_clone = notify.clone();

            // Establish the listener for ShutdownComplete before submitting
            // Shutdown so it is not missed.
            let is_shutdown = tokio::spawn(async move {
                loop {
                    select! {
                        _ = notify_clone.notified() => {
                            break;
                        }
                        event = conversation_clone.next_event() => {
                            if let Ok(event) = event && matches!(event.msg, EventMsg::ShutdownComplete) {
                                break;
                            }
                        }
                    }
                }
            });

            // Request shutdown.
            match conversation.submit(Op::Shutdown).await {
                Ok(_) => {
                    // Successfully submitted Shutdown; wait before proceeding.
                    select! {
                        _ = is_shutdown => {
                            // Normal shutdown: proceed with archive.
                        }
                        _ = tokio::time::sleep(Duration::from_secs(10)) => {
                            warn!("conversation {conversation_id} shutdown timed out; proceeding with archive");
                            notify.notify_one();
                        }
                    }
                }
                Err(err) => {
                    error!("failed to submit Shutdown to conversation {conversation_id}: {err}");
                    notify.notify_one();
                    // Perhaps we lost a shutdown race, so let's continue to
                    // clean up the .jsonl file.
                }
            }
        }

        // Move the .jsonl file to the archived sessions subdir.
        let result: std::io::Result<()> = async {
            let archive_folder = self
                .config
                .codex_home
                .join(codex_core::ARCHIVED_SESSIONS_SUBDIR);
            tokio::fs::create_dir_all(&archive_folder).await?;
            tokio::fs::rename(&canonical_rollout_path, &archive_folder.join(&file_name)).await?;
            Ok(())
        }
        .await;

        match result {
            Ok(()) => {
                let response = ArchiveConversationResponse {};
                self.outgoing.send_response(request_id, response).await;
            }
            Err(err) => {
                let error = JSONRPCErrorError {
                    code: INTERNAL_ERROR_CODE,
                    message: format!("failed to archive conversation: {err}"),
                    data: None,
                };
                self.outgoing.send_error(request_id, error).await;
            }
        }
    }

    async fn send_user_message(&self, request_id: RequestId, params: SendUserMessageParams) {
        let SendUserMessageParams {
            conversation_id,
@@ -1110,10 +1269,7 @@ fn extract_conversation_summary(
    head: &[serde_json::Value],
) -> Option<ConversationSummary> {
    let session_meta = match head.first() {
        Some(first_line) => match serde_json::from_value::<SessionMeta>(first_line.clone()) {
            Ok(session_meta) => session_meta,
            Err(..) => return None,
        },
        Some(first_line) => serde_json::from_value::<SessionMeta>(first_line.clone()).ok()?,
        None => return None,
    };

@@ -1171,6 +1327,10 @@ mod tests {
        json!({
            "id": conversation_id.0,
            "timestamp": timestamp,
            "cwd": "/",
            "originator": "codex",
            "cli_version": "0.0.0",
            "instructions": null
        }),
        json!({
            "type": "message",

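The containment check above is the security-sensitive core of `archive_conversation`. Pulled out as a standalone helper for illustration (a sketch, not the repo's code): canonicalize the client-supplied path, then require it to live under the sessions directory before touching the file.

```rust
use std::path::{Path, PathBuf};

// Sketch of the validation performed above: returns the canonical path only
// when it resolves inside `sessions_dir`, so `..` segments and symlinks that
// escape the directory are rejected before any rename happens.
async fn validate_rollout_path(rollout_path: &Path, sessions_dir: &Path) -> Option<PathBuf> {
    let canonical = tokio::fs::canonicalize(rollout_path).await.ok()?;
    canonical.starts_with(sessions_dir).then_some(canonical)
}
```
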
@@ -277,7 +277,7 @@ async fn run_codex_tool_session_inner(
            | EventMsg::GetHistoryEntryResponse(_)
            | EventMsg::PlanUpdate(_)
            | EventMsg::TurnAborted(_)
            | EventMsg::ConversationHistory(_)
            | EventMsg::ConversationPath(_)
            | EventMsg::UserMessage(_)
            | EventMsg::ShutdownComplete => {
                // For now, we do not do anything extra for these

@@ -14,6 +14,8 @@ use codex_protocol::mcp_protocol::ConversationId;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_core::default_client::USER_AGENT_SUFFIX;
use codex_core::default_client::get_codex_user_agent;
use codex_core::protocol::Submission;
use mcp_types::CallToolRequestParams;
use mcp_types::CallToolResult;
@@ -54,11 +56,8 @@ impl MessageProcessor {
        config: Arc<Config>,
    ) -> Self {
        let outgoing = Arc::new(outgoing);
        let auth_manager = AuthManager::shared(
            config.codex_home.clone(),
            config.preferred_auth_method,
            config.responses_originator_header.clone(),
        );
        let auth_manager =
            AuthManager::shared(config.codex_home.clone(), config.preferred_auth_method);
        let conversation_manager = Arc::new(ConversationManager::new(auth_manager.clone()));
        let codex_message_processor = CodexMessageProcessor::new(
            auth_manager,
@@ -211,6 +210,14 @@ impl MessageProcessor {
            return;
        }

        let client_info = params.client_info;
        let name = client_info.name;
        let version = client_info.version;
        let user_agent_suffix = format!("{name}; {version}");
        if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
            *suffix = Some(user_agent_suffix);
        }

        self.initialized = true;

        // Build a minimal InitializeResult. Fill with placeholders.
@@ -231,6 +238,7 @@ impl MessageProcessor {
                name: "codex-mcp-server".to_string(),
                version: env!("CARGO_PKG_VERSION").to_string(),
                title: Some("Codex".to_string()),
                user_agent: Some(get_codex_user_agent()),
            },
        };

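For reference, the user agent that `get_codex_user_agent()` produces after this handshake has the shape asserted by the tests later in this diff; a sketch of assembling the expected string, using the same os_info calls those tests use (client name/version values illustrative):

```rust
// Expected UA shape: "<originator>/<version> (<os> <version>; <arch>) <terminal>
// (<client name>; <client version>)". The trailing suffix is exactly what the
// initialize handler above records from clientInfo.
let os = os_info::get();
let expected = format!(
    "codex_cli_rs/0.0.0 ({} {}; {}) {} (acme-client; 1.2.3)",
    os.os_type(),
    os.version(),
    os.architecture().unwrap_or("unknown"),
    codex_core::terminal::user_agent()
);
```
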
@@ -262,6 +262,7 @@ mod tests {
    use codex_protocol::mcp_protocol::LoginChatGptCompleteNotification;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use tempfile::NamedTempFile;
    use uuid::Uuid;

    use super::*;
@@ -272,6 +273,7 @@ mod tests {
        let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);

        let conversation_id = ConversationId::new();
        let rollout_file = NamedTempFile::new().unwrap();
        let event = Event {
            id: "1".to_string(),
            msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
@@ -280,6 +282,7 @@ mod tests {
                history_log_id: 1,
                history_entry_count: 1000,
                initial_messages: None,
                rollout_path: rollout_file.path().to_path_buf(),
            }),
        };

@@ -305,12 +308,14 @@ mod tests {
        let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);

        let conversation_id = ConversationId::new();
        let rollout_file = NamedTempFile::new().unwrap();
        let session_configured_event = SessionConfiguredEvent {
            session_id: conversation_id,
            model: "gpt-4o".to_string(),
            history_log_id: 1,
            history_entry_count: 1000,
            initial_messages: None,
            rollout_path: rollout_file.path().to_path_buf(),
        };
        let event = Event {
            id: "1".to_string(),
@@ -340,6 +345,7 @@ mod tests {
                "history_log_id": session_configured_event.history_log_id,
                "history_entry_count": session_configured_event.history_entry_count,
                "type": "session_configured",
                "rollout_path": rollout_file.path().to_path_buf(),
            }
        });
        assert_eq!(params.unwrap(), expected_params);

@@ -13,16 +13,14 @@ codex-core = { path = "../../../core" }
codex-mcp-server = { path = "../.." }
codex-protocol = { path = "../../../protocol" }
mcp-types = { path = "../../../mcp-types" }
os_info = "3.12.0"
pretty_assertions = "1.4.1"
serde = { version = "1" }
serde_json = "1"
shlex = "1.3.0"
tempfile = "3"
tokio = { version = "1", features = [
    "io-std",
    "macros",
    "process",
    "rt-multi-thread",
] }
uuid = { version = "1", features = ["serde", "v4"] }
wiremock = "0.6"

@@ -13,6 +13,7 @@ use anyhow::Context;
use assert_cmd::prelude::*;
use codex_mcp_server::CodexToolCallParam;
use codex_protocol::mcp_protocol::AddConversationListenerParams;
use codex_protocol::mcp_protocol::ArchiveConversationParams;
use codex_protocol::mcp_protocol::CancelLoginChatGptParams;
use codex_protocol::mcp_protocol::GetAuthStatusParams;
use codex_protocol::mcp_protocol::InterruptConversationParams;
@@ -53,6 +54,18 @@ pub struct McpProcess {

impl McpProcess {
    pub async fn new(codex_home: &Path) -> anyhow::Result<Self> {
        Self::new_with_env(codex_home, &[]).await
    }

    /// Creates a new MCP process, allowing tests to override or remove
    /// specific environment variables for the child process only.
    ///
    /// Pass a tuple of (key, Some(value)) to set/override, or (key, None) to
    /// remove a variable from the child's environment.
    pub async fn new_with_env(
        codex_home: &Path,
        env_overrides: &[(&str, Option<&str>)],
    ) -> anyhow::Result<Self> {
        // Use assert_cmd to locate the binary path and then switch to tokio::process::Command
        let std_cmd = StdCommand::cargo_bin("codex-mcp-server")
            .context("should find binary for codex-mcp-server")?;
@@ -67,6 +80,17 @@ impl McpProcess {
        cmd.env("CODEX_HOME", codex_home);
        cmd.env("RUST_LOG", "debug");

        for (k, v) in env_overrides {
            match v {
                Some(val) => {
                    cmd.env(k, val);
                }
                None => {
                    cmd.env_remove(k);
                }
            }
        }

        let mut process = cmd
            .kill_on_drop(true)
            .spawn()
@@ -114,6 +138,7 @@ impl McpProcess {
                name: "elicitation test".into(),
                title: Some("Elicitation Test".into()),
                version: "0.0.0".into(),
                user_agent: None,
            },
            protocol_version: mcp_types::MCP_SCHEMA_VERSION.into(),
        };
@@ -128,6 +153,14 @@ impl McpProcess {
            .await?;

        let initialized = self.read_jsonrpc_message().await?;
        let os_info = os_info::get();
        let user_agent = format!(
            "codex_cli_rs/0.0.0 ({} {}; {}) {} (elicitation test; 0.0.0)",
            os_info.os_type(),
            os_info.version(),
            os_info.architecture().unwrap_or("unknown"),
            codex_core::terminal::user_agent()
        );
        assert_eq!(
            JSONRPCMessage::Response(JSONRPCResponse {
                jsonrpc: JSONRPC_VERSION.into(),
@@ -141,7 +174,8 @@ impl McpProcess {
                "serverInfo": {
                    "name": "codex-mcp-server",
                    "title": "Codex",
                    "version": "0.0.0"
                    "version": "0.0.0",
                    "user_agent": user_agent
                },
                "protocolVersion": mcp_types::MCP_SCHEMA_VERSION
            })
@@ -186,6 +220,15 @@ impl McpProcess {
        self.send_request("newConversation", params).await
    }

    /// Send an `archiveConversation` JSON-RPC request.
    pub async fn send_archive_conversation_request(
        &mut self,
        params: ArchiveConversationParams,
    ) -> anyhow::Result<i64> {
        let params = Some(serde_json::to_value(params)?);
        self.send_request("archiveConversation", params).await
    }

    /// Send an `addConversationListener` JSON-RPC request.
    pub async fn send_add_conversation_listener_request(
        &mut self,
@@ -252,6 +295,11 @@ impl McpProcess {
        self.send_request("getUserAgent", None).await
    }

    /// Send a `userInfo` JSON-RPC request.
    pub async fn send_user_info_request(&mut self) -> anyhow::Result<i64> {
        self.send_request("userInfo", None).await
    }

    /// Send a `listConversations` JSON-RPC request.
    pub async fn send_list_conversations_request(
        &mut self,

codex-rs/mcp-server/tests/suite/archive_conversation.rs (new file, 105 lines)
@@ -0,0 +1,105 @@
use std::path::Path;

use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_protocol::mcp_protocol::ArchiveConversationParams;
use codex_protocol::mcp_protocol::ArchiveConversationResponse;
use codex_protocol::mcp_protocol::NewConversationParams;
use codex_protocol::mcp_protocol::NewConversationResponse;
use mcp_test_support::McpProcess;
use mcp_test_support::to_response;
use mcp_types::JSONRPCResponse;
use mcp_types::RequestId;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn archive_conversation_moves_rollout_into_archived_directory() {
    let codex_home = TempDir::new().expect("create temp dir");
    create_config_toml(codex_home.path()).expect("write config.toml");

    let mut mcp = McpProcess::new(codex_home.path())
        .await
        .expect("spawn mcp process");
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
        .await
        .expect("initialize timeout")
        .expect("initialize request");

    let new_request_id = mcp
        .send_new_conversation_request(NewConversationParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await
        .expect("send newConversation");
    let new_response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(new_request_id)),
    )
    .await
    .expect("newConversation timeout")
    .expect("newConversation response");

    let NewConversationResponse {
        conversation_id,
        rollout_path,
        ..
    } = to_response::<NewConversationResponse>(new_response)
        .expect("deserialize newConversation response");

    assert!(
        rollout_path.exists(),
        "expected rollout path {} to exist",
        rollout_path.display()
    );

    let archive_request_id = mcp
        .send_archive_conversation_request(ArchiveConversationParams {
            conversation_id,
            rollout_path: rollout_path.clone(),
        })
        .await
        .expect("send archiveConversation");
    let archive_response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(archive_request_id)),
    )
    .await
    .expect("archiveConversation timeout")
    .expect("archiveConversation response");

    let _: ArchiveConversationResponse =
        to_response::<ArchiveConversationResponse>(archive_response)
            .expect("deserialize archiveConversation response");

    let archived_directory = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
    let archived_rollout_path =
        archived_directory.join(rollout_path.file_name().unwrap_or_else(|| {
            panic!("rollout path {} missing file name", rollout_path.display())
        }));

    assert!(
        !rollout_path.exists(),
        "expected rollout path {} to be moved",
        rollout_path.display()
    );
    assert!(
        archived_rollout_path.exists(),
        "expected archived rollout path {} to exist",
        archived_rollout_path.display()
    );
}

fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    std::fs::write(config_toml, config_contents())
}

fn config_contents() -> &'static str {
    r#"model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
"#
}

@@ -41,7 +41,7 @@ async fn get_auth_status_no_auth() {
    let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
    create_config_toml(codex_home.path()).expect("write config.toml");

    let mut mcp = McpProcess::new(codex_home.path())
    let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)])
        .await
        .expect("spawn mcp process");
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())

@@ -90,6 +90,7 @@ async fn test_codex_jsonrpc_conversation_flow() {
    let NewConversationResponse {
        conversation_id,
        model,
        rollout_path: _,
    } = new_conv_resp;
    assert_eq!(model, "mock-model");

@@ -59,6 +59,7 @@ async fn test_conversation_create_and_send_message_ok() {
    let NewConversationResponse {
        conversation_id,
        model,
        rollout_path: _,
    } = to_response::<NewConversationResponse>(new_conv_resp)
        .expect("deserialize newConversation response");
    assert_eq!(model, "o3");

@@ -156,14 +156,45 @@ fn create_fake_rollout(codex_home: &Path, filename_ts: &str, meta_rfc3339: &str,

    let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
    let mut lines = Vec::new();
    // Meta line with timestamp
    lines.push(json!({"timestamp": meta_rfc3339, "id": uuid}).to_string());
    // Minimal user message entry as a persisted response item
    // Meta line with timestamp (flattened meta in payload for new schema)
    lines.push(
        json!({
            "type":"message",
            "role":"user",
            "content":[{"type":"input_text","text": preview}]
            "timestamp": meta_rfc3339,
            "type": "session_meta",
            "payload": {
                "id": uuid,
                "timestamp": meta_rfc3339,
                "cwd": "/",
                "originator": "codex",
                "cli_version": "0.0.0",
                "instructions": null
            }
        })
        .to_string(),
    );
    // Minimal user message entry as a persisted response item (with envelope timestamp)
    lines.push(
        json!({
            "timestamp": meta_rfc3339,
            "type":"response_item",
            "payload": {
                "type":"message",
                "role":"user",
                "content":[{"type":"input_text","text": preview}]
            }
        })
        .to_string(),
    );
    // Add a matching user message event line to satisfy filters
    lines.push(
        json!({
            "timestamp": meta_rfc3339,
            "type":"event_msg",
            "payload": {
                "type":"user_message",
                "message": preview,
                "kind": "plain"
            }
        })
        .to_string(),
    );

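Each rollout line above follows the same envelope: a top-level timestamp plus a tagged `type`/`payload` pair. A small helper sketch that produces the meta line, with values mirroring the fixture above:

```rust
use serde_json::json;

// Builds the session_meta envelope used by the fixture above; `uuid` and
// `ts` are the same values the test threads through.
fn fake_meta_line(uuid: &str, ts: &str) -> String {
    json!({
        "timestamp": ts,
        "type": "session_meta",
        "payload": {
            "id": uuid,
            "timestamp": ts,
            "cwd": "/",
            "originator": "codex",
            "cli_version": "0.0.0",
            "instructions": null
        }
    })
    .to_string()
}
```
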
@@ -46,7 +46,7 @@ async fn logout_chatgpt_removes_auth() {
    login_with_api_key(codex_home.path(), "sk-test-key").expect("seed api key");
    assert!(codex_home.path().join("auth.json").exists());

    let mut mcp = McpProcess::new(codex_home.path())
    let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)])
        .await
        .expect("spawn mcp process");
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())

@@ -1,4 +1,5 @@
// Aggregates all former standalone integration tests as modules.
mod archive_conversation;
mod auth;
mod codex_message_processor_flow;
mod codex_tool;
@@ -9,3 +10,4 @@ mod list_resume;
mod login;
mod send_message;
mod user_agent;
mod user_info;

@@ -1,5 +1,3 @@
use codex_core::default_client::DEFAULT_ORIGINATOR;
use codex_core::default_client::get_codex_user_agent;
use codex_protocol::mcp_protocol::GetUserAgentResponse;
use mcp_test_support::McpProcess;
use mcp_test_support::to_response;
@@ -35,11 +33,18 @@ async fn get_user_agent_returns_current_codex_user_agent() {
    .expect("getUserAgent timeout")
    .expect("getUserAgent response");

    let os_info = os_info::get();
    let user_agent = format!(
        "codex_cli_rs/0.0.0 ({} {}; {}) {} (elicitation test; 0.0.0)",
        os_info.os_type(),
        os_info.version(),
        os_info.architecture().unwrap_or("unknown"),
        codex_core::terminal::user_agent()
    );

    let received: GetUserAgentResponse =
        to_response(response).expect("deserialize getUserAgent response");
    let expected = GetUserAgentResponse {
        user_agent: get_codex_user_agent(Some(DEFAULT_ORIGINATOR)),
    };
    let expected = GetUserAgentResponse { user_agent };

    assert_eq!(received, expected);
}

codex-rs/mcp-server/tests/suite/user_info.rs (new file, 78 lines)
@@ -0,0 +1,78 @@
use std::time::Duration;

use anyhow::Context;
use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use codex_core::auth::AuthDotJson;
use codex_core::auth::get_auth_file;
use codex_core::auth::write_auth_json;
use codex_core::token_data::IdTokenInfo;
use codex_core::token_data::TokenData;
use codex_protocol::mcp_protocol::UserInfoResponse;
use mcp_test_support::McpProcess;
use mcp_test_support::to_response;
use mcp_types::JSONRPCResponse;
use mcp_types::RequestId;
use pretty_assertions::assert_eq;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10);

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_info_returns_email_from_auth_json() {
    let codex_home = TempDir::new().expect("create tempdir");

    let auth_path = get_auth_file(codex_home.path());
    let mut id_token = IdTokenInfo::default();
    id_token.email = Some("user@example.com".to_string());
    id_token.raw_jwt = encode_id_token_with_email("user@example.com").expect("encode id token");

    let auth = AuthDotJson {
        openai_api_key: None,
        tokens: Some(TokenData {
            id_token,
            access_token: "access".to_string(),
            refresh_token: "refresh".to_string(),
            account_id: None,
        }),
        last_refresh: None,
    };
    write_auth_json(&auth_path, &auth).expect("write auth.json");

    let mut mcp = McpProcess::new(codex_home.path())
        .await
        .expect("spawn mcp process");
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
        .await
        .expect("initialize timeout")
        .expect("initialize request");

    let request_id = mcp.send_user_info_request().await.expect("send userInfo");
    let response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await
    .expect("userInfo timeout")
    .expect("userInfo response");

    let received: UserInfoResponse = to_response(response).expect("deserialize userInfo response");
    let expected = UserInfoResponse {
        alleged_user_email: Some("user@example.com".to_string()),
    };

    assert_eq!(received, expected);
}

fn encode_id_token_with_email(email: &str) -> anyhow::Result<String> {
    let header_b64 = URL_SAFE_NO_PAD.encode(
        serde_json::to_vec(&json!({ "alg": "none", "typ": "JWT" }))
            .context("serialize jwt header")?,
    );
    let payload =
        serde_json::to_vec(&json!({ "email": email })).context("serialize jwt payload")?;
    let payload_b64 = URL_SAFE_NO_PAD.encode(payload);
    Ok(format!("{header_b64}.{payload_b64}.signature"))
}

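`encode_id_token_with_email` builds an unsigned three-segment JWT. A round-trip sketch that decodes the payload segment back out, to show the format (illustrative only; production parsing goes through `codex_core::token_data::parse_id_token`):

```rust
use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;

// Split "<header>.<payload>.<signature>" and base64url-decode the middle
// segment, then pull the email claim out of the JSON payload.
fn decode_email(jwt: &str) -> Option<String> {
    let payload_b64 = jwt.split('.').nth(1)?;
    let bytes = URL_SAFE_NO_PAD.decode(payload_b64).ok()?;
    let value: serde_json::Value = serde_json::from_slice(&bytes).ok()?;
    value.get("email")?.as_str().map(str::to_owned)
}
```
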
@@ -265,8 +265,11 @@ class StructField:
    name: str
    type_name: str
    serde: str | None = None
    comment: str | None = None

    def append(self, out: list[str], supports_const: bool) -> None:
        if self.comment:
            out.append(f"    // {self.comment}\n")
        if self.serde:
            out.append(f"    {self.serde}\n")
        if self.viz == "const":
@@ -312,6 +315,18 @@ def define_struct(
    else:
        fields.append(StructField("pub", rs_prop.name, prop_type, rs_prop.serde))

    # Special-case: add Codex-specific user_agent to Implementation
    if name == "Implementation":
        fields.append(
            StructField(
                "pub",
                "user_agent",
                "Option<String>",
                '#[serde(default, skip_serializing_if = "Option::is_none")]',
                "This is an extra field that the Codex MCP server sends as part of InitializeResult.",
            )
        )

    if implements_request_trait(name):
        add_trait_impl(name, "ModelContextProtocolRequest", fields, out)
    elif implements_notification_trait(name):

@@ -487,6 +487,9 @@ pub struct Implementation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub title: Option<String>,
    pub version: String,
    // This is an extra field that the Codex MCP server sends as part of InitializeResult.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub user_agent: Option<String>,
}

#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, TS)]

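Because the new field is an `Option` with `skip_serializing_if`, plain MCP peers are unaffected; only the Codex server populates it. A usage sketch mirroring the server side of this diff:

```rust
// Server-side construction: advertise the UA; clients leave it None and the
// field disappears from the serialized InitializeResult entirely.
// get_codex_user_agent comes from codex_core::default_client, as in this diff.
fn server_info() -> Implementation {
    Implementation {
        name: "codex-mcp-server".to_string(),
        title: Some("Codex".to_string()),
        version: env!("CARGO_PKG_VERSION").to_string(),
        user_agent: Some(get_codex_user_agent()),
    }
}
```
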
@@ -62,6 +62,7 @@ fn deserialize_initialize_request() {
            name: "acme-client".into(),
            title: Some("Acme".to_string()),
            version: "1.2.3".into(),
            user_agent: None,
        },
        protocol_version: "2025-06-18".into(),
    }

@@ -24,9 +24,7 @@ tokio = { version = "1", features = [
    "rt-multi-thread",
    "signal",
] }
toml = "0.9.5"
tracing = { version = "0.1.41", features = ["log"] }
wiremock = "0.6"

[dev-dependencies]
tempfile = "3"

@@ -16,6 +16,7 @@ path = "src/main.rs"

[dependencies]
anyhow = "1"
mcp-types = { path = "../mcp-types" }
codex-protocol = { path = "../protocol" }
ts-rs = "11"
clap = { version = "4", features = ["derive"] }

@@ -16,11 +16,15 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
    ensure_dir(out_dir)?;

    // Generate TS bindings
    mcp_types::InitializeResult::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::ConversationId::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::InputItem::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::ClientRequest::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::ServerRequest::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::NewConversationResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::ListConversationsResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::ResumeConversationResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::ArchiveConversationResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::AddConversationSubscriptionResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::RemoveConversationSubscriptionResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::SendUserMessageResponse::export_all_to(out_dir)?;
@@ -28,7 +32,6 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
    codex_protocol::mcp_protocol::InterruptConversationResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::GitDiffToRemoteResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::LoginChatGptResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::LoginChatGptCompleteNotification::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::CancelLoginChatGptResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::LogoutChatGptResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::GetAuthStatusResponse::export_all_to(out_dir)?;
@@ -36,9 +39,11 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
    codex_protocol::mcp_protocol::ExecCommandApprovalResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::GetUserSavedConfigResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::GetUserAgentResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::UserInfoResponse::export_all_to(out_dir)?;

    // All notification types reachable from this enum will be generated by
    // induction, so they do not need to be listed individually.
    codex_protocol::mcp_protocol::ServerNotification::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::ListConversationsResponse::export_all_to(out_dir)?;
    codex_protocol::mcp_protocol::ResumeConversationResponse::export_all_to(out_dir)?;

    generate_index_ts(out_dir)?;

@@ -17,7 +17,6 @@ icu_locale_core = "2.0.0"
mcp-types = { path = "../mcp-types" }
mime_guess = "2.0.5"
serde = { version = "1", features = ["derive"] }
serde_bytes = "0.11"
serde_json = "1"
serde_with = { version = "3.14.0", features = ["macros", "base64"] }
strum = "0.27.2"
@@ -29,3 +28,8 @@ uuid = { version = "1", features = ["serde", "v4"] }

[dev-dependencies]
pretty_assertions = "1.4.1"
tempfile = "3"

[package.metadata.cargo-shear]
# Required because the crate is not imported as strum_macros in non-nightly builds.
ignored = ["strum"]

@@ -91,6 +91,11 @@ pub enum ClientRequest {
        request_id: RequestId,
        params: ResumeConversationParams,
    },
    ArchiveConversation {
        #[serde(rename = "id")]
        request_id: RequestId,
        params: ArchiveConversationParams,
    },
    SendUserMessage {
        #[serde(rename = "id")]
        request_id: RequestId,
@@ -147,6 +152,10 @@ pub enum ClientRequest {
        #[serde(rename = "id")]
        request_id: RequestId,
    },
    UserInfo {
        #[serde(rename = "id")]
        request_id: RequestId,
    },
    /// Execute a command (argv vector) under the server's sandbox.
    ExecOneOffCommand {
        #[serde(rename = "id")]
@@ -203,6 +212,7 @@ pub struct NewConversationParams {
pub struct NewConversationResponse {
    pub conversation_id: ConversationId,
    pub model: String,
    pub rollout_path: PathBuf,
}

#[derive(Serialize, Deserialize, Debug, Clone, TS)]
@@ -262,6 +272,18 @@ pub struct AddConversationSubscriptionResponse {
    pub subscription_id: Uuid,
}

/// The [`ConversationId`] must match the `rollout_path`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationParams {
    pub conversation_id: ConversationId,
    pub rollout_path: PathBuf,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationResponse {}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationSubscriptionResponse {}
@@ -356,6 +378,16 @@ pub struct GetUserAgentResponse {
    pub user_agent: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserInfoResponse {
    /// Note: `alleged_user_email` is not currently verified. We read it from
    /// the local auth.json, which the user could theoretically modify. In the
    /// future, we may add logic to verify the email against the server before
    /// returning it.
    pub alleged_user_email: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserSavedConfigResponse {

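Given the `camelCase` rename on `ArchiveConversationParams`, the wire shape of an archive request is worth spelling out; a sketch (the path value is illustrative):

```rust
use std::path::PathBuf;

use codex_protocol::mcp_protocol::ArchiveConversationParams;
use codex_protocol::mcp_protocol::ConversationId;

// Serializes to {"conversationId": "...", "rolloutPath": "..."} courtesy of
// the camelCase rename above.
fn example_archive_params(conversation_id: ConversationId) -> serde_json::Value {
    let params = ArchiveConversationParams {
        conversation_id,
        rollout_path: PathBuf::from("/home/user/.codex/sessions/rollout-example.jsonl"),
    };
    serde_json::to_value(&params).expect("params serialize")
}
```
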
@@ -49,7 +49,7 @@ pub enum ResponseItem {
        content: Vec<ContentItem>,
    },
    Reasoning {
        #[serde(default)]
        #[serde(default, skip_serializing)]
        id: String,
        summary: Vec<ReasoningItemReasoningSummary>,
        #[serde(default, skip_serializing_if = "should_serialize_reasoning_content")]
@@ -115,7 +115,6 @@ pub enum ResponseItem {
        status: Option<String>,
        action: WebSearchAction,
    },

    #[serde(other)]
    Other,
}

@@ -149,7 +149,7 @@ pub enum Op {

    /// Request the full in-memory conversation transcript for the current session.
    /// Reply is delivered via `EventMsg::ConversationHistory`.
    GetHistory,
    GetPath,

    /// Request the list of MCP tools available across all configured servers.
    /// Reply is delivered via `EventMsg::McpListToolsResponse`.
@@ -499,7 +499,7 @@ pub enum EventMsg {
    /// Notification that the agent is shutting down.
    ShutdownComplete,

    ConversationHistory(ConversationHistoryResponseEvent),
    ConversationPath(ConversationPathResponseEvent),
}

// Individual event payload types matching each `EventMsg` variant.
@@ -695,6 +695,8 @@ pub struct UserMessageEvent {
    pub message: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub kind: Option<InputMessageKind>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub images: Option<Vec<String>>,
}

impl<T, U> From<(T, U)> for InputMessageKind
@@ -799,9 +801,123 @@ pub struct WebSearchEndEvent {
/// Response payload for `Op::GetHistory` containing the current session's
/// in-memory transcript.
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
pub struct ConversationHistoryResponseEvent {
pub struct ConversationPathResponseEvent {
    pub conversation_id: ConversationId,
    pub entries: Vec<ResponseItem>,
    pub path: PathBuf,
}

#[derive(Debug, Clone, Deserialize, Serialize, TS)]
pub struct ResumedHistory {
    pub conversation_id: ConversationId,
    pub history: Vec<RolloutItem>,
    pub rollout_path: PathBuf,
}

#[derive(Debug, Clone, Deserialize, Serialize, TS)]
pub enum InitialHistory {
    New,
    Resumed(ResumedHistory),
    Forked(Vec<RolloutItem>),
}

impl InitialHistory {
    pub fn get_rollout_items(&self) -> Vec<RolloutItem> {
        match self {
            InitialHistory::New => Vec::new(),
            InitialHistory::Resumed(resumed) => resumed.history.clone(),
            InitialHistory::Forked(items) => items.clone(),
        }
    }
    pub fn get_response_items(&self) -> Vec<ResponseItem> {
        match self {
            InitialHistory::New => Vec::new(),
            InitialHistory::Resumed(resumed) => resumed
                .history
                .iter()
                .filter_map(|ri| match ri {
                    RolloutItem::ResponseItem(item) => Some(item.clone()),
                    _ => None,
                })
                .collect(),
            InitialHistory::Forked(items) => items
                .iter()
                .filter_map(|ri| match ri {
                    RolloutItem::ResponseItem(item) => Some(item.clone()),
                    _ => None,
                })
                .collect(),
        }
    }
    pub fn get_event_msgs(&self) -> Option<Vec<EventMsg>> {
        match self {
            InitialHistory::New => None,
            InitialHistory::Resumed(resumed) => Some(
                resumed
                    .history
                    .iter()
                    .filter_map(|ri| match ri {
                        RolloutItem::EventMsg(ev) => Some(ev.clone()),
                        _ => None,
                    })
                    .collect(),
            ),
            InitialHistory::Forked(items) => Some(
                items
                    .iter()
                    .filter_map(|ri| match ri {
                        RolloutItem::EventMsg(ev) => Some(ev.clone()),
                        _ => None,
                    })
                    .collect(),
            ),
        }
    }
}

#[derive(Serialize, Deserialize, Clone, Default, Debug, TS)]
pub struct SessionMeta {
    pub id: ConversationId,
    pub timestamp: String,
    pub cwd: PathBuf,
    pub originator: String,
    pub cli_version: String,
    pub instructions: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, TS)]
pub struct SessionMetaLine {
    #[serde(flatten)]
    pub meta: SessionMeta,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub git: Option<GitInfo>,
}

#[derive(Serialize, Deserialize, Debug, Clone, TS)]
#[serde(tag = "type", content = "payload", rename_all = "snake_case")]
pub enum RolloutItem {
    SessionMeta(SessionMetaLine),
    ResponseItem(ResponseItem),
    EventMsg(EventMsg),
}

#[derive(Serialize, Deserialize, Clone)]
pub struct RolloutLine {
    pub timestamp: String,
    #[serde(flatten)]
    pub item: RolloutItem,
}

#[derive(Serialize, Deserialize, Clone, Debug, TS)]
pub struct GitInfo {
    /// Current commit hash (SHA)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub commit_hash: Option<String>,
    /// Current branch name
    #[serde(skip_serializing_if = "Option::is_none")]
    pub branch: Option<String>,
    /// Repository URL (if available from remote)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub repository_url: Option<String>,
}

#[derive(Debug, Clone, Deserialize, Serialize, TS)]
@@ -958,6 +1074,8 @@ pub struct SessionConfiguredEvent {
    /// When present, UIs can use these to seed the history.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub initial_messages: Option<Vec<EventMsg>>,

    pub rollout_path: PathBuf,
}

/// User's decision in response to an ExecApprovalRequest.
@@ -1020,12 +1138,15 @@ pub enum TurnAbortReason {
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;
    use tempfile::NamedTempFile;

    /// Serialize Event to verify that its JSON representation has the expected
    /// amount of nesting.
    #[test]
    fn serialize_event() {
        let conversation_id = ConversationId(uuid::uuid!("67e55044-10b1-426f-9247-bb680e5fe0c8"));
        let rollout_file = NamedTempFile::new().unwrap();
        let event = Event {
            id: "1234".to_string(),
            msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
@@ -1034,13 +1155,22 @@ mod tests {
                history_log_id: 0,
                history_entry_count: 0,
                initial_messages: None,
                rollout_path: rollout_file.path().to_path_buf(),
            }),
        };
        let serialized = serde_json::to_string(&event).unwrap();
        assert_eq!(
            serialized,
            r#"{"id":"1234","msg":{"type":"session_configured","session_id":"67e55044-10b1-426f-9247-bb680e5fe0c8","model":"codex-mini-latest","history_log_id":0,"history_entry_count":0}}"#
        );

        let expected = json!({
            "id": "1234",
            "msg": {
                "type": "session_configured",
                "session_id": "67e55044-10b1-426f-9247-bb680e5fe0c8",
                "model": "codex-mini-latest",
                "history_log_id": 0,
                "history_entry_count": 0,
                "rollout_path": format!("{}", rollout_file.path().display()),
            }
        });
        assert_eq!(expected, serde_json::to_value(&event).unwrap());
    }

    #[test]

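A short consumer sketch for the `InitialHistory` API defined above: a resumed session seeds the UI from the recorded `EventMsg`s while the model context is rebuilt from the `ResponseItem`s.

```rust
// Sketch: split a resumed history into its two consumers. Returns the number
// of events replayed into the UI (zero for a brand-new session) along with
// the response items destined for the model context.
fn seed_from_history(history: &InitialHistory) -> (usize, Vec<ResponseItem>) {
    let replayed = history.get_event_msgs().map_or(0, |msgs| msgs.len());
    (replayed, history.get_response_items())
}
```
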
@@ -59,9 +59,7 @@ ratatui = { version = "0.29.0", features = [
    "unstable-rendered-line-info",
    "unstable-widget-ref",
] }
ratatui-image = "8.0.0"
regex-lite = "0.1"
reqwest = { version = "0.12", features = ["json"] }
serde = { version = "1", features = ["derive"] }
serde_json = { version = "1", features = ["preserve_order"] }
shlex = "1.3.0"
@@ -81,12 +79,10 @@ tokio-stream = "0.1.17"
tracing = { version = "0.1.41", features = ["log"] }
tracing-appender = "0.2.3"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tui-input = "0.14.0"
tui-markdown = "0.3.3"
pulldown-cmark = "0.10"
unicode-segmentation = "1.12.0"
unicode-width = "0.1"
url = "2"
uuid = "1"
pathdiff = "0.2"

[target.'cfg(unix)'.dependencies]

@@ -1,9 +1,11 @@
use std::path::PathBuf;

use crate::app::App;
use crate::backtrack_helpers;
use crate::pager_overlay::Overlay;
use crate::tui;
use crate::tui::TuiEvent;
use codex_core::protocol::ConversationHistoryResponseEvent;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_protocol::mcp_protocol::ConversationId;
use color_eyre::eyre::Result;
use crossterm::event::KeyCode;
@@ -98,7 +100,7 @@ impl App {
    ) {
        self.backtrack.pending = Some((base_id, drop_last_messages, prefill));
        self.app_event_tx.send(crate::app_event::AppEvent::CodexOp(
            codex_core::protocol::Op::GetHistory,
            codex_core::protocol::Op::GetPath,
        ));
    }

@@ -265,7 +267,7 @@ impl App {
    pub(crate) async fn on_conversation_history_for_backtrack(
        &mut self,
        tui: &mut tui::Tui,
        ev: ConversationHistoryResponseEvent,
        ev: ConversationPathResponseEvent,
    ) -> Result<()> {
        if let Some((base_id, _, _)) = self.backtrack.pending.as_ref()
            && ev.conversation_id == *base_id
@@ -281,14 +283,14 @@ impl App {
    async fn fork_and_switch_to_new_conversation(
        &mut self,
        tui: &mut tui::Tui,
        ev: ConversationHistoryResponseEvent,
        ev: ConversationPathResponseEvent,
        drop_count: usize,
        prefill: String,
    ) {
        let cfg = self.chat_widget.config_ref().clone();
        // Perform the fork via a thin wrapper for clarity/testability.
        let result = self
            .perform_fork(ev.entries.clone(), drop_count, cfg.clone())
            .perform_fork(ev.path.clone(), drop_count, cfg.clone())
            .await;
        match result {
            Ok(new_conv) => {
@@ -301,13 +303,11 @@ impl App {
    /// Thin wrapper around ConversationManager::fork_conversation.
    async fn perform_fork(
        &self,
        entries: Vec<codex_protocol::models::ResponseItem>,
        path: PathBuf,
        drop_count: usize,
        cfg: codex_core::config::Config,
    ) -> codex_core::error::Result<codex_core::NewConversation> {
        self.server
            .fork_conversation(entries, drop_count, cfg)
            .await
        self.server.fork_conversation(drop_count, cfg, path).await
    }

    /// Install a forked conversation into the ChatWidget and update UI to reflect selection.

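The wrapper above captures the signature change: forking now takes the rollout file path instead of a vector of transcript entries. Restated as a free function for clarity (a sketch; `ConversationManager::fork_conversation` is the real entry point):

```rust
use std::path::PathBuf;

// The server re-reads the rollout file itself, so callers only ship a path
// plus how many trailing messages to drop.
async fn fork_from_rollout(
    server: &codex_core::ConversationManager,
    path: PathBuf,
    drop_count: usize,
    cfg: codex_core::config::Config,
) -> codex_core::error::Result<codex_core::NewConversation> {
    server.fork_conversation(drop_count, cfg, path).await
}
```
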
@@ -1,4 +1,4 @@
use codex_core::protocol::ConversationHistoryResponseEvent;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_core::protocol::Event;
use codex_file_search::FileMatch;

@@ -58,5 +58,5 @@ pub(crate) enum AppEvent {
    UpdateSandboxPolicy(SandboxPolicy),

    /// Forwarded conversation history snapshot from the current conversation.
    ConversationHistory(ConversationHistoryResponseEvent),
    ConversationHistory(ConversationPathResponseEvent),
}

codex-rs/tui/src/bin/md-events.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
use std::io::Read;
use std::io::{self};

fn main() {
    let mut input = String::new();
    if let Err(err) = io::stdin().read_to_string(&mut input) {
        eprintln!("failed to read stdin: {err}");
        std::process::exit(1);
    }

    let parser = pulldown_cmark::Parser::new(&input);
    for event in parser {
        println!("{event:?}");
    }
}

@@ -252,6 +252,11 @@ impl TextArea {
                modifiers: KeyModifiers::CONTROL,
                ..
            } => self.delete_backward(1),
            KeyEvent {
                code: KeyCode::Delete,
                modifiers: KeyModifiers::ALT,
                ..
            } => self.delete_forward_word(),
            KeyEvent {
                code: KeyCode::Delete,
                ..
@@ -435,6 +440,18 @@ impl TextArea {
        self.replace_range(start..self.cursor_pos, "");
    }

    /// Delete text to the right of the cursor using "word" semantics.
    ///
    /// Deletes from the current cursor position through the end of the next word as determined
    /// by `end_of_next_word()`. Any whitespace (including newlines) between the cursor and that
    /// word is included in the deletion.
    pub fn delete_forward_word(&mut self) {
        let end = self.end_of_next_word();
        if end > self.cursor_pos {
            self.replace_range(self.cursor_pos..end, "");
        }
    }

    pub fn kill_to_end_of_line(&mut self) {
        let eol = self.end_of_current_line();
        if self.cursor_pos == eol {
@@ -1104,6 +1121,79 @@ mod tests {
        assert_eq!(t.cursor(), 3);
    }

    #[test]
    fn delete_forward_word_variants() {
        let mut t = ta_with("hello world ");
        t.set_cursor(0);
        t.delete_forward_word();
        assert_eq!(t.text(), " world ");
        assert_eq!(t.cursor(), 0);

        let mut t = ta_with("hello world ");
        t.set_cursor(1);
        t.delete_forward_word();
        assert_eq!(t.text(), "h world ");
        assert_eq!(t.cursor(), 1);

        let mut t = ta_with("hello world");
        t.set_cursor(t.text().len());
        t.delete_forward_word();
        assert_eq!(t.text(), "hello world");
        assert_eq!(t.cursor(), t.text().len());

        let mut t = ta_with("foo \nbar");
        t.set_cursor(3);
        t.delete_forward_word();
        assert_eq!(t.text(), "foo");
        assert_eq!(t.cursor(), 3);

        let mut t = ta_with("foo\nbar");
        t.set_cursor(3);
        t.delete_forward_word();
        assert_eq!(t.text(), "foo");
        assert_eq!(t.cursor(), 3);

        let mut t = ta_with("hello world ");
        t.set_cursor(t.text().len() + 10);
        t.delete_forward_word();
        assert_eq!(t.text(), "hello world ");
        assert_eq!(t.cursor(), t.text().len());
    }

    #[test]
    fn delete_forward_word_handles_atomic_elements() {
        let mut t = TextArea::new();
        t.insert_element("<element>");
        t.insert_str(" tail");

        t.set_cursor(0);
        t.delete_forward_word();
        assert_eq!(t.text(), " tail");
        assert_eq!(t.cursor(), 0);

        let mut t = TextArea::new();
        t.insert_str(" ");
        t.insert_element("<element>");
        t.insert_str(" tail");

        t.set_cursor(0);
        t.delete_forward_word();
        assert_eq!(t.text(), " tail");
        assert_eq!(t.cursor(), 0);

        let mut t = TextArea::new();
        t.insert_str("prefix ");
        t.insert_element("<element>");
        t.insert_str(" tail");

        // cursor in the middle of the element, delete_forward_word deletes the element
        let elem_range = t.elements[0].range.clone();
        t.cursor_pos = elem_range.start + (elem_range.len() / 2);
        t.delete_forward_word();
        assert_eq!(t.text(), "prefix tail");
        assert_eq!(t.cursor(), elem_range.start);
    }

    #[test]
    fn cursor_left_and_right_handle_graphemes() {
        let mut t = ta_with("a👍b");
@@ -1174,6 +1264,21 @@ mod tests {
        assert_eq!(t.cursor(), 6);
    }

    #[test]
    fn delete_forward_word_with_without_alt_modifier() {
        let mut t = ta_with("hello world");
        t.set_cursor(0);
        t.input(KeyEvent::new(KeyCode::Delete, KeyModifiers::ALT));
        assert_eq!(t.text(), " world");
        assert_eq!(t.cursor(), 0);

        let mut t = ta_with("hello");
        t.set_cursor(0);
        t.input(KeyEvent::new(KeyCode::Delete, KeyModifiers::NONE));
        assert_eq!(t.text(), "ello");
        assert_eq!(t.cursor(), 0);
    }

    #[test]
    fn control_h_backspace() {
        // Test Ctrl+H as backspace

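Outside the test module, the new editing primitive can be driven either directly or through the Alt+Delete binding added above; a minimal usage sketch matching the semantics exercised by the tests:

```rust
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers};

// Same effect two ways: call the method directly, or feed the Alt+Delete key
// event through input() as a terminal would. After the first call the buffer
// is " world"; the second deletes the whitespace plus "world", leaving "".
fn demo(mut t: TextArea) {
    t.insert_str("hello world");
    t.set_cursor(0);
    t.delete_forward_word();
    t.input(KeyEvent::new(KeyCode::Delete, KeyModifiers::ALT));
    assert_eq!(t.text(), "");
}
```
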
@@ -1083,7 +1083,7 @@ impl ChatWidget {
                self.on_user_message_event(ev);
            }
        }
        EventMsg::ConversationHistory(ev) => {
        EventMsg::ConversationPath(ev) => {
            self.app_event_tx
                .send(crate::app_event::AppEvent::ConversationHistory(ev));
        }

@@ -0,0 +1,18 @@
---
source: tui/src/chatwidget/tests.rs
expression: visual
---
> -- Indented code block (4 spaces)
    SELECT *
    FROM "users"
    WHERE "email" LIKE '%@example.com';

```sh
printf 'fenced within fenced\n'
```

{
  // comment allowed in jsonc
  "path": "C:\\Program Files\\App",
  "regex": "^foo.*(bar)?$"
}
@@ -35,6 +35,7 @@ use std::fs::File;
use std::io::BufRead;
use std::io::BufReader;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use tokio::sync::mpsc::unbounded_channel;

fn test_config() -> Config {
@@ -133,7 +134,7 @@ fn resumed_initial_messages_render_history() {
    let (mut chat, mut rx, _ops) = make_chatwidget_manual();

    let conversation_id = ConversationId::new();

    let rollout_file = NamedTempFile::new().unwrap();
    let configured = codex_core::protocol::SessionConfiguredEvent {
        session_id: conversation_id,
        model: "test-model".to_string(),
@@ -143,11 +144,13 @@ fn resumed_initial_messages_render_history() {
            EventMsg::UserMessage(UserMessageEvent {
                message: "hello from user".to_string(),
                kind: Some(InputMessageKind::Plain),
                images: None,
            }),
            EventMsg::AgentMessage(AgentMessageEvent {
                message: "assistant reply".to_string(),
            }),
        ]),
        rollout_path: rollout_file.path().to_path_buf(),
    };

    chat.handle_codex_event(Event {
@@ -1753,3 +1756,123 @@ fn chatwidget_exec_and_status_layout_vt100_snapshot() {
    let visual = vt_lines.join("\n");
    assert_snapshot!(visual);
}

// E2E vt100 snapshot for complex markdown with indented and nested fenced code blocks
#[test]
fn chatwidget_markdown_code_blocks_vt100_snapshot() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();

    // Simulate a final agent message via streaming deltas instead of a single message

    chat.handle_codex_event(Event {
        id: "t1".into(),
        msg: EventMsg::TaskStarted(TaskStartedEvent {
            model_context_window: None,
        }),
    });
    // Build a vt100 visual from the history insertions only (no UI overlay)
    let width: u16 = 80;
    let height: u16 = 50;
    let backend = ratatui::backend::TestBackend::new(width, height);
    let mut term = crate::custom_terminal::Terminal::with_options(backend).expect("terminal");
    // Place viewport at the last line so that history lines insert above it
    term.set_viewport_area(Rect::new(0, height - 1, width, 1));

    let mut ansi: Vec<u8> = Vec::new();

    // Simulate streaming via AgentMessageDelta in 2-character chunks (no final AgentMessage).
    let source: &str = r#"

    -- Indented code block (4 spaces)
    SELECT *
    FROM "users"
    WHERE "email" LIKE '%@example.com';

````markdown
```sh
printf 'fenced within fenced\n'
```
````

```jsonc
{
  // comment allowed in jsonc
  "path": "C:\\Program Files\\App",
  "regex": "^foo.*(bar)?$"
}
```
"#;

    let mut it = source.chars();
    loop {
        let mut delta = String::new();
        match it.next() {
            Some(c) => delta.push(c),
            None => break,
        }
        if let Some(c2) = it.next() {
            delta.push(c2);
        }

        chat.handle_codex_event(Event {
            id: "t1".into(),
            msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }),
        });
        // Drive commit ticks and drain emitted history lines into the vt100 buffer.
        loop {
            chat.on_commit_tick();
            let mut inserted_any = false;
            while let Ok(app_ev) = rx.try_recv() {
                if let AppEvent::InsertHistoryCell(cell) = app_ev {
                    let lines = cell.display_lines(width);
                    crate::insert_history::insert_history_lines_to_writer(
                        &mut term, &mut ansi, lines,
                    );
                    inserted_any = true;
                }
            }
            if !inserted_any {
                break;
            }
        }
    }

    // Finalize the stream without sending a final AgentMessage, to flush any tail.
    chat.handle_codex_event(Event {
        id: "t1".into(),
        msg: EventMsg::TaskComplete(TaskCompleteEvent {
            last_agent_message: None,
        }),
    });
    for lines in drain_insert_history(&mut rx) {
        crate::insert_history::insert_history_lines_to_writer(&mut term, &mut ansi, lines);
    }

    let mut parser = vt100::Parser::new(height, width, 0);
    parser.process(&ansi);

    let mut vt_lines: Vec<String> = (0..height)
        .map(|row| {
            let mut s = String::with_capacity(width as usize);
            for col in 0..width {
                if let Some(cell) = parser.screen().cell(row, col) {
                    if let Some(ch) = cell.contents().chars().next() {
                        s.push(ch);
                    } else {
                        s.push(' ');
                    }
                } else {
                    s.push(' ');
                }
            }
            s.trim_end().to_string()
        })
        .collect();

    // Compact trailing blank rows for a stable snapshot
    while matches!(vt_lines.last(), Some(l) if l.trim().is_empty()) {
        vt_lines.pop();
    }
    let visual = vt_lines.join("\n");
    assert_snapshot!(visual);
}

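The screen-scraping step above recurs in several tests in this PR: feed the raw ANSI bytes to a `vt100::Parser`, then walk `screen().cell(row, col)` to rebuild each visible row. A self-contained sketch of that helper, assuming the same `vt100` crate the tests use:

```rust
// Reconstruct the visible rows of a terminal as plain strings.
fn screen_rows(ansi: &[u8], rows: u16, cols: u16) -> Vec<String> {
    let mut parser = vt100::Parser::new(rows, cols, 0); // 0 = no scrollback
    parser.process(ansi);
    (0..rows)
        .map(|row| {
            let mut s = String::with_capacity(cols as usize);
            for col in 0..cols {
                match parser.screen().cell(row, col) {
                    // Empty cells report no contents; render them as spaces.
                    Some(cell) => s.push(cell.contents().chars().next().unwrap_or(' ')),
                    None => s.push(' '),
                }
            }
            s.trim_end().to_string()
        })
        .collect()
}

fn main() {
    // "\x1b[2;3H" moves the cursor to row 2, column 3 (1-based) before printing.
    let rows = screen_rows(b"\x1b[2;3Hhi", 4, 10);
    assert_eq!(rows[1], "  hi");
}
```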
@@ -1 +0,0 @@

@@ -605,6 +605,7 @@ pub(crate) fn new_session_info(
        history_log_id: _,
        history_entry_count: _,
        initial_messages: _,
        rollout_path: _,
    } = event;
    if is_first_event {
        let cwd_str = match relativize_to_home(&config.cwd) {

@@ -97,7 +97,17 @@ pub fn insert_history_lines_to_writer<B, W>(

    for line in wrapped {
        queue!(writer, Print("\r\n")).ok();
        write_spans(writer, &line).ok();
        // Merge line-level style into each span so that ANSI colors reflect
        // line styles (e.g., blockquotes with green fg).
        let merged_spans: Vec<Span> = line
            .spans
            .iter()
            .map(|s| Span {
                style: s.style.patch(line.style),
                content: s.content.clone(),
            })
            .collect();
        write_spans(writer, merged_spans.iter()).ok();
    }

    queue!(writer, ResetScrollRegion).ok();
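This hunk leans on ratatui's `Style::patch`, where fields set on the patched-in style win and unset fields fall through to the receiver. A small illustrative sketch, not code from this PR:

```rust
use ratatui::style::{Color, Modifier, Style};

fn main() {
    // Span style: bold, no color. Line style: green foreground.
    let span_style = Style::default().add_modifier(Modifier::BOLD);
    let line_style = Style::default().fg(Color::Green);

    // Patching keeps the span's bold and picks up the line's green,
    // mirroring what the merged_spans loop above does per span.
    let merged = span_style.patch(line_style);
    assert_eq!(merged.fg, Some(Color::Green));
    assert!(merged.add_modifier.contains(Modifier::BOLD));
}
```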
@@ -264,6 +274,10 @@
#[cfg(test)]
mod tests {
    use super::*;
    use crate::markdown_render::render_markdown_text;
    use ratatui::layout::Rect;
    use ratatui::style::Color;
    use vt100::Parser;

    #[test]
    fn writes_bold_then_regular_spans() {
@@ -292,4 +306,240 @@ mod tests {
            String::from_utf8(expected).unwrap()
        );
    }

    #[test]
    fn vt100_blockquote_line_emits_green_fg() {
        // Set up a small off-screen terminal
        let width: u16 = 40;
        let height: u16 = 10;
        let backend = ratatui::backend::TestBackend::new(width, height);
        let mut term = crate::custom_terminal::Terminal::with_options(backend).expect("terminal");
        // Place viewport on the last line so history inserts scroll upward
        let viewport = Rect::new(0, height - 1, width, 1);
        term.set_viewport_area(viewport);

        // Build a blockquote-like line: apply line-level green style and prefix "> "
        let mut line: Line<'static> = Line::from(vec!["> ".into(), "Hello world".into()]);
        line = line.style(Color::Green);
        let mut ansi: Vec<u8> = Vec::new();
        insert_history_lines_to_writer(&mut term, &mut ansi, vec![line]);

        // Parse ANSI using vt100 and assert at least one non-default fg color appears
        let mut parser = Parser::new(height, width, 0);
        parser.process(&ansi);

        let mut saw_colored = false;
        'outer: for row in 0..height {
            for col in 0..width {
                if let Some(cell) = parser.screen().cell(row, col)
                    && cell.has_contents()
                    && cell.fgcolor() != vt100::Color::Default
                {
                    saw_colored = true;
                    break 'outer;
                }
            }
        }
        assert!(
            saw_colored,
            "expected at least one colored cell in vt100 output"
        );
    }

    #[test]
    fn vt100_blockquote_wrap_preserves_color_on_all_wrapped_lines() {
        // Force wrapping by using a narrow viewport width and a long blockquote line.
        let width: u16 = 20;
        let height: u16 = 8;
        let backend = ratatui::backend::TestBackend::new(width, height);
        let mut term = crate::custom_terminal::Terminal::with_options(backend).expect("terminal");
        // Viewport is the last line so history goes directly above it.
        let viewport = Rect::new(0, height - 1, width, 1);
        term.set_viewport_area(viewport);

        // Create a long blockquote with a distinct prefix and enough text to wrap.
        let mut line: Line<'static> = Line::from(vec![
            "> ".into(),
            "This is a long quoted line that should wrap".into(),
        ]);
        line = line.style(Color::Green);

        let mut ansi: Vec<u8> = Vec::new();
        insert_history_lines_to_writer(&mut term, &mut ansi, vec![line]);

        // Parse and inspect the final screen buffer.
        let mut parser = Parser::new(height, width, 0);
        parser.process(&ansi);
        let screen = parser.screen();

        // Collect rows that are non-empty; these should correspond to our wrapped lines.
        let mut non_empty_rows: Vec<u16> = Vec::new();
        for row in 0..height {
            let mut any = false;
            for col in 0..width {
                if let Some(cell) = screen.cell(row, col)
                    && cell.has_contents()
                    && cell.contents() != "\0"
                    && cell.contents() != " "
                {
                    any = true;
                    break;
                }
            }
            if any {
                non_empty_rows.push(row);
            }
        }

        // Expect at least two rows due to wrapping.
        assert!(
            non_empty_rows.len() >= 2,
            "expected wrapped output to span >=2 rows, got {non_empty_rows:?}",
        );

        // For each non-empty row, ensure all non-space cells are using a non-default fg color.
        for row in non_empty_rows {
            for col in 0..width {
                if let Some(cell) = screen.cell(row, col) {
                    let contents = cell.contents();
                    if !contents.is_empty() && contents != " " {
                        assert!(
                            cell.fgcolor() != vt100::Color::Default,
                            "expected non-default fg on row {row} col {col}, got {:?}",
                            cell.fgcolor()
                        );
                    }
                }
            }
        }
    }

    #[test]
    fn vt100_colored_prefix_then_plain_text_resets_color() {
        let width: u16 = 40;
        let height: u16 = 6;
        let backend = ratatui::backend::TestBackend::new(width, height);
        let mut term = crate::custom_terminal::Terminal::with_options(backend).expect("terminal");
        let viewport = Rect::new(0, height - 1, width, 1);
        term.set_viewport_area(viewport);

        // First span colored, rest plain.
        let line: Line<'static> = Line::from(vec![
            Span::styled("1. ", ratatui::style::Style::default().fg(Color::LightBlue)),
            Span::raw("Hello world"),
        ]);

        let mut ansi: Vec<u8> = Vec::new();
        insert_history_lines_to_writer(&mut term, &mut ansi, vec![line]);

        let mut parser = Parser::new(height, width, 0);
        parser.process(&ansi);
        let screen = parser.screen();

        // Find the first non-empty row; verify first three cells are colored, following cells default.
        'rows: for row in 0..height {
            let mut has_text = false;
            for col in 0..width {
                if let Some(cell) = screen.cell(row, col)
                    && cell.has_contents()
                    && cell.contents() != " "
                {
                    has_text = true;
                    break;
                }
            }
            if !has_text {
                continue;
            }

            // Expect "1. Hello world" starting at col 0.
            for col in 0..3 {
                let cell = screen.cell(row, col).unwrap();
                assert!(
                    cell.fgcolor() != vt100::Color::Default,
                    "expected colored prefix at col {col}, got {:?}",
                    cell.fgcolor()
                );
            }
            for col in 3..(3 + "Hello world".len() as u16) {
                let cell = screen.cell(row, col).unwrap();
                assert_eq!(
                    cell.fgcolor(),
                    vt100::Color::Default,
                    "expected default color for plain text at col {col}, got {:?}",
                    cell.fgcolor()
                );
            }
            break 'rows;
        }
    }

    #[test]
    fn vt100_deep_nested_mixed_list_third_level_marker_is_colored() {
        // Markdown with five levels (ordered → unordered → ordered → unordered → unordered).
        let md = "1. First\n - Second level\n 1. Third level (ordered)\n - Fourth level (bullet)\n - Fifth level to test indent consistency\n";
        let text = render_markdown_text(md);
        let lines: Vec<Line<'static>> = text.lines.clone();

        let width: u16 = 60;
        let height: u16 = 12;
        let backend = ratatui::backend::TestBackend::new(width, height);
        let mut term = crate::custom_terminal::Terminal::with_options(backend).expect("terminal");
        let viewport = ratatui::layout::Rect::new(0, height - 1, width, 1);
        term.set_viewport_area(viewport);

        let mut ansi: Vec<u8> = Vec::new();
        insert_history_lines_to_writer(&mut term, &mut ansi, lines);

        let mut parser = Parser::new(height, width, 0);
        parser.process(&ansi);
        let screen = parser.screen();

        // Reconstruct screen rows as strings to locate the 3rd level line.
        let mut rows: Vec<String> = Vec::with_capacity(height as usize);
        for row in 0..height {
            let mut s = String::with_capacity(width as usize);
            for col in 0..width {
                if let Some(cell) = screen.cell(row, col) {
                    if let Some(ch) = cell.contents().chars().next() {
                        s.push(ch);
                    } else {
                        s.push(' ');
                    }
                } else {
                    s.push(' ');
                }
            }
            rows.push(s.trim_end().to_string());
        }

        let needle = "1. Third level (ordered)";
        let row_idx = rows
            .iter()
            .position(|r| r.contains(needle))
            .unwrap_or_else(|| {
                panic!("expected to find row containing {needle:?}, have rows: {rows:?}")
            });
        let col_start = rows[row_idx].find(needle).unwrap() as u16; // column where '1' starts

        // Verify that the numeric marker ("1.") at the third level is colored
        // (non-default fg) and the content after the following space resets to default.
        for c in [col_start, col_start + 1] {
            let cell = screen.cell(row_idx as u16, c).unwrap();
            assert!(
                cell.fgcolor() != vt100::Color::Default,
                "expected colored 3rd-level marker at row {row_idx} col {c}, got {:?}",
                cell.fgcolor()
            );
        }
        let content_col = col_start + 3; // skip '1', '.', and the space
        if let Some(cell) = screen.cell(row_idx as u16, content_col) {
            assert_eq!(
                cell.fgcolor(),
                vt100::Color::Default,
                "expected default color for 3rd-level content at row {row_idx} col {content_col}, got {:?}",
                cell.fgcolor()
            );
        }
    }
}

@@ -45,6 +45,7 @@ pub mod insert_history;
mod key_hint;
pub mod live_wrap;
mod markdown;
mod markdown_render;
mod markdown_stream;
pub mod onboarding;
mod pager_overlay;
@@ -61,11 +62,6 @@ mod user_approval_widget;
mod version;
mod wrapping;

// Internal vt100-based replay tests live as a separate source file to keep them
// close to the widget code. Include them in unit tests.
#[cfg(test)]
mod chatwidget_stream_tests;

#[cfg(not(debug_assertions))]
mod updates;

@@ -312,11 +308,7 @@ async fn run_ratatui_app(
        ..
    } = cli;

    let auth_manager = AuthManager::shared(
        config.codex_home.clone(),
        config.preferred_auth_method,
        config.responses_originator_header.clone(),
    );
    let auth_manager = AuthManager::shared(config.codex_home.clone(), config.preferred_auth_method);
    let login_status = get_login_status(&config);
    let should_show_onboarding =
        should_show_onboarding(login_status, &config, should_show_trust_screen);
@@ -400,11 +392,7 @@ fn get_login_status(config: &Config) -> LoginStatus {
    // Reading the OpenAI API key is an async operation because it may need
    // to refresh the token. Block on it.
    let codex_home = config.codex_home.clone();
    match CodexAuth::from_codex_home(
        &codex_home,
        config.preferred_auth_method,
        &config.responses_originator_header,
    ) {
    match CodexAuth::from_codex_home(&codex_home, config.preferred_auth_method) {
        Ok(Some(auth)) => LoginStatus::AuthMode(auth.mode),
        Ok(None) => LoginStatus::NotAuthenticated,
        Err(err) => {

@@ -1,8 +1,6 @@
use crate::citation_regex::CITATION_REGEX;
use codex_core::config::Config;
use codex_core::config_types::UriBasedFileOpener;
use ratatui::text::Line;
use std::borrow::Cow;
use std::path::Path;

pub(crate) fn append_markdown(
@@ -19,238 +17,13 @@ fn append_markdown_with_opener_and_cwd(
    file_opener: UriBasedFileOpener,
    cwd: &Path,
) {
    // Historically, we fed the entire `markdown_source` into the renderer in
    // one pass. However, fenced code blocks sometimes lost leading whitespace
    // when formatted by the markdown renderer/highlighter. To preserve code
    // block content exactly, split the source into "text" and "code" segments:
    // - Render non-code text through `tui_markdown` (with citation rewrite).
    // - Render code block content verbatim as plain lines without additional
    //   formatting, preserving leading spaces.
    for seg in split_text_and_fences(markdown_source) {
        match seg {
            Segment::Text(s) => {
                let processed = rewrite_file_citations(&s, file_opener, cwd);
                let rendered = tui_markdown::from_str(&processed);
                crate::render::line_utils::push_owned_lines(&rendered.lines, lines);
            }
            Segment::Code { content, .. } => {
                // Emit the code content exactly as-is, line by line.
                // We don't attempt syntax highlighting to avoid whitespace bugs.
                for line in content.split_inclusive('\n') {
                    // split_inclusive keeps the trailing \n; we want lines without it.
                    let line = if let Some(stripped) = line.strip_suffix('\n') {
                        stripped
                    } else {
                        line
                    };
                    let owned_line: Line<'static> = line.to_string().into();
                    lines.push(owned_line);
                }
            }
        }
    }
}

/// Rewrites file citations in `src` into markdown hyperlinks using the
/// provided `scheme` (`vscode`, `cursor`, etc.). The resulting URI follows the
/// format expected by VS Code-compatible file openers:
///
/// ```text
/// <scheme>://file<ABS_PATH>:<LINE>
/// ```
fn rewrite_file_citations<'a>(
    src: &'a str,
    file_opener: UriBasedFileOpener,
    cwd: &Path,
) -> Cow<'a, str> {
    // Map enum values to the corresponding URI scheme strings.
    let scheme: &str = match file_opener.get_scheme() {
        Some(scheme) => scheme,
        None => return Cow::Borrowed(src),
    };

    CITATION_REGEX.replace_all(src, |caps: &regex_lite::Captures<'_>| {
        let file = &caps[1];
        let start_line = &caps[2];

        // Resolve the path against `cwd` when it is relative.
        let absolute_path = {
            let p = Path::new(file);
            let absolute_path = if p.is_absolute() {
                path_clean::clean(p)
            } else {
                path_clean::clean(cwd.join(p))
            };
            // VS Code expects forward slashes even on Windows because URIs use
            // `/` as the path separator.
            absolute_path.to_string_lossy().replace('\\', "/")
        };

        // Render as a normal markdown link so the downstream renderer emits
        // the hyperlink escape sequence (when supported by the terminal).
        //
        // In practice, sometimes multiple citations for the same file, but with a
        // different line number, are shown sequentially, so we:
        // - include the line number in the label to disambiguate them
        // - add a space after the link to make it easier to read
        format!("[{file}:{start_line}]({scheme}://file{absolute_path}:{start_line}) ")
    })
}

// use shared helper from `line_utils`

// Minimal code block splitting.
// - Recognizes fenced blocks opened by ``` or ~~~ (allowing leading whitespace).
//   The opening fence may include a language string which we ignore.
//   The closing fence must be on its own line (ignoring surrounding whitespace).
// - Additionally recognizes indented code blocks that begin after a blank line
//   with a line starting with at least 4 spaces or a tab, and continue for
//   consecutive lines that are blank or also indented by >= 4 spaces or a tab.
enum Segment {
    Text(String),
    Code {
        _lang: Option<String>,
        content: String,
    },
}

fn split_text_and_fences(src: &str) -> Vec<Segment> {
    let mut segments = Vec::new();
    let mut curr_text = String::new();
    #[derive(Copy, Clone, PartialEq)]
    enum CodeMode {
        None,
        Fenced,
        Indented,
    }
    let mut code_mode = CodeMode::None;
    let mut fence_token = "";
    let mut code_lang: Option<String> = None;
    let mut code_content = String::new();
    // We intentionally do not require a preceding blank line for indented code blocks,
    // since streamed model output often omits it. This favors preserving indentation.

    for line in src.split_inclusive('\n') {
        let line_no_nl = line.strip_suffix('\n');
        let trimmed_start = match line_no_nl {
            Some(l) => l.trim_start(),
            None => line.trim_start(),
        };
        if code_mode == CodeMode::None {
            let open = if trimmed_start.starts_with("```") {
                Some("```")
            } else if trimmed_start.starts_with("~~~") {
                Some("~~~")
            } else {
                None
            };
            if let Some(tok) = open {
                // Flush pending text segment.
                if !curr_text.is_empty() {
                    segments.push(Segment::Text(curr_text.clone()));
                    curr_text.clear();
                }
                fence_token = tok;
                // Capture language after the token on this line (before newline).
                let after = &trimmed_start[tok.len()..];
                let lang = after.trim();
                code_lang = if lang.is_empty() {
                    None
                } else {
                    Some(lang.to_string())
                };
                code_mode = CodeMode::Fenced;
                code_content.clear();
                // Do not include the opening fence line in output.
                continue;
            }
            // Check for start of an indented code block: only after a blank line
            // (or at the beginning), and the line must start with >=4 spaces or a tab.
            let raw_line = match line_no_nl {
                Some(l) => l,
                None => line,
            };
            let leading_spaces = raw_line.chars().take_while(|c| *c == ' ').count();
            let starts_with_tab = raw_line.starts_with('\t');
            // Consider any line that begins with >=4 spaces or a tab to start an
            // indented code block. This favors preserving indentation even when a
            // preceding blank line is omitted (common in streamed model output).
            let starts_indented_code = (leading_spaces >= 4) || starts_with_tab;
            if starts_indented_code {
                // Flush pending text and begin an indented code block.
                if !curr_text.is_empty() {
                    segments.push(Segment::Text(curr_text.clone()));
                    curr_text.clear();
                }
                code_mode = CodeMode::Indented;
                code_content.clear();
                code_content.push_str(line);
                // Inside code now; do not treat this line as normal text.
                continue;
            }
            // Normal text line.
            curr_text.push_str(line);
        } else {
            match code_mode {
                CodeMode::Fenced => {
                    // inside fenced code: check for closing fence on its own line
                    let trimmed = match line_no_nl {
                        Some(l) => l.trim(),
                        None => line.trim(),
                    };
                    if trimmed == fence_token {
                        // End code block: emit segment without fences
                        segments.push(Segment::Code {
                            _lang: code_lang.take(),
                            content: code_content.clone(),
                        });
                        code_content.clear();
                        code_mode = CodeMode::None;
                        fence_token = "";
                        continue;
                    }
                    // Accumulate code content exactly as-is.
                    code_content.push_str(line);
                }
                CodeMode::Indented => {
                    // Continue while the line is blank, or starts with >=4 spaces, or a tab.
                    let raw_line = match line_no_nl {
                        Some(l) => l,
                        None => line,
                    };
                    let is_blank = raw_line.trim().is_empty();
                    let leading_spaces = raw_line.chars().take_while(|c| *c == ' ').count();
                    let starts_with_tab = raw_line.starts_with('\t');
                    if is_blank || leading_spaces >= 4 || starts_with_tab {
                        code_content.push_str(line);
                    } else {
                        // Close the indented code block and reprocess this line as normal text.
                        segments.push(Segment::Code {
                            _lang: None,
                            content: code_content.clone(),
                        });
                        code_content.clear();
                        code_mode = CodeMode::None;
                        // Now handle current line as text.
                        curr_text.push_str(line);
                    }
                }
                CodeMode::None => unreachable!(),
            }
        }
    }

    if code_mode != CodeMode::None {
        // Unterminated code fence: treat accumulated content as a code segment.
        segments.push(Segment::Code {
            _lang: code_lang.take(),
            content: code_content.clone(),
        });
    } else if !curr_text.is_empty() {
        segments.push(Segment::Text(curr_text.clone()));
    }

    segments
    // Render via pulldown-cmark and rewrite citations during traversal (outside code blocks).
    let rendered = crate::markdown_render::render_markdown_text_with_citations(
        markdown_source,
        file_opener.get_scheme(),
        cwd,
    );
    crate::render::line_utils::push_owned_lines(&rendered.lines, lines);
}

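A note on `str::split_inclusive`, which both the removed splitter above and its emit loop rely on: it keeps each trailing newline attached to its piece, so blank lines and indentation survive accumulation via `push_str` and the pieces re-concatenate byte-for-byte. A quick standalone check:

```rust
fn main() {
    let src = "text\n    code 1\n    code 2\nafter\n";
    let parts: Vec<&str> = src.split_inclusive('\n').collect();
    // Each piece still ends with its '\n'.
    assert_eq!(parts, ["text\n", "    code 1\n", "    code 2\n", "after\n"]);
    // Joining the pieces reproduces the input exactly.
    assert_eq!(parts.concat(), src);
}
```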
#[cfg(test)]
@@ -258,88 +31,6 @@ mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn citation_is_rewritten_with_absolute_path() {
        let markdown = "See 【F:/src/main.rs†L42-L50】 for details.";
        let cwd = Path::new("/workspace");
        let result = rewrite_file_citations(markdown, UriBasedFileOpener::VsCode, cwd);

        assert_eq!(
            "See [/src/main.rs:42](vscode://file/src/main.rs:42) for details.",
            result
        );
    }

    #[test]
    fn citation_is_rewritten_with_relative_path() {
        let markdown = "Refer to 【F:lib/mod.rs†L5】 here.";
        let cwd = Path::new("/home/user/project");
        let result = rewrite_file_citations(markdown, UriBasedFileOpener::Windsurf, cwd);

        assert_eq!(
            "Refer to [lib/mod.rs:5](windsurf://file/home/user/project/lib/mod.rs:5) here.",
            result
        );
    }

    #[test]
    fn citation_followed_by_space_so_they_do_not_run_together() {
        let markdown = "References on lines 【F:src/foo.rs†L24】【F:src/foo.rs†L42】";
        let cwd = Path::new("/home/user/project");
        let result = rewrite_file_citations(markdown, UriBasedFileOpener::VsCode, cwd);

        assert_eq!(
            "References on lines [src/foo.rs:24](vscode://file/home/user/project/src/foo.rs:24) [src/foo.rs:42](vscode://file/home/user/project/src/foo.rs:42) ",
            result
        );
    }

    #[test]
    fn citation_unchanged_without_file_opener() {
        let markdown = "Look at 【F:file.rs†L1】.";
        let cwd = Path::new("/");
        let unchanged = rewrite_file_citations(markdown, UriBasedFileOpener::VsCode, cwd);
        // The helper itself always rewrites – this test validates behaviour of
        // append_markdown when `file_opener` is None.
        let mut out = Vec::new();
        append_markdown_with_opener_and_cwd(markdown, &mut out, UriBasedFileOpener::None, cwd);
        // Convert lines back to string for comparison.
        let rendered: String = out
            .iter()
            .flat_map(|l| l.spans.iter())
            .map(|s| s.content.clone())
            .collect::<Vec<_>>()
            .join("");
        assert_eq!(markdown, rendered);
        // Ensure helper rewrites.
        assert_ne!(markdown, unchanged);
    }

    #[test]
    fn fenced_code_blocks_preserve_leading_whitespace() {
        let src = "```\n  indented\n\t\twith tabs\n    four spaces\n```\n";
        let cwd = Path::new("/");
        let mut out = Vec::new();
        append_markdown_with_opener_and_cwd(src, &mut out, UriBasedFileOpener::None, cwd);
        let rendered: Vec<String> = out
            .iter()
            .map(|l| {
                l.spans
                    .iter()
                    .map(|s| s.content.clone())
                    .collect::<String>()
            })
            .collect();
        assert_eq!(
            rendered,
            vec![
                "  indented".to_string(),
                "\t\twith tabs".to_string(),
                "    four spaces".to_string()
            ]
        );
    }

    #[test]
    fn citations_not_rewritten_inside_code_blocks() {
        let src = "Before 【F:/x.rs†L1】\n```\nInside 【F:/x.rs†L2】\n```\nAfter 【F:/x.rs†L3】\n";
@@ -355,19 +46,31 @@ mod tests {
                    .collect::<String>()
            })
            .collect();
        // Expect first and last lines rewritten, middle line unchanged.
        assert!(rendered[0].contains("vscode://file"));
        assert_eq!(rendered[1], "Inside 【F:/x.rs†L2】");
        assert!(matches!(rendered.last(), Some(s) if s.contains("vscode://file")));
        // Expect a line containing the inside text unchanged.
        assert!(rendered.iter().any(|s| s.contains("Inside 【F:/x.rs†L2】")));
        // And first/last sections rewritten.
        assert!(
            rendered
                .first()
                .map(|s| s.contains("vscode://file"))
                .unwrap_or(false)
        );
        assert!(
            rendered
                .last()
                .map(|s| s.contains("vscode://file"))
                .unwrap_or(false)
        );
    }

    #[test]
    fn indented_code_blocks_preserve_leading_whitespace() {
        let src = "Before\n    code 1\n\tcode with tab\n    code 2\nAfter\n";
        // Basic sanity: indented code with surrounding blank lines should produce the indented line.
        let src = "Before\n\n    code 1\n\nAfter\n";
        let cwd = Path::new("/");
        let mut out = Vec::new();
        append_markdown_with_opener_and_cwd(src, &mut out, UriBasedFileOpener::None, cwd);
        let rendered: Vec<String> = out
        let lines: Vec<String> = out
            .iter()
            .map(|l| {
                l.spans
@@ -376,16 +79,7 @@ mod tests {
                    .collect::<String>()
            })
            .collect();
        assert_eq!(
            rendered,
            vec![
                "Before".to_string(),
                "    code 1".to_string(),
                "\tcode with tab".to_string(),
                "    code 2".to_string(),
                "After".to_string()
            ]
        );
        assert_eq!(lines, vec!["Before", "", "    code 1", "", "After"]);
    }

    #[test]
@@ -403,11 +97,17 @@ mod tests {
                    .collect::<String>()
            })
            .collect();
        // Expect first and last lines rewritten, and the indented code line present
        // unchanged (citations inside not rewritten). We do not assert on blank
        // separator lines since the markdown renderer may normalize them.
        assert!(rendered.iter().any(|s| s.contains("vscode://file")));
        assert!(rendered.iter().any(|s| s == "    Inside 【F:/x.rs†L2】"));
        assert!(
            rendered
                .iter()
                .any(|s| s.contains("Start") && s.contains("vscode://file"))
        );
        assert!(
            rendered
                .iter()
                .any(|s| s.contains("End") && s.contains("vscode://file"))
        );
        assert!(rendered.iter().any(|s| s.contains("Inside 【F:/x.rs†L2】")));
    }

    #[test]
@@ -435,27 +135,6 @@ mod tests {
        );
    }

    #[test]
    fn tui_markdown_splits_ordered_marker_and_text() {
        // With marker and content on the same line, tui_markdown keeps it as one line
        // even in the surrounding section context.
        let rendered = tui_markdown::from_str("Loose vs. tight list items:\n1. Tight item\n");
        let lines: Vec<String> = rendered
            .lines
            .iter()
            .map(|l| {
                l.spans
                    .iter()
                    .map(|s| s.content.clone())
                    .collect::<String>()
            })
            .collect();
        assert!(
            lines.iter().any(|w| w == "1. Tight item"),
            "expected single line '1. Tight item' in context: {lines:?}"
        );
    }

    #[test]
    fn append_markdown_matches_tui_markdown_for_ordered_item() {
        use codex_core::config_types::UriBasedFileOpener;
@@ -480,72 +159,6 @@ mod tests {
        assert_eq!(lines, vec!["1. Tight item".to_string()]);
    }

    #[test]
    fn tui_markdown_shape_for_loose_tight_section() {
        // Use the exact source from the session deltas used in tests.
        let source = r#"
Loose vs. tight list items:
1. Tight item
2. Another tight item

3.
Loose item
"#;

        let rendered = tui_markdown::from_str(source);
        let lines: Vec<String> = rendered
            .lines
            .iter()
            .map(|l| {
                l.spans
                    .iter()
                    .map(|s| s.content.clone())
                    .collect::<String>()
            })
            .collect();
        // Join into a single string and assert the exact shape we observe
        // from tui_markdown in this larger context (marker and content split).
        let joined = {
            let mut s = String::new();
            for (i, l) in lines.iter().enumerate() {
                s.push_str(l);
                if i + 1 < lines.len() {
                    s.push('\n');
                }
            }
            s
        };
        let expected = r#"Loose vs. tight list items:

1.
Tight item
2.
Another tight item
3.
Loose item"#;
        assert_eq!(
            joined, expected,
            "unexpected tui_markdown shape: {joined:?}"
        );
    }

    #[test]
    fn split_text_and_fences_keeps_ordered_list_line_as_text() {
        // No fences here; expect a single Text segment containing the full input.
        let src = "Loose vs. tight list items:\n1. Tight item\n";
        let segs = super::split_text_and_fences(src);
        assert_eq!(
            segs.len(),
            1,
            "expected single text segment, got {}",
            segs.len()
        );
        match &segs[0] {
            super::Segment::Text(s) => assert_eq!(s, src),
            _ => panic!("expected Text segment for non-fence input"),
        }
    }

    #[test]
    fn append_markdown_keeps_ordered_list_line_unsplit_in_context() {
        use codex_core::config_types::UriBasedFileOpener;

codex-rs/tui/src/markdown_render.rs (new file, 566 lines)
@@ -0,0 +1,566 @@
use crate::citation_regex::CITATION_REGEX;
use pulldown_cmark::CodeBlockKind;
use pulldown_cmark::CowStr;
use pulldown_cmark::Event;
use pulldown_cmark::HeadingLevel;
use pulldown_cmark::Options;
use pulldown_cmark::Parser;
use pulldown_cmark::Tag;
use pulldown_cmark::TagEnd;
use ratatui::style::Style;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::text::Text;
use std::borrow::Cow;
use std::path::Path;

#[derive(Clone, Debug)]
struct IndentContext {
    prefix: Vec<Span<'static>>,
    marker: Option<Vec<Span<'static>>>,
    is_list: bool,
}

impl IndentContext {
    fn new(prefix: Vec<Span<'static>>, marker: Option<Vec<Span<'static>>>, is_list: bool) -> Self {
        Self {
            prefix,
            marker,
            is_list,
        }
    }
}

#[allow(dead_code)]
pub(crate) fn render_markdown_text(input: &str) -> Text<'static> {
    let mut options = Options::empty();
    options.insert(Options::ENABLE_STRIKETHROUGH);
    let parser = Parser::new_ext(input, options);
    let mut w = Writer::new(parser, None, None);
    w.run();
    w.text
}

pub(crate) fn render_markdown_text_with_citations(
    input: &str,
    scheme: Option<&str>,
    cwd: &Path,
) -> Text<'static> {
    let mut options = Options::empty();
    options.insert(Options::ENABLE_STRIKETHROUGH);
    let parser = Parser::new_ext(input, options);
    let mut w = Writer::new(
        parser,
        scheme.map(|s| s.to_string()),
        Some(cwd.to_path_buf()),
    );
    w.run();
    w.text
}

struct Writer<'a, I>
where
    I: Iterator<Item = Event<'a>>,
{
    iter: I,
    text: Text<'static>,
    inline_styles: Vec<Style>,
    indent_stack: Vec<IndentContext>,
    list_indices: Vec<Option<u64>>,
    link: Option<String>,
    needs_newline: bool,
    pending_marker_line: bool,
    in_paragraph: bool,
    scheme: Option<String>,
    cwd: Option<std::path::PathBuf>,
    in_code_block: bool,
}

impl<'a, I> Writer<'a, I>
where
    I: Iterator<Item = Event<'a>>,
{
    fn new(iter: I, scheme: Option<String>, cwd: Option<std::path::PathBuf>) -> Self {
        Self {
            iter,
            text: Text::default(),
            inline_styles: Vec::new(),
            indent_stack: Vec::new(),
            list_indices: Vec::new(),
            link: None,
            needs_newline: false,
            pending_marker_line: false,
            in_paragraph: false,
            scheme,
            cwd,
            in_code_block: false,
        }
    }

    fn run(&mut self) {
        while let Some(ev) = self.iter.next() {
            self.handle_event(ev);
        }
    }

    fn handle_event(&mut self, event: Event<'a>) {
        match event {
            Event::Start(tag) => self.start_tag(tag),
            Event::End(tag) => self.end_tag(tag),
            Event::Text(text) => self.text(text),
            Event::Code(code) => self.code(code),
            Event::SoftBreak => self.soft_break(),
            Event::HardBreak => self.hard_break(),
            Event::Rule => {
                if !self.text.lines.is_empty() {
                    self.push_blank_line();
                }
                self.push_line(Line::from("———"));
                self.needs_newline = true;
            }
            Event::Html(html) => self.html(html, false),
            Event::InlineHtml(html) => self.html(html, true),
            Event::FootnoteReference(_) => {}
            Event::TaskListMarker(_) => {}
        }
    }

    fn start_tag(&mut self, tag: Tag<'a>) {
        match tag {
            Tag::Paragraph => self.start_paragraph(),
            Tag::Heading { level, .. } => self.start_heading(level),
            Tag::BlockQuote => self.start_blockquote(),
            Tag::CodeBlock(kind) => {
                let indent = match kind {
                    CodeBlockKind::Fenced(_) => None,
                    CodeBlockKind::Indented => Some(Span::from(" ".repeat(4))),
                };
                let lang = match kind {
                    CodeBlockKind::Fenced(lang) => Some(lang.to_string()),
                    CodeBlockKind::Indented => None,
                };
                self.start_codeblock(lang, indent)
            }
            Tag::List(start) => self.start_list(start),
            Tag::Item => self.start_item(),
            Tag::Emphasis => self.push_inline_style(Style::new().italic()),
            Tag::Strong => self.push_inline_style(Style::new().bold()),
            Tag::Strikethrough => self.push_inline_style(Style::new().crossed_out()),
            Tag::Link { dest_url, .. } => self.push_link(dest_url.to_string()),
            Tag::HtmlBlock
            | Tag::FootnoteDefinition(_)
            | Tag::Table(_)
            | Tag::TableHead
            | Tag::TableRow
            | Tag::TableCell
            | Tag::Image { .. }
            | Tag::MetadataBlock(_) => {}
        }
    }

    fn end_tag(&mut self, tag: TagEnd) {
        match tag {
            TagEnd::Paragraph => self.end_paragraph(),
            TagEnd::Heading(_) => self.end_heading(),
            TagEnd::BlockQuote => self.end_blockquote(),
            TagEnd::CodeBlock => self.end_codeblock(),
            TagEnd::List(_) => self.end_list(),
            TagEnd::Item => {
                self.indent_stack.pop();
                self.pending_marker_line = false;
            }
            TagEnd::Emphasis | TagEnd::Strong | TagEnd::Strikethrough => self.pop_inline_style(),
            TagEnd::Link => self.pop_link(),
            TagEnd::HtmlBlock
            | TagEnd::FootnoteDefinition
            | TagEnd::Table
            | TagEnd::TableHead
            | TagEnd::TableRow
            | TagEnd::TableCell
            | TagEnd::Image
            | TagEnd::MetadataBlock(_) => {}
        }
    }

    fn start_paragraph(&mut self) {
        if self.needs_newline {
            self.push_blank_line();
        }
        self.push_line(Line::default());
        self.needs_newline = false;
        self.in_paragraph = true;
    }

    fn end_paragraph(&mut self) {
        self.needs_newline = true;
        self.in_paragraph = false;
        self.pending_marker_line = false;
    }

    fn start_heading(&mut self, level: HeadingLevel) {
        if self.needs_newline {
            self.push_line(Line::default());
            self.needs_newline = false;
        }
        let heading_style = match level {
            HeadingLevel::H1 => Style::new().bold().underlined(),
            HeadingLevel::H2 => Style::new().bold(),
            HeadingLevel::H3 => Style::new().bold().italic(),
            HeadingLevel::H4 => Style::new().italic(),
            HeadingLevel::H5 => Style::new().italic(),
            HeadingLevel::H6 => Style::new().italic(),
        };
        let content = format!("{} ", "#".repeat(level as usize));
        self.push_line(Line::from(vec![Span::styled(content, heading_style)]));
        self.push_inline_style(heading_style);
        self.needs_newline = false;
    }

    fn end_heading(&mut self) {
        self.needs_newline = true;
        self.pop_inline_style();
    }

    fn start_blockquote(&mut self) {
        if self.needs_newline {
            self.push_blank_line();
            self.needs_newline = false;
        }
        self.indent_stack
            .push(IndentContext::new(vec![Span::from("> ")], None, false));
    }

    fn end_blockquote(&mut self) {
        self.indent_stack.pop();
        self.needs_newline = true;
    }

    fn text(&mut self, text: CowStr<'a>) {
        if self.pending_marker_line {
            self.push_line(Line::default());
        }
        self.pending_marker_line = false;
        if self.in_code_block
            && !self.needs_newline
            && self
                .text
                .lines
                .last()
                .map(|line| !line.spans.is_empty())
                .unwrap_or(false)
        {
            self.push_line(Line::default());
        }
        for (i, line) in text.lines().enumerate() {
            if self.needs_newline {
                self.push_line(Line::default());
                self.needs_newline = false;
            }
            if i > 0 {
                self.push_line(Line::default());
            }
            let mut content = line.to_string();
            if !self.in_code_block
                && let (Some(scheme), Some(cwd)) = (&self.scheme, &self.cwd)
            {
                let cow = rewrite_file_citations_with_scheme(&content, Some(scheme.as_str()), cwd);
                if let std::borrow::Cow::Owned(s) = cow {
                    content = s;
                }
            }
            let span = Span::styled(
                content,
                self.inline_styles.last().copied().unwrap_or_default(),
            );
            self.push_span(span);
        }
        self.needs_newline = false;
    }

    fn code(&mut self, code: CowStr<'a>) {
        if self.pending_marker_line {
            self.push_line(Line::default());
            self.pending_marker_line = false;
        }
        let span = Span::from(code.into_string()).dim();
        self.push_span(span);
    }

    fn html(&mut self, html: CowStr<'a>, inline: bool) {
        self.pending_marker_line = false;
        for (i, line) in html.lines().enumerate() {
            if self.needs_newline {
                self.push_line(Line::default());
                self.needs_newline = false;
            }
            if i > 0 {
                self.push_line(Line::default());
            }
            let style = self.inline_styles.last().copied().unwrap_or_default();
            self.push_span(Span::styled(line.to_string(), style));
        }
        self.needs_newline = !inline;
    }

    fn hard_break(&mut self) {
        self.push_line(Line::default());
    }

    fn soft_break(&mut self) {
        self.push_line(Line::default());
    }

    fn start_list(&mut self, index: Option<u64>) {
        if self.list_indices.is_empty() && self.needs_newline {
            self.push_line(Line::default());
        }
        self.list_indices.push(index);
    }

    fn end_list(&mut self) {
        self.list_indices.pop();
        self.needs_newline = true;
    }

    fn start_item(&mut self) {
        self.pending_marker_line = true;
        let depth = self.list_indices.len();
        let is_ordered = self
            .list_indices
            .last()
            .map(|index| index.is_some())
            .unwrap_or(false);
        let width = depth * 4 - 3;
        let marker = if let Some(last_index) = self.list_indices.last_mut() {
            match last_index {
                None => Some(vec![Span::from(" ".repeat(width - 1) + "- ")]),
                Some(index) => {
                    *index += 1;
                    Some(vec![format!("{:width$}. ", *index - 1).light_blue()])
                }
            }
        } else {
            None
        };
        let indent_prefix = if depth == 0 {
            Vec::new()
        } else {
            let indent_len = if is_ordered { width + 2 } else { width + 1 };
            vec![Span::from(" ".repeat(indent_len))]
        };
        self.indent_stack
            .push(IndentContext::new(indent_prefix, marker, true));
        self.needs_newline = false;
    }

    fn start_codeblock(&mut self, _lang: Option<String>, indent: Option<Span<'static>>) {
        if !self.text.lines.is_empty() {
            self.push_blank_line();
        }
        self.in_code_block = true;
        self.indent_stack.push(IndentContext::new(
            vec![indent.unwrap_or_default()],
            None,
            false,
        ));
        // let opener = match lang {
        //     Some(l) if !l.is_empty() => format!("```{l}"),
        //     _ => "```".to_string(),
        // };
        // self.push_line(opener.into());
        self.needs_newline = true;
    }

    fn end_codeblock(&mut self) {
        // self.push_line("```".into());
        self.needs_newline = true;
        self.in_code_block = false;
        self.indent_stack.pop();
    }

    fn push_inline_style(&mut self, style: Style) {
        let current = self.inline_styles.last().copied().unwrap_or_default();
        let merged = current.patch(style);
        self.inline_styles.push(merged);
    }

    fn pop_inline_style(&mut self) {
        self.inline_styles.pop();
    }

    fn push_link(&mut self, dest_url: String) {
        self.link = Some(dest_url);
    }

    fn pop_link(&mut self) {
        if let Some(link) = self.link.take() {
            self.push_span(" (".into());
            self.push_span(link.cyan().underlined());
            self.push_span(")".into());
        }
    }

    fn push_line(&mut self, line: Line<'static>) {
        let mut line = line;
        let was_pending = self.pending_marker_line;
        let mut spans = self.current_prefix_spans();
        spans.append(&mut line.spans);
        let blockquote_active = self
            .indent_stack
            .iter()
            .any(|ctx| ctx.prefix.iter().any(|s| s.content.contains('>')));
        let style = if blockquote_active {
            Style::new().green()
        } else {
            line.style
        };
        self.text.lines.push(Line::from_iter(spans).style(style));
        if was_pending {
            self.pending_marker_line = false;
        }
    }

    fn push_span(&mut self, span: Span<'static>) {
        if let Some(last) = self.text.lines.last_mut() {
            last.push_span(span);
        } else {
            self.push_line(Line::from(vec![span]));
        }
    }

    fn push_blank_line(&mut self) {
        if self.indent_stack.iter().all(|ctx| ctx.is_list) {
            self.text.lines.push(Line::default());
        } else {
            self.push_line(Line::default());
        }
    }

    fn current_prefix_spans(&self) -> Vec<Span<'static>> {
        let mut prefix: Vec<Span<'static>> = Vec::new();
        let last_marker_index = if self.pending_marker_line {
            self.indent_stack
                .iter()
                .enumerate()
                .rev()
                .find_map(|(i, ctx)| if ctx.marker.is_some() { Some(i) } else { None })
        } else {
            None
        };
        let last_list_index = self.indent_stack.iter().rposition(|ctx| ctx.is_list);

        for (i, ctx) in self.indent_stack.iter().enumerate() {
            if self.pending_marker_line {
                if Some(i) == last_marker_index
                    && let Some(marker) = &ctx.marker
                {
                    prefix.extend(marker.iter().cloned());
                    continue;
                }
                if ctx.is_list && last_marker_index.is_some_and(|idx| idx > i) {
                    continue;
                }
            } else if ctx.is_list && Some(i) != last_list_index {
                continue;
            }
            prefix.extend(ctx.prefix.iter().cloned());
        }

        prefix
    }
}

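`Writer` itself stays private; callers go through the two `render_markdown_text*` entry points above. A minimal smoke-test sketch of driving them (exact line and span shapes depend on the renderer, so it only flattens the lines rather than asserting specific spans):

```rust
// Sketch: assumes it lives in a test module inside the tui crate,
// since render_markdown_text is pub(crate).
#[test]
fn render_markdown_text_smoke() {
    let text = crate::markdown_render::render_markdown_text("# Title\n\n> quoted\n");
    for line in &text.lines {
        // Flatten each styled line back to its plain contents.
        let flat: String = line.spans.iter().map(|s| s.content.clone()).collect();
        println!("{flat}");
    }
    assert!(!text.lines.is_empty());
}
```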
pub(crate) fn rewrite_file_citations_with_scheme<'a>(
    src: &'a str,
    scheme_opt: Option<&str>,
    cwd: &Path,
) -> Cow<'a, str> {
    let scheme: &str = match scheme_opt {
        Some(s) => s,
        None => return Cow::Borrowed(src),
    };

    CITATION_REGEX.replace_all(src, |caps: &regex_lite::Captures<'_>| {
        let file = &caps[1];
        let start_line = &caps[2];

        // Resolve the path against `cwd` when it is relative.
        let absolute_path = {
            let p = Path::new(file);
            let absolute_path = if p.is_absolute() {
                path_clean::clean(p)
            } else {
                path_clean::clean(cwd.join(p))
            };
            // VS Code expects forward slashes even on Windows because URIs use
            // `/` as the path separator.
            absolute_path.to_string_lossy().replace('\\', "/")
        };

        // Render as a normal markdown link so the downstream renderer emits
        // the hyperlink escape sequence (when supported by the terminal).
        //
        // In practice, sometimes multiple citations for the same file, but with a
        // different line number, are shown sequentially, so we:
        // - include the line number in the label to disambiguate them
        // - add a space after the link to make it easier to read
        format!("[{file}:{start_line}]({scheme}://file{absolute_path}:{start_line}) ")
    })
}

#[cfg(test)]
mod markdown_render_tests {
    include!("markdown_render_tests.rs");
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn citation_is_rewritten_with_absolute_path() {
        let markdown = "See 【F:/src/main.rs†L42-L50】 for details.";
        let cwd = Path::new("/workspace");
        let result = rewrite_file_citations_with_scheme(markdown, Some("vscode"), cwd);

        assert_eq!(
            "See [/src/main.rs:42](vscode://file/src/main.rs:42) for details.",
            result
        );
    }

    #[test]
    fn citation_followed_by_space_so_they_do_not_run_together() {
        let markdown = "References on lines 【F:src/foo.rs†L24】【F:src/foo.rs†L42】";
        let cwd = Path::new("/home/user/project");
        let result = rewrite_file_citations_with_scheme(markdown, Some("vscode"), cwd);

        assert_eq!(
            "References on lines [src/foo.rs:24](vscode://file/home/user/project/src/foo.rs:24) [src/foo.rs:42](vscode://file/home/user/project/src/foo.rs:42) ",
            result
        );
    }

    #[test]
    fn citation_unchanged_without_file_opener() {
        let markdown = "Look at 【F:file.rs†L1】.";
        let cwd = Path::new("/");
        let unchanged = rewrite_file_citations_with_scheme(markdown, Some("vscode"), cwd);
        // The helper itself always rewrites – this test validates behaviour of
        // append_markdown when `file_opener` is None.
        let rendered = render_markdown_text_with_citations(markdown, None, cwd);
        // Convert lines back to string for comparison.
        let rendered: String = rendered
            .lines
            .iter()
            .flat_map(|l| l.spans.iter())
            .map(|s| s.content.clone())
            .collect::<Vec<_>>()
            .join("");
        assert_eq!(markdown, rendered);
        // Ensure helper rewrites.
        assert_ne!(markdown, unchanged);
    }
}
codex-rs/tui/src/markdown_render_tests.rs (new file, 995 lines)
@@ -0,0 +1,995 @@
use pretty_assertions::assert_eq;
|
||||
use ratatui::style::Stylize;
|
||||
use ratatui::text::Line;
|
||||
use ratatui::text::Span;
|
||||
use ratatui::text::Text;
|
||||
|
||||
use crate::markdown_render::render_markdown_text;
|
||||
use insta::assert_snapshot;
|
||||
|
||||
#[test]
|
||||
fn empty() {
|
||||
assert_eq!(render_markdown_text(""), Text::default());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn paragraph_single() {
|
||||
assert_eq!(
|
||||
render_markdown_text("Hello, world!"),
|
||||
Text::from("Hello, world!")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn paragraph_soft_break() {
|
||||
assert_eq!(
|
||||
render_markdown_text("Hello\nWorld"),
|
||||
Text::from_iter(["Hello", "World"])
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn paragraph_multiple() {
|
||||
assert_eq!(
|
||||
render_markdown_text("Paragraph 1\n\nParagraph 2"),
|
||||
Text::from_iter(["Paragraph 1", "", "Paragraph 2"])
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn headings() {
|
||||
let md = "# Heading 1\n## Heading 2\n### Heading 3\n#### Heading 4\n##### Heading 5\n###### Heading 6\n";
|
||||
let text = render_markdown_text(md);
|
||||
let expected = Text::from_iter([
|
||||
Line::from_iter(["# ".bold().underlined(), "Heading 1".bold().underlined()]),
|
||||
Line::default(),
|
||||
Line::from_iter(["## ".bold(), "Heading 2".bold()]),
|
||||
Line::default(),
|
||||
Line::from_iter(["### ".bold().italic(), "Heading 3".bold().italic()]),
|
||||
Line::default(),
|
||||
Line::from_iter(["#### ".italic(), "Heading 4".italic()]),
|
||||
Line::default(),
|
||||
Line::from_iter(["##### ".italic(), "Heading 5".italic()]),
|
||||
Line::default(),
|
||||
Line::from_iter(["###### ".italic(), "Heading 6".italic()]),
|
||||
]);
|
||||
assert_eq!(text, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn blockquote_single() {
|
||||
let text = render_markdown_text("> Blockquote");
|
||||
let expected = Text::from(Line::from_iter(["> ", "Blockquote"]).green());
|
||||
assert_eq!(text, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn blockquote_soft_break() {
|
||||
// Soft break via lazy continuation should render as a new line in blockquotes.
|
||||
let text = render_markdown_text("> This is a blockquote\nwith a soft break\n");
|
||||
let lines: Vec<String> = text
|
||||
.lines
|
||||
.iter()
|
||||
.map(|l| {
|
||||
l.spans
|
||||
.iter()
|
||||
.map(|s| s.content.clone())
|
||||
.collect::<String>()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(
|
||||
lines,
|
||||
vec![
|
||||
"> This is a blockquote".to_string(),
|
||||
"> with a soft break".to_string()
|
||||
]
|
||||
);
|
||||
}

#[test]
fn blockquote_multiple_with_break() {
    let text = render_markdown_text("> Blockquote 1\n\n> Blockquote 2\n");
    let expected = Text::from_iter([
        Line::from_iter(["> ", "Blockquote 1"]).green(),
        Line::default(),
        Line::from_iter(["> ", "Blockquote 2"]).green(),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn blockquote_three_paragraphs_short_lines() {
    let md = "> one\n>\n> two\n>\n> three\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["> ", "one"]).green(),
        Line::from_iter(["> "]).green(),
        Line::from_iter(["> ", "two"]).green(),
        Line::from_iter(["> "]).green(),
        Line::from_iter(["> ", "three"]).green(),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn blockquote_nested_two_levels() {
    let md = "> Level 1\n>> Level 2\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["> ", "Level 1"]).green(),
        Line::from_iter(["> "]).green(),
        Line::from_iter(["> ", "> ", "Level 2"]).green(),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn blockquote_with_list_items() {
    let md = "> - item 1\n> - item 2\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["> ", "- ", "item 1"]).green(),
        Line::from_iter(["> ", "- ", "item 2"]).green(),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn blockquote_with_ordered_list() {
    let md = "> 1. first\n> 2. second\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(vec![
            Span::from("> "),
            "1. ".light_blue(),
            Span::from("first"),
        ])
        .green(),
        Line::from_iter(vec![
            Span::from("> "),
            "2. ".light_blue(),
            Span::from("second"),
        ])
        .green(),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn blockquote_list_then_nested_blockquote() {
    let md = "> - parent\n> > child\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["> ", "- ", "parent"]).green(),
        Line::from_iter(["> ", " ", "> ", "child"]).green(),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn list_item_with_inline_blockquote_on_same_line() {
    let md = "1. > quoted\n";
    let text = render_markdown_text(md);
    let mut lines = text.lines.iter();
    let first = lines.next().expect("one line");
    // Expect content to include the ordered marker, a space, "> ", and the text
    let s: String = first.spans.iter().map(|sp| sp.content.clone()).collect();
    assert_eq!(s, "1. > quoted");
}

#[test]
fn blockquote_surrounded_by_blank_lines() {
    let md = "foo\n\n> bar\n\nbaz\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(
        lines,
        vec![
            "foo".to_string(),
            "".to_string(),
            "> bar".to_string(),
            "".to_string(),
            "baz".to_string(),
        ]
    );
}

#[test]
fn blockquote_in_ordered_list_on_next_line() {
    // Blockquote begins on a new line within an ordered list item; it should
    // render inline on the same marker line.
    let md = "1.\n > quoted\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["1. > quoted".to_string()]);
}

#[test]
fn blockquote_in_unordered_list_on_next_line() {
    // Blockquote begins on a new line within an unordered list item; it should
    // render inline on the same marker line.
    let md = "-\n > quoted\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["- > quoted".to_string()]);
}

#[test]
fn blockquote_two_paragraphs_inside_ordered_list_has_blank_line() {
    // Two blockquote paragraphs inside a list item should be separated by a blank line.
    let md = "1.\n > para 1\n >\n > para 2\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(
        lines,
        vec![
            "1. > para 1".to_string(),
            " > ".to_string(),
            " > para 2".to_string(),
        ],
        "expected blockquote content to stay aligned after list marker"
    );
}

#[test]
fn blockquote_inside_nested_list() {
    let md = "1. A\n - B\n > inner\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["1. A", " - B", " > inner"]);
}

#[test]
fn list_item_text_then_blockquote() {
    let md = "1. before\n > quoted\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["1. before", " > quoted"]);
}

#[test]
fn list_item_blockquote_then_text() {
    let md = "1.\n > quoted\n after\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["1. > quoted", " > after"]);
}

#[test]
fn list_item_text_blockquote_text() {
    let md = "1. before\n > quoted\n after\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["1. before", " > quoted", " > after"]);
}

#[test]
fn blockquote_with_heading_and_paragraph() {
    let md = "> # Heading\n> paragraph text\n";
    let text = render_markdown_text(md);
    // Validate on content shape; styling is handled elsewhere
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(
        lines,
        vec![
            "> # Heading".to_string(),
            "> ".to_string(),
            "> paragraph text".to_string(),
        ]
    );
}

#[test]
fn blockquote_heading_inherits_heading_style() {
    let text = render_markdown_text("> # test header\n> in blockquote\n");
    assert_eq!(
        text.lines,
        [
            Line::from_iter([
                "> ".into(),
                "# ".bold().underlined(),
                "test header".bold().underlined(),
            ])
            .green(),
            Line::from_iter(["> "]).green(),
            Line::from_iter(["> ", "in blockquote"]).green(),
        ]
    );
}

#[test]
fn blockquote_with_code_block() {
    let md = "> ```\n> code\n> ```\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["> code".to_string()]);
}

#[test]
fn blockquote_with_multiline_code_block() {
    let md = "> ```\n> first\n> second\n> ```\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["> first", "> second"]);
}

#[test]
fn nested_blockquote_with_inline_and_fenced_code() {
    let md = r#"> Nested quote with code:
> > Inner quote and `inline code`
> >
> > ```
> > # fenced code inside a quote
> > echo "hello from a quote"
> > ```
"#;
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(
        lines,
        vec![
            "> Nested quote with code:".to_string(),
            "> ".to_string(),
            "> > Inner quote and inline code".to_string(),
            "> > ".to_string(),
            "> > # fenced code inside a quote".to_string(),
            "> > echo \"hello from a quote\"".to_string(),
        ]
    );
}

#[test]
fn list_unordered_single() {
    let text = render_markdown_text("- List item 1\n");
    let expected = Text::from_iter([Line::from_iter(["- ", "List item 1"])]);
    assert_eq!(text, expected);
}

#[test]
fn list_unordered_multiple() {
    let text = render_markdown_text("- List item 1\n- List item 2\n");
    let expected = Text::from_iter([
        Line::from_iter(["- ", "List item 1"]),
        Line::from_iter(["- ", "List item 2"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn list_ordered() {
    let text = render_markdown_text("1. List item 1\n2. List item 2\n");
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "List item 1".into()]),
        Line::from_iter(["2. ".light_blue(), "List item 2".into()]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn list_nested() {
    let text = render_markdown_text("- List item 1\n - Nested list item 1\n");
    let expected = Text::from_iter([
        Line::from_iter(["- ", "List item 1"]),
        Line::from_iter([" - ", "Nested list item 1"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn list_ordered_custom_start() {
    let text = render_markdown_text("3. First\n4. Second\n");
    let expected = Text::from_iter([
        Line::from_iter(["3. ".light_blue(), "First".into()]),
        Line::from_iter(["4. ".light_blue(), "Second".into()]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn nested_unordered_in_ordered() {
    let md = "1. Outer\n - Inner A\n - Inner B\n2. Next\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "Outer".into()]),
        Line::from_iter([" - ", "Inner A"]),
        Line::from_iter([" - ", "Inner B"]),
        Line::from_iter(["2. ".light_blue(), "Next".into()]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn nested_ordered_in_unordered() {
    let md = "- Outer\n 1. One\n 2. Two\n- Last\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["- ", "Outer"]),
        Line::from_iter([" 1. ".light_blue(), "One".into()]),
        Line::from_iter([" 2. ".light_blue(), "Two".into()]),
        Line::from_iter(["- ", "Last"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn loose_list_item_multiple_paragraphs() {
    let md = "1. First paragraph\n\n Second paragraph of same item\n\n2. Next item\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "First paragraph".into()]),
        Line::default(),
        Line::from_iter([" ", "Second paragraph of same item"]),
        Line::from_iter(["2. ".light_blue(), "Next item".into()]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn tight_item_with_soft_break() {
    let md = "- item line1\n item line2\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["- ", "item line1"]),
        Line::from_iter([" ", "item line2"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn deeply_nested_mixed_three_levels() {
    let md = "1. A\n - B\n 1. C\n2. D\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "A".into()]),
        Line::from_iter([" - ", "B"]),
        Line::from_iter([" 1. ".light_blue(), "C".into()]),
        Line::from_iter(["2. ".light_blue(), "D".into()]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn loose_items_due_to_blank_line_between_items() {
    let md = "1. First\n\n2. Second\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "First".into()]),
        Line::from_iter(["2. ".light_blue(), "Second".into()]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn mixed_tight_then_loose_in_one_list() {
    let md = "1. Tight\n\n2.\n Loose\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "Tight".into()]),
        Line::from_iter(["2. ".light_blue(), "Loose".into()]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn ordered_item_with_indented_continuation_is_tight() {
    let md = "1. Foo\n Bar\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "Foo".into()]),
        Line::from_iter([" ", "Bar"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn inline_code() {
    let text = render_markdown_text("Example of `Inline code`");
    let expected = Line::from_iter(["Example of ".into(), "Inline code".dim()]).into();
    assert_eq!(text, expected);
}

#[test]
fn strong() {
    assert_eq!(
        render_markdown_text("**Strong**"),
        Text::from(Line::from("Strong".bold()))
    );
}

#[test]
fn emphasis() {
    assert_eq!(
        render_markdown_text("*Emphasis*"),
        Text::from(Line::from("Emphasis".italic()))
    );
}

#[test]
fn strikethrough() {
    assert_eq!(
        render_markdown_text("~~Strikethrough~~"),
        Text::from(Line::from("Strikethrough".crossed_out()))
    );
}

#[test]
fn strong_emphasis() {
    let text = render_markdown_text("**Strong *emphasis***");
    let expected = Text::from(Line::from_iter([
        "Strong ".bold(),
        "emphasis".bold().italic(),
    ]));
    assert_eq!(text, expected);
}

#[test]
fn link() {
    let text = render_markdown_text("[Link](https://example.com)");
    let expected = Text::from(Line::from_iter([
        "Link".into(),
        " (".into(),
        "https://example.com".cyan().underlined(),
        ")".into(),
    ]));
    assert_eq!(text, expected);
}

#[test]
fn code_block_unhighlighted() {
    let text = render_markdown_text("```rust\nfn main() {}\n```\n");
    let expected = Text::from_iter([Line::from_iter(["", "fn main() {}"])]);
    assert_eq!(text, expected);
}

#[test]
fn code_block_multiple_lines_root() {
    let md = "```\nfirst\nsecond\n```\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["", "first"]),
        Line::from_iter(["", "second"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn code_block_indented() {
    let md = " function greet() {\n console.log(\"Hi\");\n }\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter([" ", "function greet() {"]),
        Line::from_iter([" ", " console.log(\"Hi\");"]),
        Line::from_iter([" ", "}"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn horizontal_rule_renders_em_dashes() {
    let md = "Before\n\n---\n\nAfter\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["Before", "", "———", "", "After"]);
}

#[test]
fn code_block_with_inner_triple_backticks_outer_four() {
    let md = r#"````text
Here is a code block that shows another fenced block:

```md
# Inside fence
- bullet
- `inline code`
```
````
"#;
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(
        lines,
        vec![
            "Here is a code block that shows another fenced block:".to_string(),
            String::new(),
            "```md".to_string(),
            "# Inside fence".to_string(),
            "- bullet".to_string(),
            "- `inline code`".to_string(),
            "```".to_string(),
        ]
    );
}

#[test]
fn code_block_inside_unordered_list_item_is_indented() {
    let md = "- Item\n\n ```\n code line\n ```\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["- Item", "", " code line"]);
}

#[test]
fn code_block_multiple_lines_inside_unordered_list() {
    let md = "- Item\n\n ```\n first\n second\n ```\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["- Item", "", " first", " second"]);
}

#[test]
fn code_block_inside_unordered_list_item_multiple_lines() {
    let md = "- Item\n\n ```\n first\n second\n ```\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(lines, vec!["- Item", "", " first", " second"]);
}

#[test]
fn markdown_render_complex_snapshot() {
    let md = r#"# H1: Markdown Streaming Test
Intro paragraph with bold **text**, italic *text*, and inline code `x=1`.
Combined bold-italic ***both*** and escaped asterisks \*literal\*.
Auto-link: <https://example.com> and reference link [ref][r1].
Link with title: [hover me](https://example.com "Example") and mailto <mailto:test@example.com>.
Image: 
> Blockquote level 1
>> Blockquote level 2 with `inline code`
- Unordered list item 1
 - Nested bullet with italics _inner_
- Unordered list item 2 with ~~strikethrough~~
1. Ordered item one
2. Ordered item two with sublist:
 1) Alt-numbered subitem
- [ ] Task: unchecked
- [x] Task: checked with link [home](https://example.org)
---
Table below (alignment test):
| Left | Center | Right |
|:-----|:------:|------:|
| a | b | c |
Inline HTML: <sup>sup</sup> and <sub>sub</sub>.
HTML block:
<div style="border:1px solid #ccc;padding:2px">inline block</div>
Escapes: \_underscores\_, backslash \\, ticks ``code with `backtick` inside``.
Emoji shortcodes: :sparkles: :tada: (if supported).
Hard break test (line ends with two spaces)
Next line should be close to previous.
Footnote reference here[^1] and another[^longnote].
Horizontal rule with asterisks:
***
Fenced code block (JSON):
```json
{ "a": 1, "b": [true, false] }
```
Fenced code with tildes and triple backticks inside:
~~~markdown
To close ``` you need tildes.
~~~
Indented code block:
for i in range(3): print(i)
Definition-like list:
Term
: Definition with `code`.
Character entities: & < > " '
[^1]: This is the first footnote.
[^longnote]: A longer footnote with a link to [Rust](https://www.rust-lang.org/).
Escaped pipe in text: a \| b \| c.
URL with parentheses: [link](https://example.com/path_(with)_parens).
[r1]: https://example.com/ref "Reference link title"
"#;

    let text = render_markdown_text(md);
    // Convert to plain text lines for snapshot (ignore styles)
    let rendered = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect::<Vec<_>>()
        .join("\n");

    assert_snapshot!(rendered);
}
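
// The `assert_snapshot!` above is backed by the insta crate; the expected
// plain-text output lives in the generated .snap file included later in this
// patch, and snapshot changes are typically reviewed with `cargo insta review`.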

#[test]
fn ordered_item_with_code_block_and_nested_bullet() {
    let md = "1. **item 1**\n\n2. **item 2**\n ```\n code\n ```\n - `PROCESS_START` (a `OnceLock<Instant>`) keeps the start time for the entire process.\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|line| {
            line.spans
                .iter()
                .map(|span| span.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(
        lines,
        vec![
            "1. item 1".to_string(),
            "2. item 2".to_string(),
            String::new(),
            " code".to_string(),
            " - PROCESS_START (a OnceLock<Instant>) keeps the start time for the entire process.".to_string(),
        ]
    );
}

#[test]
fn nested_five_levels_mixed_lists() {
    let md = "1. First\n - Second level\n 1. Third level (ordered)\n - Fourth level (bullet)\n - Fifth level to test indent consistency\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "First".into()]),
        Line::from_iter([" - ", "Second level"]),
        Line::from_iter([" 1. ".light_blue(), "Third level (ordered)".into()]),
        Line::from_iter([" - ", "Fourth level (bullet)"]),
        Line::from_iter([
            " - ",
            "Fifth level to test indent consistency",
        ]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn html_inline_is_verbatim() {
    let md = "Hello <span>world</span>!";
    let text = render_markdown_text(md);
    let expected: Text = Line::from_iter(["Hello ", "<span>", "world", "</span>", "!"]).into();
    assert_eq!(text, expected);
}

#[test]
fn html_block_is_verbatim_multiline() {
    let md = "<div>\n <span>hi</span>\n</div>\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["<div>"]),
        Line::from_iter([" <span>hi</span>"]),
        Line::from_iter(["</div>"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn html_in_tight_ordered_item_soft_breaks_with_space() {
    let md = "1. Foo\n <i>Bar</i>\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "Foo".into()]),
        Line::from_iter([" ", "<i>", "Bar", "</i>"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn html_continuation_paragraph_in_unordered_item_indented() {
    let md = "- Item\n\n <em>continued</em>\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["- ", "Item"]),
        Line::default(),
        Line::from_iter([" ", "<em>", "continued", "</em>"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn unordered_item_continuation_paragraph_is_indented() {
    let md = "- Intro\n\n Continuation paragraph line 1\n Continuation paragraph line 2\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|line| {
            line.spans
                .iter()
                .map(|span| span.content.clone())
                .collect::<String>()
        })
        .collect();
    assert_eq!(
        lines,
        vec![
            "- Intro".to_string(),
            String::new(),
            " Continuation paragraph line 1".to_string(),
            " Continuation paragraph line 2".to_string(),
        ]
    );
}

#[test]
fn ordered_item_continuation_paragraph_is_indented() {
    let md = "1. Intro\n\n More details about intro\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "Intro".into()]),
        Line::default(),
        Line::from_iter([" ", "More details about intro"]),
    ]);
    assert_eq!(text, expected);
}

#[test]
fn nested_item_continuation_paragraph_is_indented() {
    let md = "1. A\n - B\n\n Continuation for B\n2. C\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["1. ".light_blue(), "A".into()]),
        Line::from_iter([" - ", "B"]),
        Line::default(),
        Line::from_iter([" ", "Continuation for B"]),
        Line::from_iter(["2. ".light_blue(), "C".into()]),
    ]);
    assert_eq!(text, expected);
}
@@ -4,8 +4,6 @@ use codex_core::config::Config;
use ratatui::text::Line;

use crate::markdown;
use crate::render::markdown_utils::is_inside_unclosed_fence;
use crate::render::markdown_utils::strip_empty_fenced_code_blocks;

/// Newline-gated accumulator that renders markdown and commits only fully
/// completed logical lines.
@@ -42,6 +40,7 @@ impl MarkdownStreamCollector {
    }

    pub fn push_delta(&mut self, delta: &str) {
        tracing::trace!("push_delta: {delta:?}");
        self.buffer.push_str(delta);
    }

@@ -49,14 +48,15 @@ impl MarkdownStreamCollector {
    /// since the last commit. When the buffer does not end with a newline, the
    /// final rendered line is considered incomplete and is not emitted.
    pub fn commit_complete_lines(&mut self, config: &Config) -> Vec<Line<'static>> {
        // In non-test builds, unwrap an outer ```markdown fence during commit as well,
        // so fence markers never appear in streamed history.
        let source = unwrap_markdown_language_fence_if_enabled(self.buffer.clone());
        let source = strip_empty_fenced_code_blocks(&source);

        let source = self.buffer.clone();
        let last_newline_idx = source.rfind('\n');
        let source = if let Some(last_newline_idx) = last_newline_idx {
            source[..=last_newline_idx].to_string()
        } else {
            return Vec::new();
        };
        let mut rendered: Vec<Line<'static>> = Vec::new();
        markdown::append_markdown(&source, &mut rendered, config);

        let mut complete_line_count = rendered.len();
        if complete_line_count > 0
            && crate::render::line_utils::is_blank_line_spaces_only(
@@ -65,87 +65,12 @@ impl MarkdownStreamCollector {
        {
            complete_line_count -= 1;
        }
        // Heuristic: if the buffer ends with a double newline and the last non-blank
        // rendered line looks like a list bullet with inline content (e.g., "- item"),
        // defer committing that line. Subsequent context (e.g., another list item)
        // can cause the renderer to split the bullet marker and text into separate
        // logical lines ("- " then "item"), which would otherwise duplicate content.
        if self.buffer.ends_with("\n\n") && complete_line_count > 0 {
            let last = &rendered[complete_line_count - 1];
            let mut text = String::new();
            for s in &last.spans {
                text.push_str(&s.content);
            }
            if text.starts_with("- ") && text.trim() != "-" {
                complete_line_count = complete_line_count.saturating_sub(1);
            }
        }
        if !self.buffer.ends_with('\n') {
            complete_line_count = complete_line_count.saturating_sub(1);
            // If we're inside an unclosed fenced code block, also drop the
            // last rendered line to avoid committing a partial code line.
            if is_inside_unclosed_fence(&source) {
                complete_line_count = complete_line_count.saturating_sub(1);
            }
            // If the next (incomplete) line appears to begin a list item,
            // also defer the previous completed line because the renderer may
            // retroactively treat it as part of the list (e.g., ordered list item 1).
            if let Some(last_nl) = source.rfind('\n') {
                let tail = &source[last_nl + 1..];
                if starts_with_list_marker(tail) {
                    complete_line_count = complete_line_count.saturating_sub(1);
                }
            }
        }

        // Conservatively withhold trailing list-like lines (unordered or ordered)
        // because streaming mid-item can cause the renderer to later split or
        // restructure them (e.g., duplicating content or separating the marker).
        // Only defers lines at the end of the out slice so previously committed
        // lines remain stable.
        if complete_line_count > self.committed_line_count {
            let mut safe_count = complete_line_count;
            while safe_count > self.committed_line_count {
                let l = &rendered[safe_count - 1];
                let mut text = String::new();
                for s in &l.spans {
                    text.push_str(&s.content);
                }
                let listish = is_potentially_volatile_list_line(&text);
                if listish {
                    safe_count -= 1;
                    continue;
                }
                break;
            }
            complete_line_count = safe_count;
        }

        if self.committed_line_count >= complete_line_count {
            return Vec::new();
        }

        let out_slice = &rendered[self.committed_line_count..complete_line_count];
        // Strong correctness: while a fenced code block is open (no closing fence yet),
        // do not emit any new lines from inside it. Wait until the fence closes to emit
        // the entire block together. This avoids stray backticks and misformatted content.
        if is_inside_unclosed_fence(&source) {
            return Vec::new();
        }

        // Additional conservative hold-back: if exactly one short, plain word
        // line would be emitted, defer it. This avoids committing a lone word
        // that might become the first ordered-list item once the next delta
        // arrives (e.g., next line starts with "2 " or "2. ").
        if out_slice.len() == 1 {
            let mut s = String::new();
            for sp in &out_slice[0].spans {
                s.push_str(&sp.content);
            }
            if is_short_plain_word(&s) {
                return Vec::new();
            }
        }

        let out = out_slice.to_vec();
        self.committed_line_count = complete_line_count;
@@ -157,12 +82,19 @@ impl MarkdownStreamCollector {
    /// for rendering. Optionally unwraps ```markdown language fences in
    /// non-test builds.
    pub fn finalize_and_drain(&mut self, config: &Config) -> Vec<Line<'static>> {
        let mut source: String = self.buffer.clone();
        let raw_buffer = self.buffer.clone();
        let mut source: String = raw_buffer.clone();
        if !source.ends_with('\n') {
            source.push('\n');
        }
        let source = unwrap_markdown_language_fence_if_enabled(source);
        let source = strip_empty_fenced_code_blocks(&source);
        tracing::debug!(
            raw_len = raw_buffer.len(),
            source_len = source.len(),
            "markdown finalize (raw length: {}, rendered length: {})",
            raw_buffer.len(),
            source.len()
        );
        tracing::trace!("markdown finalize (raw source):\n---\n{source}\n---");

        let mut rendered: Vec<Line<'static>> = Vec::new();
        markdown::append_markdown(&source, &mut rendered, config);
@@ -179,122 +111,6 @@ impl MarkdownStreamCollector {
    }
}

#[inline]
fn is_potentially_volatile_list_line(text: &str) -> bool {
    let t = text.trim_end();
    if t == "-" || t == "*" || t == "- " || t == "* " {
        return true;
    }
    if t.starts_with("- ") || t.starts_with("* ") {
        return true;
    }
    // ordered list like "1. " or "23. "
    let mut it = t.chars().peekable();
    let mut saw_digit = false;
    while let Some(&ch) = it.peek() {
        if ch.is_ascii_digit() {
            saw_digit = true;
            it.next();
            continue;
        }
        break;
    }
    if saw_digit && it.peek() == Some(&'.') {
        // consume '.'
        it.next();
        if it.peek() == Some(&' ') {
            return true;
        }
    }
    false
}

#[inline]
fn starts_with_list_marker(text: &str) -> bool {
    let t = text.trim_start();
    if t.starts_with("- ") || t.starts_with("* ") || t.starts_with("-\t") || t.starts_with("*\t") {
        return true;
    }
    // ordered list marker like "1 ", "1. ", "23 ", "23. "
    let mut it = t.chars().peekable();
    let mut saw_digit = false;
    while let Some(&ch) = it.peek() {
        if ch.is_ascii_digit() {
            saw_digit = true;
            it.next();
        } else {
            break;
        }
    }
    if !saw_digit {
        return false;
    }
    match it.peek() {
        Some('.') => {
            it.next();
            matches!(it.peek(), Some(' '))
        }
        Some(' ') => true,
        _ => false,
    }
}
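
// Worked examples for the two classifiers above (behavior follows directly
// from the code as written):
//
//   is_potentially_volatile_list_line("- item") -> true
//   is_potentially_volatile_list_line("23. x")  -> true   (digits + '.' + space)
//   is_potentially_volatile_list_line("1.x")    -> false  (no space after '.')
//   starts_with_list_marker("2 two")            -> true   (bare digits + space)
//   starts_with_list_marker("plain text")       -> false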

#[inline]
fn is_short_plain_word(s: &str) -> bool {
    let t = s.trim();
    if t.is_empty() || t.len() > 5 {
        return false;
    }
    t.chars().all(|c| c.is_alphanumeric())
}

/// fence helpers are provided by `crate::render::markdown_utils`
#[cfg(test)]
fn unwrap_markdown_language_fence_if_enabled(s: String) -> String {
    // In tests, keep content exactly as provided to simplify assertions.
    s
}

#[cfg(not(test))]
fn unwrap_markdown_language_fence_if_enabled(s: String) -> String {
    // Best-effort unwrap of a single outer fenced markdown block.
    // Recognizes common forms like ```markdown, ```md (any case), optional
    // surrounding whitespace, and flexible trailing newlines/CRLF.
    // If the block is not recognized, return the input unchanged.
    let lines = s.lines().collect::<Vec<_>>();
    if lines.len() < 2 {
        return s;
    }

    // Identify opening fence and language.
    let open = lines.first().map(|l| l.trim_start()).unwrap_or("");
    if !open.starts_with("```") {
        return s;
    }
    let lang = open.trim_start_matches("```").trim();
    let is_markdown_lang = lang.eq_ignore_ascii_case("markdown") || lang.eq_ignore_ascii_case("md");
    if !is_markdown_lang {
        return s;
    }

    // Find the last non-empty line and ensure it is a closing fence.
    let mut last_idx = lines.len() - 1;
    while last_idx > 0 && lines[last_idx].trim().is_empty() {
        last_idx -= 1;
    }
    if lines[last_idx].trim() != "```" {
        return s;
    }

    // Reconstruct the inner content between the fences.
    let mut out = String::new();
    for l in lines.iter().take(last_idx).skip(1) {
        out.push_str(l);
        out.push('\n');
    }
    out
}
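
// Example of the unwrap above (a sketch; behavior follows the code as written):
//
//   unwrap_markdown_language_fence_if_enabled("```markdown\n# Title\nBody\n```\n".into())
//       == "# Title\nBody\n"
//
// Inputs that are not a single outer ```markdown / ```md fence are returned
// unchanged.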

pub(crate) struct StepResult {
    pub history: Vec<Line<'static>>, // lines to insert into history this step
}
@@ -373,6 +189,7 @@ mod tests {
    use super::*;
    use codex_core::config::Config;
    use codex_core::config::ConfigOverrides;
    use ratatui::style::Color;

    fn test_config() -> Config {
        let overrides = ConfigOverrides {
@@ -406,6 +223,125 @@ mod tests {
        assert_eq!(out.len(), 1);
    }

    #[test]
    fn e2e_stream_blockquote_simple_is_green() {
        let cfg = test_config();
        let out = super::simulate_stream_markdown_for_tests(&["> Hello\n"], true, &cfg);
        assert_eq!(out.len(), 1);
        let l = &out[0];
        assert_eq!(
            l.style.fg,
            Some(Color::Green),
            "expected blockquote line fg green, got {:?}",
            l.style.fg
        );
    }

    #[test]
    fn e2e_stream_blockquote_nested_is_green() {
        let cfg = test_config();
        let out =
            super::simulate_stream_markdown_for_tests(&["> Level 1\n>> Level 2\n"], true, &cfg);
        // Filter out any blank lines that may be inserted at paragraph starts.
        let non_blank: Vec<_> = out
            .into_iter()
            .filter(|l| {
                let s = l
                    .spans
                    .iter()
                    .map(|sp| sp.content.clone())
                    .collect::<Vec<_>>()
                    .join("");
                let t = s.trim();
                // Ignore quote-only blank lines like ">" inserted at paragraph boundaries.
                !(t.is_empty() || t == ">")
            })
            .collect();
        assert_eq!(non_blank.len(), 2);
        assert_eq!(non_blank[0].style.fg, Some(Color::Green));
        assert_eq!(non_blank[1].style.fg, Some(Color::Green));
    }

    #[test]
    fn e2e_stream_blockquote_with_list_items_is_green() {
        let cfg = test_config();
        let out =
            super::simulate_stream_markdown_for_tests(&["> - item 1\n> - item 2\n"], true, &cfg);
        assert_eq!(out.len(), 2);
        assert_eq!(out[0].style.fg, Some(Color::Green));
        assert_eq!(out[1].style.fg, Some(Color::Green));
    }

    #[test]
    fn e2e_stream_nested_mixed_lists_ordered_marker_is_light_blue() {
        let cfg = test_config();
        let md = [
            "1. First\n",
            " - Second level\n",
            " 1. Third level (ordered)\n",
            " - Fourth level (bullet)\n",
            " - Fifth level to test indent consistency\n",
        ];
        let out = super::simulate_stream_markdown_for_tests(&md, true, &cfg);
        // Find the line that contains the third-level ordered text
        let find_idx = out.iter().position(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
                .contains("Third level (ordered)")
        });
        let idx = find_idx.expect("expected third-level ordered line");
        let line = &out[idx];
        // Expect at least one span on this line to be styled light blue
        let has_light_blue = line
            .spans
            .iter()
            .any(|s| s.style.fg == Some(ratatui::style::Color::LightBlue));
        assert!(
            has_light_blue,
            "expected an ordered-list marker span with light blue fg on: {line:?}"
        );
    }

    #[test]
    fn e2e_stream_blockquote_wrap_preserves_green_style() {
        let cfg = test_config();
        let long = "> This is a very long quoted line that should wrap across multiple columns to verify style preservation.";
        let out = super::simulate_stream_markdown_for_tests(&[long, "\n"], true, &cfg);
        // Wrap to a narrow width to force multiple output lines.
        let wrapped = crate::wrapping::word_wrap_lines(
            out.iter().collect::<Vec<_>>(),
            crate::wrapping::RtOptions::new(24),
        );
        // Filter out purely blank lines
        let non_blank: Vec<_> = wrapped
            .into_iter()
            .filter(|l| {
                let s = l
                    .spans
                    .iter()
                    .map(|sp| sp.content.clone())
                    .collect::<Vec<_>>()
                    .join("");
                !s.trim().is_empty()
            })
            .collect();
        assert!(
            non_blank.len() >= 2,
            "expected wrapped blockquote to span multiple lines"
        );
        for (i, l) in non_blank.iter().enumerate() {
            assert_eq!(
                l.style.fg,
                Some(Color::Green),
                "wrapped line {} should preserve green style, got {:?}",
                i,
                l.style.fg
            );
        }
    }

    #[test]
    fn heading_starts_on_new_line_when_following_paragraph() {
        let cfg = test_config();
@@ -490,7 +426,7 @@ mod tests {
            .collect();
        assert_eq!(
            s1,
            vec!["Sounds good!", ""],
            vec!["Sounds good!"],
            "expected paragraph followed by blank separator before heading chunk"
        );

@@ -509,7 +445,7 @@ mod tests {
            .collect();
        assert_eq!(
            s2,
            vec!["## Adding Bird subcommand"],
            vec!["", "## Adding Bird subcommand"],
            "expected the heading line only on the final commit"
        );

@@ -531,18 +467,6 @@ mod tests {
            vec!["Hello."],
            "unexpected markdown lines: {rendered_strings:?}"
        );

        let line_to_string = |l: &ratatui::text::Line<'_>| -> String {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<Vec<_>>()
                .join("")
        };

        assert_eq!(line_to_string(&out1[0]), "Sounds good!");
        assert_eq!(line_to_string(&out1[1]), "");
        assert_eq!(line_to_string(&out2[0]), "## Adding Bird subcommand");
    }

    fn lines_to_plain_strings(lines: &[ratatui::text::Line<'_>]) -> Vec<String> {
@@ -560,35 +484,11 @@ mod tests {

    #[test]
    fn lists_and_fences_commit_without_duplication() {
        let cfg = test_config();

        // List case
        let deltas = vec!["- a\n- ", "b\n- c\n"];
        let streamed = simulate_stream_markdown_for_tests(&deltas, true, &cfg);
        let streamed_str = lines_to_plain_strings(&streamed);

        let mut rendered_all: Vec<ratatui::text::Line<'static>> = Vec::new();
        crate::markdown::append_markdown("- a\n- b\n- c\n", &mut rendered_all, &cfg);
        let rendered_all_str = lines_to_plain_strings(&rendered_all);

        assert_eq!(
            streamed_str, rendered_all_str,
            "list streaming should equal full render without duplication"
        );
        assert_streamed_equals_full(&["- a\n- ", "b\n- c\n"]);

        // Fenced code case: stream in small chunks
        let deltas2 = vec!["```", "\nco", "de 1\ncode 2\n", "```\n"];
        let streamed2 = simulate_stream_markdown_for_tests(&deltas2, true, &cfg);
        let streamed2_str = lines_to_plain_strings(&streamed2);

        let mut rendered_all2: Vec<ratatui::text::Line<'static>> = Vec::new();
        crate::markdown::append_markdown("```\ncode 1\ncode 2\n```\n", &mut rendered_all2, &cfg);
        let rendered_all2_str = lines_to_plain_strings(&rendered_all2);

        assert_eq!(
            streamed2_str, rendered_all2_str,
            "fence streaming should equal full render without duplication"
        );
        assert_streamed_equals_full(&["```", "\nco", "de 1\ncode 2\n", "```\n"]);
    }

    #[test]
@@ -622,6 +522,56 @@ mod tests {
        );
    }

    #[test]
    fn e2e_stream_deep_nested_third_level_marker_is_light_blue() {
        let cfg = test_config();
        let md = "1. First\n - Second level\n 1. Third level (ordered)\n - Fourth level (bullet)\n - Fifth level to test indent consistency\n";
        let streamed = super::simulate_stream_markdown_for_tests(&[md], true, &cfg);
        let streamed_strs = lines_to_plain_strings(&streamed);

        // Locate the third-level line in the streamed output; avoid relying on exact indent.
        let target_suffix = "1. Third level (ordered)";
        let mut found = None;
        for line in &streamed {
            let s: String = line.spans.iter().map(|sp| sp.content.clone()).collect();
            if s.contains(target_suffix) {
                found = Some(line.clone());
                break;
            }
        }
        let line = found.unwrap_or_else(|| {
            panic!("expected to find the third-level ordered list line; got: {streamed_strs:?}")
        });

        // The marker (including indent and "1.") is expected to be in the first span
        // and colored LightBlue; following content should be default color.
        assert!(
            !line.spans.is_empty(),
            "expected non-empty spans for the third-level line"
        );
        let marker_span = &line.spans[0];
        assert_eq!(
            marker_span.style.fg,
            Some(Color::LightBlue),
            "expected LightBlue 3rd-level ordered marker, got {:?}",
            marker_span.style.fg
        );
        // Find the first non-empty non-space content span and verify it is default color.
        let mut content_fg = None;
        for sp in &line.spans[1..] {
            let t = sp.content.trim();
            if !t.is_empty() {
                content_fg = Some(sp.style.fg);
                break;
            }
        }
        assert_eq!(
            content_fg.flatten(),
            None,
            "expected default color for 3rd-level content, got {content_fg:?}"
        );
    }

    #[test]
    fn empty_fenced_block_is_dropped_and_separator_preserved_before_heading() {
        let cfg = test_config();
@@ -768,16 +718,12 @@ mod tests {
        let expected = vec![
            "Loose vs. tight list items:".to_string(),
            "".to_string(),
            "1. ".to_string(),
            "Tight item".to_string(),
            "2. ".to_string(),
            "Another tight item".to_string(),
            "3. ".to_string(),
            "Loose item with its own paragraph.".to_string(),
            "1. Tight item".to_string(),
            "2. Another tight item".to_string(),
            "3. Loose item with its own paragraph.".to_string(),
            "".to_string(),
            "This paragraph belongs to the same list item.".to_string(),
            "4. ".to_string(),
            "Second loose item with a nested list after a blank line.".to_string(),
            " This paragraph belongs to the same list item.".to_string(),
            "4. Second loose item with a nested list after a blank line.".to_string(),
            " - Nested bullet under a loose item".to_string(),
            " - Another nested bullet".to_string(),
        ];
@@ -788,63 +734,39 @@ mod tests {
    }

    // Targeted tests derived from fuzz findings. Each asserts streamed == full render.

    #[test]
    fn fuzz_class_bare_dash_then_task_item() {
    fn assert_streamed_equals_full(deltas: &[&str]) {
        let cfg = test_config();
        // Case similar to: ["two\n", "- \n* [x] done "]
        let deltas = vec!["two\n", "- \n* [x] done \n"];
        let streamed = simulate_stream_markdown_for_tests(&deltas, true, &cfg);
        let streamed = simulate_stream_markdown_for_tests(deltas, true, &cfg);
        let streamed_strs = lines_to_plain_strings(&streamed);
        let full: String = deltas.iter().copied().collect();
        let mut rendered: Vec<ratatui::text::Line<'static>> = Vec::new();
        crate::markdown::append_markdown(&full, &mut rendered, &cfg);
        let rendered_strs = lines_to_plain_strings(&rendered);
        assert_eq!(streamed_strs, rendered_strs);
        assert_eq!(streamed_strs, rendered_strs, "full:\n---\n{full}\n---");
    }

    #[test]
    fn fuzz_class_bullet_duplication_variant_1() {
        let cfg = test_config();
        // Case similar to: ["aph.\n- let one\n- bull", "et two\n\n second paragraph "]
        let deltas = vec!["aph.\n- let one\n- bull", "et two\n\n second paragraph \n"];
        let streamed = simulate_stream_markdown_for_tests(&deltas, true, &cfg);
        let streamed_strs = lines_to_plain_strings(&streamed);
        let full: String = deltas.iter().copied().collect();
        let mut rendered: Vec<ratatui::text::Line<'static>> = Vec::new();
        crate::markdown::append_markdown(&full, &mut rendered, &cfg);
        let rendered_strs = lines_to_plain_strings(&rendered);
        assert_eq!(streamed_strs, rendered_strs);
        assert_streamed_equals_full(&[
            "aph.\n- let one\n- bull",
            "et two\n\n second paragraph \n",
        ]);
    }

    #[test]
    fn fuzz_class_bullet_duplication_variant_2() {
        let cfg = test_config();
        // Case similar to: ["- e\n c", "e\n- bullet two\n\n second paragraph in bullet two\n"]
        let deltas = vec![
        assert_streamed_equals_full(&[
            "- e\n c",
            "e\n- bullet two\n\n second paragraph in bullet two\n",
        ];
        let streamed = simulate_stream_markdown_for_tests(&deltas, true, &cfg);
        let streamed_strs = lines_to_plain_strings(&streamed);
        let full: String = deltas.iter().copied().collect();
        let mut rendered: Vec<ratatui::text::Line<'static>> = Vec::new();
        crate::markdown::append_markdown(&full, &mut rendered, &cfg);
        let rendered_strs = lines_to_plain_strings(&rendered);
        assert_eq!(streamed_strs, rendered_strs);
        ]);
    }

    #[test]
    fn fuzz_class_ordered_list_split_weirdness() {
        let cfg = test_config();
        // Case similar to: ["one\n2", " two\n- \n* [x] d"]
        let deltas = vec!["one\n2", " two\n- \n* [x] d\n"];
        let streamed = simulate_stream_markdown_for_tests(&deltas, true, &cfg);
        let streamed_strs = lines_to_plain_strings(&streamed);
        let full: String = deltas.iter().copied().collect();
        let mut rendered: Vec<ratatui::text::Line<'static>> = Vec::new();
        crate::markdown::append_markdown(&full, &mut rendered, &cfg);
        let rendered_strs = lines_to_plain_strings(&rendered);
        assert_eq!(streamed_strs, rendered_strs);
    fn streaming_html_block_then_text_matches_full() {
        assert_streamed_equals_full(&[
            "HTML block:\n",
            "<div>inline block</div>\n",
            "more stuff\n",
        ]);
    }
}

@@ -2,7 +2,6 @@

use codex_core::AuthManager;
use codex_core::auth::CLIENT_ID;
use codex_core::config::Config;
use codex_login::ServerOptions;
use codex_login::ShutdownHandle;
use codex_login::run_login_server;
@@ -114,7 +113,6 @@ pub(crate) struct AuthModeWidget {
    pub login_status: LoginStatus,
    pub preferred_auth_method: AuthMode,
    pub auth_manager: Arc<AuthManager>,
    pub config: Config,
}

impl AuthModeWidget {
@@ -316,11 +314,7 @@ impl AuthModeWidget {
    }

        self.error = None;
        let opts = ServerOptions::new(
            self.codex_home.clone(),
            CLIENT_ID.to_string(),
            self.config.responses_originator_header.clone(),
        );
        let opts = ServerOptions::new(self.codex_home.clone(), CLIENT_ID.to_string());
        match run_login_server(opts) {
            Ok(child) => {
                let sign_in_state = self.sign_in_state.clone();

@@ -85,7 +85,6 @@ impl OnboardingScreen {
            login_status,
            auth_manager,
            preferred_auth_method,
            config,
        }))
    }
    let is_git_repo = get_git_repo_root(&cwd).is_some();

@@ -1,72 +0,0 @@
/// Returns true if the provided text contains an unclosed fenced code block
/// (opened by ``` or ~~~, closed by a matching fence on its own line).
pub fn is_inside_unclosed_fence(s: &str) -> bool {
    let mut open = false;
    for line in s.lines() {
        let t = line.trim_start();
        if t.starts_with("```") || t.starts_with("~~~") {
            if !open {
                open = true;
            } else {
                // closing fence on same pattern toggles off
                open = false;
            }
        }
    }
    open
}
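
// Examples (behavior follows the code as written):
//
//   is_inside_unclosed_fence("```rust\nfn main() {}\n")      -> true
//   is_inside_unclosed_fence("```rust\nfn main() {}\n```\n") -> false
//
// Any line starting with ``` or ~~~ toggles the state, so mixed fence styles
// are treated as matching pairs.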

/// Remove fenced code blocks that contain no content (whitespace-only) to avoid
/// streaming empty code blocks like ```lang\n``` or ```\n```.
pub fn strip_empty_fenced_code_blocks(s: &str) -> String {
    // Only remove complete fenced blocks that contain no non-whitespace content.
    // Leave all other content unchanged to avoid affecting partial streams.
    let lines: Vec<&str> = s.lines().collect();
    let mut out = String::with_capacity(s.len());
    let mut i = 0usize;
    while i < lines.len() {
        let line = lines[i];
        let trimmed_start = line.trim_start();
        let fence_token = if trimmed_start.starts_with("```") {
            "```"
        } else if trimmed_start.starts_with("~~~") {
            "~~~"
        } else {
            ""
        };
        if !fence_token.is_empty() {
            // Find a matching closing fence on its own line.
            let mut j = i + 1;
            let mut has_content = false;
            let mut found_close = false;
            while j < lines.len() {
                let l = lines[j];
                if l.trim() == fence_token {
                    found_close = true;
                    break;
                }
                if !l.trim().is_empty() {
                    has_content = true;
                }
                j += 1;
            }
            if found_close && !has_content {
                // Drop i..=j and insert at most a single blank separator line.
                if !out.ends_with('\n') {
                    out.push('\n');
                }
                i = j + 1;
                continue;
            }
            // Not an empty fenced block; emit as-is.
            out.push_str(line);
            out.push('\n');
            i += 1;
        } else {
            out.push_str(line);
            out.push('\n');
            i += 1;
        }
    }
    out
}
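
// Example (behavior follows the code as written):
//
//   strip_empty_fenced_code_blocks("before\n```rust\n```\nafter\n") == "before\nafter\n"
//
// Fences with real content, or fences that never close, pass through unchanged.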
|
||||
@@ -1,3 +1,2 @@
|
||||
pub mod highlight;
|
||||
pub mod line_utils;
|
||||
pub mod markdown_utils;
|
||||
|
||||
@@ -2,7 +2,6 @@ use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use chrono::DateTime;
|
||||
use chrono::TimeZone;
|
||||
use chrono::Utc;
|
||||
use codex_core::ConversationItem;
|
||||
use codex_core::ConversationsPage;
|
||||
@@ -255,19 +254,10 @@ impl PickerState {
|
||||
}
|
||||
|
||||
fn to_rows(page: ConversationsPage) -> Vec<Row> {
|
||||
use std::cmp::Reverse;
|
||||
let mut rows: Vec<Row> = page
|
||||
.items
|
||||
.into_iter()
|
||||
.filter_map(|it| head_to_row(&it))
|
||||
.collect();
|
||||
// Ensure newest-first ordering within the page by timestamp when available.
|
||||
let epoch = Utc.timestamp_opt(0, 0).single().unwrap_or_else(Utc::now);
|
||||
rows.sort_by_key(|r| Reverse(r.ts.unwrap_or(epoch)));
|
||||
rows
|
||||
page.items.into_iter().map(|it| head_to_row(&it)).collect()
|
||||
}
|
||||
|
||||
fn head_to_row(item: &ConversationItem) -> Option<Row> {
|
||||
fn head_to_row(item: &ConversationItem) -> Row {
|
||||
let mut ts: Option<DateTime<Utc>> = None;
|
||||
if let Some(first) = item.head.first()
|
||||
&& let Some(t) = first.get("timestamp").and_then(|v| v.as_str())
|
||||
@@ -276,16 +266,16 @@ fn head_to_row(item: &ConversationItem) -> Option<Row> {
|
||||
ts = Some(parsed.with_timezone(&Utc));
|
||||
}
|
||||
|
||||
let preview = preview_from_head(&item.head)?;
|
||||
let preview = preview.trim().to_string();
|
||||
if preview.is_empty() {
|
||||
return None;
|
||||
}
|
||||
Some(Row {
|
||||
let preview = preview_from_head(&item.head)
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty())
|
||||
.unwrap_or_else(|| String::from("(no message yet)"));
|
||||
|
||||
Row {
|
||||
path: item.path.clone(),
|
||||
preview,
|
||||
ts,
|
||||
})
|
||||
}
|
||||
}

fn preview_from_head(head: &[serde_json::Value]) -> Option<String> {
@@ -483,7 +473,7 @@ mod tests {
    }

    #[test]
    fn to_rows_sorts_descending_by_timestamp() {
    fn to_rows_preserves_backend_order() {
        // Construct two items with different timestamps and real user text.
        let a = ConversationItem {
            path: PathBuf::from("/tmp/a.jsonl"),
@@ -500,8 +490,8 @@ mod tests {
            reached_scan_cap: false,
        });
        assert_eq!(rows.len(), 2);
        // Expect the newer timestamp (B) first
        assert!(rows[0].preview.contains('B'));
        assert!(rows[1].preview.contains('A'));
        // Preserve the given order; backend already provides newest-first
        assert!(rows[0].preview.contains('A'));
        assert!(rows[1].preview.contains('B'));
    }
}

@@ -0,0 +1,62 @@
---
source: tui/src/markdown_render_tests.rs
expression: rendered
---
# H1: Markdown Streaming Test

Intro paragraph with bold text, italic text, and inline code x=1.
Combined bold-italic both and escaped asterisks *literal*.
Auto-link: https://example.com (https://example.com) and reference link [ref][r1].
Link with title: hover me (https://example.com) and mailto mailto:test@example.com (mailto:test@example.com).
Image: alt text

> Blockquote level 1
>
> > Blockquote level 2 with inline code

- Unordered list item 1
  - Nested bullet with italics inner
- Unordered list item 2 with strikethrough

1. Ordered item one
2. Ordered item two with sublist:
    1. Alt-numbered subitem

- [ ] Task: unchecked
- [x] Task: checked with link home (https://example.org)

———

Table below (alignment test):
| Left | Center | Right |
|:-----|:------:|------:|
| a    |   b    |     c |
Inline HTML: <sup>sup</sup> and <sub>sub</sub>.
HTML block:
<div style="border:1px solid #ccc;padding:2px">inline block</div>
Escapes: \_underscores\_, backslash \\, ticks ``code with `backtick` inside``.
Emoji shortcodes: :sparkles: :tada: (if supported).
Hard break test (line ends with two spaces)
Next line should be close to previous.
Footnote reference here[^1] and another[^longnote].
Horizontal rule with asterisks:
***
Fenced code block (JSON):
```json
{ "a": 1, "b": [true, false] }
```
Fenced code with tildes and triple backticks inside:
~~~markdown
To close ``` you need tildes.
~~~
Indented code block:
    for i in range(3): print(i)
Definition-like list:
Term
: Definition with `code`.
Character entities: & < > " '
[^1]: This is the first footnote.
[^longnote]: A longer footnote with a link to [Rust](https://www.rust-lang.org/).
Escaped pipe in text: a \| b \| c.
URL with parentheses: [link](https://example.com/path_(with)_parens).
[r1]: https://example.com/ref "Reference link title"
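
The new file above is an insta snapshot (note the source/expression front matter). A minimal sketch of the kind of test that produces it; render_markdown and SOURCE_MARKDOWN are placeholders, not names from this diff:

#[test]
fn markdown_render_snapshot() {
    // render_markdown stands in for the TUI's actual renderer entry point.
    let rendered = render_markdown(SOURCE_MARKDOWN);
    // insta writes and compares the .snap file shown above.
    insta::assert_snapshot!(rendered);
}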

@@ -380,16 +380,12 @@ mod tests {
        let expected = vec![
            "Loose vs. tight list items:".to_string(),
            "".to_string(),
            "1. ".to_string(),
            "Tight item".to_string(),
            "2. ".to_string(),
            "Another tight item".to_string(),
            "3. ".to_string(),
            "Loose item with its own paragraph.".to_string(),
            "1. Tight item".to_string(),
            "2. Another tight item".to_string(),
            "3. Loose item with its own paragraph.".to_string(),
            "".to_string(),
            "This paragraph belongs to the same list item.".to_string(),
            "4. ".to_string(),
            "Second loose item with a nested list after a blank line.".to_string(),
            "   This paragraph belongs to the same list item.".to_string(),
            "4. Second loose item with a nested list after a blank line.".to_string(),
            "   - Nested bullet under a loose item".to_string(),
            "   - Another nested bullet".to_string(),
        ];