mirror of
https://github.com/openai/codex.git
synced 2026-04-25 15:15:15 +00:00
## Why

`argument-comment-lint` was green in CI even though the repo still had many uncommented literal arguments. The main gap was target coverage: the repo wrapper did not force Cargo to inspect test-only call sites, so examples like the `latest_session_lookup_params(true, ...)` tests in `codex-rs/tui_app_server/src/lib.rs` never entered the blocking CI path. This change cleans up the existing backlog, makes the default repo lint path cover all Cargo targets, and starts rolling that stricter CI enforcement out on the platform where it is currently validated.

## What changed

- mechanically fixed existing `argument-comment-lint` violations across the `codex-rs` workspace, including tests, examples, and benches
- updated `tools/argument-comment-lint/run-prebuilt-linter.sh` and `tools/argument-comment-lint/run.sh` so non-`--fix` runs default to `--all-targets` unless the caller explicitly narrows the target set
- fixed both wrappers so forwarded cargo arguments after `--` are preserved with a single separator
- documented the new default behavior in `tools/argument-comment-lint/README.md`
- updated `rust-ci` so the macOS lint lane keeps the plain wrapper invocation and therefore enforces `--all-targets`, while Linux and Windows temporarily pass `-- --lib --bins`

That temporary CI split keeps the stricter all-targets check where it is already cleaned up, while leaving room to finish the remaining Linux- and Windows-specific target-gated cleanup before enabling `--all-targets` on those runners. The Linux and Windows failures on the intermediate revision were caused by the wrapper forwarding bug, not by additional lint findings in those lanes.
## Validation

- `bash -n tools/argument-comment-lint/run.sh`
- `bash -n tools/argument-comment-lint/run-prebuilt-linter.sh`
- shell-level wrapper forwarding check for `-- --lib --bins`
- shell-level wrapper forwarding check for `-- --tests`
- `just argument-comment-lint`
- `cargo test` in `tools/argument-comment-lint`
- `cargo test -p codex-terminal-detection`

## Follow-up

- Clean up remaining Linux-only target-gated callsites, then switch the Linux lint lane back to the plain wrapper invocation.
- Clean up remaining Windows-only target-gated callsites, then switch the Windows lint lane back to the plain wrapper invocation.
282 lines
9.0 KiB
Rust
282 lines
9.0 KiB
Rust
#![cfg(unix)]
|
|
|
|
mod common;
|
|
|
|
use std::sync::Arc;
|
|
|
|
use anyhow::Result;
|
|
use codex_exec_server::Environment;
|
|
use codex_exec_server::ExecBackend;
|
|
use codex_exec_server::ExecParams;
|
|
use codex_exec_server::ExecProcess;
|
|
use codex_exec_server::ProcessId;
|
|
use codex_exec_server::ReadResponse;
|
|
use codex_exec_server::StartedExecProcess;
|
|
use pretty_assertions::assert_eq;
|
|
use test_case::test_case;
|
|
use tokio::sync::watch;
|
|
use tokio::time::Duration;
|
|
use tokio::time::timeout;
|
|
|
|
use common::exec_server::ExecServerHarness;
|
|
use common::exec_server::exec_server;
|
|
|
|
/// Test fixture pairing an exec backend with the server harness (if any)
/// that backs it.
struct ProcessContext {
    // Backend used to start exec processes; local or remote depending on
    // how the context was created.
    backend: Arc<dyn ExecBackend>,
    // `Some` only for the remote case (see `create_process_context`); holds
    // the exec-server harness so tests can drive it, e.g. shut it down.
    server: Option<ExecServerHarness>,
}
|
|
|
|
async fn create_process_context(use_remote: bool) -> Result<ProcessContext> {
|
|
if use_remote {
|
|
let server = exec_server().await?;
|
|
let environment = Environment::create(Some(server.websocket_url().to_string())).await?;
|
|
Ok(ProcessContext {
|
|
backend: environment.get_exec_backend(),
|
|
server: Some(server),
|
|
})
|
|
} else {
|
|
let environment = Environment::create(/*exec_server_url*/ None).await?;
|
|
Ok(ProcessContext {
|
|
backend: environment.get_exec_backend(),
|
|
server: None,
|
|
})
|
|
}
|
|
}
|
|
|
|
async fn assert_exec_process_starts_and_exits(use_remote: bool) -> Result<()> {
|
|
let context = create_process_context(use_remote).await?;
|
|
let session = context
|
|
.backend
|
|
.start(ExecParams {
|
|
process_id: ProcessId::from("proc-1"),
|
|
argv: vec!["true".to_string()],
|
|
cwd: std::env::current_dir()?,
|
|
env: Default::default(),
|
|
tty: false,
|
|
arg0: None,
|
|
})
|
|
.await?;
|
|
assert_eq!(session.process.process_id().as_str(), "proc-1");
|
|
let wake_rx = session.process.subscribe_wake();
|
|
let (_, exit_code, closed) =
|
|
collect_process_output_from_reads(session.process, wake_rx).await?;
|
|
|
|
assert_eq!(exit_code, Some(0));
|
|
assert!(closed);
|
|
Ok(())
|
|
}
|
|
|
|
async fn read_process_until_change(
|
|
session: Arc<dyn ExecProcess>,
|
|
wake_rx: &mut watch::Receiver<u64>,
|
|
after_seq: Option<u64>,
|
|
) -> Result<ReadResponse> {
|
|
let response = session
|
|
.read(after_seq, /*max_bytes*/ None, /*wait_ms*/ Some(0))
|
|
.await?;
|
|
if !response.chunks.is_empty() || response.closed || response.failure.is_some() {
|
|
return Ok(response);
|
|
}
|
|
|
|
timeout(Duration::from_secs(2), wake_rx.changed()).await??;
|
|
session
|
|
.read(after_seq, /*max_bytes*/ None, /*wait_ms*/ Some(0))
|
|
.await
|
|
.map_err(Into::into)
|
|
}
|
|
|
|
/// Drains all output from `session` by reading repeatedly until the session
/// reports `closed`, returning the accumulated output, the exit code if one
/// was observed, and `true` (the closed flag) on success.
///
/// Fails if the session reports a failure before reaching the closed state.
async fn collect_process_output_from_reads(
    session: Arc<dyn ExecProcess>,
    mut wake_rx: watch::Receiver<u64>,
) -> Result<(String, Option<i32>, bool)> {
    let mut output = String::new();
    let mut exit_code = None;
    // Read cursor: `None` means "from the beginning"; otherwise the last
    // sequence number already consumed.
    let mut after_seq = None;
    loop {
        let response =
            read_process_until_change(Arc::clone(&session), &mut wake_rx, after_seq).await?;
        if let Some(message) = response.failure {
            anyhow::bail!("process failed before closed state: {message}");
        }
        for chunk in response.chunks {
            // Lossy conversion so non-UTF-8 bytes don't abort the collection.
            output.push_str(&String::from_utf8_lossy(&chunk.chunk.into_inner()));
            after_seq = Some(chunk.seq);
        }
        if response.exited {
            exit_code = response.exit_code;
        }
        if response.closed {
            break;
        }
        // Advance the cursor past everything the server reported, even when
        // this read carried no chunks; keep the previous cursor if `next_seq`
        // underflows (i.e. is 0).
        after_seq = response.next_seq.checked_sub(1).or(after_seq);
    }
    drop(session);
    Ok((output, exit_code, true))
}
|
|
|
|
async fn assert_exec_process_streams_output(use_remote: bool) -> Result<()> {
|
|
let context = create_process_context(use_remote).await?;
|
|
let process_id = "proc-stream".to_string();
|
|
let session = context
|
|
.backend
|
|
.start(ExecParams {
|
|
process_id: process_id.clone().into(),
|
|
argv: vec![
|
|
"/bin/sh".to_string(),
|
|
"-c".to_string(),
|
|
"sleep 0.05; printf 'session output\\n'".to_string(),
|
|
],
|
|
cwd: std::env::current_dir()?,
|
|
env: Default::default(),
|
|
tty: false,
|
|
arg0: None,
|
|
})
|
|
.await?;
|
|
assert_eq!(session.process.process_id().as_str(), process_id);
|
|
|
|
let StartedExecProcess { process } = session;
|
|
let wake_rx = process.subscribe_wake();
|
|
let (output, exit_code, closed) = collect_process_output_from_reads(process, wake_rx).await?;
|
|
assert_eq!(output, "session output\n");
|
|
assert_eq!(exit_code, Some(0));
|
|
assert!(closed);
|
|
Ok(())
|
|
}
|
|
|
|
async fn assert_exec_process_write_then_read(use_remote: bool) -> Result<()> {
|
|
let context = create_process_context(use_remote).await?;
|
|
let process_id = "proc-stdin".to_string();
|
|
let session = context
|
|
.backend
|
|
.start(ExecParams {
|
|
process_id: process_id.clone().into(),
|
|
argv: vec![
|
|
"/usr/bin/python3".to_string(),
|
|
"-c".to_string(),
|
|
"import sys; line = sys.stdin.readline(); sys.stdout.write(f'from-stdin:{line}'); sys.stdout.flush()".to_string(),
|
|
],
|
|
cwd: std::env::current_dir()?,
|
|
env: Default::default(),
|
|
tty: true,
|
|
arg0: None,
|
|
})
|
|
.await?;
|
|
assert_eq!(session.process.process_id().as_str(), process_id);
|
|
|
|
tokio::time::sleep(Duration::from_millis(200)).await;
|
|
session.process.write(b"hello\n".to_vec()).await?;
|
|
let StartedExecProcess { process } = session;
|
|
let wake_rx = process.subscribe_wake();
|
|
let (output, exit_code, closed) = collect_process_output_from_reads(process, wake_rx).await?;
|
|
|
|
assert!(
|
|
output.contains("from-stdin:hello"),
|
|
"unexpected output: {output:?}"
|
|
);
|
|
assert_eq!(exit_code, Some(0));
|
|
assert!(closed);
|
|
Ok(())
|
|
}
|
|
|
|
async fn assert_exec_process_preserves_queued_events_before_subscribe(
|
|
use_remote: bool,
|
|
) -> Result<()> {
|
|
let context = create_process_context(use_remote).await?;
|
|
let session = context
|
|
.backend
|
|
.start(ExecParams {
|
|
process_id: ProcessId::from("proc-queued"),
|
|
argv: vec![
|
|
"/bin/sh".to_string(),
|
|
"-c".to_string(),
|
|
"printf 'queued output\\n'".to_string(),
|
|
],
|
|
cwd: std::env::current_dir()?,
|
|
env: Default::default(),
|
|
tty: false,
|
|
arg0: None,
|
|
})
|
|
.await?;
|
|
|
|
tokio::time::sleep(Duration::from_millis(200)).await;
|
|
|
|
let StartedExecProcess { process } = session;
|
|
let wake_rx = process.subscribe_wake();
|
|
let (output, exit_code, closed) = collect_process_output_from_reads(process, wake_rx).await?;
|
|
assert_eq!(output, "queued output\n");
|
|
assert_eq!(exit_code, Some(0));
|
|
assert!(closed);
|
|
Ok(())
|
|
}
|
|
|
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
|
async fn remote_exec_process_reports_transport_disconnect() -> Result<()> {
|
|
let mut context = create_process_context(/*use_remote*/ true).await?;
|
|
let session = context
|
|
.backend
|
|
.start(ExecParams {
|
|
process_id: ProcessId::from("proc-disconnect"),
|
|
argv: vec![
|
|
"/bin/sh".to_string(),
|
|
"-c".to_string(),
|
|
"sleep 10".to_string(),
|
|
],
|
|
cwd: std::env::current_dir()?,
|
|
env: Default::default(),
|
|
tty: false,
|
|
arg0: None,
|
|
})
|
|
.await?;
|
|
|
|
let server = context
|
|
.server
|
|
.as_mut()
|
|
.expect("remote context should include exec-server harness");
|
|
server.shutdown().await?;
|
|
|
|
let mut wake_rx = session.process.subscribe_wake();
|
|
let response =
|
|
read_process_until_change(session.process, &mut wake_rx, /*after_seq*/ None).await?;
|
|
let message = response
|
|
.failure
|
|
.expect("disconnect should surface as a failure");
|
|
assert!(
|
|
message.starts_with("exec-server transport disconnected"),
|
|
"unexpected failure message: {message}"
|
|
);
|
|
assert!(
|
|
response.closed,
|
|
"disconnect should close the process session"
|
|
);
|
|
|
|
Ok(())
|
|
}
|
|
|
|
// Runs the shared start-and-exit scenario against the local and remote backends.
#[test_case(false ; "local")]
#[test_case(true ; "remote")]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_process_starts_and_exits(use_remote: bool) -> Result<()> {
    assert_exec_process_starts_and_exits(use_remote).await
}
|
|
|
|
// Runs the shared output-streaming scenario against the local and remote backends.
#[test_case(false ; "local")]
#[test_case(true ; "remote")]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_process_streams_output(use_remote: bool) -> Result<()> {
    assert_exec_process_streams_output(use_remote).await
}
|
|
|
|
// Runs the shared stdin-write/stdout-read scenario against the local and remote backends.
#[test_case(false ; "local")]
#[test_case(true ; "remote")]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_process_write_then_read(use_remote: bool) -> Result<()> {
    assert_exec_process_write_then_read(use_remote).await
}
|
|
|
|
// Runs the shared queued-events-before-subscribe scenario against the local and
// remote backends.
#[test_case(false ; "local")]
#[test_case(true ; "remote")]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_process_preserves_queued_events_before_subscribe(use_remote: bool) -> Result<()> {
    assert_exec_process_preserves_queued_events_before_subscribe(use_remote).await
}
|