chore: clean up argument-comment lint and roll out all-target CI on macOS (#16054)

## Why

`argument-comment-lint` was green in CI even though the repo still had
many uncommented literal arguments. The main gap was target coverage:
the repo wrapper did not force Cargo to inspect test-only call sites, so
examples like the `latest_session_lookup_params(true, ...)` tests in
`codex-rs/tui_app_server/src/lib.rs` never entered the blocking CI path.

This change cleans up the existing backlog, makes the default repo lint
path cover all Cargo targets, and starts rolling that stricter CI
enforcement out on the platform where it is currently validated.

## What changed

- mechanically fixed existing `argument-comment-lint` violations across
the `codex-rs` workspace, including tests, examples, and benches
- updated `tools/argument-comment-lint/run-prebuilt-linter.sh` and
`tools/argument-comment-lint/run.sh` so non-`--fix` runs default to
`--all-targets` unless the caller explicitly narrows the target set
- fixed both wrappers so forwarded cargo arguments after `--` are
preserved with a single separator
- documented the new default behavior in
`tools/argument-comment-lint/README.md`
- updated `rust-ci` so the macOS lint lane keeps the plain wrapper
invocation and therefore enforces `--all-targets`, while Linux and
Windows temporarily pass `-- --lib --bins`

That temporary CI split keeps the stricter all-targets check where it is
already cleaned up, while leaving room to finish the remaining Linux-
and Windows-specific target-gated cleanup before enabling
`--all-targets` on those runners. The Linux and Windows failures on the
intermediate revision were caused by the wrapper forwarding bug, not by
additional lint findings in those lanes.

## Validation

- `bash -n tools/argument-comment-lint/run.sh`
- `bash -n tools/argument-comment-lint/run-prebuilt-linter.sh`
- shell-level wrapper forwarding check for `-- --lib --bins`
- shell-level wrapper forwarding check for `-- --tests`
- `just argument-comment-lint`
- `cargo test` in `tools/argument-comment-lint`
- `cargo test -p codex-terminal-detection`

## Follow-up

- Clean up remaining Linux-only target-gated callsites, then switch the
Linux lint lane back to the plain wrapper invocation.
- Clean up remaining Windows-only target-gated callsites, then switch
the Windows lint lane back to the plain wrapper invocation.
This commit is contained in:
Michael Bolin
2026-03-27 19:00:44 -07:00
committed by GitHub
parent ed977b42ac
commit 61dfe0b86c
307 changed files with 7724 additions and 4710 deletions

View File

@@ -94,7 +94,8 @@ fn json_fragment(text: &str) -> String {
}
fn non_openai_model_provider(server: &MockServer) -> ModelProviderInfo {
let mut provider = built_in_model_providers(/* openai_base_url */ None)["openai"].clone();
let mut provider =
built_in_model_providers(/* openai_base_url */ None)["openai"].clone();
provider.name = "OpenAI (test)".into();
provider.base_url = Some(format!("{}/v1", server.uri()));
provider.supports_websockets = false;
@@ -413,11 +414,11 @@ async fn manual_compact_uses_custom_prompt() {
let server = start_mock_server().await;
let first_turn = sse(vec![
ev_assistant_message("m0", FIRST_REPLY),
ev_completed_with_tokens("r0", 80),
ev_completed_with_tokens("r0", /*total_tokens*/ 80),
]);
let compact_turn = sse(vec![
ev_assistant_message("m1", SUMMARY_TEXT),
ev_completed_with_tokens("r1", 100),
ev_completed_with_tokens("r1", /*total_tokens*/ 100),
]);
let request_log = mount_sse_sequence(&server, vec![first_turn, compact_turn]).await;
@@ -508,7 +509,7 @@ async fn manual_compact_emits_api_and_local_token_usage_events() {
// history.
let sse_compact = sse(vec![
ev_assistant_message("m1", SUMMARY_TEXT),
ev_completed_with_tokens("r1", 0),
ev_completed_with_tokens("r1", /*total_tokens*/ 0),
]);
mount_sse_once(&server, sse_compact).await;
@@ -1211,21 +1212,21 @@ async fn auto_compact_runs_after_token_limit_hit() {
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 70_000),
ev_completed_with_tokens("r1", /*total_tokens*/ 70_000),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", "SECOND_REPLY"),
ev_completed_with_tokens("r2", 330_000),
ev_completed_with_tokens("r2", /*total_tokens*/ 330_000),
]);
let sse3 = sse(vec![
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
ev_completed_with_tokens("r3", 200),
ev_completed_with_tokens("r3", /*total_tokens*/ 200),
]);
let sse4 = sse(vec![
ev_assistant_message("m4", FINAL_REPLY),
ev_completed_with_tokens("r4", 120),
ev_completed_with_tokens("r4", /*total_tokens*/ 120),
]);
let prefixed_auto_summary = AUTO_SUMMARY_TEXT;
@@ -1404,19 +1405,19 @@ async fn auto_compact_emits_context_compaction_items() {
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 70_000),
ev_completed_with_tokens("r1", /*total_tokens*/ 70_000),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", "SECOND_REPLY"),
ev_completed_with_tokens("r2", 330_000),
ev_completed_with_tokens("r2", /*total_tokens*/ 330_000),
]);
let sse3 = sse(vec![
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
ev_completed_with_tokens("r3", 200),
ev_completed_with_tokens("r3", /*total_tokens*/ 200),
]);
let sse4 = sse(vec![
ev_assistant_message("m4", FINAL_REPLY),
ev_completed_with_tokens("r4", 120),
ev_completed_with_tokens("r4", /*total_tokens*/ 120),
]);
mount_sse_sequence(&server, vec![sse1, sse2, sse3, sse4]).await;
@@ -1487,19 +1488,19 @@ async fn auto_compact_starts_after_turn_started() {
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 70_000),
ev_completed_with_tokens("r1", /*total_tokens*/ 70_000),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", "SECOND_REPLY"),
ev_completed_with_tokens("r2", 330_000),
ev_completed_with_tokens("r2", /*total_tokens*/ 330_000),
]);
let sse3 = sse(vec![
ev_assistant_message("m3", AUTO_SUMMARY_TEXT),
ev_completed_with_tokens("r3", 200),
ev_completed_with_tokens("r3", /*total_tokens*/ 200),
]);
let sse4 = sse(vec![
ev_assistant_message("m4", FINAL_REPLY),
ev_completed_with_tokens("r4", 120),
ev_completed_with_tokens("r4", /*total_tokens*/ 120),
]);
mount_sse_sequence(&server, vec![sse1, sse2, sse3, sse4]).await;
@@ -1704,8 +1705,8 @@ async fn pre_sampling_compact_runs_on_switch_to_smaller_context_model() {
&server,
ModelsResponse {
models: vec![
model_info_with_context_window(previous_model, 273_000),
model_info_with_context_window(next_model, 125_000),
model_info_with_context_window(previous_model, /*context_window*/ 273_000),
model_info_with_context_window(next_model, /*context_window*/ 125_000),
],
},
)
@@ -1716,15 +1717,15 @@ async fn pre_sampling_compact_runs_on_switch_to_smaller_context_model() {
vec![
sse(vec![
ev_assistant_message("m1", "before switch"),
ev_completed_with_tokens("r1", 120_000),
ev_completed_with_tokens("r1", /*total_tokens*/ 120_000),
]),
sse(vec![
ev_assistant_message("m2", "PRE_SAMPLING_SUMMARY"),
ev_completed_with_tokens("r2", 10),
ev_completed_with_tokens("r2", /*total_tokens*/ 10),
]),
sse(vec![
ev_assistant_message("m3", "after switch"),
ev_completed_with_tokens("r3", 100),
ev_completed_with_tokens("r3", /*total_tokens*/ 100),
]),
],
)
@@ -1830,8 +1831,8 @@ async fn pre_sampling_compact_runs_after_resume_and_switch_to_smaller_model() {
&server,
ModelsResponse {
models: vec![
model_info_with_context_window(previous_model, 273_000),
model_info_with_context_window(next_model, 125_000),
model_info_with_context_window(previous_model, /*context_window*/ 273_000),
model_info_with_context_window(next_model, /*context_window*/ 125_000),
],
},
)
@@ -1842,15 +1843,15 @@ async fn pre_sampling_compact_runs_after_resume_and_switch_to_smaller_model() {
vec![
sse(vec![
ev_assistant_message("m1", "before resume"),
ev_completed_with_tokens("r1", 120_000),
ev_completed_with_tokens("r1", /*total_tokens*/ 120_000),
]),
sse(vec![
ev_assistant_message("m2", "PRE_SAMPLING_SUMMARY"),
ev_completed_with_tokens("r2", 10),
ev_completed_with_tokens("r2", /*total_tokens*/ 10),
]),
sse(vec![
ev_assistant_message("m3", "after resume"),
ev_completed_with_tokens("r3", 100),
ev_completed_with_tokens("r3", /*total_tokens*/ 100),
]),
],
)
@@ -1971,22 +1972,22 @@ async fn auto_compact_persists_rollout_entries() {
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 70_000),
ev_completed_with_tokens("r1", /*total_tokens*/ 70_000),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", "SECOND_REPLY"),
ev_completed_with_tokens("r2", 330_000),
ev_completed_with_tokens("r2", /*total_tokens*/ 330_000),
]);
let auto_summary_payload = auto_summary(AUTO_SUMMARY_TEXT);
let sse3 = sse(vec![
ev_assistant_message("m3", &auto_summary_payload),
ev_completed_with_tokens("r3", 200),
ev_completed_with_tokens("r3", /*total_tokens*/ 200),
]);
let sse4 = sse(vec![
ev_assistant_message("m4", FINAL_REPLY),
ev_completed_with_tokens("r4", 120),
ev_completed_with_tokens("r4", /*total_tokens*/ 120),
]);
let first_matcher = |req: &wiremock::Request| {
@@ -2492,29 +2493,29 @@ async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 500),
ev_completed_with_tokens("r1", /*total_tokens*/ 500),
]);
let first_summary_payload = auto_summary(FIRST_AUTO_SUMMARY);
let sse2 = sse(vec![
ev_assistant_message("m2", &first_summary_payload),
ev_completed_with_tokens("r2", 50),
ev_completed_with_tokens("r2", /*total_tokens*/ 50),
]);
let sse3 = sse(vec![
ev_function_call(DUMMY_CALL_ID, DUMMY_FUNCTION_NAME, "{}"),
ev_completed_with_tokens("r3", 150),
ev_completed_with_tokens("r3", /*total_tokens*/ 150),
]);
let sse4 = sse(vec![
ev_assistant_message("m4", SECOND_LARGE_REPLY),
ev_completed_with_tokens("r4", 450),
ev_completed_with_tokens("r4", /*total_tokens*/ 450),
]);
let second_summary_payload = auto_summary(SECOND_AUTO_SUMMARY);
let sse5 = sse(vec![
ev_assistant_message("m5", &second_summary_payload),
ev_completed_with_tokens("r5", 60),
ev_completed_with_tokens("r5", /*total_tokens*/ 60),
]);
let sse6 = sse(vec![
ev_assistant_message("m6", FINAL_REPLY),
ev_completed_with_tokens("r6", 120),
ev_completed_with_tokens("r6", /*total_tokens*/ 120),
]);
let follow_up_user = "FOLLOW_UP_AUTO_COMPACT";
let final_user = "FINAL_AUTO_COMPACT";
@@ -2612,11 +2613,11 @@ async fn snapshot_request_shape_mid_turn_continuation_compaction() {
let auto_summary_payload = auto_summary(AUTO_SUMMARY_TEXT);
let auto_compact_turn = sse(vec![
ev_assistant_message("m2", &auto_summary_payload),
ev_completed_with_tokens("r3", 10),
ev_completed_with_tokens("r3", /*total_tokens*/ 10),
]);
let post_auto_compact_turn = sse(vec![
ev_assistant_message("m3", FINAL_REPLY),
ev_completed_with_tokens("r4", 10),
ev_completed_with_tokens("r4", /*total_tokens*/ 10),
]);
// Mount responses in order and keep mocks only for the ones we assert on.
@@ -2716,9 +2717,11 @@ async fn auto_compact_clamps_config_limit_to_context_window() {
let auto_summary_payload = auto_summary(AUTO_SUMMARY_TEXT);
let auto_compact_turn = sse(vec![
ev_assistant_message("m2", &auto_summary_payload),
ev_completed_with_tokens("r2", 10),
ev_completed_with_tokens("r2", /*total_tokens*/ 10),
]);
let post_auto_compact_turn = sse(vec![ev_completed_with_tokens("r3", 10)]);
let post_auto_compact_turn = sse(vec![ev_completed_with_tokens(
"r3", /*total_tokens*/ 10,
)]);
let first_turn_mock = mount_sse_once(&server, first_turn).await;
let auto_compact_mock = mount_sse_once(&server, auto_compact_turn).await;
@@ -2772,15 +2775,15 @@ async fn auto_compact_counts_encrypted_reasoning_before_last_user() {
let first_turn = sse(vec![
ev_reasoning_item("pre-reasoning", &["pre"], &[&pre_last_reasoning_content]),
ev_completed_with_tokens("r1", 10),
ev_completed_with_tokens("r1", /*total_tokens*/ 10),
]);
let second_turn = sse(vec![
ev_reasoning_item("post-reasoning", &["post"], &[&post_last_reasoning_content]),
ev_completed_with_tokens("r2", 80),
ev_completed_with_tokens("r2", /*total_tokens*/ 80),
]);
let third_turn = sse(vec![
ev_assistant_message("m4", FINAL_REPLY),
ev_completed_with_tokens("r4", 1),
ev_completed_with_tokens("r4", /*total_tokens*/ 1),
]);
let request_log = mount_sse_sequence(
@@ -2898,15 +2901,15 @@ async fn auto_compact_runs_when_reasoning_header_clears_between_turns() {
let first_turn = sse(vec![
ev_reasoning_item("pre-reasoning", &["pre"], &[&pre_last_reasoning_content]),
ev_completed_with_tokens("r1", 10),
ev_completed_with_tokens("r1", /*total_tokens*/ 10),
]);
let second_turn = sse(vec![
ev_reasoning_item("post-reasoning", &["post"], &[&post_last_reasoning_content]),
ev_completed_with_tokens("r2", 80),
ev_completed_with_tokens("r2", /*total_tokens*/ 80),
]);
let third_turn = sse(vec![
ev_assistant_message("m4", FINAL_REPLY),
ev_completed_with_tokens("r4", 1),
ev_completed_with_tokens("r4", /*total_tokens*/ 1),
]);
let responses = vec![
@@ -2975,19 +2978,19 @@ async fn snapshot_request_shape_pre_turn_compaction_including_incoming_user_mess
let sse1 = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 60),
ev_completed_with_tokens("r1", /*total_tokens*/ 60),
]);
let sse2 = sse(vec![
ev_assistant_message("m2", "SECOND_REPLY"),
ev_completed_with_tokens("r2", 500),
ev_completed_with_tokens("r2", /*total_tokens*/ 500),
]);
let sse3 = sse(vec![
ev_assistant_message("m3", "PRE_TURN_SUMMARY"),
ev_completed_with_tokens("r3", 100),
ev_completed_with_tokens("r3", /*total_tokens*/ 100),
]);
let sse4 = sse(vec![
ev_assistant_message("m4", FINAL_REPLY),
ev_completed_with_tokens("r4", 80),
ev_completed_with_tokens("r4", /*total_tokens*/ 80),
]);
let request_log = mount_sse_sequence(&server, vec![sse1, sse2, sse3, sse4]).await;
@@ -3100,15 +3103,15 @@ async fn snapshot_request_shape_pre_turn_compaction_strips_incoming_model_switch
vec![
sse(vec![
ev_assistant_message("m1", "BEFORE_SWITCH_REPLY"),
ev_completed_with_tokens("r1", 500),
ev_completed_with_tokens("r1", /*total_tokens*/ 500),
]),
sse(vec![
ev_assistant_message("m2", "PRETURN_SWITCH_SUMMARY"),
ev_completed_with_tokens("r2", 100),
ev_completed_with_tokens("r2", /*total_tokens*/ 100),
]),
sse(vec![
ev_assistant_message("m3", "AFTER_SWITCH_REPLY"),
ev_completed_with_tokens("r3", 100),
ev_completed_with_tokens("r3", /*total_tokens*/ 100),
]),
],
)
@@ -3222,7 +3225,7 @@ async fn snapshot_request_shape_pre_turn_compaction_context_window_exceeded() {
let first_turn = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 500),
ev_completed_with_tokens("r1", /*total_tokens*/ 500),
]);
let mut responses = vec![first_turn];
responses.extend(
@@ -3309,11 +3312,11 @@ async fn snapshot_request_shape_manual_compact_without_previous_user_messages()
let compact_turn = sse(vec![
ev_assistant_message("m1", "MANUAL_EMPTY_SUMMARY"),
ev_completed_with_tokens("r1", 90),
ev_completed_with_tokens("r1", /*total_tokens*/ 90),
]);
let follow_up_turn = sse(vec![
ev_assistant_message("m2", FINAL_REPLY),
ev_completed_with_tokens("r2", 80),
ev_completed_with_tokens("r2", /*total_tokens*/ 80),
]);
let request_log = mount_sse_sequence(&server, vec![compact_turn, follow_up_turn]).await;