chore: clean up argument-comment lint and roll out all-target CI on macOS (#16054)

## Why

`argument-comment-lint` was green in CI even though the repo still had
many uncommented literal arguments. The main gap was target coverage:
the repo wrapper did not force Cargo to inspect test-only call sites, so
examples like the `latest_session_lookup_params(true, ...)` tests in
`codex-rs/tui_app_server/src/lib.rs` never entered the blocking CI path.

This change cleans up the existing backlog, makes the default repo lint
path cover all Cargo targets, and starts rolling that stricter CI
enforcement out on the platform where it is currently validated.

## What changed

- mechanically fixed existing `argument-comment-lint` violations across
the `codex-rs` workspace, including tests, examples, and benches
- updated `tools/argument-comment-lint/run-prebuilt-linter.sh` and
`tools/argument-comment-lint/run.sh` so non-`--fix` runs default to
`--all-targets` unless the caller explicitly narrows the target set
- fixed both wrappers so forwarded cargo arguments after `--` are
preserved with a single separator
- documented the new default behavior in
`tools/argument-comment-lint/README.md`
- updated `rust-ci` so the macOS lint lane keeps the plain wrapper
invocation and therefore enforces `--all-targets`, while Linux and
Windows temporarily pass `-- --lib --bins`

That temporary CI split keeps the stricter all-targets check where it is
already cleaned up, while leaving room to finish the remaining Linux-
and Windows-specific target-gated cleanup before enabling
`--all-targets` on those runners. The Linux and Windows failures on the
intermediate revision were caused by the wrapper forwarding bug, not by
additional lint findings in those lanes.

## Validation

- `bash -n tools/argument-comment-lint/run.sh`
- `bash -n tools/argument-comment-lint/run-prebuilt-linter.sh`
- shell-level wrapper forwarding check for `-- --lib --bins`
- shell-level wrapper forwarding check for `-- --tests`
- `just argument-comment-lint`
- `cargo test` in `tools/argument-comment-lint`
- `cargo test -p codex-terminal-detection`

## Follow-up

- Clean up remaining Linux-only target-gated call sites, then switch the
Linux lint lane back to the plain wrapper invocation.
- Clean up remaining Windows-only target-gated call sites, then switch
the Windows lint lane back to the plain wrapper invocation.
This commit is contained in:
Michael Bolin
2026-03-27 19:00:44 -07:00
committed by GitHub
parent ed977b42ac
commit 61dfe0b86c
307 changed files with 7724 additions and 4710 deletions

View File

@@ -62,14 +62,14 @@ async fn remote_models_get_model_info_uses_longest_matching_prefix() -> Result<(
let generic = test_remote_model_with_policy(
"gpt-5.3",
ModelVisibility::List,
1_000,
TruncationPolicyConfig::bytes(10_000),
/*priority*/ 1_000,
TruncationPolicyConfig::bytes(/*limit*/ 10_000),
);
let specific = test_remote_model_with_policy(
"gpt-5.3-codex",
ModelVisibility::List,
1_000,
TruncationPolicyConfig::bytes(10_000),
/*priority*/ 1_000,
TruncationPolicyConfig::bytes(/*limit*/ 10_000),
);
let specific = ModelInfo {
display_name: "GPT 5.3 Codex".to_string(),
@@ -95,7 +95,7 @@ async fn remote_models_get_model_info_uses_longest_matching_prefix() -> Result<(
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -124,8 +124,8 @@ async fn remote_models_long_model_slug_is_sent_with_high_reasoning() -> Result<(
let mut remote_model = test_remote_model_with_policy(
prefix_model,
ModelVisibility::List,
1_000,
TruncationPolicyConfig::bytes(10_000),
/*priority*/ 1_000,
TruncationPolicyConfig::bytes(/*limit*/ 10_000),
);
remote_model.default_reasoning_level = Some(ReasoningEffort::High);
remote_model.supported_reasoning_levels = vec![
@@ -304,7 +304,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
availability_nux: None,
apply_patch_tool_type: None,
web_search_tool_type: Default::default(),
truncation_policy: TruncationPolicyConfig::bytes(10_000),
truncation_policy: TruncationPolicyConfig::bytes(/*limit*/ 10_000),
supports_parallel_tool_calls: false,
supports_image_detail_original: false,
context_window: Some(272_000),
@@ -434,8 +434,8 @@ async fn remote_models_truncation_policy_without_override_preserves_remote() ->
let remote_model = test_remote_model_with_policy(
slug,
ModelVisibility::List,
1,
TruncationPolicyConfig::bytes(12_000),
/*priority*/ 1,
TruncationPolicyConfig::bytes(/*limit*/ 12_000),
);
mount_models_once(
&server,
@@ -458,7 +458,7 @@ async fn remote_models_truncation_policy_without_override_preserves_remote() ->
let model_info = models_manager.get_model_info(slug, &test.config).await;
assert_eq!(
model_info.truncation_policy,
TruncationPolicyConfig::bytes(12_000)
TruncationPolicyConfig::bytes(/*limit*/ 12_000)
);
Ok(())
@@ -478,8 +478,8 @@ async fn remote_models_truncation_policy_with_tool_output_override() -> Result<(
let remote_model = test_remote_model_with_policy(
slug,
ModelVisibility::List,
1,
TruncationPolicyConfig::bytes(10_000),
/*priority*/ 1,
TruncationPolicyConfig::bytes(/*limit*/ 10_000),
);
mount_models_once(
&server,
@@ -503,7 +503,7 @@ async fn remote_models_truncation_policy_with_tool_output_override() -> Result<(
let model_info = models_manager.get_model_info(slug, &test.config).await;
assert_eq!(
model_info.truncation_policy,
TruncationPolicyConfig::bytes(200)
TruncationPolicyConfig::bytes(/*limit*/ 200)
);
Ok(())
@@ -548,7 +548,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
availability_nux: None,
apply_patch_tool_type: None,
web_search_tool_type: Default::default(),
truncation_policy: TruncationPolicyConfig::bytes(10_000),
truncation_policy: TruncationPolicyConfig::bytes(/*limit*/ 10_000),
supports_parallel_tool_calls: false,
supports_image_detail_original: false,
context_window: Some(272_000),
@@ -642,7 +642,8 @@ async fn remote_models_do_not_append_removed_builtin_presets() -> Result<()> {
skip_if_sandbox!(Ok(()));
let server = MockServer::start().await;
let remote_model = test_remote_model("remote-alpha", ModelVisibility::List, 0);
let remote_model =
test_remote_model("remote-alpha", ModelVisibility::List, /*priority*/ 0);
let models_mock = mount_models_once(
&server,
ModelsResponse {
@@ -656,7 +657,7 @@ async fn remote_models_do_not_append_removed_builtin_presets() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -699,7 +700,11 @@ async fn remote_models_merge_adds_new_high_priority_first() -> Result<()> {
skip_if_sandbox!(Ok(()));
let server = MockServer::start().await;
let remote_model = test_remote_model("remote-top", ModelVisibility::List, -10_000);
let remote_model = test_remote_model(
"remote-top",
ModelVisibility::List,
/*priority*/ -10_000,
);
let models_mock = mount_models_once(
&server,
ModelsResponse {
@@ -713,7 +718,7 @@ async fn remote_models_merge_adds_new_high_priority_first() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -744,7 +749,7 @@ async fn remote_models_merge_replaces_overlapping_model() -> Result<()> {
let server = MockServer::start().await;
let slug = bundled_model_slug();
let mut remote_model = test_remote_model(&slug, ModelVisibility::List, 0);
let mut remote_model = test_remote_model(&slug, ModelVisibility::List, /*priority*/ 0);
remote_model.display_name = "Overridden".to_string();
remote_model.description = Some("Overridden description".to_string());
let models_mock = mount_models_once(
@@ -760,7 +765,7 @@ async fn remote_models_merge_replaces_overlapping_model() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -804,7 +809,7 @@ async fn remote_models_merge_preserves_bundled_models_on_empty_response() -> Res
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -830,7 +835,8 @@ async fn remote_models_request_times_out_after_5s() -> Result<()> {
skip_if_sandbox!(Ok(()));
let server = MockServer::start().await;
let remote_model = test_remote_model("remote-timeout", ModelVisibility::List, 0);
let remote_model =
test_remote_model("remote-timeout", ModelVisibility::List, /*priority*/ 0);
let models_mock = mount_models_once_with_delay(
&server,
ModelsResponse {
@@ -845,7 +851,7 @@ async fn remote_models_request_times_out_after_5s() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -897,7 +903,11 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
skip_if_sandbox!(Ok(()));
let server = MockServer::start().await;
let remote_model = test_remote_model("codex-auto-balanced", ModelVisibility::Hide, 0);
let remote_model = test_remote_model(
"codex-auto-balanced",
ModelVisibility::Hide,
/*priority*/ 0,
);
let models_mock = mount_models_once(
&server,
ModelsResponse {
@@ -911,7 +921,7 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -982,7 +992,7 @@ fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) ->
slug,
visibility,
priority,
TruncationPolicyConfig::bytes(10_000),
TruncationPolicyConfig::bytes(/*limit*/ 10_000),
)
}