mirror of
https://github.com/openai/codex.git
synced 2026-04-26 23:55:25 +00:00
## Why

`argument-comment-lint` was green in CI even though the repo still had many uncommented literal arguments. The main gap was target coverage: the repo wrapper did not force Cargo to inspect test-only call sites, so examples like the `latest_session_lookup_params(true, ...)` tests in `codex-rs/tui_app_server/src/lib.rs` never entered the blocking CI path. This change cleans up the existing backlog, makes the default repo lint path cover all Cargo targets, and starts rolling that stricter CI enforcement out on the platform where it is currently validated.

## What changed

- mechanically fixed existing `argument-comment-lint` violations across the `codex-rs` workspace, including tests, examples, and benches
- updated `tools/argument-comment-lint/run-prebuilt-linter.sh` and `tools/argument-comment-lint/run.sh` so non-`--fix` runs default to `--all-targets` unless the caller explicitly narrows the target set
- fixed both wrappers so forwarded cargo arguments after `--` are preserved with a single separator
- documented the new default behavior in `tools/argument-comment-lint/README.md`
- updated `rust-ci` so the macOS lint lane keeps the plain wrapper invocation and therefore enforces `--all-targets`, while Linux and Windows temporarily pass `-- --lib --bins`

That temporary CI split keeps the stricter all-targets check where it is already cleaned up, while leaving room to finish the remaining Linux- and Windows-specific target-gated cleanup before enabling `--all-targets` on those runners. The Linux and Windows failures on the intermediate revision were caused by the wrapper forwarding bug, not by additional lint findings in those lanes.
## Validation

- `bash -n tools/argument-comment-lint/run.sh`
- `bash -n tools/argument-comment-lint/run-prebuilt-linter.sh`
- shell-level wrapper forwarding check for `-- --lib --bins`
- shell-level wrapper forwarding check for `-- --tests`
- `just argument-comment-lint`
- `cargo test` in `tools/argument-comment-lint`
- `cargo test -p codex-terminal-detection`

## Follow-up

- Clean up remaining Linux-only target-gated callsites, then switch the Linux lint lane back to the plain wrapper invocation.
- Clean up remaining Windows-only target-gated callsites, then switch the Windows lint lane back to the plain wrapper invocation.
216 lines
6.8 KiB
Rust
216 lines
6.8 KiB
Rust
use std::time::Duration;
|
|
|
|
use anyhow::Result;
|
|
use app_test_support::McpProcess;
|
|
use app_test_support::to_response;
|
|
use app_test_support::write_models_cache;
|
|
use codex_app_server_protocol::JSONRPCError;
|
|
use codex_app_server_protocol::JSONRPCResponse;
|
|
use codex_app_server_protocol::Model;
|
|
use codex_app_server_protocol::ModelListParams;
|
|
use codex_app_server_protocol::ModelListResponse;
|
|
use codex_app_server_protocol::ModelUpgradeInfo;
|
|
use codex_app_server_protocol::ReasoningEffortOption;
|
|
use codex_app_server_protocol::RequestId;
|
|
use codex_protocol::openai_models::ModelPreset;
|
|
use pretty_assertions::assert_eq;
|
|
use tempfile::TempDir;
|
|
use tokio::time::timeout;
|
|
|
|
/// Upper bound for awaiting any single app-server interaction in these tests.
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);

/// JSON-RPC 2.0 "Invalid Request" error code (-32600), expected when the
/// server rejects a malformed `model/list` cursor.
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
|
|
|
|
fn model_from_preset(preset: &ModelPreset) -> Model {
|
|
Model {
|
|
id: preset.id.clone(),
|
|
model: preset.model.clone(),
|
|
upgrade: preset.upgrade.as_ref().map(|upgrade| upgrade.id.clone()),
|
|
upgrade_info: preset.upgrade.as_ref().map(|upgrade| ModelUpgradeInfo {
|
|
model: upgrade.id.clone(),
|
|
upgrade_copy: upgrade.upgrade_copy.clone(),
|
|
model_link: upgrade.model_link.clone(),
|
|
migration_markdown: upgrade.migration_markdown.clone(),
|
|
}),
|
|
availability_nux: preset.availability_nux.clone().map(Into::into),
|
|
display_name: preset.display_name.clone(),
|
|
description: preset.description.clone(),
|
|
hidden: !preset.show_in_picker,
|
|
supported_reasoning_efforts: preset
|
|
.supported_reasoning_efforts
|
|
.iter()
|
|
.map(|preset| ReasoningEffortOption {
|
|
reasoning_effort: preset.effort,
|
|
description: preset.description.clone(),
|
|
})
|
|
.collect(),
|
|
default_reasoning_effort: preset.default_reasoning_effort,
|
|
input_modalities: preset.input_modalities.clone(),
|
|
// `write_models_cache()` round-trips through a simplified ModelInfo fixture that does not
|
|
// preserve personality placeholders in base instructions, so app-server list results from
|
|
// cache report `supports_personality = false`.
|
|
// todo(sayan): fix, maybe make roundtrip use ModelInfo only
|
|
supports_personality: false,
|
|
is_default: preset.is_default,
|
|
}
|
|
}
|
|
|
|
fn expected_visible_models() -> Vec<Model> {
|
|
// Filter by supported_in_api to support testing with both ChatGPT and non-ChatGPT auth modes.
|
|
let mut presets = ModelPreset::filter_by_auth(
|
|
codex_core::test_support::all_model_presets().clone(),
|
|
/*chatgpt_mode*/ false,
|
|
);
|
|
|
|
// Mirror `ModelsManager::build_available_models()` default selection after auth filtering.
|
|
ModelPreset::mark_default_by_picker_visibility(&mut presets);
|
|
|
|
presets
|
|
.iter()
|
|
.filter(|preset| preset.show_in_picker)
|
|
.map(model_from_preset)
|
|
.collect()
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
|
|
let codex_home = TempDir::new()?;
|
|
write_models_cache(codex_home.path())?;
|
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
|
|
|
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
|
|
|
let request_id = mcp
|
|
.send_list_models_request(ModelListParams {
|
|
limit: Some(100),
|
|
cursor: None,
|
|
include_hidden: None,
|
|
})
|
|
.await?;
|
|
|
|
let response: JSONRPCResponse = timeout(
|
|
DEFAULT_TIMEOUT,
|
|
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
|
|
)
|
|
.await??;
|
|
|
|
let ModelListResponse {
|
|
data: items,
|
|
next_cursor,
|
|
} = to_response::<ModelListResponse>(response)?;
|
|
|
|
let expected_models = expected_visible_models();
|
|
|
|
assert_eq!(items, expected_models);
|
|
assert!(next_cursor.is_none());
|
|
Ok(())
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn list_models_includes_hidden_models() -> Result<()> {
|
|
let codex_home = TempDir::new()?;
|
|
write_models_cache(codex_home.path())?;
|
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
|
|
|
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
|
|
|
let request_id = mcp
|
|
.send_list_models_request(ModelListParams {
|
|
limit: Some(100),
|
|
cursor: None,
|
|
include_hidden: Some(true),
|
|
})
|
|
.await?;
|
|
|
|
let response: JSONRPCResponse = timeout(
|
|
DEFAULT_TIMEOUT,
|
|
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
|
|
)
|
|
.await??;
|
|
|
|
let ModelListResponse {
|
|
data: items,
|
|
next_cursor,
|
|
} = to_response::<ModelListResponse>(response)?;
|
|
|
|
assert!(items.iter().any(|item| item.hidden));
|
|
assert!(next_cursor.is_none());
|
|
Ok(())
|
|
}
|
|
|
|
#[tokio::test]
async fn list_models_pagination_works() -> Result<()> {
    // Fresh CODEX_HOME seeded with a models cache so `model/list` has data.
    let codex_home = TempDir::new()?;
    write_models_cache(codex_home.path())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;

    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;

    let expected_models = expected_visible_models();
    let mut cursor = None;
    let mut items = Vec::new();

    // Page through with `limit: 1`. Bounding the loop by the expected model
    // count guarantees the test cannot spin forever if the server keeps
    // handing back cursors.
    for _ in 0..expected_models.len() {
        let request_id = mcp
            .send_list_models_request(ModelListParams {
                limit: Some(1),
                cursor: cursor.clone(),
                include_hidden: None,
            })
            .await?;

        let response: JSONRPCResponse = timeout(
            DEFAULT_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
        )
        .await??;

        let ModelListResponse {
            data: page_items,
            next_cursor,
        } = to_response::<ModelListResponse>(response)?;

        // Every page must hold exactly one model at `limit: 1`.
        assert_eq!(page_items.len(), 1);
        items.extend(page_items);

        if let Some(next_cursor) = next_cursor {
            cursor = Some(next_cursor);
        } else {
            // Cursor exhausted: the concatenated pages must equal the
            // unpaginated visible-model listing, in order.
            assert_eq!(items, expected_models);
            return Ok(());
        }
    }

    // Reached only if the server returned a cursor on every bounded iteration.
    panic!(
        "model pagination did not terminate after {} pages",
        expected_models.len()
    );
}
|
|
|
|
#[tokio::test]
|
|
async fn list_models_rejects_invalid_cursor() -> Result<()> {
|
|
let codex_home = TempDir::new()?;
|
|
write_models_cache(codex_home.path())?;
|
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
|
|
|
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
|
|
|
let request_id = mcp
|
|
.send_list_models_request(ModelListParams {
|
|
limit: None,
|
|
cursor: Some("invalid".to_string()),
|
|
include_hidden: None,
|
|
})
|
|
.await?;
|
|
|
|
let error: JSONRPCError = timeout(
|
|
DEFAULT_TIMEOUT,
|
|
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
|
|
)
|
|
.await??;
|
|
|
|
assert_eq!(error.id, RequestId::Integer(request_id));
|
|
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
|
|
assert_eq!(error.error.message, "invalid cursor: invalid");
|
|
Ok(())
|
|
}
|