mirror of
https://github.com/openai/codex.git
synced 2026-04-24 14:45:27 +00:00
## Why Bazel clippy now catches lints that `cargo clippy` can still miss when a crate under `codex-rs` forgets to opt into workspace lints. The concrete example here was `codex-rs/app-server/tests/common/Cargo.toml`: Bazel flagged a clippy violation in `models_cache.rs`, but Cargo did not because that crate inherited workspace package metadata without declaring `[lints] workspace = true`. We already mirror the workspace clippy deny list into Bazel after [#15955](https://github.com/openai/codex/pull/15955), so we also need a repo-side check that keeps every `codex-rs` manifest opted into the same workspace settings. ## What changed - add `.github/scripts/verify_cargo_workspace_manifests.py`, which parses every `codex-rs/**/Cargo.toml` with `tomllib` and verifies: - `version.workspace = true` - `edition.workspace = true` - `license.workspace = true` - `[lints] workspace = true` - top-level crate names follow the `codex-*` / `codex-utils-*` conventions, with explicit exceptions for `windows-sandbox-rs` and `utils/path-utils` - run that script in `.github/workflows/ci.yml` - update the current outlier manifests so the check is enforceable immediately - fix the newly exposed clippy violations in the affected crates (`app-server/tests/common`, `file-search`, `feedback`, `shell-escalation`, and `debug-client`) --- [//]: # (BEGIN SAPLING FOOTER) Stack created with [Sapling](https://sapling-scm.com). Best reviewed with [ReviewStack](https://reviewstack.dev/openai/codex/pull/16353). * #16351 * __->__ #16353
96 lines
3.9 KiB
Rust
use chrono::DateTime;
use chrono::Utc;
use codex_core::test_support::all_model_presets;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::openai_models::default_input_modalities;
use serde_json::json;
use std::path::Path;

|
/// Convert a ModelPreset to ModelInfo for cache storage.
|
|
fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
|
|
ModelInfo {
|
|
slug: preset.id.clone(),
|
|
display_name: preset.display_name.clone(),
|
|
description: Some(preset.description.clone()),
|
|
default_reasoning_level: Some(preset.default_reasoning_effort),
|
|
supported_reasoning_levels: preset.supported_reasoning_efforts.clone(),
|
|
shell_type: ConfigShellToolType::ShellCommand,
|
|
visibility: if preset.show_in_picker {
|
|
ModelVisibility::List
|
|
} else {
|
|
ModelVisibility::Hide
|
|
},
|
|
supported_in_api: preset.supported_in_api,
|
|
priority,
|
|
upgrade: preset.upgrade.as_ref().map(Into::into),
|
|
base_instructions: "base instructions".to_string(),
|
|
model_messages: None,
|
|
supports_reasoning_summaries: false,
|
|
default_reasoning_summary: ReasoningSummary::Auto,
|
|
support_verbosity: false,
|
|
default_verbosity: None,
|
|
availability_nux: None,
|
|
apply_patch_tool_type: None,
|
|
web_search_tool_type: Default::default(),
|
|
truncation_policy: TruncationPolicyConfig::bytes(/*limit*/ 10_000),
|
|
supports_parallel_tool_calls: false,
|
|
supports_image_detail_original: false,
|
|
context_window: Some(272_000),
|
|
auto_compact_token_limit: None,
|
|
effective_context_window_percent: 95,
|
|
experimental_supported_tools: Vec::new(),
|
|
input_modalities: default_input_modalities(),
|
|
used_fallback_model_metadata: false,
|
|
supports_search_tool: false,
|
|
}
|
|
}
|
|
|
|
/// Write a models_cache.json file to the codex home directory.
|
|
/// This prevents ModelsManager from making network requests to refresh models.
|
|
/// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.
|
|
/// Uses bundled-catalog-derived presets, converted to ModelInfo format.
|
|
pub fn write_models_cache(codex_home: &Path) -> std::io::Result<()> {
|
|
// Get a stable bundled-catalog-derived preset list and filter for picker-visible entries.
|
|
let presets: Vec<&ModelPreset> = all_model_presets()
|
|
.iter()
|
|
.filter(|preset| preset.show_in_picker)
|
|
.collect();
|
|
// Convert presets to ModelInfo, assigning priorities (lower = earlier in list).
|
|
// Priority is used for sorting, so the first model gets the lowest priority.
|
|
let models: Vec<ModelInfo> = presets
|
|
.iter()
|
|
.enumerate()
|
|
.map(|(idx, preset)| {
|
|
// Lower priority = earlier in list.
|
|
let priority = idx as i32;
|
|
preset_to_info(preset, priority)
|
|
})
|
|
.collect();
|
|
|
|
write_models_cache_with_models(codex_home, models)
|
|
}
|
|
|
|
/// Write a models_cache.json file with specific models.
|
|
/// Useful when tests need specific models to be available.
|
|
pub fn write_models_cache_with_models(
|
|
codex_home: &Path,
|
|
models: Vec<ModelInfo>,
|
|
) -> std::io::Result<()> {
|
|
let cache_path = codex_home.join("models_cache.json");
|
|
// DateTime<Utc> serializes to RFC3339 format by default with serde
|
|
let fetched_at: DateTime<Utc> = Utc::now();
|
|
let client_version = codex_core::models_manager::client_version_to_whole();
|
|
let cache = json!({
|
|
"fetched_at": fetched_at,
|
|
"etag": null,
|
|
"client_version": client_version,
|
|
"models": models
|
|
});
|
|
std::fs::write(cache_path, serde_json::to_string_pretty(&cache)?)
|
|
}
|