mirror of
https://github.com/openai/codex.git
synced 2026-04-29 17:06:51 +00:00
## Summary - split `models-manager` out of `core` and add `ModelsManagerConfig` plus `Config::to_models_manager_config()` so model metadata paths stop depending on `core::Config` - move login-owned/auth-owned code out of `core` into `codex-login`, move model provider config into `codex-model-provider-info`, move API bridge mapping into `codex-api`, move protocol-owned types/impls into `codex-protocol`, and move response debug helpers into a dedicated `response-debug-context` crate - move feedback tag emission into `codex-feedback`, relocate tests to the crates that now own the code, and keep broad temporary re-exports so this PR avoids a giant import-only rewrite ## Major moves and decisions - created `codex-models-manager` as the owner for model cache/catalog/config/model info logic, including the new `ModelsManagerConfig` struct - created `codex-model-provider-info` as the owner for provider config parsing/defaults and kept temporary `codex-login`/`codex-core` re-exports for old import paths - moved `api_bridge` error mapping + `CoreAuthProvider` into `codex-api`, while `codex-login::api_bridge` temporarily re-exports those symbols and keeps the `auth_provider_from_auth` wrapper - moved `auth_env_telemetry` and `provider_auth` ownership to `codex-login` - moved `CodexErr` ownership to `codex-protocol::error`, plus `StreamOutput`, `bytes_to_string_smart`, and network policy helpers to protocol-owned modules - created `codex-response-debug-context` for `extract_response_debug_context`, `telemetry_transport_error_message`, and related response-debug plumbing instead of leaving that behavior in `core` - moved `FeedbackRequestTags`, `emit_feedback_request_tags`, and `emit_feedback_request_tags_with_auth_env` to `codex-feedback` - deferred removal of temporary re-exports and the mechanical import rewrites to a stacked follow-up PR so this PR stays reviewable ## Test moves - moved auth refresh coverage from `core/tests/suite/auth_refresh.rs` to 
`login/tests/suite/auth_refresh.rs` - moved text encoding coverage from `core/tests/suite/text_encoding_fix.rs` to `protocol/src/exec_output_tests.rs` - moved model info override coverage from `core/tests/suite/model_info_overrides.rs` to `models-manager/src/model_info_overrides_tests.rs` --------- Co-authored-by: Codex <noreply@openai.com>
184 lines
6.0 KiB
Rust
184 lines
6.0 KiB
Rust
use chrono::DateTime;
|
|
use chrono::Utc;
|
|
use codex_protocol::openai_models::ModelInfo;
|
|
use serde::Deserialize;
|
|
use serde::Serialize;
|
|
use std::io;
|
|
use std::io::ErrorKind;
|
|
use std::path::PathBuf;
|
|
use std::time::Duration;
|
|
use tokio::fs;
|
|
use tracing::error;
|
|
use tracing::info;
|
|
|
|
/// Manages loading and saving of models cache to disk.
///
/// Owns the on-disk location of the cache file and the freshness window
/// (`cache_ttl`) applied when deciding whether a loaded entry may be served.
#[derive(Debug)]
pub(crate) struct ModelsCacheManager {
    // Path of the JSON cache file on disk; parent directories are created on save.
    cache_path: PathBuf,
    // Maximum age (relative to the cache's `fetched_at`) before an entry is stale.
    cache_ttl: Duration,
}
|
|
|
|
impl ModelsCacheManager {
|
|
/// Create a new cache manager with the given path and TTL.
|
|
pub(crate) fn new(cache_path: PathBuf, cache_ttl: Duration) -> Self {
|
|
Self {
|
|
cache_path,
|
|
cache_ttl,
|
|
}
|
|
}
|
|
|
|
/// Attempt to load a fresh cache entry. Returns `None` if the cache doesn't exist or is stale.
|
|
pub(crate) async fn load_fresh(&self, expected_version: &str) -> Option<ModelsCache> {
|
|
info!(
|
|
cache_path = %self.cache_path.display(),
|
|
expected_version,
|
|
"models cache: attempting load_fresh"
|
|
);
|
|
let cache = match self.load().await {
|
|
Ok(cache) => cache?,
|
|
Err(err) => {
|
|
error!("failed to load models cache: {err}");
|
|
return None;
|
|
}
|
|
};
|
|
info!(
|
|
cache_path = %self.cache_path.display(),
|
|
cached_version = ?cache.client_version,
|
|
fetched_at = %cache.fetched_at,
|
|
"models cache: loaded cache file"
|
|
);
|
|
if cache.client_version.as_deref() != Some(expected_version) {
|
|
info!(
|
|
cache_path = %self.cache_path.display(),
|
|
expected_version,
|
|
cached_version = ?cache.client_version,
|
|
"models cache: cache version mismatch"
|
|
);
|
|
return None;
|
|
}
|
|
if !cache.is_fresh(self.cache_ttl) {
|
|
info!(
|
|
cache_path = %self.cache_path.display(),
|
|
cache_ttl_secs = self.cache_ttl.as_secs(),
|
|
fetched_at = %cache.fetched_at,
|
|
"models cache: cache is stale"
|
|
);
|
|
return None;
|
|
}
|
|
info!(
|
|
cache_path = %self.cache_path.display(),
|
|
cache_ttl_secs = self.cache_ttl.as_secs(),
|
|
"models cache: cache hit"
|
|
);
|
|
Some(cache)
|
|
}
|
|
|
|
/// Persist the cache to disk, creating parent directories as needed.
|
|
pub(crate) async fn persist_cache(
|
|
&self,
|
|
models: &[ModelInfo],
|
|
etag: Option<String>,
|
|
client_version: String,
|
|
) {
|
|
let cache = ModelsCache {
|
|
fetched_at: Utc::now(),
|
|
etag,
|
|
client_version: Some(client_version),
|
|
models: models.to_vec(),
|
|
};
|
|
if let Err(err) = self.save_internal(&cache).await {
|
|
error!("failed to write models cache: {err}");
|
|
}
|
|
}
|
|
|
|
/// Renew the cache TTL by updating the fetched_at timestamp to now.
|
|
pub(crate) async fn renew_cache_ttl(&self) -> io::Result<()> {
|
|
let mut cache = match self.load().await? {
|
|
Some(cache) => cache,
|
|
None => return Err(io::Error::new(ErrorKind::NotFound, "cache not found")),
|
|
};
|
|
cache.fetched_at = Utc::now();
|
|
self.save_internal(&cache).await
|
|
}
|
|
|
|
async fn load(&self) -> io::Result<Option<ModelsCache>> {
|
|
match fs::read(&self.cache_path).await {
|
|
Ok(contents) => {
|
|
let cache = serde_json::from_slice(&contents)
|
|
.map_err(|err| io::Error::new(ErrorKind::InvalidData, err.to_string()))?;
|
|
Ok(Some(cache))
|
|
}
|
|
Err(err) if err.kind() == ErrorKind::NotFound => Ok(None),
|
|
Err(err) => Err(err),
|
|
}
|
|
}
|
|
|
|
async fn save_internal(&self, cache: &ModelsCache) -> io::Result<()> {
|
|
if let Some(parent) = self.cache_path.parent() {
|
|
fs::create_dir_all(parent).await?;
|
|
}
|
|
let json = serde_json::to_vec_pretty(cache)
|
|
.map_err(|err| io::Error::new(ErrorKind::InvalidData, err.to_string()))?;
|
|
fs::write(&self.cache_path, json).await
|
|
}
|
|
|
|
#[cfg(test)]
|
|
/// Set the cache TTL.
|
|
pub(crate) fn set_ttl(&mut self, ttl: Duration) {
|
|
self.cache_ttl = ttl;
|
|
}
|
|
|
|
#[cfg(test)]
|
|
/// Manipulate cache file for testing. Allows setting a custom fetched_at timestamp.
|
|
pub(crate) async fn manipulate_cache_for_test<F>(&self, f: F) -> io::Result<()>
|
|
where
|
|
F: FnOnce(&mut DateTime<Utc>),
|
|
{
|
|
let mut cache = match self.load().await? {
|
|
Some(cache) => cache,
|
|
None => return Err(io::Error::new(ErrorKind::NotFound, "cache not found")),
|
|
};
|
|
f(&mut cache.fetched_at);
|
|
self.save_internal(&cache).await
|
|
}
|
|
|
|
#[cfg(test)]
|
|
/// Mutate the full cache contents for testing.
|
|
pub(crate) async fn mutate_cache_for_test<F>(&self, f: F) -> io::Result<()>
|
|
where
|
|
F: FnOnce(&mut ModelsCache),
|
|
{
|
|
let mut cache = match self.load().await? {
|
|
Some(cache) => cache,
|
|
None => return Err(io::Error::new(ErrorKind::NotFound, "cache not found")),
|
|
};
|
|
f(&mut cache);
|
|
self.save_internal(&cache).await
|
|
}
|
|
}
|
|
|
|
/// Serialized snapshot of models and metadata cached on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ModelsCache {
    // Timestamp of when the snapshot was written; compared against the TTL
    // in `is_fresh` and refreshed by `renew_cache_ttl`.
    pub(crate) fetched_at: DateTime<Utc>,
    // Validator captured when the models were fetched — presumably an HTTP
    // ETag for conditional re-fetches; not consulted by this module itself.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub(crate) etag: Option<String>,
    // Version of the client that wrote this snapshot; a mismatch with the
    // caller's expected version invalidates the entry (see `load_fresh`).
    // `None` in caches written before this field existed.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub(crate) client_version: Option<String>,
    // The cached model metadata itself.
    pub(crate) models: Vec<ModelInfo>,
}
|
|
|
|
impl ModelsCache {
|
|
/// Returns `true` when the cache entry has not exceeded the configured TTL.
|
|
fn is_fresh(&self, ttl: Duration) -> bool {
|
|
if ttl.is_zero() {
|
|
return false;
|
|
}
|
|
let Ok(ttl_duration) = chrono::Duration::from_std(ttl) else {
|
|
return false;
|
|
};
|
|
let age = Utc::now().signed_duration_since(self.fetched_at);
|
|
age <= ttl_duration
|
|
}
|
|
}
|