Mirror of https://github.com/openai/codex.git, synced 2026-04-28 08:34:54 +00:00.
extract models manager and related ownership from core (#16508)
## Summary - split `models-manager` out of `core` and add `ModelsManagerConfig` plus `Config::to_models_manager_config()` so model metadata paths stop depending on `core::Config` - move login-owned/auth-owned code out of `core` into `codex-login`, move model provider config into `codex-model-provider-info`, move API bridge mapping into `codex-api`, move protocol-owned types/impls into `codex-protocol`, and move response debug helpers into a dedicated `response-debug-context` crate - move feedback tag emission into `codex-feedback`, relocate tests to the crates that now own the code, and keep broad temporary re-exports so this PR avoids a giant import-only rewrite ## Major moves and decisions - created `codex-models-manager` as the owner for model cache/catalog/config/model info logic, including the new `ModelsManagerConfig` struct - created `codex-model-provider-info` as the owner for provider config parsing/defaults and kept temporary `codex-login`/`codex-core` re-exports for old import paths - moved `api_bridge` error mapping + `CoreAuthProvider` into `codex-api`, while `codex-login::api_bridge` temporarily re-exports those symbols and keeps the `auth_provider_from_auth` wrapper - moved `auth_env_telemetry` and `provider_auth` ownership to `codex-login` - moved `CodexErr` ownership to `codex-protocol::error`, plus `StreamOutput`, `bytes_to_string_smart`, and network policy helpers to protocol-owned modules - created `codex-response-debug-context` for `extract_response_debug_context`, `telemetry_transport_error_message`, and related response-debug plumbing instead of leaving that behavior in `core` - moved `FeedbackRequestTags`, `emit_feedback_request_tags`, and `emit_feedback_request_tags_with_auth_env` to `codex-feedback` - deferred removal of temporary re-exports and the mechanical import rewrites to a stacked follow-up PR so this PR stays reviewable ## Test moves - moved auth refresh coverage from `core/tests/suite/auth_refresh.rs` to 
`login/tests/suite/auth_refresh.rs` - moved text encoding coverage from `core/tests/suite/text_encoding_fix.rs` to `protocol/src/exec_output_tests.rs` - moved model info override coverage from `core/tests/suite/model_info_overrides.rs` to `models-manager/src/model_info_overrides_tests.rs` --------- Co-authored-by: Codex <noreply@openai.com>
This commit is contained in:
@@ -105,7 +105,9 @@ async fn remote_models_get_model_info_uses_longest_matching_prefix() -> Result<(
|
||||
|
||||
manager.list_models(RefreshStrategy::OnlineIfUncached).await;
|
||||
|
||||
let model_info = manager.get_model_info("gpt-5.3-codex-test", &config).await;
|
||||
let model_info = manager
|
||||
.get_model_info("gpt-5.3-codex-test", &config.to_models_manager_config())
|
||||
.await;
|
||||
|
||||
assert_eq!(model_info.slug, "gpt-5.3-codex-test");
|
||||
assert_eq!(model_info.base_instructions, specific.base_instructions);
|
||||
@@ -348,7 +350,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
|
||||
assert_eq!(requests[0].url.path(), "/v1/models");
|
||||
|
||||
let model_info = models_manager
|
||||
.get_model_info(REMOTE_MODEL_SLUG, &config)
|
||||
.get_model_info(REMOTE_MODEL_SLUG, &config.to_models_manager_config())
|
||||
.await;
|
||||
assert_eq!(model_info.shell_type, ConfigShellToolType::UnifiedExec);
|
||||
|
||||
@@ -455,7 +457,9 @@ async fn remote_models_truncation_policy_without_override_preserves_remote() ->
|
||||
let models_manager = test.thread_manager.get_models_manager();
|
||||
wait_for_model_available(&models_manager, slug).await;
|
||||
|
||||
let model_info = models_manager.get_model_info(slug, &test.config).await;
|
||||
let model_info = models_manager
|
||||
.get_model_info(slug, &test.config.to_models_manager_config())
|
||||
.await;
|
||||
assert_eq!(
|
||||
model_info.truncation_policy,
|
||||
TruncationPolicyConfig::bytes(/*limit*/ 12_000)
|
||||
@@ -500,7 +504,9 @@ async fn remote_models_truncation_policy_with_tool_output_override() -> Result<(
|
||||
let models_manager = test.thread_manager.get_models_manager();
|
||||
wait_for_model_available(&models_manager, slug).await;
|
||||
|
||||
let model_info = models_manager.get_model_info(slug, &test.config).await;
|
||||
let model_info = models_manager
|
||||
.get_model_info(slug, &test.config.to_models_manager_config())
|
||||
.await;
|
||||
assert_eq!(
|
||||
model_info.truncation_policy,
|
||||
TruncationPolicyConfig::bytes(/*limit*/ 200)
|
||||
@@ -628,7 +634,9 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
|
||||
|
||||
wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
|
||||
|
||||
let base_model_info = models_manager.get_model_info("gpt-5.1", &config).await;
|
||||
let base_model_info = models_manager
|
||||
.get_model_info("gpt-5.1", &config.to_models_manager_config())
|
||||
.await;
|
||||
let body = response_mock.single_request().body_json();
|
||||
let instructions = body["instructions"].as_str().unwrap();
|
||||
assert_eq!(instructions, base_model_info.base_instructions);
|
||||
@@ -968,8 +976,8 @@ async fn wait_for_model_available(manager: &Arc<ModelsManager>, slug: &str) -> M
|
||||
}
|
||||
|
||||
fn bundled_model_slug() -> String {
|
||||
let response: ModelsResponse = serde_json::from_str(include_str!("../../models.json"))
|
||||
.expect("bundled models.json should deserialize");
|
||||
let response = codex_models_manager::bundled_models_response()
|
||||
.unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
|
||||
response
|
||||
.models
|
||||
.first()
|
||||
|
||||
Reference in New Issue
Block a user