remove temporary ownership re-exports (#16626)

Stacked on #16508.

This removes the temporary `codex-core` / `codex-login` re-export shims
from the ownership split and rewrites callsites to import directly from
`codex-model-provider-info`, `codex-models-manager`, `codex-api`,
`codex-protocol`, `codex-feedback`, and `codex-response-debug-context`.

No behavior change intended; this is the mechanical import-cleanup layer
split out from the ownership move.

---------

Co-authored-by: Codex <noreply@openai.com>
This commit is contained in:
Ahmed Ibrahim
2026-04-03 00:33:34 -07:00
committed by GitHub
parent b15c918836
commit af8a9d2d2b
119 changed files with 323 additions and 324 deletions

View File

@@ -1,18 +1,18 @@
use crate::error::CodexErr;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::NewThread;
use codex_core::Prompt;
use codex_core::ResponseEvent;
use codex_core::ThreadManager;
use codex_core::WireApi;
use codex_core::built_in_model_providers;
use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
use codex_features::Feature;
use codex_login::AuthCredentialsStoreMode;
use codex_login::AuthManager;
use codex_login::CodexAuth;
use codex_login::default_client::originator;
use codex_model_provider_info::ModelProviderInfo;
use codex_model_provider_info::WireApi;
use codex_model_provider_info::built_in_model_providers;
use codex_models_manager::bundled_models_response;
use codex_models_manager::collaboration_mode_presets::CollaborationModesConfig;
use codex_otel::SessionTelemetry;
use codex_otel::TelemetryAuthMode;
use codex_protocol::ThreadId;
@@ -22,6 +22,7 @@ use codex_protocol::config_types::ModelProviderAuthInfo;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::Settings;
use codex_protocol::config_types::Verbosity;
use codex_protocol::error::CodexErr;
use codex_protocol::models::ContentItem;
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::models::FunctionCallOutputPayload;
@@ -1635,7 +1636,7 @@ async fn user_turn_explicit_reasoning_summary_overrides_model_catalog_default()
)
.await;
let mut model_catalog = codex_models_manager::bundled_models_response()
let mut model_catalog = bundled_models_response()
.unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
let model = model_catalog
.models
@@ -1748,7 +1749,7 @@ async fn reasoning_summary_none_overrides_model_catalog_default() -> anyhow::Res
)
.await;
let mut model_catalog = codex_models_manager::bundled_models_response()
let mut model_catalog = bundled_models_response()
.unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
let model = model_catalog
.models

View File

@@ -3,13 +3,13 @@ use codex_api::WS_REQUEST_HEADER_TRACEPARENT_CLIENT_METADATA_KEY;
use codex_api::WS_REQUEST_HEADER_TRACESTATE_CLIENT_METADATA_KEY;
use codex_core::ModelClient;
use codex_core::ModelClientSession;
use codex_core::ModelProviderInfo;
use codex_core::Prompt;
use codex_core::ResponseEvent;
use codex_core::WireApi;
use codex_core::X_RESPONSESAPI_INCLUDE_TIMING_METRICS_HEADER;
use codex_features::Feature;
use codex_login::CodexAuth;
use codex_model_provider_info::ModelProviderInfo;
use codex_model_provider_info::WireApi;
use codex_otel::SessionTelemetry;
use codex_otel::TelemetryAuthMode;
use codex_otel::current_span_w3c_trace_context;

View File

@@ -1,11 +1,12 @@
#![allow(clippy::expect_used)]
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::compact::SUMMARIZATION_PROMPT;
use codex_core::compact::SUMMARY_PREFIX;
use codex_core::config::Config;
use codex_features::Feature;
use codex_login::CodexAuth;
use codex_model_provider_info::ModelProviderInfo;
use codex_model_provider_info::built_in_model_providers;
use codex_models_manager::bundled_models_response;
use codex_protocol::items::TurnItem;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelsResponse;
@@ -103,7 +104,7 @@ fn non_openai_model_provider(server: &MockServer) -> ModelProviderInfo {
}
fn model_info_with_context_window(slug: &str, context_window: i64) -> ModelInfo {
let models_response = codex_models_manager::bundled_models_response()
let models_response = bundled_models_response()
.unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
let mut model_info = models_response
.models

View File

@@ -3,14 +3,14 @@
use std::collections::HashMap;
use std::string::ToString;
use crate::error::Result;
use codex_core::exec::ExecCapturePolicy;
use codex_core::exec::ExecParams;
use codex_core::exec::ExecToolCallOutput;
use codex_core::exec::process_exec_tool_call;
use codex_core::sandboxing::SandboxPermissions;
use codex_core::spawn::CODEX_SANDBOX_ENV_VAR;
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::error::Result;
use codex_protocol::exec_output::ExecToolCallOutput;
use codex_protocol::permissions::FileSystemSandboxPolicy;
use codex_protocol::permissions::NetworkSandboxPolicy;
use codex_protocol::protocol::SandboxPolicy;

View File

@@ -1,8 +1,8 @@
use anyhow::Result;
use codex_config::types::Personality;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_features::Feature;
use codex_login::CodexAuth;
use codex_models_manager::manager::RefreshStrategy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::openai_models::ConfigShellToolType;

View File

@@ -5,8 +5,9 @@ use anyhow::Result;
use chrono::DateTime;
use chrono::TimeZone;
use chrono::Utc;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_login::CodexAuth;
use codex_models_manager::client_version_to_whole;
use codex_models_manager::manager::RefreshStrategy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
@@ -153,7 +154,7 @@ async fn uses_cache_when_version_matches() -> Result<()> {
let cache = ModelsCache {
fetched_at: Utc::now(),
etag: None,
client_version: Some(codex_core::models_manager::client_version_to_whole()),
client_version: Some(client_version_to_whole()),
models: vec![cached_model],
};
let cache_path = home.join(CACHE_FILE);
@@ -244,7 +245,7 @@ async fn refreshes_when_cache_version_differs() -> Result<()> {
let mut builder = test_codex().with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing());
builder = builder
.with_pre_build_hook(move |home| {
let client_version = codex_core::models_manager::client_version_to_whole();
let client_version = client_version_to_whole();
let cache = ModelsCache {
fetched_at: Utc::now(),
etag: None,

View File

@@ -1,7 +1,7 @@
use codex_config::types::Personality;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_features::Feature;
use codex_models_manager::manager::ModelsManager;
use codex_models_manager::manager::RefreshStrategy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;

View File

@@ -4,11 +4,12 @@
use std::sync::Arc;
use anyhow::Result;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_login::CodexAuth;
use codex_model_provider_info::ModelProviderInfo;
use codex_model_provider_info::built_in_model_providers;
use codex_models_manager::bundled_models_response;
use codex_models_manager::manager::ModelsManager;
use codex_models_manager::manager::RefreshStrategy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
@@ -976,7 +977,7 @@ async fn wait_for_model_available(manager: &Arc<ModelsManager>, slug: &str) -> M
}
fn bundled_model_slug() -> String {
let response = codex_models_manager::bundled_models_response()
let response = bundled_models_response()
.unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
response
.models

View File

@@ -11,8 +11,8 @@ use std::time::UNIX_EPOCH;
use codex_config::types::McpServerConfig;
use codex_config::types::McpServerTransportConfig;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_login::CodexAuth;
use codex_models_manager::manager::RefreshStrategy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ConfigShellToolType;

View File

@@ -5,6 +5,7 @@ use anyhow::Result;
use codex_core::config::Config;
use codex_features::Feature;
use codex_login::CodexAuth;
use codex_models_manager::bundled_models_response;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::McpInvocation;
@@ -93,7 +94,7 @@ fn configure_apps_without_tool_search(config: &mut Config, apps_base_url: &str)
config.chatgpt_base_url = apps_base_url.to_string();
config.model = Some("gpt-5-codex".to_string());
let mut model_catalog = codex_models_manager::bundled_models_response()
let mut model_catalog = bundled_models_response()
.unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
let model = model_catalog
.models

View File

@@ -2,10 +2,10 @@
#![allow(clippy::unwrap_used, clippy::expect_used)]
use anyhow::Result;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_features::Feature;
use codex_login::CodexAuth;
use codex_models_manager::manager::ModelsManager;
use codex_models_manager::manager::RefreshStrategy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;

View File

@@ -1,5 +1,5 @@
use codex_core::ModelProviderInfo;
use codex_core::WireApi;
use codex_model_provider_info::ModelProviderInfo;
use codex_model_provider_info::WireApi;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::Op;
use codex_protocol::user_input::UserInput;

View File

@@ -1,8 +1,8 @@
//! Verifies that the agent retries when the SSE stream terminates before
//! delivering a `response.completed` event.
use codex_core::ModelProviderInfo;
use codex_core::WireApi;
use codex_model_provider_info::ModelProviderInfo;
use codex_model_provider_info::WireApi;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::Op;
use codex_protocol::user_input::UserInput;

View File

@@ -7,6 +7,7 @@ use codex_config::types::ToolSuggestDiscoverableType;
use codex_core::config::Config;
use codex_features::Feature;
use codex_login::CodexAuth;
use codex_models_manager::bundled_models_response;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use core_test_support::apps_test_server::AppsTestServer;
@@ -77,7 +78,7 @@ fn configure_apps_without_search_tool(config: &mut Config, apps_base_url: &str)
id: DISCOVERABLE_GMAIL_ID.to_string(),
}];
let mut model_catalog = codex_models_manager::bundled_models_response()
let mut model_catalog = bundled_models_response()
.unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
let model = model_catalog
.models

View File

@@ -1,4 +1,5 @@
use anyhow::Result;
use codex_model_provider_info::WireApi;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::Op;
@@ -43,7 +44,7 @@ async fn websocket_fallback_switches_to_http_on_upgrade_required_connect() -> Re
let base_url = format!("{}/v1", server.uri());
move |config| {
config.model_provider.base_url = Some(base_url);
config.model_provider.wire_api = codex_core::WireApi::Responses;
config.model_provider.wire_api = WireApi::Responses;
config.model_provider.supports_websockets = true;
// If we don't treat 426 specially, the sampling loop would retry the WebSocket
// handshake before switching to the HTTP transport.
@@ -89,7 +90,7 @@ async fn websocket_fallback_switches_to_http_after_retries_exhausted() -> Result
let base_url = format!("{}/v1", server.uri());
move |config| {
config.model_provider.base_url = Some(base_url);
config.model_provider.wire_api = codex_core::WireApi::Responses;
config.model_provider.wire_api = WireApi::Responses;
config.model_provider.supports_websockets = true;
config.model_provider.stream_max_retries = Some(2);
config.model_provider.request_max_retries = Some(0);
@@ -134,7 +135,7 @@ async fn websocket_fallback_hides_first_websocket_retry_stream_error() -> Result
let base_url = format!("{}/v1", server.uri());
move |config| {
config.model_provider.base_url = Some(base_url);
config.model_provider.wire_api = codex_core::WireApi::Responses;
config.model_provider.wire_api = WireApi::Responses;
config.model_provider.supports_websockets = true;
config.model_provider.stream_max_retries = Some(2);
config.model_provider.request_max_retries = Some(0);
@@ -210,7 +211,7 @@ async fn websocket_fallback_is_sticky_across_turns() -> Result<()> {
let base_url = format!("{}/v1", server.uri());
move |config| {
config.model_provider.base_url = Some(base_url);
config.model_provider.wire_api = codex_core::WireApi::Responses;
config.model_provider.wire_api = WireApi::Responses;
config.model_provider.supports_websockets = true;
config.model_provider.stream_max_retries = Some(2);
config.model_provider.request_max_retries = Some(0);