remove temporary ownership re-exports (#16626)

Stacked on #16508.

This removes the temporary `codex-core` / `codex-login` re-export shims
from the ownership split and rewrites callsites to import directly from
`codex-model-provider-info`, `codex-models-manager`, `codex-api`,
`codex-protocol`, `codex-feedback`, and `codex-response-debug-context`.

No behavior change intended; this is the mechanical import cleanup layer
split out from the ownership move.

---------

Co-authored-by: Codex <noreply@openai.com>
This commit is contained in:
Ahmed Ibrahim
2026-04-03 00:33:34 -07:00
committed by GitHub
parent b15c918836
commit af8a9d2d2b
119 changed files with 323 additions and 324 deletions

View File

@@ -91,12 +91,12 @@ use codex_core::config::edit::ConfigEdit;
 use codex_core::config::edit::ConfigEditsBuilder;
 use codex_core::config_loader::ConfigLayerStackOrdering;
 use codex_core::message_history;
-use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
-use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
-use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
 #[cfg(target_os = "windows")]
 use codex_core::windows_sandbox::WindowsSandboxLevelExt;
 use codex_features::Feature;
+use codex_models_manager::collaboration_mode_presets::CollaborationModesConfig;
+use codex_models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
+use codex_models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
 use codex_otel::SessionTelemetry;
 use codex_protocol::ThreadId;
 use codex_protocol::approvals::ExecApprovalRequestEvent;

View File

@@ -93,12 +93,12 @@ pub(super) use codex_core::config_loader::ConfigLayerStack;
 pub(super) use codex_core::config_loader::ConfigRequirements;
 pub(super) use codex_core::config_loader::ConfigRequirementsToml;
 pub(super) use codex_core::config_loader::RequirementSource;
-pub(super) use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
 pub(super) use codex_core::plugins::OPENAI_CURATED_MARKETPLACE_NAME;
 pub(super) use codex_core::skills::model::SkillMetadata;
 pub(super) use codex_features::FEATURES;
 pub(super) use codex_features::Feature;
 pub(super) use codex_git_utils::CommitLogEntry;
+pub(super) use codex_models_manager::collaboration_mode_presets::CollaborationModesConfig;
 pub(super) use codex_otel::RuntimeMetricsSummary;
 pub(super) use codex_otel::SessionTelemetry;
 pub(super) use codex_protocol::ThreadId;

View File

@@ -1,5 +1,5 @@
-use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
-use codex_core::models_manager::collaboration_mode_presets::builtin_collaboration_mode_presets;
+use codex_models_manager::collaboration_mode_presets::CollaborationModesConfig;
+use codex_models_manager::collaboration_mode_presets::builtin_collaboration_mode_presets;
 use codex_protocol::config_types::CollaborationModeMask;
 use codex_protocol::openai_models::ModelPreset;
 use std::convert::Infallible;

View File

@@ -1,11 +1,11 @@
 use std::io;
 use std::sync::LazyLock;
-use codex_core::DEFAULT_LMSTUDIO_PORT;
-use codex_core::DEFAULT_OLLAMA_PORT;
-use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
-use codex_core::OLLAMA_OSS_PROVIDER_ID;
 use codex_core::config::set_default_oss_provider;
+use codex_model_provider_info::DEFAULT_LMSTUDIO_PORT;
+use codex_model_provider_info::DEFAULT_OLLAMA_PORT;
+use codex_model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
+use codex_model_provider_info::OLLAMA_OSS_PROVIDER_ID;
 use crossterm::event::Event;
 use crossterm::event::KeyCode;
 use crossterm::event::KeyEvent;

View File

@@ -5,8 +5,8 @@ use crate::history_cell::with_border_with_inner_width;
 use crate::version::CODEX_CLI_VERSION;
 use chrono::DateTime;
 use chrono::Local;
-use codex_core::WireApi;
 use codex_core::config::Config;
+use codex_model_provider_info::WireApi;
 use codex_protocol::ThreadId;
 use codex_protocol::account::PlanType;
 use codex_protocol::openai_models::ReasoningEffort;