mirror of
https://github.com/openai/codex.git
synced 2026-05-02 18:37:01 +00:00
extract models manager and related ownership from core (#16508)
## Summary

- split `models-manager` out of `core` and add `ModelsManagerConfig` plus `Config::to_models_manager_config()` so model metadata paths stop depending on `core::Config`
- move login-owned/auth-owned code out of `core` into `codex-login`, move model provider config into `codex-model-provider-info`, move API bridge mapping into `codex-api`, move protocol-owned types/impls into `codex-protocol`, and move response debug helpers into a dedicated `response-debug-context` crate
- move feedback tag emission into `codex-feedback`, relocate tests to the crates that now own the code, and keep broad temporary re-exports so this PR avoids a giant import-only rewrite

## Major moves and decisions

- created `codex-models-manager` as the owner for model cache/catalog/config/model info logic, including the new `ModelsManagerConfig` struct
- created `codex-model-provider-info` as the owner for provider config parsing/defaults and kept temporary `codex-login`/`codex-core` re-exports for old import paths
- moved `api_bridge` error mapping + `CoreAuthProvider` into `codex-api`, while `codex-login::api_bridge` temporarily re-exports those symbols and keeps the `auth_provider_from_auth` wrapper
- moved `auth_env_telemetry` and `provider_auth` ownership to `codex-login`
- moved `CodexErr` ownership to `codex-protocol::error`, plus `StreamOutput`, `bytes_to_string_smart`, and network policy helpers to protocol-owned modules
- created `codex-response-debug-context` for `extract_response_debug_context`, `telemetry_transport_error_message`, and related response-debug plumbing instead of leaving that behavior in `core`
- moved `FeedbackRequestTags`, `emit_feedback_request_tags`, and `emit_feedback_request_tags_with_auth_env` to `codex-feedback`
- deferred removal of temporary re-exports and the mechanical import rewrites to a stacked follow-up PR so this PR stays reviewable

## Test moves

- moved auth refresh coverage from `core/tests/suite/auth_refresh.rs` to `login/tests/suite/auth_refresh.rs`
- moved text encoding coverage from `core/tests/suite/text_encoding_fix.rs` to `protocol/src/exec_output_tests.rs`
- moved model info override coverage from `core/tests/suite/model_info_overrides.rs` to `models-manager/src/model_info_overrides_tests.rs`

---------

Co-authored-by: Codex <noreply@openai.com>
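The diff below does not show `ModelsManagerConfig`'s definition, only call sites like `config.to_models_manager_config()`. The following is a minimal sketch of the conversion seam, assuming the struct carries roughly the fields the relocated tests exercise (`codex_home`, `model`, `tool_output_token_limit`); the real field set may differ.

```rust
use std::path::PathBuf;

// Minimal sketch, not the real definitions: field names are assumptions
// drawn from what the relocated tests exercise.
pub struct ModelsManagerConfig {
    pub codex_home: PathBuf,
    pub model: Option<String>,
    pub tool_output_token_limit: Option<u64>,
}

// Stand-in for core::Config, reduced to the fields the projection reads.
pub struct Config {
    pub codex_home: PathBuf,
    pub model: Option<String>,
    pub tool_output_token_limit: Option<u64>,
}

impl Config {
    // Project only what codex-models-manager needs, so model metadata
    // paths stop depending on core::Config.
    pub fn to_models_manager_config(&self) -> ModelsManagerConfig {
        ModelsManagerConfig {
            codex_home: self.codex_home.clone(),
            model: self.model.clone(),
            tool_output_token_limit: self.tool_output_token_limit,
        }
    }
}
```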
@@ -1,3 +1,5 @@
 // Single integration test binary that aggregates all test modules.
 // The submodules live in `tests/all/`.
+pub use codex_core::error;
+
 mod suite;
@@ -19,6 +19,7 @@ codex-core = { workspace = true }
 codex-exec-server = { workspace = true }
 codex-features = { workspace = true }
 codex-login = { workspace = true }
+codex-models-manager = { workspace = true }
 codex-protocol = { workspace = true }
 codex-utils-absolute-path = { workspace = true }
 codex-utils-cargo-bin = { workspace = true }
@@ -609,17 +609,8 @@ fn ensure_test_model_catalog(config: &mut Config) -> Result<()> {
         return Ok(());
     }
 
-    let bundled_models_path = codex_utils_cargo_bin::find_resource!("../../models.json")
-        .context("bundled models.json")?;
-    let bundled_models_contents =
-        std::fs::read_to_string(&bundled_models_path).with_context(|| {
-            format!(
-                "read bundled models.json from {}",
-                bundled_models_path.display()
-            )
-        })?;
-    let bundled_models: ModelsResponse =
-        serde_json::from_str(&bundled_models_contents).context("parse bundled models.json")?;
+    let bundled_models = codex_models_manager::bundled_models_response()
+        .unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
     let mut model = bundled_models
         .models
         .iter()
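The hunk above (and several like it below) replaces ad-hoc parsing of `models.json` with a single `codex_models_manager::bundled_models_response()` helper. The diff never shows the helper itself; a plausible shape, inferred from the inline code it replaces and hedged accordingly (the embed path and error type are assumptions):

```rust
use codex_protocol::openai_models::ModelsResponse;

// Plausible sketch of the new helper: embed the crate's models.json at
// compile time so callers no longer resolve it from disk via find_resource!.
pub fn bundled_models_response() -> Result<ModelsResponse, serde_json::Error> {
    serde_json::from_str(include_str!("../models.json"))
}
```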
File diff suppressed because it is too large
@@ -1,3 +1,4 @@
+use crate::error::CodexErr;
 use codex_core::ModelClient;
 use codex_core::ModelProviderInfo;
 use codex_core::NewThread;
@@ -6,7 +7,6 @@ use codex_core::ResponseEvent;
 use codex_core::ThreadManager;
 use codex_core::WireApi;
 use codex_core::built_in_model_providers;
-use codex_core::error::CodexErr;
 use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
 use codex_features::Feature;
 use codex_login::AuthCredentialsStoreMode;
@@ -34,7 +34,6 @@ use codex_protocol::models::ReasoningItemContent;
 use codex_protocol::models::ReasoningItemReasoningSummary;
 use codex_protocol::models::ResponseItem;
 use codex_protocol::models::WebSearchAction;
-use codex_protocol::openai_models::ModelsResponse;
 use codex_protocol::openai_models::ReasoningEffort;
 use codex_protocol::protocol::EventMsg;
 use codex_protocol::protocol::Op;
@@ -1636,8 +1635,8 @@ async fn user_turn_explicit_reasoning_summary_overrides_model_catalog_default()
     )
     .await;
 
-    let mut model_catalog: ModelsResponse =
-        serde_json::from_str(include_str!("../../models.json")).expect("valid models.json");
+    let mut model_catalog = codex_models_manager::bundled_models_response()
+        .unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
     let model = model_catalog
         .models
         .iter_mut()
@@ -1749,8 +1748,8 @@ async fn reasoning_summary_none_overrides_model_catalog_default() -> anyhow::Res
     )
     .await;
 
-    let mut model_catalog: ModelsResponse =
-        serde_json::from_str(include_str!("../../models.json")).expect("valid models.json");
+    let mut model_catalog = codex_models_manager::bundled_models_response()
+        .unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
     let model = model_catalog
         .models
         .iter_mut()
@@ -103,8 +103,8 @@ fn non_openai_model_provider(server: &MockServer) -> ModelProviderInfo {
 }
 
 fn model_info_with_context_window(slug: &str, context_window: i64) -> ModelInfo {
-    let models_response: ModelsResponse =
-        serde_json::from_str(include_str!("../../models.json")).expect("valid models.json");
+    let models_response = codex_models_manager::bundled_models_response()
+        .unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
     let mut model_info = models_response
         .models
         .into_iter()
@@ -3,6 +3,7 @@
 use std::collections::HashMap;
 use std::string::ToString;
 
+use crate::error::Result;
 use codex_core::exec::ExecCapturePolicy;
 use codex_core::exec::ExecParams;
 use codex_core::exec::ExecToolCallOutput;
@@ -17,8 +18,6 @@ use codex_sandboxing::SandboxType;
 use codex_sandboxing::get_platform_sandbox;
 use tempfile::TempDir;
 
-use codex_core::error::Result;
-
 fn skip_test() -> bool {
     if std::env::var(CODEX_SANDBOX_ENV_VAR) == Ok("seatbelt".to_string()) {
         eprintln!("{CODEX_SANDBOX_ENV_VAR} is set to 'seatbelt', skipping test.");
@@ -77,7 +77,6 @@ mod agent_websocket;
 mod apply_patch_cli;
 #[cfg(not(target_os = "windows"))]
 mod approvals;
-mod auth_refresh;
 mod cli_stream;
 mod client;
 mod client_websockets;
@@ -101,7 +100,6 @@ mod json_result;
 mod live_cli;
 mod live_reload;
 mod memories;
-mod model_info_overrides;
 mod model_overrides;
 mod model_switching;
 mod model_visible_layout;
@@ -142,7 +140,6 @@ mod sqlite_state;
 mod stream_error_allows_next_turn;
 mod stream_no_completed;
 mod subagent_notifications;
-mod text_encoding_fix;
 mod tool_harness;
 mod tool_parallelism;
 mod tool_suggest;
@@ -1,52 +0,0 @@
-use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
-use codex_core::models_manager::manager::ModelsManager;
-use codex_login::CodexAuth;
-use codex_protocol::openai_models::TruncationPolicyConfig;
-use core_test_support::load_default_config_for_test;
-use pretty_assertions::assert_eq;
-use tempfile::TempDir;
-
-#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-async fn offline_model_info_without_tool_output_override() {
-    let codex_home = TempDir::new().expect("create temp dir");
-    let config = load_default_config_for_test(&codex_home).await;
-    let auth_manager = codex_core::test_support::auth_manager_from_auth(
-        CodexAuth::create_dummy_chatgpt_auth_for_testing(),
-    );
-    let manager = ModelsManager::new(
-        config.codex_home.clone(),
-        auth_manager,
-        /*model_catalog*/ None,
-        CollaborationModesConfig::default(),
-    );
-
-    let model_info = manager.get_model_info("gpt-5.1", &config).await;
-
-    assert_eq!(
-        model_info.truncation_policy,
-        TruncationPolicyConfig::bytes(/*limit*/ 10_000)
-    );
-}
-
-#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-async fn offline_model_info_with_tool_output_override() {
-    let codex_home = TempDir::new().expect("create temp dir");
-    let mut config = load_default_config_for_test(&codex_home).await;
-    config.tool_output_token_limit = Some(123);
-    let auth_manager = codex_core::test_support::auth_manager_from_auth(
-        CodexAuth::create_dummy_chatgpt_auth_for_testing(),
-    );
-    let manager = ModelsManager::new(
-        config.codex_home.clone(),
-        auth_manager,
-        /*model_catalog*/ None,
-        CollaborationModesConfig::default(),
-    );
-
-    let model_info = manager.get_model_info("gpt-5.1-codex", &config).await;
-
-    assert_eq!(
-        model_info.truncation_policy,
-        TruncationPolicyConfig::tokens(/*limit*/ 123)
-    );
-}
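Per the commit message, this coverage now lives in `models-manager/src/model_info_overrides_tests.rs`, which the diff above does not show. A sketch of how the relocated override test might read, assuming the moved crate keeps the same `ModelsManager::new` signature and that the old `codex_core::models_manager::*` paths become `codex_models_manager::*` (both assumptions; helper names are carried over from the deleted file above):

```rust
use codex_models_manager::collaboration_mode_presets::CollaborationModesConfig;
use codex_models_manager::manager::ModelsManager;
use codex_protocol::openai_models::TruncationPolicyConfig;

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn offline_model_info_with_tool_output_override() {
    let codex_home = tempfile::TempDir::new().expect("create temp dir");
    let mut config = core_test_support::load_default_config_for_test(&codex_home).await;
    config.tool_output_token_limit = Some(123);
    let auth_manager = codex_core::test_support::auth_manager_from_auth(
        codex_login::CodexAuth::create_dummy_chatgpt_auth_for_testing(),
    );
    let manager = ModelsManager::new(
        config.codex_home.clone(),
        auth_manager,
        /*model_catalog*/ None,
        CollaborationModesConfig::default(),
    );

    // The override now travels through the narrow config projection
    // instead of a full core::Config reference.
    let model_info = manager
        .get_model_info("gpt-5.1-codex", &config.to_models_manager_config())
        .await;

    assert_eq!(
        model_info.truncation_policy,
        TruncationPolicyConfig::tokens(/*limit*/ 123)
    );
}
```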
@@ -961,11 +961,11 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
         "expected {smaller_model_slug} to be available in remote model list"
     );
     let large_model_info = models_manager
-        .get_model_info(large_model_slug, &test.config)
+        .get_model_info(large_model_slug, &test.config.to_models_manager_config())
         .await;
     assert_eq!(large_model_info.context_window, Some(large_context_window));
     let smaller_model_info = models_manager
-        .get_model_info(smaller_model_slug, &test.config)
+        .get_model_info(smaller_model_slug, &test.config.to_models_manager_config())
         .await;
     assert_eq!(
         smaller_model_info.context_window,
@@ -139,7 +139,7 @@ async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> {
             .model
             .as_deref()
             .expect("test config should have a model"),
-        &config,
+        &config.to_models_manager_config(),
     )
     .await
     .base_instructions;
@@ -105,7 +105,9 @@ async fn remote_models_get_model_info_uses_longest_matching_prefix() -> Result<(
 
     manager.list_models(RefreshStrategy::OnlineIfUncached).await;
 
-    let model_info = manager.get_model_info("gpt-5.3-codex-test", &config).await;
+    let model_info = manager
+        .get_model_info("gpt-5.3-codex-test", &config.to_models_manager_config())
+        .await;
 
     assert_eq!(model_info.slug, "gpt-5.3-codex-test");
     assert_eq!(model_info.base_instructions, specific.base_instructions);
@@ -348,7 +350,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
     assert_eq!(requests[0].url.path(), "/v1/models");
 
     let model_info = models_manager
-        .get_model_info(REMOTE_MODEL_SLUG, &config)
+        .get_model_info(REMOTE_MODEL_SLUG, &config.to_models_manager_config())
         .await;
     assert_eq!(model_info.shell_type, ConfigShellToolType::UnifiedExec);
 
@@ -455,7 +457,9 @@ async fn remote_models_truncation_policy_without_override_preserves_remote() ->
     let models_manager = test.thread_manager.get_models_manager();
     wait_for_model_available(&models_manager, slug).await;
 
-    let model_info = models_manager.get_model_info(slug, &test.config).await;
+    let model_info = models_manager
+        .get_model_info(slug, &test.config.to_models_manager_config())
+        .await;
     assert_eq!(
         model_info.truncation_policy,
         TruncationPolicyConfig::bytes(/*limit*/ 12_000)
@@ -500,7 +504,9 @@ async fn remote_models_truncation_policy_with_tool_output_override() -> Result<(
     let models_manager = test.thread_manager.get_models_manager();
     wait_for_model_available(&models_manager, slug).await;
 
-    let model_info = models_manager.get_model_info(slug, &test.config).await;
+    let model_info = models_manager
+        .get_model_info(slug, &test.config.to_models_manager_config())
+        .await;
     assert_eq!(
         model_info.truncation_policy,
         TruncationPolicyConfig::bytes(/*limit*/ 200)
@@ -628,7 +634,9 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
 
     wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
 
-    let base_model_info = models_manager.get_model_info("gpt-5.1", &config).await;
+    let base_model_info = models_manager
+        .get_model_info("gpt-5.1", &config.to_models_manager_config())
+        .await;
     let body = response_mock.single_request().body_json();
     let instructions = body["instructions"].as_str().unwrap();
     assert_eq!(instructions, base_model_info.base_instructions);
@@ -968,8 +976,8 @@ async fn wait_for_model_available(manager: &Arc<ModelsManager>, slug: &str) -> M
 }
 
 fn bundled_model_slug() -> String {
-    let response: ModelsResponse = serde_json::from_str(include_str!("../../models.json"))
-        .expect("bundled models.json should deserialize");
+    let response = codex_models_manager::bundled_models_response()
+        .unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
     response
         .models
         .first()
@@ -5,7 +5,6 @@ use anyhow::Result;
 use codex_core::config::Config;
 use codex_features::Feature;
 use codex_login::CodexAuth;
-use codex_protocol::openai_models::ModelsResponse;
 use codex_protocol::protocol::AskForApproval;
 use codex_protocol::protocol::EventMsg;
 use codex_protocol::protocol::McpInvocation;
@@ -94,8 +93,8 @@ fn configure_apps_without_tool_search(config: &mut Config, apps_base_url: &str)
     config.chatgpt_base_url = apps_base_url.to_string();
     config.model = Some("gpt-5-codex".to_string());
 
-    let mut model_catalog: ModelsResponse =
-        serde_json::from_str(include_str!("../../models.json")).expect("valid models.json");
+    let mut model_catalog = codex_models_manager::bundled_models_response()
+        .unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
     let model = model_catalog
         .models
         .iter_mut()
@@ -1,77 +0,0 @@
-//! Integration test for the text encoding fix for issue #6178.
-//!
-//! These tests simulate VSCode's shell preview on Windows/WSL where the output
-//! may be encoded with a legacy code page before it reaches Codex.
-
-use codex_core::exec::StreamOutput;
-use pretty_assertions::assert_eq;
-
-#[test]
-fn test_utf8_shell_output() {
-    // Baseline: UTF-8 output should bypass the detector and remain unchanged.
-    assert_eq!(decode_shell_output("пример".as_bytes()), "пример");
-}
-
-#[test]
-fn test_cp1251_shell_output() {
-    // VS Code shells on Windows frequently surface CP1251 bytes for Cyrillic text.
-    assert_eq!(decode_shell_output(b"\xEF\xF0\xE8\xEC\xE5\xF0"), "пример");
-}
-
-#[test]
-fn test_cp866_shell_output() {
-    // Native cmd.exe still defaults to CP866; make sure we recognize that too.
-    assert_eq!(decode_shell_output(b"\xAF\xE0\xA8\xAC\xA5\xE0"), "пример");
-}
-
-#[test]
-fn test_windows_1252_smart_decoding() {
-    // Smart detection should turn fancy quotes/dashes into the proper Unicode glyphs.
-    assert_eq!(
-        decode_shell_output(b"\x93\x94 test \x96 dash"),
-        "\u{201C}\u{201D} test \u{2013} dash"
-    );
-}
-
-#[test]
-fn test_smart_decoding_improves_over_lossy_utf8() {
-    // Regression guard: String::from_utf8_lossy() alone used to emit replacement chars here.
-    let bytes = b"\x93\x94 test \x96 dash";
-    assert!(
-        String::from_utf8_lossy(bytes).contains('\u{FFFD}'),
-        "lossy UTF-8 should inject replacement chars"
-    );
-    assert_eq!(
-        decode_shell_output(bytes),
-        "\u{201C}\u{201D} test \u{2013} dash",
-        "smart decoding should keep curly quotes intact"
-    );
-}
-
-#[test]
-fn test_mixed_ascii_and_legacy_encoding() {
-    // Commands tend to mix ASCII status text with Latin-1 bytes (e.g. café).
-    assert_eq!(decode_shell_output(b"Output: caf\xE9"), "Output: café"); // codespell:ignore caf
-}
-
-#[test]
-fn test_pure_latin1_shell_output() {
-    // Latin-1 by itself should still decode correctly (regression coverage for the older tests).
-    assert_eq!(decode_shell_output(b"caf\xE9"), "café"); // codespell:ignore caf
-}
-
-#[test]
-fn test_invalid_bytes_still_fall_back_to_lossy() {
-    // If detection fails, we still want the user to see replacement characters.
-    let bytes = b"\xFF\xFE\xFD";
-    assert_eq!(decode_shell_output(bytes), String::from_utf8_lossy(bytes));
-}
-
-fn decode_shell_output(bytes: &[u8]) -> String {
-    StreamOutput {
-        text: bytes.to_vec(),
-        truncated_after_lines: None,
-    }
-    .from_utf8_lossy()
-    .text
-}
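These deleted tests (now in `protocol/src/exec_output_tests.rs`, per the commit message) pin down the smart-decoding behavior behind `StreamOutput::from_utf8_lossy`: pass valid UTF-8 through, guess the legacy code page otherwise. The diff does not show the implementation; a minimal sketch of the same behavior, assuming a `chardetng` + `encoding_rs` approach (the actual `bytes_to_string_smart` now owned by `codex-protocol` may work differently):

```rust
use chardetng::EncodingDetector;

// Sketch of smart shell-output decoding, not the real implementation.
fn bytes_to_string_smart_sketch(bytes: &[u8]) -> String {
    // Valid UTF-8 passes through untouched, matching test_utf8_shell_output.
    if let Ok(s) = std::str::from_utf8(bytes) {
        return s.to_string();
    }
    // Otherwise guess the legacy code page (CP1251, CP866, windows-1252, ...)
    // and decode with it; encoding_rs substitutes replacement characters for
    // bytes the guessed encoding cannot map, matching the lossy fallback test.
    let mut detector = EncodingDetector::new();
    detector.feed(bytes, true);
    let encoding = detector.guess(None, true);
    let (decoded, _encoding_used, _had_errors) = encoding.decode(bytes);
    decoded.into_owned()
}
```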
@@ -7,7 +7,6 @@ use codex_config::types::ToolSuggestDiscoverableType;
 use codex_core::config::Config;
 use codex_features::Feature;
 use codex_login::CodexAuth;
-use codex_protocol::openai_models::ModelsResponse;
 use codex_protocol::protocol::AskForApproval;
 use codex_protocol::protocol::SandboxPolicy;
 use core_test_support::apps_test_server::AppsTestServer;
@@ -78,8 +77,8 @@ fn configure_apps_without_search_tool(config: &mut Config, apps_base_url: &str)
         id: DISCOVERABLE_GMAIL_ID.to_string(),
     }];
 
-    let mut model_catalog: ModelsResponse =
-        serde_json::from_str(include_str!("../../models.json")).expect("valid models.json");
+    let mut model_catalog = codex_models_manager::bundled_models_response()
+        .unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
     let model = model_catalog
         .models
         .iter_mut()