Files
codex/codex-rs/core/src/test_support.rs
Celia Chen e8d8080818 feat: let model providers own model discovery (#18950)
## Why

`codex-models-manager` had grown to own provider-specific concerns:
constructing OpenAI-compatible `/models` requests, resolving provider
auth, emitting request telemetry, and deciding how provider catalogs
should be sourced. That made the manager harder to reuse for providers
whose model catalog is not fetched from the OpenAI `/models` endpoint,
such as Amazon Bedrock.

This change moves provider-specific model discovery behind
provider-owned implementations, so the models manager can focus on
refresh policy, cache behavior, picker ordering, and model metadata
merging.

## What Changed

- Introduced a `ModelsManager` trait with separate `OpenAiModelsManager`
and `StaticModelsManager` implementations.
- Added `ModelsEndpointClient` so OpenAI-compatible HTTP fetching lives
outside `codex-models-manager`.
- Moved `/models` request construction, provider auth resolution,
timeout handling, and request telemetry into `codex-model-provider` via
`OpenAiModelsEndpoint`.
- Added provider-owned `models_manager(...)` construction so configured
OpenAI-compatible providers use `OpenAiModelsManager`, while
static/catalog-backed providers can return `StaticModelsManager`.
- Added an Amazon Bedrock static model catalog for the GPT OSS Bedrock
model IDs.
- Updated core/session/thread manager code and tests to depend on
`Arc<dyn ModelsManager>`.
- Moved offline model test helpers into
`codex_models_manager::test_support`.

## Metadata References

The Bedrock catalog metadata is based on the official Amazon Bedrock
OpenAI model documentation:

- [Amazon Bedrock OpenAI
models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-openai.html)
lists the Bedrock model IDs, text input/output modalities, and `128,000`
token context window for `gpt-oss-20b` and `gpt-oss-120b`.
- [Amazon Bedrock `gpt-oss-120b` model
card](https://docs.aws.amazon.com/bedrock/latest/userguide/model-card-openai-gpt-oss-120b.html)
lists the `bedrock-runtime` model ID `openai.gpt-oss-120b-1:0`, the
`bedrock-mantle` model ID `openai.gpt-oss-120b`, text-only modalities,
and `128K` context window.
- [OpenAI `gpt-oss-120b` model
docs](https://developers.openai.com/api/docs/models/gpt-oss-120b)
document configurable reasoning effort with `low`, `medium`, and `high`,
plus text input/output modality.

The display names, default reasoning effort, and priority ordering are
Codex-local catalog choices.

## Test Plan
- Manually verified app-server model listing with an AWS profile:

```shell
CODEX_HOME="$(mktemp -d)" cargo run -p codex-app-server-test-client -- \
  --codex-bin ./target/debug/codex \
  -c 'model_provider="amazon-bedrock"' \
  -c 'model_providers.amazon-bedrock.aws.profile="codex-bedrock"' \
  -c 'model_providers.amazon-bedrock.aws.region="us-west-2"' \
  model-list
```

The response returned the Bedrock catalog with `openai.gpt-oss-120b-1:0`
as the default model and `openai.gpt-oss-20b-1:0` as the second listed
model, both text-only and supporting low/medium/high reasoning effort.
2026-04-24 04:28:25 +00:00

133 lines
4.3 KiB
Rust

//! Test-only helpers exposed for cross-crate integration tests.
//!
//! Production code should not depend on this module.
//! We prefer this to using a crate feature to avoid building multiple
//! permutations of the crate.
use std::path::PathBuf;
use std::sync::Arc;
use codex_exec_server::EnvironmentManager;
use codex_login::AuthManager;
use codex_login::CodexAuth;
use codex_model_provider::create_model_provider;
use codex_model_provider_info::ModelProviderInfo;
use codex_models_manager::bundled_models_response;
use codex_models_manager::collaboration_mode_presets;
use codex_models_manager::manager::SharedModelsManager;
use codex_models_manager::test_support::construct_model_info_offline_for_tests;
use codex_models_manager::test_support::get_model_offline_for_tests;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use once_cell::sync::Lazy;
use crate::ThreadManager;
use crate::config::Config;
use crate::thread_manager;
use crate::unified_exec;
/// Model presets parsed once from the bundled `models.json`, sorted by
/// ascending priority, with default-marking applied by picker visibility.
static TEST_MODEL_PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
    // The bundled catalog is a build-time asset; failing to parse it is a bug.
    let mut parsed = bundled_models_response()
        .unwrap_or_else(|err| panic!("bundled models.json should parse: {err}"));
    parsed.models.sort_by(|a, b| a.priority.cmp(&b.priority));
    let mut ordered: Vec<ModelPreset> = parsed.models.into_iter().map(Into::into).collect();
    ModelPreset::mark_default_by_picker_visibility(&mut ordered);
    ordered
});
/// Toggles the thread manager's test mode for cross-crate integration tests.
pub fn set_thread_manager_test_mode(enabled: bool) {
    thread_manager::set_thread_manager_test_mode_for_tests(enabled);
}
/// Enables or disables deterministic process IDs in `unified_exec`, so tests
/// can make stable assertions about process identifiers.
pub fn set_deterministic_process_ids(enabled: bool) {
    unified_exec::set_deterministic_process_ids_for_tests(enabled);
}
/// Wraps a pre-built [`CodexAuth`] in a test-only [`AuthManager`].
pub fn auth_manager_from_auth(auth: CodexAuth) -> Arc<AuthManager> {
    AuthManager::from_auth_for_testing(auth)
}
/// Like [`auth_manager_from_auth`], but roots the manager's state under the
/// given `codex_home` directory.
pub fn auth_manager_from_auth_with_home(auth: CodexAuth, codex_home: PathBuf) -> Arc<AuthManager> {
    AuthManager::from_auth_for_testing_with_home(auth, codex_home)
}
/// Constructs a test [`ThreadManager`] backed by the supplied auth and
/// model-provider configuration.
pub fn thread_manager_with_models_provider(
    auth: CodexAuth,
    provider: ModelProviderInfo,
) -> ThreadManager {
    ThreadManager::with_models_provider_for_tests(auth, provider)
}
/// Constructs a test [`ThreadManager`] with an explicit `codex_home` root and
/// an injected [`EnvironmentManager`], in addition to auth and provider info.
pub fn thread_manager_with_models_provider_and_home(
    auth: CodexAuth,
    provider: ModelProviderInfo,
    codex_home: PathBuf,
    environment_manager: Arc<EnvironmentManager>,
) -> ThreadManager {
    ThreadManager::with_models_provider_and_home_for_tests(auth, provider, codex_home, environment_manager)
}
/// Starts a new thread while forcing `user_shell_override` in place of the
/// detected user shell.
///
/// # Errors
/// Propagates any error from the underlying thread-manager start path.
pub async fn start_thread_with_user_shell_override(
    thread_manager: &ThreadManager,
    config: Config,
    user_shell_override: crate::shell::Shell,
) -> codex_protocol::error::Result<crate::NewThread> {
    let start = thread_manager
        .start_thread_with_user_shell_override_for_tests(config, user_shell_override);
    start.await
}
/// Resumes a thread from a rollout file at `rollout_path`, forcing
/// `user_shell_override` in place of the detected user shell.
///
/// # Errors
/// Propagates any error from the underlying thread-manager resume path.
pub async fn resume_thread_from_rollout_with_user_shell_override(
    thread_manager: &ThreadManager,
    config: Config,
    rollout_path: PathBuf,
    auth_manager: Arc<AuthManager>,
    user_shell_override: crate::shell::Shell,
) -> codex_protocol::error::Result<crate::NewThread> {
    let resume = thread_manager.resume_thread_from_rollout_with_user_shell_override_for_tests(
        config,
        rollout_path,
        auth_manager,
        user_shell_override,
    );
    resume.await
}
/// Builds a [`SharedModelsManager`] for `provider`, resolving auth via
/// `auth_manager` and rooting cache/state under `codex_home`.
pub fn models_manager_with_provider(
    codex_home: PathBuf,
    auth_manager: Arc<AuthManager>,
    provider: ModelProviderInfo,
) -> SharedModelsManager {
    let model_provider = create_model_provider(provider, Some(auth_manager));
    // No config-supplied model catalog override; use a default manager config.
    model_provider.models_manager(codex_home, None, Default::default())
}
/// Resolves an optional model slug to a concrete model name without any
/// network access.
pub fn get_model_offline(model: Option<&str>) -> String {
    get_model_offline_for_tests(model)
}
/// Builds [`ModelInfo`] for `model` from `config` without any network access.
pub fn construct_model_info_offline(model: &str, config: &Config) -> ModelInfo {
    let manager_config = config.to_models_manager_config();
    construct_model_info_offline_for_tests(model, &manager_config)
}
pub fn all_model_presets() -> &'static Vec<ModelPreset> {
&TEST_MODEL_PRESETS
}
/// Returns the builtin collaboration-mode presets computed from a default
/// [`collaboration_mode_presets::CollaborationModesConfig`].
pub fn builtin_collaboration_mode_presets() -> Vec<CollaborationModeMask> {
    let modes_config = collaboration_mode_presets::CollaborationModesConfig::default();
    collaboration_mode_presets::builtin_collaboration_mode_presets(modes_config)
}