## Summary
- split `models-manager` out of `core` and add `ModelsManagerConfig` plus `Config::to_models_manager_config()` so model metadata paths stop depending on `core::Config`
- move login-owned/auth-owned code out of `core` into `codex-login`, move model provider config into `codex-model-provider-info`, move API bridge mapping into `codex-api`, move protocol-owned types/impls into `codex-protocol`, and move response debug helpers into a dedicated `response-debug-context` crate
- move feedback tag emission into `codex-feedback`, relocate tests to the crates that now own the code, and keep broad temporary re-exports so this PR avoids a giant import-only rewrite

## Major moves and decisions
- created `codex-models-manager` as the owner for model cache/catalog/config/model info logic, including the new `ModelsManagerConfig` struct
- created `codex-model-provider-info` as the owner for provider config parsing/defaults and kept temporary `codex-login`/`codex-core` re-exports for old import paths
- moved `api_bridge` error mapping + `CoreAuthProvider` into `codex-api`, while `codex-login::api_bridge` temporarily re-exports those symbols and keeps the `auth_provider_from_auth` wrapper
- moved `auth_env_telemetry` and `provider_auth` ownership to `codex-login`
- moved `CodexErr` ownership to `codex-protocol::error`, plus `StreamOutput`, `bytes_to_string_smart`, and network policy helpers to protocol-owned modules
- created `codex-response-debug-context` for `extract_response_debug_context`, `telemetry_transport_error_message`, and related response-debug plumbing instead of leaving that behavior in `core`
- moved `FeedbackRequestTags`, `emit_feedback_request_tags`, and `emit_feedback_request_tags_with_auth_env` to `codex-feedback`
- deferred removal of temporary re-exports and the mechanical import rewrites to a stacked follow-up PR so this PR stays reviewable

## Test moves
- moved auth refresh coverage from `core/tests/suite/auth_refresh.rs` to `login/tests/suite/auth_refresh.rs`
- moved text encoding coverage from `core/tests/suite/text_encoding_fix.rs` to `protocol/src/exec_output_tests.rs`
- moved model info override coverage from `core/tests/suite/model_info_overrides.rs` to `models-manager/src/model_info_overrides_tests.rs`

---------

Co-authored-by: Codex <noreply@openai.com>
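The first Summary bullet narrows model-metadata code from the wide `core::Config` down to a dedicated `ModelsManagerConfig`. The sketch below only illustrates that shape: apart from the two names taken from the summary, every field and the conversion body are hypothetical stand-ins, since the real definitions are not shown on this page.

```rust
use std::path::PathBuf;

/// Stand-in for the config now owned by `codex-models-manager`
/// (the real field set is not shown in this summary).
#[derive(Debug, Clone, Default)]
pub struct ModelsManagerConfig {
    pub cache_dir: PathBuf,                // assumed field
    pub model_info_overrides: Vec<String>, // assumed field
}

/// Stand-in for the wide `core::Config` that model code previously depended on.
pub struct Config {
    pub codex_home: PathBuf,
    pub model_info_overrides: Vec<String>,
    // ...many unrelated fields in the real struct
}

impl Config {
    /// Narrow the wide config down to what the models-manager crate needs,
    /// so model metadata paths stop depending on `core::Config`.
    pub fn to_models_manager_config(&self) -> ModelsManagerConfig {
        ModelsManagerConfig {
            cache_dir: self.codex_home.join("models"),
            model_info_overrides: self.model_info_overrides.clone(),
        }
    }
}
```

The temporary re-exports mentioned above would similarly be plain `pub use` lines in `codex-core` (and in `codex-login::api_bridge`) pointing at the new owning crates, slated for removal in the stacked follow-up PR.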
116 lines · 5.3 KiB · Rust
use codex_collaboration_mode_templates::DEFAULT as COLLABORATION_MODE_DEFAULT;
use codex_collaboration_mode_templates::PLAN as COLLABORATION_MODE_PLAN;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::TUI_VISIBLE_COLLABORATION_MODES;
use codex_protocol::openai_models::ReasoningEffort;
use codex_utils_template::Template;
use std::sync::LazyLock;

const KNOWN_MODE_NAMES_TEMPLATE_KEY: &str = "KNOWN_MODE_NAMES";
const REQUEST_USER_INPUT_AVAILABILITY_TEMPLATE_KEY: &str = "REQUEST_USER_INPUT_AVAILABILITY";
const ASKING_QUESTIONS_GUIDANCE_TEMPLATE_KEY: &str = "ASKING_QUESTIONS_GUIDANCE";
static COLLABORATION_MODE_DEFAULT_TEMPLATE: LazyLock<Template> = LazyLock::new(|| {
    Template::parse(COLLABORATION_MODE_DEFAULT)
        .unwrap_or_else(|err| panic!("collaboration mode default template must parse: {err}"))
});

/// Stores feature flags that control collaboration-mode behavior.
///
/// Keep mode-related flags here so new collaboration-mode capabilities can be
/// added without large cross-cutting diffs to constructor and call-site
/// signatures.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct CollaborationModesConfig {
    /// Enables `request_user_input` availability in Default mode.
    pub default_mode_request_user_input: bool,
}

pub fn builtin_collaboration_mode_presets(
    collaboration_modes_config: CollaborationModesConfig,
) -> Vec<CollaborationModeMask> {
    vec![plan_preset(), default_preset(collaboration_modes_config)]
}

fn plan_preset() -> CollaborationModeMask {
    CollaborationModeMask {
        name: ModeKind::Plan.display_name().to_string(),
        mode: Some(ModeKind::Plan),
        model: None,
        reasoning_effort: Some(Some(ReasoningEffort::Medium)),
        developer_instructions: Some(Some(COLLABORATION_MODE_PLAN.to_string())),
    }
}

fn default_preset(collaboration_modes_config: CollaborationModesConfig) -> CollaborationModeMask {
    CollaborationModeMask {
        name: ModeKind::Default.display_name().to_string(),
        mode: Some(ModeKind::Default),
        model: None,
        reasoning_effort: None,
        developer_instructions: Some(Some(default_mode_instructions(collaboration_modes_config))),
    }
}

fn default_mode_instructions(collaboration_modes_config: CollaborationModesConfig) -> String {
    let known_mode_names = format_mode_names(&TUI_VISIBLE_COLLABORATION_MODES);
    let request_user_input_availability = request_user_input_availability_message(
        ModeKind::Default,
        collaboration_modes_config.default_mode_request_user_input,
    );
    let asking_questions_guidance = asking_questions_guidance_message(
        collaboration_modes_config.default_mode_request_user_input,
    );
    COLLABORATION_MODE_DEFAULT_TEMPLATE
        .render([
            (KNOWN_MODE_NAMES_TEMPLATE_KEY, known_mode_names.as_str()),
            (
                REQUEST_USER_INPUT_AVAILABILITY_TEMPLATE_KEY,
                request_user_input_availability.as_str(),
            ),
            (
                ASKING_QUESTIONS_GUIDANCE_TEMPLATE_KEY,
                asking_questions_guidance.as_str(),
            ),
        ])
        .unwrap_or_else(|err| panic!("collaboration mode default template must render: {err}"))
}

fn format_mode_names(modes: &[ModeKind]) -> String {
    let mode_names: Vec<&str> = modes.iter().map(|mode| mode.display_name()).collect();
    match mode_names.as_slice() {
        [] => "none".to_string(),
        [mode_name] => (*mode_name).to_string(),
        [first, second] => format!("{first} and {second}"),
        [..] => mode_names.join(", "),
    }
}

fn request_user_input_availability_message(
    mode: ModeKind,
    default_mode_request_user_input: bool,
) -> String {
    let mode_name = mode.display_name();
    if mode.allows_request_user_input()
        || (default_mode_request_user_input && mode == ModeKind::Default)
    {
        format!("The `request_user_input` tool is available in {mode_name} mode.")
    } else {
        format!(
            "The `request_user_input` tool is unavailable in {mode_name} mode. If you call it while in {mode_name} mode, it will return an error."
        )
    }
}

fn asking_questions_guidance_message(default_mode_request_user_input: bool) -> String {
    if default_mode_request_user_input {
        "In Default mode, strongly prefer making reasonable assumptions and executing the user's request rather than stopping to ask questions. If you absolutely must ask a question because the answer cannot be discovered from local context and a reasonable assumption would be risky, prefer using the `request_user_input` tool rather than writing a multiple choice question as a textual assistant message. Never write a multiple choice question as a textual assistant message.".to_string()
    } else {
        "In Default mode, strongly prefer making reasonable assumptions and executing the user's request rather than stopping to ask questions. If you absolutely must ask a question because the answer cannot be discovered from local context and a reasonable assumption would be risky, ask the user directly with a concise plain-text question. Never write a multiple choice question as a textual assistant message.".to_string()
    }
}

#[cfg(test)]
#[path = "collaboration_mode_presets_tests.rs"]
mod tests;
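As a quick orientation, here is a minimal sketch of how a caller (for example the `collaboration_mode_presets_tests.rs` module referenced at the bottom of the file) might exercise this API. It assumes the items defined above are in scope; the assertions follow from `builtin_collaboration_mode_presets` returning the Plan preset followed by the Default preset.

```rust
use codex_protocol::config_types::ModeKind;

// Assumes `CollaborationModesConfig` and `builtin_collaboration_mode_presets`
// from the file above are in scope (e.g. inside its test module).
fn presets_smoke_check() {
    // Opt Default mode into `request_user_input` via the feature flag.
    let config = CollaborationModesConfig {
        default_mode_request_user_input: true,
    };

    let presets = builtin_collaboration_mode_presets(config);

    // Exactly two presets are built: Plan first, then Default.
    assert!(presets.len() == 2);
    assert!(matches!(presets[0].mode, Some(ModeKind::Plan)));
    assert!(matches!(presets[1].mode, Some(ModeKind::Default)));
}
```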