add fast mode toggle (#13212)

- add a local Fast mode setting in codex-core (similar to how the model id
  is currently stored on disk locally)
- send `service_tier=priority` on requests when Fast is enabled (sketched
  below)
- add `/fast` in the TUI and persist the choice locally
- gate everything behind a `fast_mode` feature flag
Author: pash-openai
Date: 2026-03-02 20:29:33 -08:00
Committed by: GitHub
Parent: 56cc2c71f4
Commit: 2f5b01abd6
69 changed files with 929 additions and 127 deletions
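
Concretely, when Fast mode is on, the turn request carries a `service_tier`
field set to `priority`. A minimal sketch of the request body, assuming the
OpenAI API's JSON shape (all other fields elided):

{
  "model": "...",
  "service_tier": "priority"
}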


@@ -62,6 +62,7 @@ use codex_protocol::config_types::ForcedLoginMethod;
 use codex_protocol::config_types::Personality;
 use codex_protocol::config_types::ReasoningSummary;
 use codex_protocol::config_types::SandboxMode;
+use codex_protocol::config_types::ServiceTier;
 use codex_protocol::config_types::TrustLevel;
 use codex_protocol::config_types::Verbosity;
 use codex_protocol::config_types::WebSearchMode;
@@ -185,6 +186,9 @@ pub struct Config {
     /// Optional override of model selection.
     pub model: Option<String>,
+    /// Effective service tier preference for new turns.
+    pub service_tier: Option<ServiceTier>,
     /// Model used specifically for review sessions.
     pub review_model: Option<String>,
@@ -1184,6 +1188,9 @@ pub struct ConfigToml {
     /// Optionally specify a personality for the model
    pub personality: Option<Personality>,
+    /// Optional explicit service tier preference for new turns.
+    pub service_tier: Option<ServiceTier>,
     /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
     pub chatgpt_base_url: Option<String>,
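
For users, the new ConfigToml field pairs with the feature flag. A sketch of
a config.toml that opts in (the `[features]` table and `fast_mode` key are
taken from the test at the bottom of this diff; the "fast" string is an
assumption based on the `ServiceTier::Fast` variant):

service_tier = "fast"

[features]
fast_mode = true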
@@ -1948,6 +1955,14 @@ impl Config {
         let forced_login_method = cfg.forced_login_method;
         let model = model.or(config_profile.model).or(cfg.model);
+        let service_tier = if features.enabled(Feature::FastMode) {
+            config_profile
+                .service_tier
+                .or(cfg.service_tier)
+                .filter(|tier| matches!(tier, ServiceTier::Fast))
+        } else {
+            None
+        };
         let compact_prompt = compact_prompt.or(cfg.compact_prompt).and_then(|value| {
             let trimmed = value.trim();
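
The `ServiceTier` definition itself is not part of this diff. For the
`matches!(tier, ServiceTier::Fast)` filter above to compile, and for the
legacy test below to deserialize "standard", the enum plausibly looks
something like this (the derives and serde attributes are assumptions):

use serde::Deserialize;
use serde::Serialize;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ServiceTier {
    /// Legacy value; accepted on load but dropped during config resolution.
    Standard,
    /// Opts new turns into priority processing.
    Fast,
}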
@@ -2094,6 +2109,7 @@ impl Config {
         let config = Self {
             model,
+            service_tier,
             review_model,
             model_context_window: cfg.model_context_window,
             model_auto_compact_token_limit: cfg.model_auto_compact_token_limit,
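
Downstream, the request layer has to translate the stored tier into the wire
value named in the commit message. A hypothetical helper, using the enum
sketched above (no function by this name appears in the diff):

/// Map the configured tier to the `service_tier` string sent on requests.
/// Fast mode is expressed to the API as the "priority" service tier.
fn wire_service_tier(tier: Option<ServiceTier>) -> Option<&'static str> {
    match tier {
        Some(ServiceTier::Fast) => Some("priority"),
        _ => None,
    }
}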
@@ -4878,6 +4894,7 @@ model_verbosity = "high"
             review_model: None,
             model_context_window: None,
             model_auto_compact_token_limit: None,
+            service_tier: None,
             model_provider_id: "openai".to_string(),
             model_provider: fixture.openai_provider.clone(),
             permissions: Permissions {
@@ -5007,6 +5024,7 @@ model_verbosity = "high"
             review_model: None,
             model_context_window: None,
             model_auto_compact_token_limit: None,
+            service_tier: None,
             model_provider_id: "openai-custom".to_string(),
             model_provider: fixture.openai_custom_provider.clone(),
             permissions: Permissions {
@@ -5134,6 +5152,7 @@ model_verbosity = "high"
             review_model: None,
             model_context_window: None,
             model_auto_compact_token_limit: None,
+            service_tier: None,
             model_provider_id: "openai".to_string(),
             model_provider: fixture.openai_provider.clone(),
             permissions: Permissions {
@@ -5247,6 +5266,7 @@ model_verbosity = "high"
             review_model: None,
             model_context_window: None,
             model_auto_compact_token_limit: None,
+            service_tier: None,
             model_provider_id: "openai".to_string(),
             model_provider: fixture.openai_provider.clone(),
             permissions: Permissions {
@@ -5635,6 +5655,33 @@ trust_level = "untrusted"
     Ok(())
 }
+
+#[test]
+fn legacy_standard_service_tier_loads_as_default_none() -> anyhow::Result<()> {
+    let codex_home = TempDir::new()?;
+    let cfg = toml::from_str::<ConfigToml>(
+        r#"
+service_tier = "standard"
+[features]
+fast_mode = true
+"#,
+    )
+    .expect("TOML deserialization should succeed");
+    let config = Config::load_from_base_config_with_overrides(
+        cfg,
+        ConfigOverrides {
+            cwd: Some(codex_home.path().to_path_buf()),
+            ..Default::default()
+        },
+        codex_home.path().to_path_buf(),
+    )?;
+    assert_eq!(config.service_tier, None);
+    Ok(())
+}
+
 #[test]
 fn derive_sandbox_policy_falls_back_to_constraint_value_for_implicit_defaults()
 -> anyhow::Result<()> {
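
A natural companion to the legacy test would assert the positive path: with
the feature enabled and a fast tier configured, the resolved config keeps it.
A sketch reusing the helpers from the test above (again assuming "fast" as
the serialized name of `ServiceTier::Fast`; this test is not in the commit):

#[test]
fn fast_service_tier_survives_when_feature_enabled() -> anyhow::Result<()> {
    let codex_home = TempDir::new()?;
    let cfg = toml::from_str::<ConfigToml>(
        r#"
service_tier = "fast"
[features]
fast_mode = true
"#,
    )?;
    let config = Config::load_from_base_config_with_overrides(
        cfg,
        ConfigOverrides {
            cwd: Some(codex_home.path().to_path_buf()),
            ..Default::default()
        },
        codex_home.path().to_path_buf(),
    )?;
    assert_eq!(config.service_tier, Some(ServiceTier::Fast));
    Ok(())
}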