Mirror of https://github.com/openai/codex.git, synced 2026-04-28 08:34:54 +00:00.
add fast mode toggle (#13212)
- Add a local Fast mode setting in `codex-core` (stored on disk locally, similar to how the model id is currently persisted).
- Send `service_tier=priority` on requests when Fast mode is enabled.
- Add a `/fast` command in the TUI and persist the choice locally.
- Gate the behavior behind a feature flag.
This commit is contained in:
@@ -4,6 +4,7 @@ use codex_core::config::types::Personality;
|
||||
use codex_core::features::Feature;
|
||||
use codex_core::models_manager::manager::RefreshStrategy;
|
||||
use codex_protocol::config_types::ReasoningSummary;
|
||||
use codex_protocol::config_types::ServiceTier;
|
||||
use codex_protocol::openai_models::ConfigShellToolType;
|
||||
use codex_protocol::openai_models::InputModality;
|
||||
use codex_protocol::openai_models::ModelInfo;
|
||||
@@ -59,6 +60,7 @@ async fn model_change_appends_model_instructions_developer_message() -> Result<(
|
||||
model: test.session_configured.model.clone(),
|
||||
effort: test.config.model_reasoning_effort,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -74,6 +76,7 @@ async fn model_change_appends_model_instructions_developer_message() -> Result<(
|
||||
model: Some(next_model.to_string()),
|
||||
effort: None,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -92,6 +95,7 @@ async fn model_change_appends_model_instructions_developer_message() -> Result<(
|
||||
model: next_model.to_string(),
|
||||
effort: test.config.model_reasoning_effort,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -147,6 +151,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
|
||||
model: test.session_configured.model.clone(),
|
||||
effort: test.config.model_reasoning_effort,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -162,6 +167,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
|
||||
model: Some(next_model.to_string()),
|
||||
effort: None,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: Some(Personality::Pragmatic),
|
||||
})
|
||||
@@ -180,6 +186,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
|
||||
model: next_model.to_string(),
|
||||
effort: test.config.model_reasoning_effort,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -207,6 +214,36 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn service_tier_change_is_applied_on_next_http_turn() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = start_mock_server().await;
|
||||
let resp_mock = mount_sse_sequence(
|
||||
&server,
|
||||
vec![sse_completed("resp-1"), sse_completed("resp-2")],
|
||||
)
|
||||
.await;
|
||||
|
||||
let test = test_codex().build(&server).await?;
|
||||
|
||||
test.submit_turn_with_service_tier("fast turn", Some(ServiceTier::Fast))
|
||||
.await?;
|
||||
test.submit_turn_with_service_tier("standard turn", None)
|
||||
.await?;
|
||||
|
||||
let requests = resp_mock.requests();
|
||||
assert_eq!(requests.len(), 2, "expected two model requests");
|
||||
|
||||
let first_body = requests[0].body_json();
|
||||
let second_body = requests[1].body_json();
|
||||
|
||||
assert_eq!(first_body["service_tier"].as_str(), Some("priority"));
|
||||
assert_eq!(second_body.get("service_tier"), None);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
@@ -296,6 +333,7 @@ async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<
|
||||
model: image_model_slug.to_string(),
|
||||
effort: test.config.model_reasoning_effort,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -315,6 +353,7 @@ async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<
|
||||
model: text_model_slug.to_string(),
|
||||
effort: test.config.model_reasoning_effort,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -474,6 +513,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
|
||||
model: large_model_slug.to_string(),
|
||||
effort: test.config.model_reasoning_effort,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -511,6 +551,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
|
||||
model: Some(smaller_model_slug.to_string()),
|
||||
effort: None,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
@@ -529,6 +570,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
|
||||
model: smaller_model_slug.to_string(),
|
||||
effort: test.config.model_reasoning_effort,
|
||||
summary: None,
|
||||
service_tier: None,
|
||||
collaboration_mode: None,
|
||||
personality: None,
|
||||
})
|
||||
|
||||
Reference in New Issue
Block a user