add fast mode toggle (#13212)

- add a local Fast mode setting in codex-core (similar to how the model id
is currently stored locally on disk)
- send `service_tier=priority` on requests when Fast is enabled
- add `/fast` in the TUI and persist it locally
- gate the new behavior behind a feature flag
This commit is contained in:
pash-openai
2026-03-02 20:29:33 -08:00
committed by GitHub
parent 56cc2c71f4
commit 2f5b01abd6
69 changed files with 929 additions and 127 deletions

View File

@@ -1659,6 +1659,7 @@ async fn auto_compact_runs_after_resume_when_token_usage_is_over_limit() {
model: resumed.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1748,6 +1749,7 @@ async fn pre_sampling_compact_runs_on_switch_to_smaller_context_model() {
model: previous_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1771,6 +1773,7 @@ async fn pre_sampling_compact_runs_on_switch_to_smaller_context_model() {
model: next_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1880,6 +1883,7 @@ async fn pre_sampling_compact_runs_after_resume_and_switch_to_smaller_model() {
model: previous_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1927,6 +1931,7 @@ async fn pre_sampling_compact_runs_after_resume_and_switch_to_smaller_model() {
model: next_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -3012,6 +3017,7 @@ async fn snapshot_request_shape_pre_turn_compaction_including_incoming_user_mess
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -3128,6 +3134,7 @@ async fn snapshot_request_shape_pre_turn_compaction_strips_incoming_model_switch
model: previous_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -3151,6 +3158,7 @@ async fn snapshot_request_shape_pre_turn_compaction_strips_incoming_model_switch
model: next_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})