add fast mode toggle (#13212)

- add a local Fast mode setting in codex-core (similar to how the model ID is currently stored on disk locally)
- send `service_tier=priority` on requests when Fast is enabled (see the sketch below)
- add `/fast` in the TUI and persist the setting locally
- gate the new behavior behind a feature flag
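
For illustration, a minimal sketch of the wire behavior described above, assuming hypothetical types: the `ServiceTier` and `ResponsesRequest` definitions, field names, and model string below are stand-ins, not the actual codex-core structs. The idea is simply that Fast serializes as `service_tier: "priority"` and the field is omitted entirely when Fast is off.

```rust
use serde::Serialize;

// Hypothetical stand-in for the Fast-mode tier; only the Fast variant matters here.
#[derive(Serialize)]
enum ServiceTier {
    // Fast is spelled "priority" on the wire.
    #[serde(rename = "priority")]
    Fast,
}

// Hypothetical request payload; the service_tier field mirrors what the tests assert.
#[derive(Serialize)]
struct ResponsesRequest<'a> {
    model: &'a str,
    input: &'a str,
    // Omitted entirely when Fast is off, so standard turns carry no service_tier.
    #[serde(skip_serializing_if = "Option::is_none")]
    service_tier: Option<ServiceTier>,
}

fn main() {
    let fast = ResponsesRequest {
        model: "example-model",
        input: "hello",
        service_tier: Some(ServiceTier::Fast),
    };
    let standard = ResponsesRequest {
        model: "example-model",
        input: "hello",
        service_tier: None,
    };
    // {"model":"example-model","input":"hello","service_tier":"priority"}
    println!("{}", serde_json::to_string(&fast).unwrap());
    // {"model":"example-model","input":"hello"}
    println!("{}", serde_json::to_string(&standard).unwrap());
}
```

Skipping the field when unset matches the assertions in the tests below: `service_tier` is `"priority"` on Fast turns and absent from standard turns.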
This commit is contained in:
pash-openai
2026-03-02 20:29:33 -08:00
committed by GitHub
parent 56cc2c71f4
commit 2f5b01abd6
69 changed files with 929 additions and 127 deletions

View File

@@ -1,5 +1,6 @@
use anyhow::Result;
use codex_core::features::Feature;
use codex_protocol::config_types::ServiceTier;
use core_test_support::responses::WebSocketConnectionConfig;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
@@ -244,3 +245,171 @@ async fn websocket_v2_test_codex_shell_chain() -> Result<()> {
server.shutdown().await;
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn websocket_v2_first_turn_uses_updated_fast_tier_after_startup_prewarm() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_websocket_server(vec![vec![
vec![ev_response_created("warm-1"), ev_done_with_id("warm-1")],
vec![
ev_response_created("resp-1"),
ev_assistant_message("msg-1", "fast"),
ev_completed("resp-1"),
],
]])
.await;
let mut builder = test_codex().with_config(|config| {
config.features.enable(Feature::ResponsesWebsocketsV2);
});
let test = builder.build_with_websocket_server(&server).await?;
let warmup = server.wait_for_request(0, 0).await.body_json();
assert_eq!(warmup["type"].as_str(), Some("response.create"));
assert_eq!(warmup["generate"].as_bool(), Some(false));
assert_eq!(warmup.get("service_tier"), None);
test.submit_turn_with_service_tier("hello", Some(ServiceTier::Fast))
.await?;
assert_eq!(server.handshakes().len(), 1);
let connection = server.single_connection();
assert_eq!(connection.len(), 2);
let first_turn = connection
.get(1)
.expect("missing first turn request")
.body_json();
assert_eq!(first_turn["type"].as_str(), Some("response.create"));
assert_eq!(first_turn["service_tier"].as_str(), Some("priority"));
assert_eq!(first_turn.get("previous_response_id"), None);
assert!(
first_turn
.get("input")
.and_then(Value::as_array)
.is_some_and(|items| !items.is_empty())
);
server.shutdown().await;
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn websocket_v2_first_turn_drops_fast_tier_after_startup_prewarm() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_websocket_server(vec![vec![
vec![ev_response_created("warm-1"), ev_done_with_id("warm-1")],
vec![
ev_response_created("resp-1"),
ev_assistant_message("msg-1", "standard"),
ev_completed("resp-1"),
],
]])
.await;
let mut builder = test_codex().with_config(|config| {
config.features.enable(Feature::ResponsesWebsocketsV2);
config.service_tier = Some(ServiceTier::Fast);
});
let test = builder.build_with_websocket_server(&server).await?;
let warmup = server.wait_for_request(0, 0).await.body_json();
assert_eq!(warmup["type"].as_str(), Some("response.create"));
assert_eq!(warmup["generate"].as_bool(), Some(false));
assert_eq!(warmup["service_tier"].as_str(), Some("priority"));
test.submit_turn_with_service_tier("hello", None).await?;
assert_eq!(server.handshakes().len(), 1);
let connection = server.single_connection();
assert_eq!(connection.len(), 2);
let first_turn = connection
.get(1)
.expect("missing first turn request")
.body_json();
assert_eq!(first_turn["type"].as_str(), Some("response.create"));
assert_eq!(first_turn.get("service_tier"), None);
assert_eq!(first_turn.get("previous_response_id"), None);
assert!(
first_turn
.get("input")
.and_then(Value::as_array)
.is_some_and(|items| !items.is_empty())
);
server.shutdown().await;
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn websocket_v2_next_turn_uses_updated_service_tier() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_websocket_server(vec![vec![
vec![ev_response_created("warm-1"), ev_done_with_id("warm-1")],
vec![
ev_response_created("resp-1"),
ev_assistant_message("msg-1", "fast"),
ev_completed("resp-1"),
],
vec![
ev_response_created("resp-2"),
ev_assistant_message("msg-2", "standard"),
ev_completed("resp-2"),
],
]])
.await;
let mut builder = test_codex().with_config(|config| {
config.features.enable(Feature::ResponsesWebsocketsV2);
});
let test = builder.build_with_websocket_server(&server).await?;
let warmup = server.wait_for_request(0, 0).await.body_json();
assert_eq!(warmup["type"].as_str(), Some("response.create"));
assert_eq!(warmup["generate"].as_bool(), Some(false));
assert_eq!(warmup.get("service_tier"), None);
test.submit_turn_with_service_tier("first", Some(ServiceTier::Fast))
.await?;
test.submit_turn_with_service_tier("second", None).await?;
assert_eq!(server.handshakes().len(), 1);
let connection = server.single_connection();
assert_eq!(connection.len(), 3);
let first_turn = connection
.get(1)
.expect("missing first turn request")
.body_json();
let second_turn = connection
.get(2)
.expect("missing second turn request")
.body_json();
assert_eq!(first_turn["type"].as_str(), Some("response.create"));
assert_eq!(first_turn["service_tier"].as_str(), Some("priority"));
assert_eq!(first_turn.get("previous_response_id"), None);
assert!(
first_turn
.get("input")
.and_then(Value::as_array)
.is_some_and(|items| !items.is_empty())
);
assert_eq!(second_turn["type"].as_str(), Some("response.create"));
assert_eq!(second_turn.get("service_tier"), None);
assert_eq!(second_turn.get("previous_response_id"), None);
assert!(
second_turn
.get("input")
.and_then(Value::as_array)
.is_some_and(|items| !items.is_empty())
);
server.shutdown().await;
Ok(())
}

View File

@@ -312,6 +312,7 @@ async fn apply_patch_cli_move_without_content_change_has_no_turn_diff(
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -907,6 +908,7 @@ async fn apply_patch_shell_command_heredoc_with_cd_emits_turn_diff() -> Result<(
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -987,6 +989,7 @@ async fn apply_patch_shell_command_failure_propagates_error_and_skips_diff() ->
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1137,6 +1140,7 @@ async fn apply_patch_emits_turn_diff_event_with_unified_diff(
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1200,6 +1204,7 @@ async fn apply_patch_turn_diff_for_rename_with_content_change(
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1271,6 +1276,7 @@ async fn apply_patch_aggregates_diff_across_multiple_tool_calls() -> Result<()>
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1342,6 +1348,7 @@ async fn apply_patch_aggregates_diff_preserves_success_after_failure() -> Result
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -554,6 +554,7 @@ async fn submit_turn(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -1128,6 +1128,7 @@ async fn user_turn_collaboration_mode_overrides_model_and_effort() -> anyhow::Re
.model_reasoning_summary
.unwrap_or(ReasoningSummary::Auto),
),
service_tier: None,
collaboration_mode: Some(collaboration_mode),
final_output_json_schema: None,
personality: None,
@@ -1240,6 +1241,7 @@ async fn user_turn_explicit_reasoning_summary_overrides_model_catalog_default()
model: session_configured.model,
effort: None,
summary: Some(ReasoningSummary::Concise),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@@ -1718,6 +1720,7 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
effort,
summary.unwrap_or(ReasoningSummary::Auto),
None,
None,
)
.await
.expect("responses stream to start");

View File

@@ -16,6 +16,7 @@ use codex_otel::metrics::MetricsConfig;
use codex_protocol::ThreadId;
use codex_protocol::account::PlanType;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::models::BaseInstructions;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
@@ -140,6 +141,7 @@ async fn responses_websocket_request_prewarm_reuses_connection() {
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket prewarm failed");
@@ -220,6 +222,7 @@ async fn responses_websocket_preconnect_is_reused_even_with_header_changes() {
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@@ -257,6 +260,7 @@ async fn responses_websocket_request_prewarm_is_reused_even_with_header_changes(
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket prewarm failed");
@@ -268,6 +272,7 @@ async fn responses_websocket_request_prewarm_is_reused_even_with_header_changes(
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@@ -320,6 +325,7 @@ async fn responses_websocket_prewarm_uses_v2_when_model_prefers_websockets_and_f
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket prewarm failed");
@@ -693,6 +699,7 @@ async fn responses_websocket_emits_reasoning_included_event() {
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@@ -764,6 +771,7 @@ async fn responses_websocket_emits_rate_limit_events() {
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@@ -1054,6 +1062,7 @@ async fn responses_websocket_forwards_turn_metadata_on_create_and_append() {
&mut client_session,
&harness,
&prompt_one,
None,
Some(first_turn_metadata),
)
.await;
@@ -1061,6 +1070,7 @@ async fn responses_websocket_forwards_turn_metadata_on_create_and_append() {
&mut client_session,
&harness,
&prompt_two,
None,
Some(enriched_turn_metadata),
)
.await;
@@ -1324,6 +1334,7 @@ async fn responses_websocket_v2_after_error_uses_full_create_without_previous_re
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@@ -1555,13 +1566,24 @@ async fn stream_until_complete(
harness: &WebsocketTestHarness,
prompt: &Prompt,
) {
stream_until_complete_with_turn_metadata(client_session, harness, prompt, None).await;
stream_until_complete_with_service_tier(client_session, harness, prompt, None).await;
}
async fn stream_until_complete_with_service_tier(
client_session: &mut ModelClientSession,
harness: &WebsocketTestHarness,
prompt: &Prompt,
service_tier: Option<ServiceTier>,
) {
stream_until_complete_with_turn_metadata(client_session, harness, prompt, service_tier, None)
.await;
}
async fn stream_until_complete_with_turn_metadata(
client_session: &mut ModelClientSession,
harness: &WebsocketTestHarness,
prompt: &Prompt,
service_tier: Option<ServiceTier>,
turn_metadata_header: Option<&str>,
) {
let mut stream = client_session
@@ -1571,6 +1593,7 @@ async fn stream_until_complete_with_turn_metadata(
&harness.otel_manager,
harness.effort,
harness.summary,
service_tier,
turn_metadata_header,
)
.await

View File

@@ -119,6 +119,7 @@ async fn user_input_includes_collaboration_instructions_after_override() -> Resu
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@@ -174,6 +175,7 @@ async fn collaboration_instructions_added_on_user_turn() -> Result<()> {
.model_reasoning_summary
.unwrap_or(codex_protocol::config_types::ReasoningSummary::Auto),
),
service_tier: None,
collaboration_mode: Some(collaboration_mode),
final_output_json_schema: None,
personality: None,
@@ -213,6 +215,7 @@ async fn override_then_next_turn_uses_updated_collaboration_instructions() -> Re
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@@ -263,6 +266,7 @@ async fn user_turn_overrides_collaboration_instructions_after_override() -> Resu
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(base_mode),
personality: None,
})
@@ -284,6 +288,7 @@ async fn user_turn_overrides_collaboration_instructions_after_override() -> Resu
.model_reasoning_summary
.unwrap_or(codex_protocol::config_types::ReasoningSummary::Auto),
),
service_tier: None,
collaboration_mode: Some(turn_mode),
final_output_json_schema: None,
personality: None,
@@ -330,6 +335,7 @@ async fn collaboration_mode_update_emits_new_instruction_message() -> Result<()>
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(first_text))),
personality: None,
})
@@ -355,6 +361,7 @@ async fn collaboration_mode_update_emits_new_instruction_message() -> Result<()>
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(second_text))),
personality: None,
})
@@ -409,6 +416,7 @@ async fn collaboration_mode_update_noop_does_not_append() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(collab_text))),
personality: None,
})
@@ -434,6 +442,7 @@ async fn collaboration_mode_update_noop_does_not_append() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(collab_text))),
personality: None,
})
@@ -487,6 +496,7 @@ async fn collaboration_mode_update_emits_new_instruction_message_when_mode_chang
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_mode_and_instructions(
ModeKind::Default,
Some(default_text),
@@ -515,6 +525,7 @@ async fn collaboration_mode_update_emits_new_instruction_message_when_mode_chang
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_mode_and_instructions(
ModeKind::Plan,
Some(plan_text),
@@ -572,6 +583,7 @@ async fn collaboration_mode_update_noop_does_not_append_when_mode_is_unchanged()
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_mode_and_instructions(
ModeKind::Default,
Some(collab_text),
@@ -600,6 +612,7 @@ async fn collaboration_mode_update_noop_does_not_append_when_mode_is_unchanged()
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_mode_and_instructions(
ModeKind::Default,
Some(collab_text),
@@ -663,6 +676,7 @@ async fn resume_replays_collaboration_instructions() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(collab_text))),
personality: None,
})
@@ -724,6 +738,7 @@ async fn empty_collaboration_instructions_are_ignored() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(CollaborationMode {
mode: ModeKind::Default,
settings: Settings {

View File

@@ -1659,6 +1659,7 @@ async fn auto_compact_runs_after_resume_when_token_usage_is_over_limit() {
model: resumed.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1748,6 +1749,7 @@ async fn pre_sampling_compact_runs_on_switch_to_smaller_context_model() {
model: previous_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1771,6 +1773,7 @@ async fn pre_sampling_compact_runs_on_switch_to_smaller_context_model() {
model: next_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1880,6 +1883,7 @@ async fn pre_sampling_compact_runs_after_resume_and_switch_to_smaller_model() {
model: previous_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1927,6 +1931,7 @@ async fn pre_sampling_compact_runs_after_resume_and_switch_to_smaller_model() {
model: next_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -3012,6 +3017,7 @@ async fn snapshot_request_shape_pre_turn_compaction_including_incoming_user_mess
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -3128,6 +3134,7 @@ async fn snapshot_request_shape_pre_turn_compaction_strips_incoming_model_switch
model: previous_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -3151,6 +3158,7 @@ async fn snapshot_request_shape_pre_turn_compaction_strips_incoming_model_switch
model: next_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -1926,6 +1926,7 @@ async fn snapshot_request_shape_remote_pre_turn_compaction_including_incoming_us
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2035,6 +2036,7 @@ async fn snapshot_request_shape_remote_pre_turn_compaction_strips_incoming_model
model: Some(next_model.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -55,6 +55,7 @@ async fn submit_user_turn(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode,
personality: None,
})
@@ -134,6 +135,7 @@ async fn execpolicy_blocks_shell_invocation() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -126,6 +126,7 @@ async fn copy_paste_local_image_persists_rollout_request_shape() -> anyhow::Resu
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -208,6 +209,7 @@ async fn drag_drop_image_persists_rollout_request_shape() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -378,6 +378,7 @@ async fn plan_mode_emits_plan_item_from_proposed_plan_block() -> anyhow::Result<
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@@ -453,6 +454,7 @@ async fn plan_mode_strips_plan_from_agent_messages() -> anyhow::Result<()> {
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@@ -560,6 +562,7 @@ async fn plan_mode_streaming_citations_are_stripped_across_added_deltas_and_done
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@@ -745,6 +748,7 @@ async fn plan_mode_streaming_proposed_plan_tag_split_across_added_and_delta_is_p
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@@ -857,6 +861,7 @@ async fn plan_mode_handles_missing_plan_close_tag() -> anyhow::Result<()> {
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})

View File

@@ -84,6 +84,7 @@ async fn codex_returns_json_result(model: String) -> anyhow::Result<()> {
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -65,6 +65,7 @@ async fn submit_skill_turn(test: &TestCodex, skill_path: PathBuf, prompt: &str)
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -33,6 +33,7 @@ async fn override_turn_context_does_not_persist_when_config_exists() {
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::High)),
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -69,6 +70,7 @@ async fn override_turn_context_does_not_create_config_file() {
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::Medium)),
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -4,6 +4,7 @@ use codex_core::config::types::Personality;
use codex_core::features::Feature;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::InputModality;
use codex_protocol::openai_models::ModelInfo;
@@ -59,6 +60,7 @@ async fn model_change_appends_model_instructions_developer_message() -> Result<(
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -74,6 +76,7 @@ async fn model_change_appends_model_instructions_developer_message() -> Result<(
model: Some(next_model.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -92,6 +95,7 @@ async fn model_change_appends_model_instructions_developer_message() -> Result<(
model: next_model.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -147,6 +151,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -162,6 +167,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
model: Some(next_model.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Pragmatic),
})
@@ -180,6 +186,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
model: next_model.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -207,6 +214,36 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn service_tier_change_is_applied_on_next_http_turn() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let resp_mock = mount_sse_sequence(
&server,
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;
let test = test_codex().build(&server).await?;
test.submit_turn_with_service_tier("fast turn", Some(ServiceTier::Fast))
.await?;
test.submit_turn_with_service_tier("standard turn", None)
.await?;
let requests = resp_mock.requests();
assert_eq!(requests.len(), 2, "expected two model requests");
let first_body = requests[0].body_json();
let second_body = requests[1].body_json();
assert_eq!(first_body["service_tier"].as_str(), Some("priority"));
assert_eq!(second_body.get("service_tier"), None);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<()> {
skip_if_no_network!(Ok(()));
@@ -296,6 +333,7 @@ async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<
model: image_model_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -315,6 +353,7 @@ async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<
model: text_model_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -474,6 +513,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
model: large_model_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -511,6 +551,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
model: Some(smaller_model_slug.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -529,6 +570,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
model: smaller_model_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -122,6 +122,7 @@ async fn snapshot_model_visible_layout_turn_overrides() -> Result<()> {
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -144,6 +145,7 @@ async fn snapshot_model_visible_layout_turn_overrides() -> Result<()> {
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@@ -221,6 +223,7 @@ async fn snapshot_model_visible_layout_cwd_change_does_not_refresh_agents() -> R
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -243,6 +246,7 @@ async fn snapshot_model_visible_layout_cwd_change_does_not_refresh_agents() -> R
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -348,6 +352,7 @@ async fn snapshot_model_visible_layout_resume_with_personality_change() -> Resul
model: resumed.session_configured.model.clone(),
effort: resumed.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@@ -436,6 +441,7 @@ async fn snapshot_model_visible_layout_resume_override_matches_rollout_model() -
model: Some("gpt-5.2".to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -100,6 +100,7 @@ async fn renews_cache_ttl_on_matching_models_etag() -> Result<()> {
model: test.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -106,6 +106,7 @@ async fn refresh_models_on_models_etag_mismatch_and_avoid_duplicate_models_fetch
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -121,6 +121,7 @@ async fn override_turn_context_without_user_turn_does_not_record_permissions_upd
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -161,6 +162,7 @@ async fn override_turn_context_without_user_turn_does_not_record_environment_upd
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -198,6 +200,7 @@ async fn override_turn_context_without_user_turn_does_not_record_collaboration_u
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})

View File

@@ -120,6 +120,7 @@ async fn permissions_message_added_on_override_change() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -262,6 +263,7 @@ async fn resume_replays_permissions_messages() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -361,6 +363,7 @@ async fn resume_and_fork_append_permissions_messages() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -98,6 +98,7 @@ async fn user_turn_personality_none_does_not_add_update_message() -> anyhow::Res
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -144,6 +145,7 @@ async fn config_personality_some_sets_instructions_template() -> anyhow::Result<
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -197,6 +199,7 @@ async fn config_personality_none_sends_no_personality() -> anyhow::Result<()> {
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -256,6 +259,7 @@ async fn default_personality_is_pragmatic_without_config_toml() -> anyhow::Resul
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -303,6 +307,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()>
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -319,6 +324,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()>
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@@ -337,6 +343,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()>
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -399,6 +406,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -415,6 +423,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Pragmatic),
})
@@ -433,6 +442,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -505,6 +515,7 @@ async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()>
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -521,6 +532,7 @@ async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()>
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Pragmatic),
})
@@ -539,6 +551,7 @@ async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()>
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -649,6 +662,7 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
model: remote_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@@ -761,6 +775,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
model: remote_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -777,6 +792,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@@ -795,6 +811,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
model: remote_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -413,6 +413,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an
model: None,
effort: Some(Some(ReasoningEffort::High)),
summary: Some(ReasoningSummary::Detailed),
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -494,6 +495,7 @@ async fn override_before_first_turn_emits_environment_context() -> anyhow::Resul
model: Some("gpt-5.1-codex".to_string()),
effort: Some(Some(ReasoningEffort::Low)),
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@@ -680,6 +682,7 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Res
model: "o3".to_string(),
effort: Some(ReasoningEffort::High),
summary: Some(ReasoningSummary::Detailed),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@@ -788,6 +791,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
model: default_model.clone(),
effort: default_effort,
summary: Some(default_summary.unwrap_or(ReasoningSummary::Auto)),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@@ -807,6 +811,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
model: default_model.clone(),
effort: default_effort,
summary: Some(default_summary.unwrap_or(ReasoningSummary::Auto)),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@@ -907,6 +912,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
model: default_model,
effort: default_effort,
summary: Some(default_summary.unwrap_or(ReasoningSummary::Auto)),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@@ -926,6 +932,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
model: "o3".to_string(),
effort: Some(ReasoningEffort::High),
summary: Some(ReasoningSummary::Detailed),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,

View File

@@ -177,6 +177,7 @@ async fn remote_models_long_model_slug_is_sent_with_high_reasoning() -> Result<(
model: requested_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -238,6 +239,7 @@ async fn namespaced_model_slug_uses_catalog_metadata_without_fallback_warning()
.model_reasoning_summary
.unwrap_or(ReasoningSummary::Auto),
),
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -355,6 +357,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
model: Some(REMOTE_MODEL_SLUG.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -392,6 +395,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
model: REMOTE_MODEL_SLUG.to_string(),
effort: None,
summary: Some(ReasoningSummary::Auto),
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -587,6 +591,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
model: Some(model.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -605,6 +610,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
model: model.to_string(),
effort: None,
summary: Some(ReasoningSummary::Auto),
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -135,6 +135,7 @@ async fn submit_turn(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -138,6 +138,7 @@ async fn request_user_input_round_trip_for_mode(mode: ModeKind) -> anyhow::Resul
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(CollaborationMode {
mode,
settings: Settings {
@@ -254,6 +255,7 @@ where
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})

View File

@@ -340,6 +340,7 @@ async fn resume_model_switch_is_not_duplicated_after_pre_turn_override() -> Resu
model: Some("gpt-5.1-codex-max".to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -830,6 +830,7 @@ async fn review_uses_overridden_cwd_for_base_branch_merge_base() {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -130,6 +130,7 @@ async fn stdio_server_round_trip() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -298,6 +299,7 @@ async fn stdio_image_responses_round_trip() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -498,6 +500,7 @@ async fn stdio_image_responses_are_sanitized_for_text_only_model() -> anyhow::Re
model: text_only_model_slug.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -611,6 +614,7 @@ async fn stdio_server_propagates_whitelisted_env_vars() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -771,6 +775,7 @@ async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -991,6 +996,7 @@ async fn streamable_http_with_oauth_round_trip_impl() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -49,6 +49,7 @@ async fn openai_model_header_mismatch_emits_warning_event_and_warning_item() ->
model: REQUESTED_MODEL.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -146,6 +147,7 @@ async fn response_model_field_mismatch_emits_warning_when_header_matches_request
model: REQUESTED_MODEL.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -230,6 +232,7 @@ async fn openai_model_header_mismatch_only_emits_one_warning_per_turn() -> Resul
model: REQUESTED_MODEL.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -278,6 +281,7 @@ async fn openai_model_header_casing_only_mismatch_does_not_warn() -> Result<()>
model: REQUESTED_MODEL.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -162,6 +162,7 @@ async fn run_snapshot_command_with_options(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -248,6 +249,7 @@ async fn run_shell_command_snapshot_with_options(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -317,6 +319,7 @@ async fn run_tool_turn_on_harness(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -535,6 +538,7 @@ async fn shell_command_snapshot_still_intercepts_apply_patch() -> Result<()> {
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -67,6 +67,7 @@ async fn submit_turn_with_policies(
model: test.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -78,6 +78,7 @@ async fn user_turn_includes_skill_instructions() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -381,6 +381,7 @@ async fn mcp_call_marks_thread_memory_mode_polluted_when_configured() -> Result<
model: test.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -89,6 +89,7 @@ async fn shell_tool_executes_command_and_streams_output() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -158,6 +159,7 @@ async fn update_plan_tool_emits_plan_update_event() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -237,6 +239,7 @@ async fn update_plan_tool_rejects_malformed_payload() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -328,6 +331,7 @@ async fn apply_patch_tool_executes_and_emits_patch_events() -> anyhow::Result<()
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -427,6 +431,7 @@ async fn apply_patch_reports_parse_diagnostics() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -46,6 +46,7 @@ async fn run_turn(test: &TestCodex, prompt: &str) -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -361,6 +362,7 @@ async fn shell_tools_start_before_response_completed_when_stream_delayed() -> an
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -490,6 +490,7 @@ async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -209,6 +209,7 @@ async fn unified_exec_intercepts_apply_patch_exec_command() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -338,6 +339,7 @@ async fn unified_exec_emits_exec_command_begin_event() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -416,6 +418,7 @@ async fn unified_exec_resolves_relative_workdir() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -497,6 +500,7 @@ async fn unified_exec_respects_workdir_override() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -590,6 +594,7 @@ async fn unified_exec_emits_exec_command_end_event() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -665,6 +670,7 @@ async fn unified_exec_emits_output_delta_for_exec_command() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -741,6 +747,7 @@ async fn unified_exec_full_lifecycle_with_background_end_event() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -871,6 +878,7 @@ async fn unified_exec_emits_terminal_interaction_for_write_stdin() -> Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1008,6 +1016,7 @@ async fn unified_exec_terminal_interaction_captures_delayed_output() -> Result<(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1168,6 +1177,7 @@ async fn unified_exec_emits_one_begin_and_one_end_event() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1266,6 +1276,7 @@ async fn exec_command_reports_chunk_and_exit_metadata() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1384,6 +1395,7 @@ async fn unified_exec_defaults_to_pipe() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1474,6 +1486,7 @@ async fn unified_exec_can_enable_tty() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1555,6 +1568,7 @@ async fn unified_exec_respects_early_exit_notifications() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1686,6 +1700,7 @@ async fn write_stdin_returns_exit_metadata_and_clears_session() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1854,6 +1869,7 @@ async fn unified_exec_emits_end_event_when_session_dies_via_stdin() -> Result<()
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -1931,6 +1947,7 @@ async fn unified_exec_keeps_long_running_session_after_turn_end() -> Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2019,6 +2036,7 @@ async fn unified_exec_interrupt_terminates_long_running_session() -> Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2116,6 +2134,7 @@ async fn unified_exec_reuses_session_via_stdin() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2251,6 +2270,7 @@ PY
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2365,6 +2385,7 @@ async fn unified_exec_timeout_and_followup_poll() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2461,6 +2482,7 @@ PY
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2543,6 +2565,7 @@ async fn unified_exec_runs_under_sandbox() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2647,6 +2670,7 @@ async fn unified_exec_python_prompt_under_seatbelt() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2742,6 +2766,7 @@ async fn unified_exec_runs_on_all_platforms() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -2877,6 +2902,7 @@ async fn unified_exec_prunes_exited_sessions_first() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -178,6 +178,7 @@ async fn user_shell_command_does_not_replace_active_turn() -> anyhow::Result<()>
model: fixture.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -113,6 +113,7 @@ async fn user_turn_with_local_image_attaches_image() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -215,6 +216,7 @@ async fn view_image_tool_attaches_local_image() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -345,6 +347,7 @@ console.log(out.output?.body?.text ?? "");
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -434,6 +437,7 @@ async fn view_image_tool_errors_when_path_is_directory() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -509,6 +513,7 @@ async fn view_image_tool_placeholder_for_non_image_files() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -601,6 +606,7 @@ async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -725,6 +731,7 @@ async fn view_image_tool_returns_unsupported_message_for_text_only_model() -> an
model: model_slug.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@@ -801,6 +808,7 @@ async fn replaces_invalid_local_image_after_bad_request() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View File

@@ -161,6 +161,7 @@ async fn websocket_fallback_hides_first_websocket_retry_stream_error() -> Result
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})