feat(core) update Personality on turn (#9644)

## Summary
Support updating Personality mid-Thread via UserTurn/OverrideTurnContext.
This is intentionally not yet used by any client, to keep the PRs small —
the app-server and TUI implementations will follow in separate PRs.

## Testing
- [x] added integration tests
This commit is contained in:
Dylan Hurd
2026-01-22 12:04:23 -08:00
committed by GitHub
parent 4210fb9e6c
commit 8b3521ee77
42 changed files with 604 additions and 4 deletions

View File

@@ -312,6 +312,7 @@ async fn apply_patch_cli_move_without_content_change_has_no_turn_diff(
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -900,6 +901,7 @@ async fn apply_patch_shell_command_heredoc_with_cd_emits_turn_diff() -> Result<(
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -979,6 +981,7 @@ async fn apply_patch_shell_command_failure_propagates_error_and_skips_diff() ->
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1128,6 +1131,7 @@ async fn apply_patch_emits_turn_diff_event_with_unified_diff(
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1190,6 +1194,7 @@ async fn apply_patch_turn_diff_for_rename_with_content_change(
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1260,6 +1265,7 @@ async fn apply_patch_aggregates_diff_across_multiple_tool_calls() -> Result<()>
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1330,6 +1336,7 @@ async fn apply_patch_aggregates_diff_preserves_success_after_failure() -> Result
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -502,6 +502,7 @@ async fn submit_turn(
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -907,6 +907,7 @@ async fn user_turn_collaboration_mode_overrides_model_and_effort() -> anyhow::Re
summary: config.model_reasoning_summary,
collaboration_mode: Some(collaboration_mode),
final_output_json_schema: None,
personality: None,
})
.await?;

View File

@@ -104,6 +104,7 @@ async fn user_input_includes_collaboration_instructions_after_override() -> Resu
effort: None,
summary: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
.await?;
@@ -151,6 +152,7 @@ async fn collaboration_instructions_added_on_user_turn() -> Result<()> {
summary: test.config.model_reasoning_summary,
collaboration_mode: Some(collaboration_mode),
final_output_json_schema: None,
personality: None,
})
.await?;
wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
@@ -183,6 +185,7 @@ async fn override_then_user_turn_uses_updated_collaboration_instructions() -> Re
effort: None,
summary: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
.await?;
@@ -200,6 +203,7 @@ async fn override_then_user_turn_uses_updated_collaboration_instructions() -> Re
summary: test.config.model_reasoning_summary,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
})
.await?;
wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
@@ -234,6 +238,7 @@ async fn user_turn_overrides_collaboration_instructions_after_override() -> Resu
effort: None,
summary: None,
collaboration_mode: Some(base_mode),
personality: None,
})
.await?;
@@ -251,6 +256,7 @@ async fn user_turn_overrides_collaboration_instructions_after_override() -> Resu
summary: test.config.model_reasoning_summary,
collaboration_mode: Some(turn_mode),
final_output_json_schema: None,
personality: None,
})
.await?;
wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
@@ -286,6 +292,7 @@ async fn collaboration_mode_update_emits_new_instruction_message() -> Result<()>
effort: None,
summary: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(first_text))),
personality: None,
})
.await?;
@@ -309,6 +316,7 @@ async fn collaboration_mode_update_emits_new_instruction_message() -> Result<()>
effort: None,
summary: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(second_text))),
personality: None,
})
.await?;
@@ -353,6 +361,7 @@ async fn collaboration_mode_update_noop_does_not_append() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(collab_text))),
personality: None,
})
.await?;
@@ -376,6 +385,7 @@ async fn collaboration_mode_update_noop_does_not_append() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(collab_text))),
personality: None,
})
.await?;
@@ -422,6 +432,7 @@ async fn resume_replays_collaboration_instructions() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(collab_text))),
personality: None,
})
.await?;
@@ -476,6 +487,7 @@ async fn empty_collaboration_instructions_are_ignored() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(""))),
personality: None,
})
.await?;

View File

@@ -1292,6 +1292,7 @@ async fn auto_compact_runs_after_resume_when_token_usage_is_over_limit() {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await
.unwrap();

View File

@@ -82,6 +82,7 @@ async fn execpolicy_blocks_shell_invocation() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -128,6 +128,7 @@ async fn copy_paste_local_image_persists_rollout_request_shape() -> anyhow::Resu
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -208,6 +209,7 @@ async fn drag_drop_image_persists_rollout_request_shape() -> anyhow::Result<()>
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -86,6 +86,7 @@ async fn codex_returns_json_result(model: String) -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -41,6 +41,7 @@ async fn override_turn_context_does_not_persist_when_config_exists() {
effort: Some(Some(ReasoningEffort::High)),
summary: None,
collaboration_mode: None,
personality: None,
})
.await
.expect("submit override");
@@ -84,6 +85,7 @@ async fn override_turn_context_does_not_create_config_file() {
effort: Some(Some(ReasoningEffort::Medium)),
summary: None,
collaboration_mode: None,
personality: None,
})
.await
.expect("submit override");

View File

@@ -99,6 +99,7 @@ async fn renews_cache_ttl_on_matching_models_etag() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -110,6 +110,7 @@ async fn refresh_models_on_models_etag_mismatch_and_avoid_duplicate_models_fetch
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -118,6 +118,7 @@ async fn override_turn_context_records_permissions_update() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -160,6 +161,7 @@ async fn override_turn_context_records_environment_update() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -196,6 +198,7 @@ async fn override_turn_context_records_collaboration_update() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
.await?;

View File

@@ -110,6 +110,7 @@ async fn permissions_message_added_on_override_change() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -226,6 +227,7 @@ async fn resume_replays_permissions_messages() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -303,6 +305,7 @@ async fn resume_and_fork_append_permissions_messages() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -1,8 +1,49 @@
use codex_core::config::types::Personality;
use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelInstructionsTemplate;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::PersonalityMessages;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_models_once;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::mount_sse_sequence;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::sync::Arc;
use tempfile::TempDir;
use tokio::time::Duration;
use tokio::time::Instant;
use tokio::time::sleep;
use wiremock::BodyPrintLimit;
use wiremock::MockServer;
// Friendly-personality wording the tests below expect to appear verbatim in
// request instructions / personality-update messages when remote models are
// disabled. NOTE(review): presumably mirrors the locally bundled Friendly
// template in core — confirm against the local model definitions.
const LOCAL_FRIENDLY_TEMPLATE: &str =
    "You optimize for team morale and being a supportive teammate as much as code quality.";
/// Builds an SSE payload for a response that is created and then immediately
/// completed, i.e. a turn with no intermediate output.
fn sse_completed(id: &str) -> String {
    let events = vec![ev_response_created(id), ev_completed(id)];
    sse(events)
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn model_personality_does_not_mutate_base_instructions_without_template() {
@@ -32,3 +73,362 @@ async fn base_instructions_override_disables_personality_template() {
"override instructions"
);
}
/// A user turn that leaves `personality` unset must not inject a
/// `<personality_spec>` update into the developer input.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_turn_personality_none_does_not_add_update_message() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    // One mocked SSE response is enough: the single turn completes immediately.
    let server = start_mock_server().await;
    let response_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
    let fixture = test_codex()
        .with_model("gpt-5.2-codex")
        .build(&server)
        .await?;

    // Submit a turn with `personality: None`.
    fixture
        .codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "hello".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: fixture.cwd_path().to_path_buf(),
            approval_policy: fixture.config.approval_policy.value(),
            sandbox_policy: SandboxPolicy::ReadOnly,
            model: fixture.session_configured.model.clone(),
            effort: fixture.config.model_reasoning_effort,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            personality: None,
        })
        .await?;
    wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    // No developer-role message should carry a personality spec.
    let request = response_mock.single_request();
    let developer_texts = request.message_input_texts("developer");
    assert!(
        developer_texts
            .iter()
            .all(|text| !text.contains("<personality_spec>")),
        "did not expect a personality update message when personality is None"
    );
    Ok(())
}
/// When the personality comes from config (not from the turn), it should be
/// baked into the request instructions and NOT emitted as a developer-role
/// `<personality_spec>` update message.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_personality_some_sets_instructions_template() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let response_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
    // Remote models are disabled so the locally bundled template is in play.
    let fixture = test_codex()
        .with_model("gpt-5.2-codex")
        .with_config(|config| {
            config.model_personality = Some(Personality::Friendly);
            config.features.disable(Feature::RemoteModels);
        })
        .build(&server)
        .await?;

    fixture
        .codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "hello".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: fixture.cwd_path().to_path_buf(),
            approval_policy: fixture.config.approval_policy.value(),
            sandbox_policy: SandboxPolicy::ReadOnly,
            model: fixture.session_configured.model.clone(),
            effort: fixture.config.model_reasoning_effort,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            personality: None,
        })
        .await?;
    wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let request = response_mock.single_request();
    // Personality text must appear in the instructions...
    let instructions_text = request.instructions_text();
    assert!(
        instructions_text.contains(LOCAL_FRIENDLY_TEMPLATE),
        "expected personality update to include the local friendly template, got: {instructions_text:?}"
    );
    // ...and must not additionally show up as a developer update message.
    let developer_texts = request.message_input_texts("developer");
    assert!(
        developer_texts
            .iter()
            .all(|text| !text.contains("<personality_spec>")),
        "expected no personality update message in developer input"
    );
    Ok(())
}
/// Overriding the personality mid-thread (via `OverrideTurnContext`) should
/// cause the NEXT turn's request to carry a `<personality_spec>` update message
/// built from the local friendly template.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    // Two turns -> two mocked SSE responses, served in order.
    let response_mock = mount_sse_sequence(
        &server,
        vec![sse_completed("resp-1"), sse_completed("resp-2")],
    )
    .await;
    let fixture = test_codex()
        .with_model("gpt-5.2-codex")
        .with_config(|config| {
            config.features.disable(Feature::RemoteModels);
        })
        .build(&server)
        .await?;

    // Both user turns are identical and leave `personality` unset; the override
    // submitted in between is what should trigger the update message.
    let user_turn = || Op::UserTurn {
        items: vec![UserInput::Text {
            text: "hello".into(),
            text_elements: Vec::new(),
        }],
        final_output_json_schema: None,
        cwd: fixture.cwd_path().to_path_buf(),
        approval_policy: fixture.config.approval_policy.value(),
        sandbox_policy: SandboxPolicy::ReadOnly,
        model: fixture.session_configured.model.clone(),
        effort: fixture.config.model_reasoning_effort,
        summary: ReasoningSummary::Auto,
        collaboration_mode: None,
        personality: None,
    };

    fixture.codex.submit(user_turn()).await?;
    wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    fixture
        .codex
        .submit(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: None,
            sandbox_policy: None,
            model: None,
            effort: None,
            summary: None,
            collaboration_mode: None,
            personality: Some(Personality::Friendly),
        })
        .await?;

    fixture.codex.submit(user_turn()).await?;
    wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    // Only the second request (after the override) should carry the update.
    let captured = response_mock.requests();
    assert_eq!(captured.len(), 2, "expected two requests");
    let request = captured
        .last()
        .expect("expected personality update request");
    let developer_texts = request.message_input_texts("developer");
    let personality_text = developer_texts
        .iter()
        .find(|text| text.contains("<personality_spec>"))
        .expect("expected personality update message in developer input");
    assert!(
        personality_text.contains("The user has requested a new communication style."),
        "expected personality update preamble, got {personality_text:?}"
    );
    assert!(
        personality_text.contains(LOCAL_FRIENDLY_TEMPLATE),
        "expected personality update to include the local friendly template, got: {personality_text:?}"
    );
    Ok(())
}
/// End-to-end check that a personality override pulls its wording from a
/// REMOTE model's instruction template (not the locally bundled one) when the
/// RemoteModels feature is enabled and the thread switches to that model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_turn_personality_remote_model_template_includes_update_message() -> anyhow::Result<()>
{
    skip_if_no_network!(Ok(()));
    // Raise the body print limit so failures show the (large) request payloads.
    let server = MockServer::builder()
        .body_print_limit(BodyPrintLimit::Limited(80_000))
        .start()
        .await;
    let remote_slug = "codex-remote-personality";
    let remote_personality_message = "Friendly from remote template";
    // Minimal remote model whose instruction template carries its own
    // Friendly-personality message, distinct from LOCAL_FRIENDLY_TEMPLATE.
    let remote_model = ModelInfo {
        slug: remote_slug.to_string(),
        display_name: "Remote personality test".to_string(),
        description: Some("Remote model with personality template".to_string()),
        default_reasoning_level: Some(ReasoningEffort::Medium),
        supported_reasoning_levels: vec![ReasoningEffortPreset {
            effort: ReasoningEffort::Medium,
            description: ReasoningEffort::Medium.to_string(),
        }],
        shell_type: ConfigShellToolType::UnifiedExec,
        visibility: ModelVisibility::List,
        supported_in_api: true,
        priority: 1,
        upgrade: None,
        base_instructions: "base instructions".to_string(),
        // The template substitutes the per-personality message into the
        // instructions via the `personality_message` placeholder.
        model_instructions_template: Some(ModelInstructionsTemplate {
            template: "Base instructions\n{{ personality_message }}\n".to_string(),
            personality_messages: Some(PersonalityMessages(BTreeMap::from([(
                Personality::Friendly,
                remote_personality_message.to_string(),
            )]))),
        }),
        supports_reasoning_summaries: false,
        support_verbosity: false,
        default_verbosity: None,
        apply_patch_tool_type: None,
        truncation_policy: TruncationPolicyConfig::bytes(10_000),
        supports_parallel_tool_calls: false,
        context_window: Some(128_000),
        auto_compact_token_limit: None,
        effective_context_window_percent: 95,
        experimental_supported_tools: Vec::new(),
    };
    // Serve the remote model from the mocked /models endpoint.
    let _models_mock = mount_models_once(
        &server,
        ModelsResponse {
            models: vec![remote_model],
        },
    )
    .await;
    // Two turns -> two mocked SSE responses, served in order.
    let resp_mock = mount_sse_sequence(
        &server,
        vec![sse_completed("resp-1"), sse_completed("resp-2")],
    )
    .await;
    // RemoteModels must be enabled for the mocked model list to be consulted;
    // the thread starts on a local model and switches later.
    let mut builder = test_codex()
        .with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
        .with_config(|config| {
            config.features.enable(Feature::RemoteModels);
            config.model = Some("gpt-5.2-codex".to_string());
        });
    let test = builder.build(&server).await?;
    // The remote model list is fetched asynchronously; poll until it lands.
    wait_for_model_available(
        &test.thread_manager.get_models_manager(),
        remote_slug,
        &test.config,
    )
    .await;
    // First turn on the original model, personality unset.
    test.codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "hello".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd_path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            model: test.session_configured.model.clone(),
            effort: test.config.model_reasoning_effort,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            personality: None,
        })
        .await?;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    // Switch to the remote model AND set the personality in one override.
    test.codex
        .submit(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: None,
            sandbox_policy: None,
            model: Some(remote_slug.to_string()),
            effort: None,
            summary: None,
            collaboration_mode: None,
            personality: Some(Personality::Friendly),
        })
        .await?;
    // Second turn now targets the remote model.
    test.codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "hello".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd_path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            model: remote_slug.to_string(),
            effort: test.config.model_reasoning_effort,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            personality: None,
        })
        .await?;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    // The post-override request must carry a personality update built from the
    // REMOTE template text, not the local one.
    let requests = resp_mock.requests();
    assert_eq!(requests.len(), 2, "expected two requests");
    let request = requests
        .last()
        .expect("expected personality update request");
    let developer_texts = request.message_input_texts("developer");
    let personality_text = developer_texts
        .iter()
        .find(|text| text.contains("<personality_spec>"))
        .expect("expected personality update message in developer input");
    assert!(
        personality_text.contains("The user has requested a new communication style."),
        "expected personality update preamble, got {personality_text:?}"
    );
    assert!(
        personality_text.contains(remote_personality_message),
        "expected personality update to include remote template, got: {personality_text:?}"
    );
    Ok(())
}
/// Polls the models manager until a model with `slug` appears, panicking if it
/// has not shown up within a 2-second budget. Used because the remote model
/// list is fetched asynchronously after the thread is built.
async fn wait_for_model_available(
    manager: &Arc<ModelsManager>,
    slug: &str,
    config: &codex_core::config::Config,
) {
    let started = Instant::now();
    let budget = Duration::from_secs(2);
    loop {
        let listed = manager
            .list_models(config, RefreshStrategy::OnlineIfUncached)
            .await;
        if listed.iter().any(|entry| entry.model == slug) {
            return;
        }
        if started.elapsed() >= budget {
            panic!("timed out waiting for the remote model {slug} to appear");
        }
        // Brief pause between polls to avoid a busy loop.
        sleep(Duration::from_millis(25)).await;
    }
}

View File

@@ -353,6 +353,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an
effort: Some(Some(ReasoningEffort::High)),
summary: Some(ReasoningSummary::Detailed),
collaboration_mode: None,
personality: None,
})
.await?;
@@ -426,6 +427,7 @@ async fn override_before_first_turn_emits_environment_context() -> anyhow::Resul
effort: Some(Some(ReasoningEffort::Low)),
summary: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
.await?;
@@ -583,6 +585,7 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Res
summary: ReasoningSummary::Detailed,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
})
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
@@ -677,6 +680,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
summary: default_summary,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
})
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
@@ -695,6 +699,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
summary: default_summary,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
})
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
@@ -775,6 +780,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
summary: default_summary,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
})
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
@@ -793,6 +799,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
summary: ReasoningSummary::Detailed,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
})
.await?;
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

View File

@@ -142,6 +142,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -178,6 +179,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -369,6 +371,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -386,6 +389,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -137,6 +137,7 @@ async fn request_user_input_round_trip_resolves_pending() -> anyhow::Result<()>
reasoning_effort: None,
developer_instructions: None,
})),
personality: None,
})
.await?;
@@ -249,6 +250,7 @@ where
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
.await?;

View File

@@ -26,6 +26,7 @@ fn resume_history(
approval_policy: config.approval_policy.value(),
sandbox_policy: config.sandbox_policy.get().clone(),
model: previous_model.to_string(),
personality: None,
collaboration_mode: None,
effort: config.model_reasoning_effort,
summary: config.model_reasoning_summary,

View File

@@ -822,6 +822,7 @@ async fn review_uses_overridden_cwd_for_base_branch_merge_base() {
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
})
.await
.unwrap();

View File

@@ -119,6 +119,7 @@ async fn stdio_server_round_trip() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -258,6 +259,7 @@ async fn stdio_image_responses_round_trip() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -455,6 +457,7 @@ async fn stdio_image_completions_round_trip() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -600,6 +603,7 @@ async fn stdio_server_propagates_whitelisted_env_vars() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -756,6 +760,7 @@ async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -944,6 +949,7 @@ async fn streamable_http_with_oauth_round_trip() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -101,6 +101,7 @@ async fn run_snapshot_command(command: &str) -> Result<SnapshotRun> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -174,6 +175,7 @@ async fn run_shell_command_snapshot(command: &str) -> Result<SnapshotRun> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -308,6 +310,7 @@ async fn shell_command_snapshot_still_intercepts_apply_patch() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -79,6 +79,7 @@ async fn user_turn_includes_skill_instructions() -> Result<()> {
effort: None,
summary: codex_protocol::config_types::ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -91,6 +91,7 @@ async fn shell_tool_executes_command_and_streams_output() -> anyhow::Result<()>
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -159,6 +160,7 @@ async fn update_plan_tool_emits_plan_update_event() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -237,6 +239,7 @@ async fn update_plan_tool_rejects_malformed_payload() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -327,6 +330,7 @@ async fn apply_patch_tool_executes_and_emits_patch_events() -> anyhow::Result<()
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -425,6 +429,7 @@ async fn apply_patch_reports_parse_diagnostics() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -48,6 +48,7 @@ async fn run_turn(test: &TestCodex, prompt: &str) -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -365,6 +366,7 @@ async fn shell_tools_start_before_response_completed_when_stream_delayed() -> an
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -548,6 +548,7 @@ async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -210,6 +210,7 @@ async fn unified_exec_intercepts_apply_patch_exec_command() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -338,6 +339,7 @@ async fn unified_exec_emits_exec_command_begin_event() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -415,6 +417,7 @@ async fn unified_exec_resolves_relative_workdir() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -495,6 +498,7 @@ async fn unified_exec_respects_workdir_override() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -587,6 +591,7 @@ async fn unified_exec_emits_exec_command_end_event() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -661,6 +666,7 @@ async fn unified_exec_emits_output_delta_for_exec_command() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -736,6 +742,7 @@ async fn unified_exec_full_lifecycle_with_background_end_event() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -865,6 +872,7 @@ async fn unified_exec_emits_terminal_interaction_for_write_stdin() -> Result<()>
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1001,6 +1009,7 @@ async fn unified_exec_terminal_interaction_captures_delayed_output() -> Result<(
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1160,6 +1169,7 @@ async fn unified_exec_emits_one_begin_and_one_end_event() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1257,6 +1267,7 @@ async fn exec_command_reports_chunk_and_exit_metadata() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1374,6 +1385,7 @@ async fn unified_exec_defaults_to_pipe() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1463,6 +1475,7 @@ async fn unified_exec_can_enable_tty() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1543,6 +1556,7 @@ async fn unified_exec_respects_early_exit_notifications() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1673,6 +1687,7 @@ async fn write_stdin_returns_exit_metadata_and_clears_session() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1840,6 +1855,7 @@ async fn unified_exec_emits_end_event_when_session_dies_via_stdin() -> Result<()
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -1916,6 +1932,7 @@ async fn unified_exec_closes_long_running_session_at_turn_end() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -2037,6 +2054,7 @@ async fn unified_exec_reuses_session_via_stdin() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -2171,6 +2189,7 @@ PY
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
// This is a worst case scenario for the truncate logic.
@@ -2284,6 +2303,7 @@ async fn unified_exec_timeout_and_followup_poll() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -2379,6 +2399,7 @@ PY
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -2460,6 +2481,7 @@ async fn unified_exec_runs_under_sandbox() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -2563,6 +2585,7 @@ async fn unified_exec_python_prompt_under_seatbelt() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -2657,6 +2680,7 @@ async fn unified_exec_runs_on_all_platforms() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -2791,6 +2815,7 @@ async fn unified_exec_prunes_exited_sessions_first() -> Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

View File

@@ -89,6 +89,7 @@ async fn user_turn_with_local_image_attaches_image() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -182,6 +183,7 @@ async fn view_image_tool_attaches_local_image() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -314,6 +316,7 @@ async fn view_image_tool_errors_when_path_is_directory() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -388,6 +391,7 @@ async fn view_image_tool_placeholder_for_non_image_files() -> anyhow::Result<()>
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -481,6 +485,7 @@ async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> {
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
@@ -563,6 +568,7 @@ async fn replaces_invalid_local_image_after_bad_request() -> anyhow::Result<()>
effort: None,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;