Compare commits

...

6 Commits

Author SHA1 Message Date
Dylan Hurd
31d5ea192b codex: fix CI failure on PR #13187
Update core test fixtures for plan-mode developer instructions and accept the resulting snapshot changes.

Co-authored-by: Codex <noreply@openai.com>
2026-03-06 23:41:35 -08:00
Dylan Hurd
17db82635d codex: fix CI failure on PR #13187
Co-authored-by: Codex <noreply@openai.com>
2026-03-06 23:27:07 -08:00
Dylan Hurd
66cad6a734 add integration test 2026-03-06 23:27:07 -08:00
Dylan Hurd
ae2bb962c7 fix test 2026-03-06 23:27:07 -08:00
Dylan Hurd
88a6b2b8ca rebase 2026-03-06 23:27:07 -08:00
Dylan Hurd
0f47454974 feat(config) Plan mode instructions 2026-03-06 23:27:07 -08:00
29 changed files with 264 additions and 33 deletions

View File

@@ -193,6 +193,7 @@ impl MessageProcessor {
.features
.enabled(codex_core::features::Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
));
thread_manager
.plugins_manager()

View File

@@ -521,6 +521,9 @@
"personality": {
"$ref": "#/definitions/Personality"
},
"plan_mode_developer_instructions": {
"type": "string"
},
"plan_mode_reasoning_effort": {
"$ref": "#/definitions/ReasoningEffort"
},
@@ -2158,6 +2161,9 @@
],
"description": "Optionally specify a personality for the model"
},
"plan_mode_developer_instructions": {
"type": "string"
},
"plan_mode_reasoning_effort": {
"$ref": "#/definitions/ReasoningEffort"
},

View File

@@ -302,6 +302,7 @@ use codex_async_utils::OrCancelExt;
use codex_otel::SessionTelemetry;
use codex_otel::TelemetryAuthMode;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::config_types::ServiceTier;
@@ -973,6 +974,26 @@ impl SessionConfiguration {
}
}
/// Merges a user-selected collaboration mode with the matching built-in
/// preset mask, filling in only the fields the caller left unset.
///
/// `mode` and `model` are always taken from `collaboration_mode`; only
/// `reasoning_effort` and `developer_instructions` fall back to the preset.
/// Preset fields are double-`Option` (outer = "mask provides a value",
/// inner = the value itself), hence the `flatten()` calls.
fn merge_collaboration_mode_with_preset(
collaboration_mode: CollaborationMode,
preset: &CollaborationModeMask,
) -> CollaborationMode {
CollaborationMode {
// The selected mode is never overridden by the preset.
mode: collaboration_mode.mode,
settings: Settings {
// Model choice passes through untouched as well.
model: collaboration_mode.settings.model,
// Caller-provided effort wins; otherwise use the preset's, if any.
reasoning_effort: collaboration_mode
.settings
.reasoning_effort
.or(preset.reasoning_effort.flatten()),
// `or_else` defers the clone of the preset instructions to the
// case where the caller supplied none of their own.
developer_instructions: collaboration_mode
.settings
.developer_instructions
.or_else(|| preset.developer_instructions.clone().flatten()),
},
}
}
#[derive(Default, Clone)]
pub(crate) struct SessionSettingsUpdate {
pub(crate) cwd: Option<PathBuf>,
@@ -988,6 +1009,23 @@ pub(crate) struct SessionSettingsUpdate {
}
impl Session {
/// Resolves a raw `SessionSettingsUpdate` against the known collaboration
/// mode presets before it is applied to session state.
fn resolve_session_settings_update(
&self,
mut updates: SessionSettingsUpdate,
) -> SessionSettingsUpdate {
// If the update carries a collaboration mode, find the built-in preset
// with the same `ModeKind` and merge its defaults into any fields the
// caller left unset; with no matching preset the mode passes through
// unchanged (the `map_or` fallback clone).
updates.collaboration_mode = updates.collaboration_mode.map(|collaboration_mode| {
self.services
.models_manager
.list_collaboration_modes()
.into_iter()
.find(|preset| preset.mode == Some(collaboration_mode.mode))
.map_or(collaboration_mode.clone(), |preset| {
merge_collaboration_mode_with_preset(collaboration_mode, &preset)
})
});
updates
}
/// Builds the `x-codex-beta-features` header value for this session.
///
/// `ModelClient` is session-scoped and intentionally does not depend on the full `Config`, so
@@ -2081,6 +2119,7 @@ impl Session {
&self,
updates: SessionSettingsUpdate,
) -> ConstraintResult<()> {
let updates = self.resolve_session_settings_update(updates);
let mut state = self.state.lock().await;
match state.session_configuration.apply(&updates) {
@@ -2113,6 +2152,7 @@ impl Session {
sub_id: String,
updates: SessionSettingsUpdate,
) -> ConstraintResult<Arc<TurnContext>> {
let updates = self.resolve_session_settings_update(updates);
let (
session_configuration,
sandbox_policy_changed,

View File

@@ -1899,6 +1899,7 @@ async fn session_new_fails_when_zsh_fork_enabled_without_zsh_path() {
auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
let model = ModelsManager::get_model_offline_for_tests(config.model.as_deref());
let model_info = ModelsManager::construct_model_info_offline_for_tests(model.as_str(), &config);
@@ -1987,6 +1988,7 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) {
auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
let agent_control = AgentControl::default();
let exec_policy = ExecPolicyManager::default();
@@ -2394,6 +2396,7 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx(
auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
let agent_control = AgentControl::default();
let exec_policy = ExecPolicyManager::default();

View File

@@ -3066,6 +3066,7 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> {
experimental_realtime_ws_startup_context: None,
base_instructions: None,
developer_instructions: None,
plan_mode_developer_instructions: None,
compact_prompt: None,
commit_attribution: None,
forced_chatgpt_workspace_id: None,
@@ -3201,6 +3202,7 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> {
experimental_realtime_ws_startup_context: None,
base_instructions: None,
developer_instructions: None,
plan_mode_developer_instructions: None,
compact_prompt: None,
commit_attribution: None,
forced_chatgpt_workspace_id: None,
@@ -3334,6 +3336,7 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> {
experimental_realtime_ws_startup_context: None,
base_instructions: None,
developer_instructions: None,
plan_mode_developer_instructions: None,
compact_prompt: None,
commit_attribution: None,
forced_chatgpt_workspace_id: None,
@@ -3453,6 +3456,7 @@ fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> {
experimental_realtime_ws_startup_context: None,
base_instructions: None,
developer_instructions: None,
plan_mode_developer_instructions: None,
compact_prompt: None,
commit_attribution: None,
forced_chatgpt_workspace_id: None,

View File

@@ -433,6 +433,10 @@ pub struct Config {
/// Plan preset. The `none` value means "no reasoning" (not "inherit the
/// global default").
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
/// Optional Plan-mode-specific developer instructions override.
///
/// When unset, Plan mode uses the built-in Plan preset instructions.
pub plan_mode_developer_instructions: Option<String>,
/// Optional value to use for `reasoning.summary` when making a request
/// using the Responses API. When unset, the model catalog default is used.
@@ -1201,6 +1205,7 @@ pub struct ConfigToml {
pub model_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_developer_instructions: Option<String>,
pub model_reasoning_summary: Option<ReasoningSummary>,
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,
@@ -2422,6 +2427,9 @@ impl Config {
plan_mode_reasoning_effort: config_profile
.plan_mode_reasoning_effort
.or(cfg.plan_mode_reasoning_effort),
plan_mode_developer_instructions: config_profile
.plan_mode_developer_instructions
.or(cfg.plan_mode_developer_instructions),
model_reasoning_summary: config_profile
.model_reasoning_summary
.or(cfg.model_reasoning_summary),

View File

@@ -28,6 +28,7 @@ pub struct ConfigProfile {
pub sandbox_mode: Option<SandboxMode>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_developer_instructions: Option<String>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
/// Optional path to a JSON model catalog (applied on startup only).

View File

@@ -23,17 +23,25 @@ pub struct CollaborationModesConfig {
pub(crate) fn builtin_collaboration_mode_presets(
collaboration_modes_config: CollaborationModesConfig,
plan_mode_developer_instructions: Option<&str>,
) -> Vec<CollaborationModeMask> {
vec![plan_preset(), default_preset(collaboration_modes_config)]
vec![
plan_preset(plan_mode_developer_instructions),
default_preset(collaboration_modes_config),
]
}
fn plan_preset() -> CollaborationModeMask {
fn plan_preset(plan_mode_developer_instructions: Option<&str>) -> CollaborationModeMask {
CollaborationModeMask {
name: ModeKind::Plan.display_name().to_string(),
mode: Some(ModeKind::Plan),
model: None,
reasoning_effort: Some(Some(ReasoningEffort::Medium)),
developer_instructions: Some(Some(COLLABORATION_MODE_PLAN.to_string())),
developer_instructions: Some(Some(
plan_mode_developer_instructions
.unwrap_or(COLLABORATION_MODE_PLAN)
.to_string(),
)),
}
}
@@ -109,17 +117,27 @@ mod tests {
#[test]
fn preset_names_use_mode_display_names() {
assert_eq!(plan_preset().name, ModeKind::Plan.display_name());
assert_eq!(plan_preset(None).name, ModeKind::Plan.display_name());
assert_eq!(
default_preset(CollaborationModesConfig::default()).name,
ModeKind::Default.display_name()
);
assert_eq!(
plan_preset().reasoning_effort,
plan_preset(None).reasoning_effort,
Some(Some(ReasoningEffort::Medium))
);
}
#[test]
// A configured override must replace the built-in Plan instructions verbatim.
fn plan_preset_uses_configured_developer_instructions_override() {
let override_instructions = "Use plan override.";
let plan = plan_preset(Some(override_instructions));
// Outer `Some` = the mask sets this field; inner `Some` = the override text.
assert_eq!(
plan.developer_instructions,
Some(Some(override_instructions.to_string()))
);
}
#[test]
fn default_mode_instructions_replace_mode_names_placeholder() {
let default_instructions = default_preset(CollaborationModesConfig {

View File

@@ -61,6 +61,7 @@ pub struct ModelsManager {
etag: RwLock<Option<String>>,
cache_manager: ModelsCacheManager,
provider: ModelProviderInfo,
plan_mode_developer_instructions: Option<String>,
}
impl ModelsManager {
@@ -74,6 +75,7 @@ impl ModelsManager {
auth_manager: Arc<AuthManager>,
model_catalog: Option<ModelsResponse>,
collaboration_modes_config: CollaborationModesConfig,
plan_mode_developer_instructions: Option<String>,
) -> Self {
let cache_path = codex_home.join(MODEL_CACHE_FILE);
let cache_manager = ModelsCacheManager::new(cache_path, DEFAULT_MODEL_CACHE_TTL);
@@ -96,6 +98,7 @@ impl ModelsManager {
etag: RwLock::new(None),
cache_manager,
provider: ModelProviderInfo::create_openai_provider(),
plan_mode_developer_instructions,
}
}
@@ -121,7 +124,10 @@ impl ModelsManager {
&self,
collaboration_modes_config: CollaborationModesConfig,
) -> Vec<CollaborationModeMask> {
builtin_collaboration_mode_presets(collaboration_modes_config)
builtin_collaboration_mode_presets(
collaboration_modes_config,
self.plan_mode_developer_instructions.as_deref(),
)
}
/// Attempt to list models without blocking, using the current cached state.
@@ -380,6 +386,20 @@ impl ModelsManager {
codex_home: PathBuf,
auth_manager: Arc<AuthManager>,
provider: ModelProviderInfo,
) -> Self {
Self::with_provider_and_plan_instructions_for_tests(
codex_home,
auth_manager,
provider,
None,
)
}
pub(crate) fn with_provider_and_plan_instructions_for_tests(
codex_home: PathBuf,
auth_manager: Arc<AuthManager>,
provider: ModelProviderInfo,
plan_mode_developer_instructions: Option<String>,
) -> Self {
let cache_path = codex_home.join(MODEL_CACHE_FILE);
let cache_manager = ModelsCacheManager::new(cache_path, DEFAULT_MODEL_CACHE_TTL);
@@ -394,6 +414,7 @@ impl ModelsManager {
etag: RwLock::new(None),
cache_manager,
provider,
plan_mode_developer_instructions,
}
}
@@ -522,6 +543,7 @@ mod tests {
auth_manager,
None,
CollaborationModesConfig::default(),
None,
);
let known_slug = manager
.get_remote_models()
@@ -562,6 +584,7 @@ mod tests {
models: vec![overlay],
}),
CollaborationModesConfig::default(),
None,
);
let model_info = manager
@@ -595,6 +618,7 @@ mod tests {
models: vec![remote],
}),
CollaborationModesConfig::default(),
None,
);
let namespaced_model = "custom/gpt-image".to_string();
@@ -620,6 +644,7 @@ mod tests {
auth_manager,
None,
CollaborationModesConfig::default(),
None,
);
let known_slug = manager
.get_remote_models()

View File

@@ -60,8 +60,14 @@ pub fn thread_manager_with_models_provider_and_home(
auth: CodexAuth,
provider: ModelProviderInfo,
codex_home: PathBuf,
plan_mode_developer_instructions: Option<String>,
) -> ThreadManager {
ThreadManager::with_models_provider_and_home_for_tests(auth, provider, codex_home)
ThreadManager::with_models_provider_and_home_and_plan_instructions_for_tests(
auth,
provider,
codex_home,
plan_mode_developer_instructions,
)
}
pub fn models_manager_with_provider(
@@ -87,5 +93,6 @@ pub fn all_model_presets() -> &'static Vec<ModelPreset> {
/// Test-support wrapper exposing the built-in collaboration mode presets
/// with default configuration.
pub fn builtin_collaboration_mode_presets() -> Vec<CollaborationModeMask> {
collaboration_mode_presets::builtin_collaboration_mode_presets(
collaboration_mode_presets::CollaborationModesConfig::default(),
// No plan-mode developer-instructions override in test support.
None,
)
}

View File

@@ -150,6 +150,7 @@ impl ThreadManager {
session_source: SessionSource,
model_catalog: Option<ModelsResponse>,
collaboration_modes_config: CollaborationModesConfig,
plan_mode_developer_instructions: Option<String>,
) -> Self {
let (thread_created_tx, _) = broadcast::channel(THREAD_CREATED_CHANNEL_CAPACITY);
let plugins_manager = Arc::new(PluginsManager::new(codex_home.clone()));
@@ -168,6 +169,7 @@ impl ThreadManager {
auth_manager.clone(),
model_catalog,
collaboration_modes_config,
plan_mode_developer_instructions,
)),
skills_manager,
plugins_manager,
@@ -207,6 +209,17 @@ impl ThreadManager {
auth: CodexAuth,
provider: ModelProviderInfo,
codex_home: PathBuf,
) -> Self {
Self::with_models_provider_and_home_and_plan_instructions_for_tests(
auth, provider, codex_home, None,
)
}
pub(crate) fn with_models_provider_and_home_and_plan_instructions_for_tests(
auth: CodexAuth,
provider: ModelProviderInfo,
codex_home: PathBuf,
plan_mode_developer_instructions: Option<String>,
) -> Self {
set_thread_manager_test_mode_for_tests(true);
let auth_manager = AuthManager::from_auth_for_testing(auth);
@@ -222,11 +235,14 @@ impl ThreadManager {
state: Arc::new(ThreadManagerState {
threads: Arc::new(RwLock::new(HashMap::new())),
thread_created_tx,
models_manager: Arc::new(ModelsManager::with_provider_for_tests(
codex_home,
auth_manager.clone(),
provider,
)),
models_manager: Arc::new(
ModelsManager::with_provider_and_plan_instructions_for_tests(
codex_home,
auth_manager.clone(),
provider,
plan_mode_developer_instructions,
),
),
skills_manager,
plugins_manager,
mcp_manager,

View File

@@ -186,12 +186,14 @@ impl TestCodexBuilder {
SessionSource::Exec,
Some(model_catalog),
CollaborationModesConfig::default(),
config.plan_mode_developer_instructions.clone(),
)
} else {
codex_core::test_support::thread_manager_with_models_provider_and_home(
auth.clone(),
config.model_provider.clone(),
config.codex_home.clone(),
config.plan_mode_developer_instructions.clone(),
)
};
let thread_manager = Arc::new(thread_manager);

View File

@@ -824,6 +824,7 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
.features
.enabled(Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
);
let NewThread { thread: codex, .. } = thread_manager
.start_thread(config)

View File

@@ -191,6 +191,70 @@ async fn collaboration_instructions_added_on_user_turn() -> Result<()> {
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// End-to-end check that `plan_mode_developer_instructions` from config is
// injected as the Plan-mode collaboration instructions on a user turn.
async fn plan_mode_uses_configured_developer_instructions_override() -> Result<()> {
skip_if_no_network!(Ok(()));
// Mock Responses server answering a single stream with an empty turn.
let server = start_mock_server().await;
let req = mount_sse_once(
&server,
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
)
.await;
let override_text = "configured plan instructions";
// Build a codex instance whose config carries the Plan-mode override.
let test = test_codex()
.with_config(move |config| {
config.plan_mode_developer_instructions = Some(override_text.to_string());
})
.build(&server)
.await?;
// Plan mode with no per-turn developer_instructions: the preset (and thus
// the configured override) should supply them.
let collaboration_mode = CollaborationMode {
mode: ModeKind::Plan,
settings: Settings {
model: test.session_configured.model.clone(),
reasoning_effort: None,
developer_instructions: None,
},
};
test.codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
text: "hello".into(),
text_elements: Vec::new(),
}],
cwd: test.config.cwd.clone(),
approval_policy: test.config.permissions.approval_policy.value(),
sandbox_policy: test.config.permissions.sandbox_policy.get().clone(),
model: test.session_configured.model.clone(),
effort: None,
summary: Some(
test.config
.model_reasoning_summary
.unwrap_or(codex_protocol::config_types::ReasoningSummary::Auto),
),
service_tier: None,
collaboration_mode: Some(collaboration_mode),
final_output_json_schema: None,
personality: None,
})
.await?;
wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
// Inspect the single outbound request's developer messages.
let input = req.single_request().input();
let dev_texts = developer_texts(&input);
let expected_text = collab_xml(override_text);
// Exactly one collaboration-mode block must be present, and it must carry
// the configured override text verbatim.
let collab_blocks = dev_texts
.iter()
.filter(|text| text.starts_with(COLLABORATION_MODE_OPEN_TAG))
.count();
assert_eq!(collab_blocks, 1);
assert_eq!(count_exact(&dev_texts, &expected_text), 1);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn override_then_next_turn_uses_updated_collaboration_instructions() -> Result<()> {
skip_if_no_network!(Ok(()));

View File

@@ -18,6 +18,7 @@ async fn offline_model_info_without_tool_output_override() {
auth_manager,
None,
CollaborationModesConfig::default(),
None,
);
let model_info = manager.get_model_info("gpt-5.1", &config).await;
@@ -41,6 +42,7 @@ async fn offline_model_info_with_tool_output_override() {
auth_manager,
None,
CollaborationModesConfig::default(),
None,
);
let model_info = manager.get_model_info("gpt-5.1-codex", &config).await;

View File

@@ -847,6 +847,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
let expected_permissions_msg = body1["input"][0].clone();
let expected_ui_msg = body1["input"][1].clone();
let expected_collaboration_mode_msg = body1["input"][2].clone();
let shell = default_user_shell();
let default_cwd_lossy = default_cwd.to_string_lossy();
@@ -868,6 +869,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
let expected_input_1 = serde_json::Value::Array(vec![
expected_permissions_msg.clone(),
expected_contextual_user_msg_1.clone(),
expected_collaboration_mode_msg.clone(),
expected_user_message_1.clone(),
]);
assert_eq!(body1["input"], expected_input_1);
@@ -876,6 +878,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
let expected_input_2 = serde_json::Value::Array(vec![
expected_permissions_msg,
expected_contextual_user_msg_1,
expected_collaboration_mode_msg,
expected_user_message_1,
expected_user_message_2,
]);
@@ -971,6 +974,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
let expected_permissions_msg = body1["input"][0].clone();
let expected_ui_msg = body1["input"][1].clone();
let expected_collaboration_mode_msg = body1["input"][2].clone();
let shell = default_user_shell();
let expected_env_text_1 = expected_ui_msg["content"][1]["text"]
@@ -989,6 +993,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
let expected_input_1 = serde_json::Value::Array(vec![
expected_permissions_msg.clone(),
expected_contextual_user_msg_1.clone(),
expected_collaboration_mode_msg.clone(),
expected_user_message_1.clone(),
]);
assert_eq!(body1["input"], expected_input_1);
@@ -1013,6 +1018,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
let expected_input_2 = serde_json::Value::Array(vec![
expected_permissions_msg,
expected_contextual_user_msg_1,
expected_collaboration_mode_msg,
expected_user_message_1,
expected_settings_update_msg,
expected_user_message_2,

View File

@@ -1,6 +1,5 @@
---
source: core/tests/suite/compact.rs
assertion_line: 1791
expression: "format_labeled_requests_snapshot(\"Pre-sampling compaction on model switch to a smaller context window: current behavior compacts using prior-turn history only (incoming user message excluded), and the follow-up request carries compacted history plus the new user message.\",\n&[(\"Initial Request (Previous Model)\", &requests[0]),\n(\"Pre-sampling Compaction Request\", &requests[1]),\n(\"Post-Compaction Follow-up Request (Next Model)\", &requests[2]),])"
---
Scenario: Pre-sampling compaction on model switch to a smaller context window: current behavior compacts using prior-turn history only (incoming user message excluded), and the follow-up request carries compacted history plus the new user message.
@@ -10,7 +9,9 @@ Scenario: Pre-sampling compaction on model switch to a smaller context window: c
01:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now...
03:message/user:before switch
## Pre-sampling Compaction Request
@@ -18,7 +19,9 @@ Scenario: Pre-sampling compaction on model switch to a smaller context window: c
01:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now...
03:message/user:before switch
04:message/assistant:before switch
05:message/user:<SUMMARIZATION_PROMPT>
@@ -26,9 +29,10 @@ Scenario: Pre-sampling compaction on model switch to a smaller context window: c
## Post-Compaction Follow-up Request (Next Model)
00:message/user:before switch
01:message/user:<COMPACTION_SUMMARY>\nPRE_SAMPLING_SUMMARY
02:message/developer[2]:
02:message/developer[3]:
[01] <model_switch>\nThe user was previously using a different model....
[02] <PERMISSIONS_INSTRUCTIONS>
[03] <collaboration_mode># Collaboration Mode: Default\n\nYou are now...
03:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>

View File

@@ -19,7 +19,9 @@ Scenario: Pre-turn auto-compaction with a context override emits the context dif
00:message/user:USER_ONE
01:message/user:USER_TWO
02:message/user:<COMPACTION_SUMMARY>\nPRE_TURN_SUMMARY
03:message/developer:<PERMISSIONS_INSTRUCTIONS>
03:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now...
04:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=PRETURN_CONTEXT_DIFF_CWD>

View File

@@ -1,6 +1,5 @@
---
source: core/tests/suite/compact.rs
assertion_line: 3188
expression: "format_labeled_requests_snapshot(\"Pre-turn compaction during model switch (without pre-sampling model-switch compaction): current behavior strips incoming <model_switch> from the compact request and restores it in the post-compaction follow-up request.\",\n&[(\"Initial Request (Previous Model)\", &requests[0]),\n(\"Local Compaction Request\", &requests[1]),\n(\"Local Post-Compaction History Layout\", &requests[2]),])"
---
Scenario: Pre-turn compaction during model switch (without pre-sampling model-switch compaction): current behavior strips incoming <model_switch> from the compact request and restores it in the post-compaction follow-up request.
@@ -10,7 +9,9 @@ Scenario: Pre-turn compaction during model switch (without pre-sampling model-sw
01:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now...
03:message/user:BEFORE_SWITCH_USER
## Local Compaction Request
@@ -18,7 +19,9 @@ Scenario: Pre-turn compaction during model switch (without pre-sampling model-sw
01:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now...
03:message/user:BEFORE_SWITCH_USER
04:message/assistant:BEFORE_SWITCH_REPLY
05:message/user:<SUMMARIZATION_PROMPT>
@@ -26,10 +29,11 @@ Scenario: Pre-turn compaction during model switch (without pre-sampling model-sw
## Local Post-Compaction History Layout
00:message/user:BEFORE_SWITCH_USER
01:message/user:<COMPACTION_SUMMARY>\nPRETURN_SWITCH_SUMMARY
02:message/developer[3]:
02:message/developer[4]:
[01] <model_switch>\nThe user was previously using a different model....
[02] <PERMISSIONS_INSTRUCTIONS>
[03] <personality_spec> The user has requested a new communication st...
[03] <collaboration_mode># Collaboration Mode: Default\n\nYou are now...
[04] <personality_spec> The user has requested a new communication st...
03:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>

View File

@@ -18,7 +18,9 @@ Scenario: Remote pre-turn auto-compaction with a context override emits the cont
00:message/user:USER_ONE
01:message/user:USER_TWO
02:compaction:encrypted=true
03:message/developer:<PERMISSIONS_INSTRUCTIONS>
03:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now...
04:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=PRETURN_CONTEXT_DIFF_CWD>

View File

@@ -1,6 +1,5 @@
---
source: core/tests/suite/compact_remote.rs
assertion_line: 1514
expression: "format_labeled_requests_snapshot(\"Remote pre-turn compaction during model switch currently excludes incoming user input, strips incoming <model_switch> from the compact request payload, and restores it in the post-compaction follow-up request.\",\n&[(\"Initial Request (Previous Model)\", &initial_turn_request),\n(\"Remote Compaction Request\", &compact_request),\n(\"Remote Post-Compaction History Layout\", &post_compact_turn_request),])"
---
Scenario: Remote pre-turn compaction during model switch currently excludes incoming user input, strips incoming <model_switch> from the compact request payload, and restores it in the post-compaction follow-up request.
@@ -23,10 +22,11 @@ Scenario: Remote pre-turn compaction during model switch currently excludes inco
## Remote Post-Compaction History Layout
00:message/user:BEFORE_SWITCH_USER
01:compaction:encrypted=true
02:message/developer[3]:
02:message/developer[4]:
[01] <model_switch>\nThe user was previously using a different model....
[02] <PERMISSIONS_INSTRUCTIONS>
[03] <personality_spec> The user has requested a new communication st...
[03] <collaboration_mode># Collaboration Mode: Default\n\nYou are now...
[04] <personality_spec> The user has requested a new communication st...
03:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>

View File

@@ -9,7 +9,9 @@ Scenario: Second turn changes cwd to a directory with different AGENTS.md; curre
01:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now in Default mode. Any previous i...
03:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
04:message/user:first turn in agents_one
@@ -18,7 +20,9 @@ Scenario: Second turn changes cwd to a directory with different AGENTS.md; curre
01:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now in Default mode. Any previous i...
03:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
04:message/user:first turn in agents_one
05:message/assistant:turn one complete

View File

@@ -18,5 +18,6 @@ Scenario: First post-resume turn where pre-turn override sets model to rollout m
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/user:seed resume history
03:message/assistant:recorded before resume
04:message/user:<ENVIRONMENT_CONTEXT:cwd=PRETURN_CONTEXT_DIFF_CWD>
05:message/user:first resumed turn after model override
04:message/developer:<collaboration_mode># Collaboration Mode: Default\n\nYou are now in Default mode. Any previous i...
05:message/user:<ENVIRONMENT_CONTEXT:cwd=PRETURN_CONTEXT_DIFF_CWD>
06:message/user:first resumed turn after model override

View File

@@ -18,8 +18,9 @@ Scenario: First post-resume turn where resumed config model differs from rollout
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/user:seed resume history
03:message/assistant:recorded before resume
04:message/developer[2]:
04:message/developer[3]:
[01] <model_switch>\nThe user was previously using a different model. Please continue the conversatio...
[02] <PERMISSIONS_INSTRUCTIONS>
[03] <collaboration_mode># Collaboration Mode: Default\n\nYou are now in Default mode. Any previous i...
05:message/user:<ENVIRONMENT_CONTEXT:cwd=PRETURN_CONTEXT_DIFF_CWD>
06:message/user:resume and change personality

View File

@@ -9,7 +9,9 @@ Scenario: Second turn changes cwd, approval policy, and personality while keepin
01:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now in Default mode. Any previous i...
03:message/user:first turn
## Second Request (Turn Overrides)
@@ -17,7 +19,9 @@ Scenario: Second turn changes cwd, approval policy, and personality while keepin
01:message/user[2]:
[01] <AGENTS_MD>
[02] <ENVIRONMENT_CONTEXT:cwd=<CWD>>
02:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/developer[2]:
[01] <PERMISSIONS_INSTRUCTIONS>
[02] <collaboration_mode># Collaboration Mode: Default\n\nYou are now in Default mode. Any previous i...
03:message/user:first turn
04:message/assistant:turn one complete
05:message/developer[2]:

View File

@@ -484,6 +484,7 @@ async fn run_exec_session(args: ExecRunArgs) -> anyhow::Result<()> {
.features
.enabled(codex_core::features::Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
));
let default_model = thread_manager
.get_models_manager()

View File

@@ -68,6 +68,7 @@ impl MessageProcessor {
.features
.enabled(codex_core::features::Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
));
Self {
outgoing,

View File

@@ -1585,6 +1585,7 @@ impl App {
.features
.enabled(codex_core::features::Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
));
let mut model = thread_manager
.get_models_manager()

View File

@@ -1754,6 +1754,7 @@ async fn make_chatwidget_manual(
auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
let reasoning_effort = None;
let base_mode = CollaborationMode {
@@ -1884,6 +1885,7 @@ fn set_chatgpt_auth(chat: &mut ChatWidget) {
chat.auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
}