Honor null thread instructions (#16964)

- Treat explicit null thread instructions as a blank-slate override
while preserving omitted-field fallback behavior.
- Preserve null through rollout resume/fork and keep explicit empty
strings distinct.
- Add app-server v2 start/fork coverage for the tri-state instruction
params.
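
The "tri-state" here is the usual double-`Option` pattern, which the diffs below reflect as `Option<Option<String>>` / `Option<BaseInstructions>`. Below is a minimal sketch of how the three states can be told apart during deserialization, assuming serde-based JSON params; `ThreadParams` and `double_option` are illustrative names, not the repository's actual items:

```rust
use serde::{Deserialize, Deserializer};

// Illustrative stand-in for a thread-start/fork params struct.
#[derive(Deserialize)]
struct ThreadParams {
    // None             => field omitted: fall back to defaults
    // Some(None)       => explicit null: blank-slate override
    // Some(Some(text)) => explicit string: use it verbatim
    #[serde(default, deserialize_with = "double_option")]
    base_instructions: Option<Option<String>>,
}

// Wraps whatever the field deserializes to (including null => None) in an
// outer Some, so a present-but-null field becomes Some(None) instead of
// being conflated with an omitted field.
fn double_option<'de, T, D>(de: D) -> Result<Option<Option<T>>, D::Error>
where
    T: Deserialize<'de>,
    D: Deserializer<'de>,
{
    Deserialize::deserialize(de).map(Some)
}

fn main() {
    let omitted: ThreadParams = serde_json::from_str(r#"{}"#).unwrap();
    assert_eq!(omitted.base_instructions, None);

    let null: ThreadParams =
        serde_json::from_str(r#"{"base_instructions":null}"#).unwrap();
    assert_eq!(null.base_instructions, Some(None));

    let set: ThreadParams =
        serde_json::from_str(r#"{"base_instructions":"be terse"}"#).unwrap();
    assert_eq!(set.base_instructions, Some(Some("be terse".to_string())));
}
```

The `#[serde(default)]` covers the omitted case, while the `deserialize_with` wrapper keeps an explicit `null` from collapsing into it.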
Author: Ahmed Ibrahim
Date: 2026-04-06 21:10:19 -07:00
Committed by: GitHub
Parent: 4bb507d2c4
Commit: 24c598e8a9
39 changed files with 550 additions and 101 deletions

View File

@@ -921,7 +921,7 @@ async fn includes_base_instructions_override_in_request() {
     let mut builder = test_codex()
         .with_auth(CodexAuth::from_api_key("Test API Key"))
         .with_config(|config| {
-            config.base_instructions = Some("test instructions".to_string());
+            config.base_instructions = Some(Some("test instructions".to_string()));
         });
     let codex = builder
         .build(&server)
@@ -953,6 +953,47 @@ async fn includes_base_instructions_override_in_request() {
     );
 }

+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn omits_explicit_null_base_instructions_from_request() {
+    skip_if_no_network!();
+
+    // Mock server
+    let server = MockServer::start().await;
+    let resp_mock = mount_sse_once(
+        &server,
+        sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
+    )
+    .await;
+
+    let mut builder = test_codex()
+        .with_auth(CodexAuth::from_api_key("Test API Key"))
+        .with_config(|config| {
+            config.base_instructions = Some(None);
+        });
+    let codex = builder
+        .build(&server)
+        .await
+        .expect("create new conversation")
+        .codex;
+
+    codex
+        .submit(Op::UserInput {
+            items: vec![UserInput::Text {
+                text: "hello".into(),
+                text_elements: Vec::new(),
+            }],
+            final_output_json_schema: None,
+        })
+        .await
+        .unwrap();
+
+    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
+
+    let request = resp_mock.single_request();
+    let request_body = request.body_json();
+    assert_eq!(request_body.get("instructions"), None);
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn chatgpt_auth_sends_correct_request() {
     skip_if_no_network!();

View File

@@ -1654,9 +1654,9 @@ fn prompt_with_input(input: Vec<ResponseItem>) -> Prompt {
 fn prompt_with_input_and_instructions(input: Vec<ResponseItem>, instructions: &str) -> Prompt {
     let mut prompt = prompt_with_input(input);
-    prompt.base_instructions = BaseInstructions {
+    prompt.base_instructions = Some(BaseInstructions {
         text: instructions.to_string(),
-    };
+    });
     prompt
 }

View File

@@ -869,7 +869,7 @@ async fn remote_compact_trim_estimate_uses_session_base_instructions() -> Result
             let override_base_instructions = override_base_instructions.clone();
             move |config| {
                 config.model_context_window = Some(override_context_window);
-                config.base_instructions = Some(override_base_instructions);
+                config.base_instructions = Some(Some(override_base_instructions));
             }
         }),
     )

View File

@@ -66,7 +66,7 @@ async fn base_instructions_override_disables_personality_template() {
         .enable(Feature::Personality)
         .expect("test config should allow feature update");
     config.personality = Some(Personality::Friendly);
-    config.base_instructions = Some("override instructions".to_string());
+    config.base_instructions = Some(Some("override instructions".to_string()));
     let model_info =
         codex_core::test_support::construct_model_info_offline("gpt-5.2-codex", &config);

View File

@@ -173,7 +173,7 @@ async fn find_locates_rollout_file_written_by_recorder() -> std::io::Result<()>
         thread_id,
         /*forked_from_id*/ None,
         SessionSource::Exec,
-        BaseInstructions::default(),
+        Some(BaseInstructions::default()),
         Vec::new(),
         EventPersistenceMode::Limited,
     ),
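
For the resume/fork guarantee in the commit message to hold, the persisted rollout metadata has to write an explicit `null` rather than dropping the field, so a resumed or forked thread can tell "never set" from "blank slate". A hedged sketch of that serialization rule, again with an illustrative type name (`RolloutMeta` is not the repository's actual struct):

```rust
use serde::Serialize;

// Illustrative stand-in for rollout metadata that carries the tri-state.
#[derive(Serialize)]
struct RolloutMeta {
    // Skip only when the field was never set. An explicit blank-slate
    // override (Some(None)) still serializes as `null`, so resume/fork
    // restores the same tri-state value.
    #[serde(skip_serializing_if = "Option::is_none")]
    base_instructions: Option<Option<String>>,
}

fn main() {
    let unset = RolloutMeta { base_instructions: None };
    assert_eq!(serde_json::to_string(&unset).unwrap(), "{}");

    let blank = RolloutMeta { base_instructions: Some(None) };
    assert_eq!(
        serde_json::to_string(&blank).unwrap(),
        r#"{"base_instructions":null}"#
    );
}
```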

View File

@@ -84,7 +84,7 @@ async fn continue_after_stream_error() {
     let TestCodex { codex, .. } = test_codex()
         .with_config(move |config| {
-            config.base_instructions = Some("You are a helpful assistant".to_string());
+            config.base_instructions = Some(Some("You are a helpful assistant".to_string()));
             config.model_provider = provider;
         })
         .build(&server)