Mirror of https://github.com/openai/codex.git, synced 2026-04-29 17:06:51 +00:00
Unify remote compaction snapshot mocks around default endpoint behavior (#12050)
## Summary

- standardize remote compaction test mocking around one default behavior in shared helpers
- make default remote compact mocks mirror production shape: keep `message/user` + `message/developer`, drop assistant/tool artifacts, then append a summary user message (see the sketch after the commit metadata below)
- switch non-special `compact_remote` tests to the shared default mock instead of ad-hoc JSON payloads

## Special-case tests that still use explicit mocks

- remote compaction error payload / HTTP failure behavior
- summary-only compact output behavior
- manual `/compact` with no prior user messages
- stale developer-instruction injection coverage

## Why

This removes inconsistent manual remote compaction fixtures and gives us one source of truth for normal remote compact behavior, while preserving explicit mocks only where tests intentionally cover non-default behavior.
committed by GitHub
parent db4d2599b5
commit eb68767f2f
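The second Summary bullet describes the history shape the shared default mock now produces. As a minimal sketch of that filtering only: the helper name `default_remote_compact_items` and the exact JSON item shape are assumptions for illustration, not the repository's actual test helpers.

```rust
use serde_json::{Value, json};

/// Hypothetical sketch of the default remote-compaction mock behavior
/// described in the summary: keep `message/user` and `message/developer`
/// items, drop assistant/tool artifacts, then append a summary user message.
fn default_remote_compact_items(history: &[Value], summary_text: &str) -> Vec<Value> {
    let mut kept: Vec<Value> = history
        .iter()
        // Keep only user and developer messages; everything else
        // (assistant messages, tool calls/outputs) is dropped.
        .filter(|item| {
            item["type"] == "message"
                && matches!(item["role"].as_str(), Some("user" | "developer"))
        })
        .cloned()
        .collect();
    // The compacted history ends with a synthetic user message
    // carrying the compaction summary.
    kept.push(json!({
        "type": "message",
        "role": "user",
        "content": [{ "type": "input_text", "text": summary_text }],
    }));
    kept
}
```

Under that default, only the special cases listed above (error payloads, summary-only output, `/compact` with no prior user messages, stale developer instructions) keep bespoke mocks.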
@@ -3038,6 +3038,131 @@ async fn snapshot_request_shape_pre_turn_compaction_including_incoming_user_mess
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// TODO(ccunningham): Update once pre-turn compaction context-overflow handling includes incoming
// user input and emits richer oversized-input messaging.
async fn snapshot_request_shape_pre_turn_compaction_strips_incoming_model_switch() {
    skip_if_no_network!();

    let server = start_mock_server().await;
    let previous_model = "gpt-5.1-codex-max";
    let next_model = "gpt-5.2-codex";

    let request_log = mount_sse_sequence(
        &server,
        vec![
            sse(vec![
                ev_assistant_message("m1", "BEFORE_SWITCH_REPLY"),
                ev_completed_with_tokens("r1", 500),
            ]),
            sse(vec![
                ev_assistant_message("m2", "PRETURN_SWITCH_SUMMARY"),
                ev_completed_with_tokens("r2", 100),
            ]),
            sse(vec![
                ev_assistant_message("m3", "AFTER_SWITCH_REPLY"),
                ev_completed_with_tokens("r3", 100),
            ]),
        ],
    )
    .await;

    let model_provider = non_openai_model_provider(&server);
    let test = test_codex()
        .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing())
        .with_model(previous_model)
        .with_config(move |config| {
            config.model_provider = model_provider;
            set_test_compact_prompt(config);
            config
                .features
                .enable(codex_core::features::Feature::RemoteModels);
            config.model_auto_compact_token_limit = Some(200);
        })
        .build(&server)
        .await
        .expect("build codex");

    test.codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "BEFORE_SWITCH_USER".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: previous_model.to_string(),
            effort: None,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            personality: None,
        })
        .await
        .expect("submit first user turn");
    wait_for_event(&test.codex, |event| {
        matches!(event, EventMsg::TurnComplete(_))
    })
    .await;

    test.codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "AFTER_SWITCH_USER".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: next_model.to_string(),
            effort: None,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            personality: None,
        })
        .await
        .expect("submit second user turn");
    wait_for_event(&test.codex, |event| {
        matches!(event, EventMsg::TurnComplete(_))
    })
    .await;

    let requests = request_log.requests();
    assert_eq!(
        requests.len(),
        3,
        "expected first turn, pre-turn compact, and post-compact follow-up requests"
    );

    let compact_body = requests[1].body_json().to_string();
    assert!(
        body_contains_text(&compact_body, SUMMARIZATION_PROMPT),
        "pre-turn compaction request should include summarization prompt"
    );
    assert!(
        !compact_body.contains("<model_switch>"),
        "pre-turn compaction request should strip incoming model-switch update item"
    );

    let follow_up_body = requests[2].body_json().to_string();
    assert!(
        follow_up_body.contains("<model_switch>"),
        "post-compaction follow-up should include model-switch update item"
    );

    insta::assert_snapshot!(
        "pre_turn_compaction_strips_incoming_model_switch_shapes",
        format_labeled_requests_snapshot(
            "Pre-turn compaction during model switch (without pre-sampling model-switch compaction): current behavior strips incoming <model_switch> from the compact request and restores it in the post-compaction follow-up request.",
            &[
                ("Initial Request (Previous Model)", &requests[0]),
                ("Local Compaction Request", &requests[1]),
                ("Local Post-Compaction History Layout", &requests[2]),
            ]
        )
    );
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn snapshot_request_shape_pre_turn_compaction_context_window_exceeded() {
    skip_if_no_network!();