Unify remote compaction snapshot mocks around default endpoint behavior (#12050)

## Summary
- standardize remote compaction test mocking around one default behavior
in shared helpers
- make default remote compact mocks mirror production shape: keep
`message/user` + `message/developer`, drop assistant/tool artifacts,
then append a summary user message
- switch non-special `compact_remote` tests to the shared default mock
instead of ad-hoc JSON payloads

## Special-case tests that still use explicit mocks
- remote compaction error payload / HTTP failure behavior
- summary-only compact output behavior
- manual `/compact` with no prior user messages
- stale developer-instruction injection coverage

## Why
This removes inconsistent manual remote compaction fixtures and gives us
one source of truth for normal remote compact behavior, while preserving
explicit mocks only where tests intentionally cover non-default
behavior.
This commit is contained in:
Charley Cunningham
2026-02-17 18:18:47 -08:00
committed by GitHub
parent db4d2599b5
commit eb68767f2f
7 changed files with 655 additions and 40 deletions

View File

@@ -865,6 +865,88 @@ pub async fn mount_compact_json_once(server: &MockServer, body: serde_json::Valu
.await
}
/// Mount a `/responses/compact` mock that mirrors the default remote compaction shape:
/// user and developer messages from the request survive, assistant/tool artifacts are dropped,
/// and a single summary user message is appended at the end.
///
/// Convenience wrapper over [`mount_compact_user_history_with_summary_sequence`] for tests that
/// expect exactly one compact call.
pub async fn mount_compact_user_history_with_summary_once(
    server: &MockServer,
    summary_text: &str,
) -> ResponseMock {
    let summaries = vec![summary_text.to_owned()];
    mount_compact_user_history_with_summary_sequence(server, summaries).await
}
/// Same as [`mount_compact_user_history_with_summary_once`], but for multiple compact calls.
/// Each incoming compact request receives the next summary text in order; the mock expects
/// exactly `summary_texts.len()` compact requests.
pub async fn mount_compact_user_history_with_summary_sequence(
    server: &MockServer,
    summary_texts: Vec<String>,
) -> ResponseMock {
    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering;

    // Responder that rebuilds the compact response from the request body itself: only
    // user/developer messages are kept, then the next queued summary is appended as the
    // newest user message — mirroring the default remote compaction shape.
    #[derive(Debug)]
    struct SequencedCompactResponder {
        num_calls: AtomicUsize,
        summary_texts: Vec<String>,
    }

    impl Respond for SequencedCompactResponder {
        fn respond(&self, request: &wiremock::Request) -> ResponseTemplate {
            // Hand out summaries in mount order, one per compact request.
            let call_num = self.num_calls.fetch_add(1, Ordering::SeqCst);
            let summary_text = match self.summary_texts.get(call_num) {
                Some(text) => text,
                None => panic!("no summary text for compact request {call_num}"),
            };

            // Decode the (possibly compressed) request body and parse it as JSON.
            let decoded = decode_body_bytes(
                &request.body,
                request
                    .headers
                    .get("content-encoding")
                    .and_then(|value| value.to_str().ok()),
            );
            let parsed: Value = serde_json::from_slice(&decoded)
                .unwrap_or_else(|err| panic!("failed to parse compact request body: {err}"));

            // Match current remote compaction behavior: keep user/developer messages and
            // omit assistant/tool history entries.
            let keeps_item = |item: &Value| {
                let is_message = item.get("type").and_then(Value::as_str) == Some("message");
                let role_ok = matches!(
                    item.get("role").and_then(Value::as_str),
                    Some("user") | Some("developer")
                );
                is_message && role_ok
            };
            let mut output: Vec<Value> = match parsed.get("input").and_then(Value::as_array) {
                Some(items) => items.iter().filter(|item| keeps_item(item)).cloned().collect(),
                None => Vec::new(),
            };

            // Append the synthetic summary message as the newest user item.
            output.push(serde_json::json!({
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": summary_text}],
            }));

            ResponseTemplate::new(200)
                .insert_header("content-type", "application/json")
                .set_body_json(serde_json::json!({ "output": output }))
        }
    }

    let expected_calls = summary_texts.len();
    let responder = SequencedCompactResponder {
        num_calls: AtomicUsize::new(0),
        summary_texts,
    };
    let (mock, response_mock) = compact_mock();
    mock.respond_with(responder)
        .up_to_n_times(expected_calls as u64)
        .expect(expected_calls as u64)
        .mount(server)
        .await;
    response_mock
}
pub async fn mount_compact_response_once(
server: &MockServer,
response: ResponseTemplate,

View File

@@ -3038,6 +3038,131 @@ async fn snapshot_request_shape_pre_turn_compaction_including_incoming_user_mess
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// NOTE(review): the TODO below looks copied from the context-overflow snapshot test; this test
// covers model-switch stripping rather than oversized-input messaging — confirm intent.
// TODO(ccunningham): Update once pre-turn compaction context-overflow handling includes incoming
// user input and emits richer oversized-input messaging.
// Snapshot test: when local pre-turn compaction fires on a turn that also switches models, the
// compaction request must not contain the incoming <model_switch> update item, while the
// post-compaction follow-up request must restore it.
async fn snapshot_request_shape_pre_turn_compaction_strips_incoming_model_switch() {
    skip_if_no_network!();
    let server = start_mock_server().await;
    let previous_model = "gpt-5.1-codex-max";
    let next_model = "gpt-5.2-codex";
    // Three scripted SSE responses, consumed in order: the first-turn reply, the compaction
    // summary, and the post-compaction follow-up reply.
    let request_log = mount_sse_sequence(
        &server,
        vec![
            sse(vec![
                ev_assistant_message("m1", "BEFORE_SWITCH_REPLY"),
                ev_completed_with_tokens("r1", 500),
            ]),
            sse(vec![
                ev_assistant_message("m2", "PRETURN_SWITCH_SUMMARY"),
                ev_completed_with_tokens("r2", 100),
            ]),
            sse(vec![
                ev_assistant_message("m3", "AFTER_SWITCH_REPLY"),
                ev_completed_with_tokens("r3", 100),
            ]),
        ],
    )
    .await;
    let model_provider = non_openai_model_provider(&server);
    let test = test_codex()
        .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing())
        .with_model(previous_model)
        .with_config(move |config| {
            config.model_provider = model_provider;
            set_test_compact_prompt(config);
            config
                .features
                .enable(codex_core::features::Feature::RemoteModels);
            // Low limit so the 500-token first turn pushes the next turn over the threshold
            // and forces pre-turn compaction.
            config.model_auto_compact_token_limit = Some(200);
        })
        .build(&server)
        .await
        .expect("build codex");
    // First turn runs on the previous model and accumulates enough tokens to exceed the limit.
    test.codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "BEFORE_SWITCH_USER".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: previous_model.to_string(),
            effort: None,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            personality: None,
        })
        .await
        .expect("submit first user turn");
    wait_for_event(&test.codex, |event| {
        matches!(event, EventMsg::TurnComplete(_))
    })
    .await;
    // Second turn switches to the next model, which injects a <model_switch> update item
    // alongside the user input and triggers pre-turn compaction first.
    test.codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "AFTER_SWITCH_USER".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: next_model.to_string(),
            effort: None,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            personality: None,
        })
        .await
        .expect("submit second user turn");
    wait_for_event(&test.codex, |event| {
        matches!(event, EventMsg::TurnComplete(_))
    })
    .await;
    let requests = request_log.requests();
    assert_eq!(
        requests.len(),
        3,
        "expected first turn, pre-turn compact, and post-compact follow-up requests"
    );
    // requests[1] is the compaction request: it must carry the summarization prompt but not the
    // incoming model-switch update item.
    let compact_body = requests[1].body_json().to_string();
    assert!(
        body_contains_text(&compact_body, SUMMARIZATION_PROMPT),
        "pre-turn compaction request should include summarization prompt"
    );
    assert!(
        !compact_body.contains("<model_switch>"),
        "pre-turn compaction request should strip incoming model-switch update item"
    );
    // requests[2] is the follow-up turn: the model-switch item must be restored there.
    let follow_up_body = requests[2].body_json().to_string();
    assert!(
        follow_up_body.contains("<model_switch>"),
        "post-compaction follow-up should include model-switch update item"
    );
    insta::assert_snapshot!(
        "pre_turn_compaction_strips_incoming_model_switch_shapes",
        format_labeled_requests_snapshot(
            "Pre-turn compaction during model switch (without pre-sampling model-switch compaction): current behavior strips incoming <model_switch> from the compact request and restores it in the post-compaction follow-up request.",
            &[
                ("Initial Request (Previous Model)", &requests[0]),
                ("Local Compaction Request", &requests[1]),
                ("Local Post-Compaction History Layout", &requests[2]),
            ]
        )
    );
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn snapshot_request_shape_pre_turn_compaction_context_window_exceeded() {
skip_if_no_network!();

View File

@@ -216,15 +216,9 @@ async fn remote_compact_runs_automatically() -> Result<()> {
)
.await;
let compacted_history = vec![
responses::user_message_item("REMOTE_COMPACTED_SUMMARY"),
ResponseItem::Compaction {
encrypted_content: "ENCRYPTED_COMPACTION_SUMMARY".to_string(),
},
];
let compact_mock = responses::mount_compact_json_once(
let compact_mock = responses::mount_compact_user_history_with_summary_once(
harness.server(),
serde_json::json!({ "output": compacted_history.clone() }),
"REMOTE_COMPACTED_SUMMARY",
)
.await;
@@ -249,7 +243,6 @@ async fn remote_compact_runs_automatically() -> Result<()> {
let follow_up_request = responses_mock.single_request();
let follow_up_body = follow_up_request.body_json().to_string();
assert!(follow_up_body.contains("REMOTE_COMPACTED_SUMMARY"));
assert!(follow_up_body.contains("ENCRYPTED_COMPACTION_SUMMARY"));
Ok(())
}
@@ -318,9 +311,11 @@ async fn remote_compact_trims_function_call_history_to_fit_context_window() -> R
.await?;
wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
let compact_mock =
responses::mount_compact_json_once(harness.server(), serde_json::json!({ "output": [] }))
.await;
let compact_mock = responses::mount_compact_user_history_with_summary_once(
harness.server(),
"REMOTE_COMPACT_SUMMARY",
)
.await;
codex.submit(Op::Compact).await?;
wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
@@ -436,9 +431,11 @@ async fn auto_remote_compact_trims_function_call_history_to_fit_context_window()
.await?;
wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
let compact_mock =
responses::mount_compact_json_once(harness.server(), serde_json::json!({ "output": [] }))
.await;
let compact_mock = responses::mount_compact_user_history_with_summary_once(
harness.server(),
"REMOTE_AUTO_COMPACT_SUMMARY",
)
.await;
codex
.submit(Op::UserInput {
@@ -667,9 +664,9 @@ async fn remote_compact_trim_estimate_uses_session_base_instructions() -> Result
})
.await;
let baseline_compact_mock = responses::mount_compact_json_once(
let baseline_compact_mock = responses::mount_compact_user_history_with_summary_once(
baseline_harness.server(),
serde_json::json!({ "output": [] }),
"REMOTE_BASELINE_SUMMARY",
)
.await;
@@ -766,9 +763,9 @@ async fn remote_compact_trim_estimate_uses_session_base_instructions() -> Result
})
.await;
let override_compact_mock = responses::mount_compact_json_once(
let override_compact_mock = responses::mount_compact_user_history_with_summary_once(
override_harness.server(),
serde_json::json!({ "output": [] }),
"REMOTE_OVERRIDE_SUMMARY",
)
.await;
@@ -814,15 +811,9 @@ async fn remote_manual_compact_emits_context_compaction_items() -> Result<()> {
)
.await;
let compacted_history = vec![
responses::user_message_item("REMOTE_COMPACTED_SUMMARY"),
ResponseItem::Compaction {
encrypted_content: "ENCRYPTED_COMPACTION_SUMMARY".to_string(),
},
];
let compact_mock = responses::mount_compact_json_once(
let compact_mock = responses::mount_compact_user_history_with_summary_once(
harness.server(),
serde_json::json!({ "output": compacted_history.clone() }),
"REMOTE_COMPACTED_SUMMARY",
)
.await;
@@ -1353,14 +1344,9 @@ async fn snapshot_request_shape_remote_pre_turn_compaction_including_incoming_us
)
.await;
let compacted_history = vec![
responses::user_message_item("USER_ONE"),
responses::user_message_item("USER_TWO"),
responses::user_message_item(&summary_with_prefix("REMOTE_PRE_TURN_SUMMARY")),
];
let compact_mock = responses::mount_compact_json_once(
let compact_mock = responses::mount_compact_user_history_with_summary_once(
harness.server(),
serde_json::json!({ "output": compacted_history }),
&summary_with_prefix("REMOTE_PRE_TURN_SUMMARY"),
)
.await;
@@ -1424,6 +1410,142 @@ async fn snapshot_request_shape_remote_pre_turn_compaction_including_incoming_us
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Snapshot test: remote pre-turn compaction during a model switch. Current behavior excludes the
// incoming user input from the compact request, strips the incoming <model_switch> item there,
// and restores both in the post-compaction follow-up request. Uses the shared default compact
// mock (user/developer history + appended summary).
async fn snapshot_request_shape_remote_pre_turn_compaction_strips_incoming_model_switch()
-> Result<()> {
    skip_if_no_network!(Ok(()));
    let previous_model = "gpt-5.1-codex-max";
    let next_model = "gpt-5.2-codex";
    let harness = TestCodexHarness::with_builder(
        test_codex()
            .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing())
            .with_model(previous_model)
            .with_config(|config| {
                // Low limit so the 500-token first turn forces compaction before turn two.
                config.model_auto_compact_token_limit = Some(200);
            }),
    )
    .await?;
    let codex = harness.test().codex.clone();
    // First-turn reply on the previous model; its 500 tokens exceed the compaction limit.
    let initial_turn_request_mock = responses::mount_sse_once(
        harness.server(),
        responses::sse(vec![
            responses::ev_assistant_message("m1", "BEFORE_SWITCH_REPLY"),
            responses::ev_completed_with_tokens("r1", 500),
        ]),
    )
    .await;
    // Follow-up reply served after remote compaction completes.
    let post_compact_turn_request_mock = responses::mount_sse_once(
        harness.server(),
        responses::sse(vec![
            responses::ev_assistant_message("m2", "AFTER_SWITCH_REPLY"),
            responses::ev_completed_with_tokens("r2", 80),
        ]),
    )
    .await;
    // Default-shaped remote compact mock: echoes user/developer history plus this summary.
    let compact_mock = responses::mount_compact_user_history_with_summary_once(
        harness.server(),
        &summary_with_prefix("REMOTE_SWITCH_SUMMARY"),
    )
    .await;
    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "BEFORE_SWITCH_USER".to_string(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    // Switch models between turns so the next turn carries a <model_switch> update item.
    codex
        .submit(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: None,
            sandbox_policy: None,
            windows_sandbox_level: None,
            model: Some(next_model.to_string()),
            effort: None,
            summary: None,
            collaboration_mode: None,
            personality: None,
        })
        .await?;
    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "AFTER_SWITCH_USER".to_string(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    assert_eq!(
        compact_mock.requests().len(),
        1,
        "expected a single remote pre-turn compaction request"
    );
    assert_eq!(
        initial_turn_request_mock.requests().len(),
        1,
        "expected initial turn request"
    );
    assert_eq!(
        post_compact_turn_request_mock.requests().len(),
        1,
        "expected post-compaction follow-up request"
    );
    let initial_turn_request = initial_turn_request_mock.single_request();
    let compact_request = compact_mock.single_request();
    let post_compact_turn_request = post_compact_turn_request_mock.single_request();
    // The compact request must contain neither the incoming user input nor the model-switch item.
    let compact_body = compact_request.body_json().to_string();
    assert!(
        !compact_body.contains("AFTER_SWITCH_USER"),
        "current behavior excludes incoming user from the pre-turn remote compaction request"
    );
    assert!(
        !compact_body.contains("<model_switch>"),
        "pre-turn remote compaction request should strip incoming model-switch update item"
    );
    // The follow-up request must restore prior history, the incoming user input, and the
    // model-switch update item.
    let follow_up_body = post_compact_turn_request.body_json().to_string();
    assert!(
        follow_up_body.contains("BEFORE_SWITCH_USER"),
        "post-compaction follow-up should preserve older user messages when they fit"
    );
    assert!(
        follow_up_body.contains("AFTER_SWITCH_USER"),
        "post-compaction follow-up should preserve incoming user message via runtime append"
    );
    assert!(
        follow_up_body.contains("<model_switch>"),
        "post-compaction follow-up should include the model-switch update item"
    );
    insta::assert_snapshot!(
        "remote_pre_turn_compaction_strips_incoming_model_switch_shapes",
        format_labeled_requests_snapshot(
            "Remote pre-turn compaction during model switch currently excludes incoming user input, strips incoming <model_switch> from the compact request payload, and restores it in the post-compaction follow-up request.",
            &[
                ("Initial Request (Previous Model)", &initial_turn_request),
                ("Remote Compaction Request", &compact_request),
                (
                    "Remote Post-Compaction History Layout",
                    &post_compact_turn_request
                ),
            ]
        )
    );
    Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// TODO(ccunningham): Update once remote pre-turn compaction context-overflow handling includes
// incoming user input and emits richer oversized-input messaging.
@@ -1555,13 +1677,9 @@ async fn snapshot_request_shape_remote_mid_turn_continuation_compaction() -> Res
)
.await;
let compacted_history = vec![
responses::user_message_item("USER_ONE"),
responses::user_message_item(&summary_with_prefix("REMOTE_MID_TURN_SUMMARY")),
];
let compact_mock = responses::mount_compact_json_once(
let compact_mock = responses::mount_compact_user_history_with_summary_once(
harness.server(),
serde_json::json!({ "output": compacted_history }),
&summary_with_prefix("REMOTE_MID_TURN_SUMMARY"),
)
.await;
@@ -1599,6 +1717,196 @@ async fn snapshot_request_shape_remote_mid_turn_continuation_compaction() -> Res
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Snapshot test: remote mid-turn compaction whose compact output contains ONLY the summary user
// message. Deliberately uses an explicit JSON mock instead of the shared default helper — this is
// one of the special-case behaviors — so the continuation layout must reinject canonical context
// ahead of that lone summary.
async fn snapshot_request_shape_remote_mid_turn_compaction_summary_only_reinjects_context()
-> Result<()> {
    skip_if_no_network!(Ok(()));
    let harness = TestCodexHarness::with_builder(
        test_codex()
            .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing())
            .with_config(|config| {
                // Low limit so the 500-token first response triggers mid-turn compaction.
                config.model_auto_compact_token_limit = Some(200);
            }),
    )
    .await?;
    let codex = harness.test().codex.clone();
    // First response issues a function call with 500 tokens, exceeding the compaction limit
    // mid-turn before the tool round-trip completes.
    let initial_turn_request_mock = responses::mount_sse_once(
        harness.server(),
        responses::sse(vec![
            responses::ev_function_call("call-remote-summary-only", DUMMY_FUNCTION_NAME, "{}"),
            responses::ev_completed_with_tokens("r1", 500),
        ]),
    )
    .await;
    // Final reply served on the post-compaction continuation request.
    let post_compact_turn_request_mock = responses::mount_sse_once(
        harness.server(),
        responses::sse(vec![
            responses::ev_assistant_message("m2", "REMOTE_SUMMARY_ONLY_FINAL_REPLY"),
            responses::ev_completed_with_tokens("r2", 80),
        ]),
    )
    .await;
    // Explicit compact output: just the summary user message, with none of the request history
    // echoed back — the degenerate shape this test exists to cover.
    let compacted_history = vec![responses::user_message_item(&summary_with_prefix(
        "REMOTE_SUMMARY_ONLY",
    ))];
    let compact_mock = responses::mount_compact_json_once(
        harness.server(),
        serde_json::json!({ "output": compacted_history }),
    )
    .await;
    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "USER_ONE".to_string(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    assert_eq!(compact_mock.requests().len(), 1);
    assert_eq!(
        initial_turn_request_mock.requests().len(),
        1,
        "expected initial turn request"
    );
    assert_eq!(
        post_compact_turn_request_mock.requests().len(),
        1,
        "expected post-compaction request"
    );
    let compact_request = compact_mock.single_request();
    let post_compact_turn_request = post_compact_turn_request_mock.single_request();
    insta::assert_snapshot!(
        "remote_mid_turn_compaction_summary_only_reinjects_context_shapes",
        format_labeled_requests_snapshot(
            "Remote mid-turn compaction where compact output has only summary user content: continuation layout reinjects canonical context before that summary.",
            &[
                ("Remote Compaction Request", &compact_request),
                (
                    "Remote Post-Compaction History Layout",
                    &post_compact_turn_request
                ),
            ]
        )
    );
    Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Snapshot test: remote mid-turn compaction after an earlier manual summary compaction. The
// older summary must stay in model-visible history and round-trip into the second compact
// request. Uses the shared default compact mock, sequenced for two compact calls.
async fn snapshot_request_shape_remote_mid_turn_compaction_multi_summary_reinjects_above_last_summary()
-> Result<()> {
    skip_if_no_network!(Ok(()));
    let harness = TestCodexHarness::with_builder(
        test_codex()
            .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing())
            .with_config(|config| {
                // Low limit so the 1_000-token second-turn response triggers mid-turn compaction.
                config.model_auto_compact_token_limit = Some(200);
            }),
    )
    .await?;
    let codex = harness.test().codex.clone();
    // Setup turn: a small assistant reply that stays under the compaction limit.
    let setup_turn_request_mock = responses::mount_sse_once(
        harness.server(),
        responses::sse(vec![
            responses::ev_assistant_message("setup", "REMOTE_SETUP_REPLY"),
            responses::ev_completed_with_tokens("setup-response", 60),
        ]),
    )
    .await;
    // Second turn: a shell tool call whose 1_000 tokens exceed the limit mid-turn.
    let second_turn_request_mock = responses::mount_sse_once(
        harness.server(),
        responses::sse(vec![
            responses::ev_shell_command_call("call-remote-multi-summary", "echo multi-summary"),
            responses::ev_completed_with_tokens("r1", 1_000),
        ]),
    )
    .await;
    // Two compact calls are expected: the manual /compact and the mid-turn auto compact.
    let compact_mock = responses::mount_compact_user_history_with_summary_sequence(
        harness.server(),
        vec![
            summary_with_prefix("REMOTE_OLDER_SUMMARY"),
            summary_with_prefix("REMOTE_LATEST_SUMMARY"),
        ],
    )
    .await;
    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "USER_ONE".to_string(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    // Manual compaction produces the older summary before the second user turn begins.
    codex.submit(Op::Compact).await?;
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "USER_TWO".to_string(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
    assert_eq!(compact_mock.requests().len(), 2);
    assert_eq!(
        setup_turn_request_mock.requests().len(),
        1,
        "expected setup turn request"
    );
    assert_eq!(
        second_turn_request_mock.requests().len(),
        1,
        "expected second-turn pre-compaction request"
    );
    let compact_requests = compact_mock.requests();
    assert_eq!(
        compact_requests.len(),
        2,
        "expected one setup compact and one mid-turn compact request"
    );
    // compact_requests[1] is the mid-turn compact; the older summary must appear in its payload.
    let compact_request = compact_requests[1].clone();
    let second_turn_request = second_turn_request_mock.single_request();
    assert!(
        compact_request.body_contains_text("REMOTE_OLDER_SUMMARY"),
        "older summary should round-trip from conversation history into the next compact request"
    );
    insta::assert_snapshot!(
        "remote_mid_turn_compaction_multi_summary_reinjects_above_last_summary_shapes",
        format_labeled_requests_snapshot(
            "Remote mid-turn compaction after an earlier summary compaction: the older summary remains in model-visible history and round-trips into the next compact request.",
            &[
                (
                    "Second Turn Request (Before Mid-Turn Compaction)",
                    &second_turn_request
                ),
                ("Remote Compaction Request", &compact_request),
            ]
        )
    );
    Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// TODO(ccunningham): Update once manual remote /compact with no prior user turn becomes a no-op.
async fn snapshot_request_shape_remote_manual_compact_without_previous_user_messages() -> Result<()>

View File

@@ -0,0 +1,32 @@
---
source: core/tests/suite/compact.rs
assertion_line: 3152
expression: "format_labeled_requests_snapshot(\"Pre-turn compaction during model switch (without pre-sampling model-switch compaction): current behavior strips incoming <model_switch> from the compact request and restores it in the post-compaction follow-up request.\",\n&[(\"Initial Request (Previous Model)\", &requests[0]),\n(\"Local Compaction Request\", &requests[1]),\n(\"Local Post-Compaction History Layout\", &requests[2]),])"
---
Scenario: Pre-turn compaction during model switch (without pre-sampling model-switch compaction): current behavior strips incoming <model_switch> from the compact request and restores it in the post-compaction follow-up request.
## Initial Request (Previous Model)
00:message/developer:<PERMISSIONS_INSTRUCTIONS>
01:message/user:<AGENTS_MD>
02:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
03:message/developer:<PERMISSIONS_INSTRUCTIONS>
04:message/user:BEFORE_SWITCH_USER
## Local Compaction Request
00:message/developer:<PERMISSIONS_INSTRUCTIONS>
01:message/user:<AGENTS_MD>
02:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
03:message/developer:<PERMISSIONS_INSTRUCTIONS>
04:message/user:BEFORE_SWITCH_USER
05:message/assistant:BEFORE_SWITCH_REPLY
06:message/user:<SUMMARIZATION_PROMPT>
## Local Post-Compaction History Layout
00:message/developer:<PERMISSIONS_INSTRUCTIONS>
01:message/developer:<personality_spec> The user has requested a new communication st...
02:message/user:<AGENTS_MD>
03:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
04:message/user:BEFORE_SWITCH_USER
05:message/user:<COMPACTION_SUMMARY>\nPRETURN_SWITCH_SUMMARY
06:message/developer:<model_switch>\nThe user was previously using a different model....
07:message/user:AFTER_SWITCH_USER

View File

@@ -0,0 +1,21 @@
---
source: core/tests/suite/compact_remote.rs
expression: "format_labeled_requests_snapshot(\"Remote mid-turn compaction after an earlier summary compaction: the older summary remains in model-visible history and round-trips into the next compact request.\",\n&[(\"Second Turn Request (Before Mid-Turn Compaction)\", &requests[1]),\n(\"Remote Compaction Request\", &compact_request),])"
---
Scenario: Remote mid-turn compaction after an earlier summary compaction: the older summary remains in model-visible history and round-trips into the next compact request.
## Second Turn Request (Before Mid-Turn Compaction)
00:message/user:USER_ONE
01:message/user:<COMPACTION_SUMMARY>\nREMOTE_OLDER_SUMMARY
02:message/developer:<PERMISSIONS_INSTRUCTIONS>
03:message/user:<AGENTS_MD>
04:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
05:message/user:<COMPACTION_SUMMARY>\nREMOTE_LATEST_SUMMARY
06:message/user:USER_TWO
## Remote Compaction Request
00:message/user:USER_ONE
01:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/user:<AGENTS_MD>
03:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
04:message/user:<COMPACTION_SUMMARY>\nREMOTE_OLDER_SUMMARY

View File

@@ -0,0 +1,19 @@
---
source: core/tests/suite/compact_remote.rs
expression: "format_labeled_requests_snapshot(\"Remote mid-turn compaction where compact output has only summary user content: continuation layout reinjects canonical context before that summary.\",\n&[(\"Remote Compaction Request\", &compact_request),\n(\"Remote Post-Compaction History Layout\", &requests[1]),])"
---
Scenario: Remote mid-turn compaction where compact output has only summary user content: continuation layout reinjects canonical context before that summary.
## Remote Compaction Request
00:message/developer:<PERMISSIONS_INSTRUCTIONS>
01:message/user:<AGENTS_MD>
02:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
03:message/user:USER_ONE
04:function_call/test_tool
05:function_call_output:unsupported call: test_tool
## Remote Post-Compaction History Layout
00:message/developer:<PERMISSIONS_INSTRUCTIONS>
01:message/user:<AGENTS_MD>
02:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
03:message/user:<COMPACTION_SUMMARY>\nREMOTE_SUMMARY_ONLY

View File

@@ -0,0 +1,28 @@
---
source: core/tests/suite/compact_remote.rs
expression: "format_labeled_requests_snapshot(\"Remote pre-turn compaction during model switch currently excludes incoming user input, strips incoming <model_switch> from the compact request payload, and restores it in the post-compaction follow-up request.\",\n&[(\"Initial Request (Previous Model)\", &requests[0]),\n(\"Remote Compaction Request\", &compact_request),\n(\"Remote Post-Compaction History Layout\", &requests[1]),])"
---
Scenario: Remote pre-turn compaction during model switch currently excludes incoming user input, strips incoming <model_switch> from the compact request payload, and restores it in the post-compaction follow-up request.
## Initial Request (Previous Model)
00:message/developer:<PERMISSIONS_INSTRUCTIONS>
01:message/user:<AGENTS_MD>
02:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
03:message/user:BEFORE_SWITCH_USER
## Remote Compaction Request
00:message/developer:<PERMISSIONS_INSTRUCTIONS>
01:message/user:<AGENTS_MD>
02:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
03:message/user:BEFORE_SWITCH_USER
04:message/assistant:BEFORE_SWITCH_REPLY
## Remote Post-Compaction History Layout
00:message/user:BEFORE_SWITCH_USER
01:message/developer:<PERMISSIONS_INSTRUCTIONS>
02:message/developer:<personality_spec> The user has requested a new communication st...
03:message/user:<AGENTS_MD>
04:message/user:<ENVIRONMENT_CONTEXT:cwd=<CWD>>
05:message/user:<COMPACTION_SUMMARY>\nREMOTE_SWITCH_SUMMARY
06:message/developer:<model_switch>\nThe user was previously using a different model....
07:message/user:AFTER_SWITCH_USER