Compare commits

...

5 Commits

Author SHA1 Message Date
alexsong-oai
48300ac211 Release 0.117.0-alpha.12 2026-03-23 19:13:58 -07:00
alexsong-oai
db8bb7236d Add plugin-creator as system skill (#15554) 2026-03-23 19:08:30 -07:00
Charley Cunningham
f547b79bd0 Add fork snapshot modes (#15239)
## Summary
- add `ForkSnapshotMode` to `ThreadManager::fork_thread` so callers can
request either a committed snapshot or an interrupted snapshot
- share the model-visible `<turn_aborted>` history marker between the
live interrupt path and interrupted forks
- update the small set of direct fork callsites to pass
`ForkSnapshotMode::Committed`

Note: this enables /btw to work similarly to Esc-to-interrupt (hopefully
somewhat in distribution)

---------

Co-authored-by: Codex <noreply@openai.com>
2026-03-23 19:05:42 -07:00
Michael Bolin
84fb180eeb fix: build PATH env var using OsString instead of String (#15360) 2026-03-23 18:59:04 -07:00
jif-oai
527244910f feat: custom watcher for multi-agent v2 (#15576)
The new wait tool just returns `Wait timed out.` or `Wait completed.`.
The actual content is done through the notification watcher
2026-03-23 23:27:55 +00:00
27 changed files with 1570 additions and 117 deletions

View File

@@ -78,7 +78,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.0.0"
version = "0.117.0-alpha.12"
# Track the edition for all workspace crates in one place. Individual
# crates can still override this value, but keeping it here means new
# crates created with `cargo new -w ...` automatically inherit the 2024

View File

@@ -74,6 +74,8 @@ pub struct ThreadHistoryBuilder {
turns: Vec<Turn>,
current_turn: Option<PendingTurn>,
next_item_index: i64,
current_rollout_index: usize,
next_rollout_index: usize,
}
impl Default for ThreadHistoryBuilder {
@@ -88,6 +90,8 @@ impl ThreadHistoryBuilder {
turns: Vec::new(),
current_turn: None,
next_item_index: 1,
current_rollout_index: 0,
next_rollout_index: 0,
}
}
@@ -111,6 +115,19 @@ impl ThreadHistoryBuilder {
self.current_turn.is_some()
}
/// Return the id of the currently active turn, but only when that turn was
/// opened explicitly (`opened_explicitly` is set); yields `None` when no turn
/// is active or the active turn was opened implicitly.
pub fn active_turn_id_if_explicit(&self) -> Option<String> {
self.current_turn
.as_ref()
.filter(|turn| turn.opened_explicitly)
.map(|turn| turn.id.clone())
}
/// Rollout-item index at which the currently active turn opened during
/// replay (`rollout_start_index`), or `None` when no turn is active.
pub fn active_turn_start_index(&self) -> Option<usize> {
self.current_turn
.as_ref()
.map(|turn| turn.rollout_start_index)
}
/// Shared reducer for persisted rollout replay and in-memory current-turn
/// tracking used by running thread resume/rejoin.
///
@@ -182,6 +199,8 @@ impl ThreadHistoryBuilder {
}
pub fn handle_rollout_item(&mut self, item: &RolloutItem) {
self.current_rollout_index = self.next_rollout_index;
self.next_rollout_index += 1;
match item {
RolloutItem::EventMsg(event) => self.handle_event(event),
RolloutItem::Compacted(payload) => self.handle_compacted(payload),
@@ -974,6 +993,7 @@ impl ThreadHistoryBuilder {
status: TurnStatus::Completed,
opened_explicitly: false,
saw_compaction: false,
rollout_start_index: self.current_rollout_index,
}
}
@@ -1137,6 +1157,8 @@ struct PendingTurn {
/// True when this turn includes a persisted `RolloutItem::Compacted`, which
/// should keep the turn from being dropped even without normal items.
saw_compaction: bool,
/// Index of the rollout item that opened this turn during replay.
rollout_start_index: usize,
}
impl PendingTurn {

View File

@@ -125,7 +125,7 @@ Example with notification opt-out:
- `thread/start` — create a new thread; emits `thread/started` (including the current `thread.status`) and auto-subscribes you to turn/item events for that thread.
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/fork` — fork an existing thread into a new thread id by copying the stored history; accepts `ephemeral: true` for an in-memory temporary fork, emits `thread/started` (including the current `thread.status`), and auto-subscribes you to turn/item events for the new thread.
- `thread/fork` — fork an existing thread into a new thread id by copying the stored history; if the source thread is currently mid-turn, the fork records the same interruption marker as `turn/interrupt` instead of inheriting an unmarked partial turn suffix. Accepts `ephemeral: true` for an in-memory temporary fork, emits `thread/started` (including the current `thread.status`), and auto-subscribes you to turn/item events for the new thread.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders`, `sourceKinds`, `archived`, `cwd`, and `searchTerm` filters. Each returned `thread` includes `status` (`ThreadStatus`), defaulting to `notLoaded` when the thread is not currently loaded.
- `thread/loaded/list` — list the thread ids currently loaded in memory.
- `thread/read` — read a stored thread by id without resuming it; optionally include turns via `includeTurns`. The returned `thread` includes `status` (`ThreadStatus`), defaulting to `notLoaded` when the thread is not currently loaded.
@@ -240,7 +240,7 @@ Example:
{ "id": 11, "result": { "thread": { "id": "thr_123", } } }
```
To branch from a stored session, call `thread/fork` with the `thread.id`. This creates a new thread id and emits a `thread/started` notification for it. Pass `ephemeral: true` when the fork should stay in-memory only:
To branch from a stored session, call `thread/fork` with the `thread.id`. This creates a new thread id and emits a `thread/started` notification for it. If the source thread is actively running, the fork snapshots it as if the current turn had been interrupted first. Pass `ephemeral: true` when the fork should stay in-memory only:
```json
{ "method": "thread/fork", "id": 12, "params": { "threadId": "thr_123", "ephemeral": true } }

View File

@@ -183,6 +183,7 @@ use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::CodexThread;
use codex_core::Cursor as RolloutCursor;
use codex_core::ForkSnapshot;
use codex_core::NewThread;
use codex_core::RolloutRecorder;
use codex_core::SessionMeta;
@@ -4039,7 +4040,7 @@ impl CodexMessageProcessor {
} = match self
.thread_manager
.fork_thread(
usize::MAX,
ForkSnapshot::Interrupted,
config,
rollout_path.clone(),
persist_extended_history,
@@ -6508,7 +6509,7 @@ impl CodexMessageProcessor {
} = self
.thread_manager
.fork_thread(
usize::MAX,
ForkSnapshot::Interrupted,
config,
rollout_path,
/*persist_extended_history*/ false,

View File

@@ -123,7 +123,7 @@ async fn thread_fork_creates_new_thread_and_emits_started() -> Result<()> {
"expected forked thread to include one turn"
);
let turn = &thread.turns[0];
assert_eq!(turn.status, TurnStatus::Completed);
assert_eq!(turn.status, TurnStatus::Interrupted);
assert_eq!(turn.items.len(), 1, "expected user message item");
match &turn.items[0] {
ThreadItem::UserMessage { content, .. } => {

View File

@@ -309,14 +309,16 @@ pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result<Arg0PathEntryGu
#[cfg(windows)]
const PATH_SEPARATOR: &str = ";";
let path_element = path.display();
let updated_path_env_var = match std::env::var("PATH") {
Ok(existing_path) => {
format!("{path_element}{PATH_SEPARATOR}{existing_path}")
}
Err(_) => {
format!("{path_element}")
let updated_path_env_var = match std::env::var_os("PATH") {
Some(existing_path) => {
let mut path_env_var =
std::ffi::OsString::with_capacity(path.as_os_str().len() + 1 + existing_path.len());
path_env_var.push(path);
path_env_var.push(PATH_SEPARATOR);
path_env_var.push(existing_path);
path_env_var
}
None => path.as_os_str().to_owned(),
};
unsafe {

View File

@@ -97,6 +97,7 @@ mod seatbelt_permissions;
mod thread_manager;
pub mod web_search;
pub mod windows_sandbox_read_grants;
pub use thread_manager::ForkSnapshot;
pub use thread_manager::NewThread;
pub use thread_manager::ThreadManager;
#[deprecated(note = "use ThreadManager")]

View File

@@ -46,8 +46,8 @@ pub(crate) fn user_message_positions_in_rollout(items: &[RolloutItem]) -> Vec<us
/// a prefix that excludes the first user message and everything after it).
///
/// If `n_from_start` is `usize::MAX`, this returns the full rollout (no truncation).
/// If fewer than or equal to `n_from_start` user messages exist, this returns an empty
/// vector (out of range).
/// If fewer than or equal to `n_from_start` user messages exist, this returns the full
/// rollout unchanged.
pub(crate) fn truncate_rollout_before_nth_user_message_from_start(
items: &[RolloutItem],
n_from_start: usize,
@@ -58,9 +58,9 @@ pub(crate) fn truncate_rollout_before_nth_user_message_from_start(
let user_positions = user_message_positions_in_rollout(items);
// If fewer than or equal to n user messages exist, treat as empty (out of range).
// If fewer than or equal to n user messages exist, keep the full rollout.
if user_positions.len() <= n_from_start {
return Vec::new();
return items.to_vec();
}
// Cut strictly before the nth user message (do not keep the nth itself).

View File

@@ -1,6 +1,5 @@
use super::*;
use crate::codex::make_session_and_context;
use assert_matches::assert_matches;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemReasoningSummary;
use codex_protocol::protocol::ThreadRolledBackEvent;
@@ -74,7 +73,10 @@ fn truncates_rollout_from_start_before_nth_user_only() {
);
let truncated2 = truncate_rollout_before_nth_user_message_from_start(&rollout, 2);
assert_matches!(truncated2.as_slice(), []);
assert_eq!(
serde_json::to_value(&truncated2).unwrap(),
serde_json::to_value(&rollout).unwrap()
);
}
#[test]

View File

@@ -22,6 +22,7 @@ use tracing::warn;
use crate::AuthManager;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::contextual_user_message::TURN_ABORTED_CLOSE_TAG;
use crate::contextual_user_message::TURN_ABORTED_OPEN_TAG;
use crate::hook_runtime::PendingInputHookDisposition;
use crate::hook_runtime::inspect_pending_input;
@@ -60,6 +61,22 @@ pub(crate) use user_shell::execute_user_shell_command;
const GRACEFULL_INTERRUPTION_TIMEOUT_MS: u64 = 100;
const TURN_ABORTED_INTERRUPTED_GUIDANCE: &str = "The user interrupted the previous turn on purpose. Any running unified exec processes may still be running in the background. If any tools/commands were aborted, they may have partially executed; verify current state before retrying.";
/// Shared model-visible marker used by both the real interrupt path and
/// interrupted fork snapshots.
///
/// Builds a user-role message whose text wraps
/// `TURN_ABORTED_INTERRUPTED_GUIDANCE` between the `<turn_aborted>` open and
/// close tags, so both codepaths record an identical history item.
pub(crate) fn interrupted_turn_history_marker() -> ResponseItem {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: format!(
"{TURN_ABORTED_OPEN_TAG}\n{TURN_ABORTED_INTERRUPTED_GUIDANCE}\n{TURN_ABORTED_CLOSE_TAG}"
),
}],
// Not a model-generated end-of-turn; leave both optional fields unset.
end_turn: None,
phase: None,
}
}
fn emit_turn_network_proxy_metric(
session_telemetry: &SessionTelemetry,
network_proxy_active: bool,
@@ -457,17 +474,7 @@ impl Session {
if reason == TurnAbortReason::Interrupted {
self.cleanup_after_interrupt(&task.turn_context).await;
let marker = ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: format!(
"{TURN_ABORTED_OPEN_TAG}\n{TURN_ABORTED_INTERRUPTED_GUIDANCE}\n</turn_aborted>"
),
}],
end_turn: None,
phase: None,
};
let marker = interrupted_turn_history_marker();
self.record_into_history(std::slice::from_ref(&marker), task.turn_context.as_ref())
.await;
self.persist_rollout_items(&[RolloutItem::ResponseItem(marker)])

View File

@@ -24,6 +24,9 @@ use crate::rollout::RolloutRecorder;
use crate::rollout::truncation;
use crate::shell_snapshot::ShellSnapshot;
use crate::skills::SkillsManager;
use crate::tasks::interrupted_turn_history_marker;
use codex_app_server_protocol::ThreadHistoryBuilder;
use codex_app_server_protocol::TurnStatus;
use codex_protocol::ThreadId;
use codex_protocol::config_types::CollaborationModeMask;
#[cfg(test)]
@@ -34,6 +37,8 @@ use codex_protocol::protocol::McpServerRefreshConfig;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::W3cTraceContext;
use futures::StreamExt;
use futures::stream::FuturesUnordered;
@@ -126,6 +131,45 @@ pub struct NewThread {
pub session_configured: SessionConfiguredEvent,
}
// TODO(ccunningham): Add an explicit non-interrupting live-turn snapshot once
// core can represent sampling boundaries directly instead of relying on
// whichever items happened to be persisted mid-turn.
//
// Two likely future variants:
// - `TruncateToLastSamplingBoundary` for callers that want a coherent fork from
// the last stable model boundary without synthesizing an interrupt.
// - `WaitUntilNextSamplingBoundary` (or similar) for callers that prefer to
// fork after the next sampling boundary rather than interrupting immediately.
/// Strategy for snapshotting a source thread's rollout history when forking
/// it via `ThreadManager::fork_thread`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ForkSnapshot {
/// Fork a committed prefix ending strictly before the nth user message.
///
/// When `n` is within range, this cuts before that 0-based user-message
/// boundary. When `n` is out of range and the source thread is currently
/// mid-turn, this instead cuts before the active turn's opening boundary
/// so the fork drops the unfinished turn suffix. When `n` is out of range
/// and the source thread is already at a turn boundary, this returns the
/// full committed history unchanged.
TruncateBeforeNthUserMessage(usize),
/// Fork the current persisted history as if the source thread had been
/// interrupted now.
///
/// If the persisted snapshot ends mid-turn, this appends the same
/// `<turn_aborted>` marker produced by a real interrupt. If the snapshot is
/// already at a turn boundary, this returns the current persisted history
/// unchanged.
Interrupted,
}
/// Preserve legacy `fork_thread(usize, ...)` callsites by mapping them to the
/// existing truncate-before-nth-user-message snapshot mode.
///
/// `usize::MAX` maps to an out-of-range index, preserving the legacy
/// "keep the full rollout" behavior of those callsites.
impl From<usize> for ForkSnapshot {
fn from(value: usize) -> Self {
Self::TruncateBeforeNthUserMessage(value)
}
}
#[derive(Debug, Default, PartialEq, Eq)]
pub struct ThreadShutdownReport {
pub completed: Vec<ThreadId>,
@@ -541,20 +585,41 @@ impl ThreadManager {
report
}
/// Fork an existing thread by taking messages up to the given position (not including
/// the message at the given position) and starting a new thread with identical
/// configuration (unless overridden by the caller's `config`). The new thread will have
/// a fresh id. Pass `usize::MAX` to keep the full rollout history.
pub async fn fork_thread(
/// Fork an existing thread by snapshotting rollout history according to
/// `snapshot` and starting a new thread with identical configuration
/// (unless overridden by the caller's `config`). The new thread will have
/// a fresh id.
pub async fn fork_thread<S>(
&self,
nth_user_message: usize,
snapshot: S,
config: Config,
path: PathBuf,
persist_extended_history: bool,
parent_trace: Option<W3cTraceContext>,
) -> CodexResult<NewThread> {
) -> CodexResult<NewThread>
where
S: Into<ForkSnapshot>,
{
let snapshot = snapshot.into();
let history = RolloutRecorder::get_rollout_history(&path).await?;
let history = truncate_before_nth_user_message(history, nth_user_message);
let snapshot_state = snapshot_turn_state(&history);
let history = match snapshot {
ForkSnapshot::TruncateBeforeNthUserMessage(nth_user_message) => {
truncate_before_nth_user_message(history, nth_user_message, &snapshot_state)
}
ForkSnapshot::Interrupted => {
let history = match history {
InitialHistory::New => InitialHistory::New,
InitialHistory::Forked(history) => InitialHistory::Forked(history),
InitialHistory::Resumed(resumed) => InitialHistory::Forked(resumed.history),
};
if snapshot_state.ends_mid_turn {
append_interrupted_boundary(history, snapshot_state.active_turn_id)
} else {
history
}
}
};
Box::pin(self.state.spawn_thread(
config,
history,
@@ -838,11 +903,31 @@ impl ThreadManagerState {
}
}
/// Return a prefix of `items` obtained by cutting strictly before the nth user message
/// (0-based) and all items that follow it.
fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> InitialHistory {
/// Return a fork snapshot cut strictly before the nth user message (0-based).
///
/// Out-of-range values keep the full committed history at a turn boundary, but
/// when the source thread is currently mid-turn they fall back to cutting
/// before the active turn's opening boundary so the fork omits the unfinished
/// suffix entirely.
fn truncate_before_nth_user_message(
history: InitialHistory,
n: usize,
snapshot_state: &SnapshotTurnState,
) -> InitialHistory {
let items: Vec<RolloutItem> = history.get_rollout_items();
let rolled = truncation::truncate_rollout_before_nth_user_message_from_start(&items, n);
let user_positions = truncation::user_message_positions_in_rollout(&items);
let rolled = if snapshot_state.ends_mid_turn && n >= user_positions.len() {
if let Some(cut_idx) = snapshot_state
.active_turn_start_index
.or_else(|| user_positions.last().copied())
{
items[..cut_idx].to_vec()
} else {
items
}
} else {
truncation::truncate_rollout_before_nth_user_message_from_start(&items, n)
};
if rolled.is_empty() {
InitialHistory::New
@@ -851,6 +936,95 @@ fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> Initia
}
}
/// Summary of whether a persisted rollout snapshot ends inside an unfinished
/// turn; computed by `snapshot_turn_state` and consumed by the fork paths.
#[derive(Debug, Eq, PartialEq)]
struct SnapshotTurnState {
/// True when the snapshot's tail belongs to a turn with no terminating
/// boundary (no turn-complete/turn-aborted after the last user message).
ends_mid_turn: bool,
/// Id of the active turn when it was opened by an explicit lifecycle
/// event; `None` for implicit/legacy histories.
active_turn_id: Option<String>,
/// Rollout-item index at which the active turn opened, when known.
active_turn_start_index: Option<usize>,
}
/// Classify a persisted rollout snapshot as mid-turn or at a turn boundary.
///
/// Replays every rollout item through a `ThreadHistoryBuilder`, then:
/// 1. an explicitly opened, in-progress turn → mid-turn with its id/index;
/// 2. an explicitly opened turn that is not `InProgress` → not mid-turn;
/// 3. otherwise, fall back to scanning the items after the last user message
///    for a terminating boundary event.
fn snapshot_turn_state(history: &InitialHistory) -> SnapshotTurnState {
let rollout_items = history.get_rollout_items();
let mut builder = ThreadHistoryBuilder::new();
for item in &rollout_items {
builder.handle_rollout_item(item);
}
let active_turn_id = builder.active_turn_id_if_explicit();
if builder.has_active_turn() && active_turn_id.is_some() {
let active_turn_snapshot = builder.active_turn_snapshot();
// An explicitly opened turn that is no longer in progress counts as a
// boundary, not a mid-turn snapshot.
if active_turn_snapshot
.as_ref()
.is_some_and(|turn| turn.status != TurnStatus::InProgress)
{
return SnapshotTurnState {
ends_mid_turn: false,
active_turn_id: None,
active_turn_start_index: None,
};
}
return SnapshotTurnState {
ends_mid_turn: true,
active_turn_id,
active_turn_start_index: builder.active_turn_start_index(),
};
}
// No explicit active turn: with no user message at all, the snapshot is
// trivially at a boundary.
let Some(last_user_position) = truncation::user_message_positions_in_rollout(&rollout_items)
.last()
.copied()
else {
return SnapshotTurnState {
ends_mid_turn: false,
active_turn_id: None,
active_turn_start_index: None,
};
};
// Synthetic fork/resume histories can contain user/assistant response items
// without explicit turn lifecycle events. If the persisted snapshot has no
// terminating boundary after its last user message, treat it as mid-turn.
SnapshotTurnState {
ends_mid_turn: !rollout_items[last_user_position + 1..].iter().any(|item| {
matches!(
item,
RolloutItem::EventMsg(EventMsg::TurnComplete(_) | EventMsg::TurnAborted(_))
)
}),
active_turn_id: None,
active_turn_start_index: None,
}
}
/// Append the same persisted interrupt boundary used by the live interrupt
/// path to a fork snapshot whose source thread was confirmed to be mid-turn.
///
/// Records the shared model-visible `<turn_aborted>` marker followed by a
/// `TurnAborted(Interrupted)` event; the result is always a `Forked` history.
fn append_interrupted_boundary(history: InitialHistory, turn_id: Option<String>) -> InitialHistory {
    // Flatten whichever snapshot shape we were handed into a plain item list.
    let mut items = match history {
        InitialHistory::New => Vec::new(),
        InitialHistory::Forked(items) => items,
        InitialHistory::Resumed(resumed) => resumed.history,
    };
    items.push(RolloutItem::ResponseItem(interrupted_turn_history_marker()));
    items.push(RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent {
        turn_id,
        reason: TurnAbortReason::Interrupted,
    })));
    InitialHistory::Forked(items)
}
#[cfg(test)]
#[path = "thread_manager_tests.rs"]
mod tests;

View File

@@ -3,11 +3,15 @@ use crate::codex::make_session_and_context;
use crate::config::test_config;
use crate::models_manager::collaboration_mode_presets::CollaborationModesConfig;
use crate::models_manager::manager::RefreshStrategy;
use assert_matches::assert_matches;
use crate::rollout::RolloutRecorder;
use crate::tasks::interrupted_turn_history_marker;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemReasoningSummary;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::protocol::AgentMessageEvent;
use codex_protocol::protocol::TurnStartedEvent;
use codex_protocol::protocol::UserMessageEvent;
use core_test_support::responses::mount_models_once;
use pretty_assertions::assert_eq;
use std::time::Duration;
@@ -38,7 +42,7 @@ fn assistant_msg(text: &str) -> ResponseItem {
}
#[test]
fn drops_from_last_user_only() {
fn truncates_before_requested_user_message() {
let items = [
user_msg("u1"),
assistant_msg("a1"),
@@ -68,7 +72,15 @@ fn drops_from_last_user_only() {
.cloned()
.map(RolloutItem::ResponseItem)
.collect();
let truncated = truncate_before_nth_user_message(InitialHistory::Forked(initial), 1);
let truncated = truncate_before_nth_user_message(
InitialHistory::Forked(initial),
1,
&SnapshotTurnState {
ends_mid_turn: false,
active_turn_id: None,
active_turn_start_index: None,
},
);
let got_items = truncated.get_rollout_items();
let expected_items = vec![
RolloutItem::ResponseItem(items[0].clone()),
@@ -85,8 +97,99 @@ fn drops_from_last_user_only() {
.cloned()
.map(RolloutItem::ResponseItem)
.collect();
let truncated2 = truncate_before_nth_user_message(InitialHistory::Forked(initial2), 2);
assert_matches!(truncated2, InitialHistory::New);
let truncated2 = truncate_before_nth_user_message(
InitialHistory::Forked(initial2.clone()),
2,
&SnapshotTurnState {
ends_mid_turn: false,
active_turn_id: None,
active_turn_start_index: None,
},
);
assert_eq!(
serde_json::to_value(truncated2.get_rollout_items()).unwrap(),
serde_json::to_value(initial2).unwrap()
);
}
// Out-of-range `n` on a mid-turn snapshot must fall back to cutting before
// the last user message (no explicit start index here), dropping only the
// unfinished `u2`/`partial` suffix rather than the whole history.
#[test]
fn out_of_range_truncation_drops_only_unfinished_suffix_mid_turn() {
let items = vec![
RolloutItem::ResponseItem(user_msg("u1")),
RolloutItem::ResponseItem(assistant_msg("a1")),
RolloutItem::ResponseItem(user_msg("u2")),
RolloutItem::ResponseItem(assistant_msg("partial")),
];
let truncated = truncate_before_nth_user_message(
InitialHistory::Forked(items.clone()),
usize::MAX,
&SnapshotTurnState {
ends_mid_turn: true,
active_turn_id: None,
active_turn_start_index: None,
},
);
// Compare via JSON since RolloutItem lacks a direct equality comparison here.
assert_eq!(
serde_json::to_value(truncated.get_rollout_items()).unwrap(),
serde_json::to_value(items[..2].to_vec()).unwrap()
);
}
// Compile-time check: `fork_thread` still accepts a bare `usize` via the
// `From<usize> for ForkSnapshot` impl. The returned future is never awaited;
// the function-pointer coercion below is what enforces the signature.
#[test]
fn fork_thread_accepts_legacy_usize_snapshot_argument() {
fn assert_legacy_snapshot_callsite(
manager: &ThreadManager,
config: Config,
path: std::path::PathBuf,
) {
let _future = manager.fork_thread(
usize::MAX,
config,
path,
/*persist_extended_history*/ false,
/*parent_trace*/ None,
);
}
let _: fn(&ThreadManager, Config, std::path::PathBuf) = assert_legacy_snapshot_callsite;
}
// When the active turn was opened by an explicit `TurnStarted` event that
// precedes its user message, out-of-range truncation must cut at the turn's
// opening index (2 here), not at the user-message position.
#[test]
fn out_of_range_truncation_drops_pre_user_active_turn_prefix() {
let items = vec![
RolloutItem::ResponseItem(user_msg("u1")),
RolloutItem::ResponseItem(assistant_msg("a1")),
RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent {
turn_id: "turn-2".to_string(),
model_context_window: None,
collaboration_mode_kind: Default::default(),
})),
RolloutItem::ResponseItem(user_msg("u2")),
RolloutItem::ResponseItem(assistant_msg("partial")),
];
let snapshot_state = snapshot_turn_state(&InitialHistory::Forked(items.clone()));
assert_eq!(
snapshot_state,
SnapshotTurnState {
ends_mid_turn: true,
active_turn_id: Some("turn-2".to_string()),
active_turn_start_index: Some(2),
},
);
let truncated = truncate_before_nth_user_message(
InitialHistory::Forked(items.clone()),
usize::MAX,
&snapshot_state,
);
assert_eq!(
serde_json::to_value(truncated.get_rollout_items()).unwrap(),
serde_json::to_value(items[..2].to_vec()).unwrap()
);
}
#[tokio::test]
@@ -104,7 +207,15 @@ async fn ignores_session_prefix_messages_when_truncating() {
.map(RolloutItem::ResponseItem)
.collect();
let truncated = truncate_before_nth_user_message(InitialHistory::Forked(rollout_items), 1);
let truncated = truncate_before_nth_user_message(
InitialHistory::Forked(rollout_items),
1,
&SnapshotTurnState {
ends_mid_turn: false,
active_turn_id: None,
active_turn_start_index: None,
},
);
let got_items = truncated.get_rollout_items();
let expected: Vec<RolloutItem> = vec![
@@ -185,3 +296,421 @@ async fn new_uses_configured_openai_provider_for_model_refresh() {
let _ = manager.list_models(RefreshStrategy::Online).await;
assert_eq!(models_mock.requests().len(), 1);
}
// `append_interrupted_boundary` must append exactly the shared marker plus a
// `TurnAborted(Interrupted)` event, both for a non-empty forked history and
// for the empty (`InitialHistory::New`) case.
#[test]
fn interrupted_fork_snapshot_appends_interrupt_boundary() {
let committed_history =
InitialHistory::Forked(vec![RolloutItem::ResponseItem(user_msg("hello"))]);
assert_eq!(
serde_json::to_value(
append_interrupted_boundary(committed_history, /*turn_id*/ None).get_rollout_items()
)
.expect("serialize interrupted fork history"),
serde_json::to_value(vec![
RolloutItem::ResponseItem(user_msg("hello")),
RolloutItem::ResponseItem(interrupted_turn_history_marker()),
RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent {
turn_id: None,
reason: TurnAbortReason::Interrupted,
})),
])
.expect("serialize expected interrupted fork history"),
);
// Empty history: the boundary pair alone becomes the forked history.
assert_eq!(
serde_json::to_value(
append_interrupted_boundary(InitialHistory::New, /*turn_id*/ None).get_rollout_items()
)
.expect("serialize interrupted empty fork history"),
serde_json::to_value(vec![
RolloutItem::ResponseItem(interrupted_turn_history_marker()),
RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent {
turn_id: None,
reason: TurnAbortReason::Interrupted,
})),
])
.expect("serialize expected interrupted empty history"),
);
}
// A history that already ends with the interrupt marker + `TurnAborted` event
// is at a turn boundary; forking it again must not look mid-turn.
#[test]
fn interrupted_snapshot_is_not_mid_turn() {
let interrupted_history = InitialHistory::Forked(vec![
RolloutItem::ResponseItem(user_msg("hello")),
RolloutItem::ResponseItem(assistant_msg("partial")),
RolloutItem::ResponseItem(interrupted_turn_history_marker()),
RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent {
turn_id: Some("turn-1".to_string()),
reason: TurnAbortReason::Interrupted,
})),
]);
assert_eq!(
snapshot_turn_state(&interrupted_history),
SnapshotTurnState {
ends_mid_turn: false,
active_turn_id: None,
active_turn_start_index: None,
},
);
}
// Legacy event-only histories (user + agent message events, no lifecycle
// events) must classify as a completed boundary, not mid-turn.
#[test]
fn completed_legacy_event_history_is_not_mid_turn() {
let completed_history = InitialHistory::Forked(vec![
RolloutItem::EventMsg(EventMsg::UserMessage(UserMessageEvent {
message: "hello".to_string(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
})),
RolloutItem::EventMsg(EventMsg::AgentMessage(AgentMessageEvent {
message: "done".to_string(),
phase: None,
memory_citation: None,
})),
]);
assert_eq!(
snapshot_turn_state(&completed_history),
SnapshotTurnState {
ends_mid_turn: false,
active_turn_id: None,
active_turn_start_index: None,
},
);
}
// A user response item followed by a legacy user event, with no terminating
// boundary event after the last user message, must classify as mid-turn.
#[test]
fn mixed_response_and_legacy_user_event_history_is_mid_turn() {
let mixed_history = InitialHistory::Forked(vec![
RolloutItem::ResponseItem(user_msg("hello")),
RolloutItem::EventMsg(EventMsg::UserMessage(UserMessageEvent {
message: "hello".to_string(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
})),
]);
assert_eq!(
snapshot_turn_state(&mixed_history),
SnapshotTurnState {
ends_mid_turn: true,
active_turn_id: None,
active_turn_start_index: None,
},
);
}
// End-to-end: forking a legacy (no explicit turn events) mid-turn thread with
// `ForkSnapshot::Interrupted` appends exactly one marker and one abort event,
// and the abort event carries `turn_id: None` rather than a synthesized id.
#[tokio::test]
async fn interrupted_fork_snapshot_does_not_synthesize_turn_id_for_legacy_history() {
let temp_dir = tempdir().expect("tempdir");
let mut config = test_config();
config.codex_home = temp_dir.path().join("codex-home");
config.cwd = config.codex_home.clone();
std::fs::create_dir_all(&config.codex_home).expect("create codex home");
let auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
let manager = ThreadManager::new(
&config,
auth_manager.clone(),
SessionSource::Exec,
CollaborationModesConfig::default(),
);
// Source thread seeded with a partial (mid-turn) legacy history.
let source = manager
.resume_thread_with_history(
config.clone(),
InitialHistory::Forked(vec![
RolloutItem::ResponseItem(user_msg("hello")),
RolloutItem::ResponseItem(assistant_msg("partial")),
]),
auth_manager,
/*persist_extended_history*/ false,
/*parent_trace*/ None,
)
.await
.expect("create source thread from completed history");
let source_path = source
.thread
.rollout_path()
.expect("source rollout path should exist");
let source_history = RolloutRecorder::get_rollout_history(&source_path)
.await
.expect("read source rollout history");
let source_snapshot_state = snapshot_turn_state(&source_history);
assert!(source_snapshot_state.ends_mid_turn);
// Legacy history has no explicit turn id to preserve.
let expected_turn_id = source_snapshot_state.active_turn_id.clone();
assert_eq!(expected_turn_id, None);
let forked = manager
.fork_thread(
ForkSnapshot::Interrupted,
config,
source_path,
/*persist_extended_history*/ false,
/*parent_trace*/ None,
)
.await
.expect("fork interrupted snapshot");
let forked_path = forked
.thread
.rollout_path()
.expect("forked rollout path should exist");
let history = RolloutRecorder::get_rollout_history(&forked_path)
.await
.expect("read forked rollout history");
// The fork must now sit at a turn boundary.
assert!(!snapshot_turn_state(&history).ends_mid_turn);
let rollout_items: Vec<_> = history
.get_rollout_items()
.into_iter()
.filter(|item| !matches!(item, RolloutItem::SessionMeta(_)))
.collect();
let interrupted_marker_json =
serde_json::to_value(RolloutItem::ResponseItem(interrupted_turn_history_marker()))
.expect("serialize interrupted marker");
let interrupted_abort_json = serde_json::to_value(RolloutItem::EventMsg(
EventMsg::TurnAborted(TurnAbortedEvent {
turn_id: expected_turn_id,
reason: TurnAbortReason::Interrupted,
}),
))
.expect("serialize interrupted abort event");
// Exactly one marker and one abort event, compared structurally via JSON.
assert_eq!(
rollout_items
.iter()
.filter(|item| {
serde_json::to_value(item).expect("serialize rollout item")
== interrupted_marker_json
})
.count(),
1,
);
assert_eq!(
rollout_items
.iter()
.filter(|item| {
serde_json::to_value(item).expect("serialize rollout item")
== interrupted_abort_json
})
.count(),
1,
);
}
// End-to-end: when the source thread's mid-turn history opened with an
// explicit `TurnStarted` event, the interrupted fork's abort event must carry
// that same turn id ("turn-explicit") instead of dropping it.
#[tokio::test]
async fn interrupted_fork_snapshot_preserves_explicit_turn_id() {
let temp_dir = tempdir().expect("tempdir");
let mut config = test_config();
config.codex_home = temp_dir.path().join("codex-home");
config.cwd = config.codex_home.clone();
std::fs::create_dir_all(&config.codex_home).expect("create codex home");
let auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
let manager = ThreadManager::new(
&config,
auth_manager.clone(),
SessionSource::Exec,
CollaborationModesConfig::default(),
);
// Source thread seeded with an explicit TurnStarted followed by a partial turn.
let source = manager
.resume_thread_with_history(
config.clone(),
InitialHistory::Forked(vec![
RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent {
turn_id: "turn-explicit".to_string(),
model_context_window: None,
collaboration_mode_kind: Default::default(),
})),
RolloutItem::ResponseItem(user_msg("hello")),
RolloutItem::ResponseItem(assistant_msg("partial")),
]),
auth_manager,
/*persist_extended_history*/ false,
/*parent_trace*/ None,
)
.await
.expect("create source thread from explicit partial history");
let source_path = source
.thread
.rollout_path()
.expect("source rollout path should exist");
let source_history = RolloutRecorder::get_rollout_history(&source_path)
.await
.expect("read source rollout history");
let source_snapshot_state = snapshot_turn_state(&source_history);
assert_eq!(
source_snapshot_state,
SnapshotTurnState {
ends_mid_turn: true,
active_turn_id: Some("turn-explicit".to_string()),
active_turn_start_index: Some(1),
},
);
let forked = manager
.fork_thread(
ForkSnapshot::Interrupted,
config,
source_path,
/*persist_extended_history*/ false,
/*parent_trace*/ None,
)
.await
.expect("fork interrupted snapshot");
let forked_path = forked
.thread
.rollout_path()
.expect("forked rollout path should exist");
let history = RolloutRecorder::get_rollout_history(&forked_path)
.await
.expect("read forked rollout history");
let rollout_items: Vec<_> = history
.get_rollout_items()
.into_iter()
.filter(|item| !matches!(item, RolloutItem::SessionMeta(_)))
.collect();
// The abort event recorded by the fork carries the explicit turn id.
assert!(rollout_items.iter().any(|item| {
matches!(
item,
RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent {
turn_id: Some(turn_id),
reason: TurnAbortReason::Interrupted,
})) if turn_id == "turn-explicit"
)
}));
}
#[tokio::test]
async fn interrupted_fork_snapshot_uses_persisted_mid_turn_history_without_live_source() {
    // Covers the offline fork path: once the live source thread is removed, an
    // Interrupted fork must be built purely from the persisted mid-turn rollout.
    // Also verifies the interrupted-turn marker is not duplicated when the fork
    // itself is forked again.
    let temp_dir = tempdir().expect("tempdir");
    let mut config = test_config();
    config.codex_home = temp_dir.path().join("codex-home");
    config.cwd = config.codex_home.clone();
    std::fs::create_dir_all(&config.codex_home).expect("create codex home");
    let auth_manager =
        AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
    let manager = ThreadManager::new(
        &config,
        auth_manager.clone(),
        SessionSource::Exec,
        CollaborationModesConfig::default(),
    );
    // Seed history with a user message and a partial assistant reply but no
    // TurnComplete, so the persisted rollout ends mid-turn.
    let source = manager
        .resume_thread_with_history(
            config.clone(),
            InitialHistory::Forked(vec![
                RolloutItem::ResponseItem(user_msg("hello")),
                RolloutItem::ResponseItem(assistant_msg("partial")),
            ]),
            auth_manager,
            /*persist_extended_history*/ false,
            /*parent_trace*/ None,
        )
        .await
        .expect("create source thread from partial history");
    let source_path = source
        .thread
        .rollout_path()
        .expect("source rollout path should exist");
    let source_history = RolloutRecorder::get_rollout_history(&source_path)
        .await
        .expect("read source rollout history");
    // Fixture sanity check: the source rollout really ends mid-turn.
    assert!(snapshot_turn_state(&source_history).ends_mid_turn);
    // Drop the live thread so the fork can only rely on the on-disk rollout.
    manager.remove_thread(&source.thread_id).await;
    let forked = manager
        .fork_thread(
            ForkSnapshot::Interrupted,
            config.clone(),
            source_path,
            /*persist_extended_history*/ false,
            /*parent_trace*/ None,
        )
        .await
        .expect("fork interrupted snapshot");
    let forked_path = forked
        .thread
        .rollout_path()
        .expect("forked rollout path should exist");
    let history = RolloutRecorder::get_rollout_history(&forked_path)
        .await
        .expect("read forked rollout history");
    // The fork closes the interrupted turn, so its rollout no longer ends mid-turn.
    assert!(!snapshot_turn_state(&history).ends_mid_turn);
    // Ignore SessionMeta when comparing rollout contents.
    let forked_rollout_items: Vec<_> = history
        .get_rollout_items()
        .into_iter()
        .filter(|item| !matches!(item, RolloutItem::SessionMeta(_)))
        .collect();
    // Compare via serialized JSON so the marker match is structural, not by identity.
    let interrupted_marker_json =
        serde_json::to_value(RolloutItem::ResponseItem(interrupted_turn_history_marker()))
            .expect("serialize interrupted marker");
    // Exactly one model-visible <turn_aborted> history marker in the fork.
    assert_eq!(
        forked_rollout_items
            .iter()
            .filter(|item| {
                serde_json::to_value(item).expect("serialize forked rollout item")
                    == interrupted_marker_json
            })
            .count(),
        1,
    );
    // Remove the live fork too, then fork again from its persisted rollout.
    manager.remove_thread(&forked.thread_id).await;
    let reforked = manager
        .fork_thread(
            ForkSnapshot::Interrupted,
            config,
            forked_path,
            /*persist_extended_history*/ false,
            /*parent_trace*/ None,
        )
        .await
        .expect("re-fork interrupted snapshot");
    let reforked_path = reforked
        .thread
        .rollout_path()
        .expect("re-forked rollout path should exist");
    let reforked_history = RolloutRecorder::get_rollout_history(&reforked_path)
        .await
        .expect("read re-forked rollout history");
    let reforked_rollout_items: Vec<_> = reforked_history
        .get_rollout_items()
        .into_iter()
        .filter(|item| !matches!(item, RolloutItem::SessionMeta(_)))
        .collect();
    // Re-forking must not duplicate the interrupted-turn marker: still exactly one.
    assert_eq!(
        reforked_rollout_items
            .iter()
            .filter(|item| {
                serde_json::to_value(item).expect("serialize re-forked rollout item")
                    == interrupted_marker_json
            })
            .count(),
        1,
    );
    // Likewise exactly one Interrupted TurnAborted event survives the re-fork.
    assert_eq!(
        reforked_rollout_items
            .iter()
            .filter(|item| {
                matches!(
                    item,
                    RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent {
                        reason: TurnAbortReason::Interrupted,
                        ..
                    }))
                )
            })
            .count(),
        1,
    );
}

View File

@@ -9,12 +9,14 @@ use crate::config::types::ShellEnvironmentPolicy;
use crate::function_tool::FunctionCallError;
use crate::protocol::AgentStatus;
use crate::protocol::AskForApproval;
use crate::protocol::EventMsg;
use crate::protocol::FileSystemSandboxPolicy;
use crate::protocol::NetworkSandboxPolicy;
use crate::protocol::Op;
use crate::protocol::SandboxPolicy;
use crate::protocol::SessionSource;
use crate::protocol::SubAgentSource;
use crate::protocol::TurnCompleteEvent;
use crate::state::TaskKind;
use crate::tasks::SessionTask;
use crate::tasks::SessionTaskContext;
@@ -1414,7 +1416,7 @@ async fn multi_agent_v2_wait_agent_accepts_targets_argument() {
assert_eq!(
result,
crate::tools::handlers::multi_agents_v2::wait::WaitAgentResult {
status: HashMap::from([(target, AgentStatus::NotFound)]),
message: "Wait completed.".to_string(),
timed_out: false,
}
);
@@ -1582,12 +1584,7 @@ async fn wait_agent_returns_final_status_without_timeout() {
}
#[tokio::test]
async fn multi_agent_v2_wait_agent_returns_statuses_keyed_by_path() {
#[derive(Debug, Deserialize)]
struct SpawnAgentResult {
task_name: String,
}
async fn multi_agent_v2_wait_agent_returns_summary_for_named_targets() {
let (mut session, mut turn) = make_session_and_context().await;
let manager = thread_manager();
let root = manager
@@ -1617,9 +1614,7 @@ async fn multi_agent_v2_wait_agent_returns_statuses_keyed_by_path() {
))
.await
.expect("spawn_agent should succeed");
let (content, _) = expect_text_output(spawn_output);
let spawn_result: SpawnAgentResult =
serde_json::from_str(&content).expect("spawn result should parse");
let _ = expect_text_output(spawn_output);
let agent_id = session
.services
@@ -1667,13 +1662,67 @@ async fn multi_agent_v2_wait_agent_returns_statuses_keyed_by_path() {
assert_eq!(
result,
crate::tools::handlers::multi_agents_v2::wait::WaitAgentResult {
status: HashMap::from([(spawn_result.task_name, AgentStatus::Shutdown)]),
message: "Wait completed.".to_string(),
timed_out: false,
}
);
assert_eq!(success, None);
}
#[tokio::test]
async fn multi_agent_v2_wait_agent_does_not_return_completed_content() {
let (mut session, mut turn) = make_session_and_context().await;
let manager = thread_manager();
session.services.agent_control = manager.agent_control();
let mut config = (*turn.config).clone();
config
.features
.enable(Feature::MultiAgentV2)
.expect("test config should allow feature update");
turn.config = Arc::new(config.clone());
let thread = manager.start_thread(config).await.expect("start thread");
let agent_id = thread.thread_id;
let child_turn = thread.thread.codex.session.new_default_turn().await;
thread
.thread
.codex
.session
.send_event(
child_turn.as_ref(),
EventMsg::TurnComplete(TurnCompleteEvent {
turn_id: child_turn.sub_id.clone(),
last_agent_message: Some("sensitive child output".to_string()),
}),
)
.await;
let output = WaitAgentHandlerV2
.handle(invocation(
Arc::new(session),
Arc::new(turn),
"wait_agent",
function_payload(json!({
"targets": [agent_id.to_string()],
"timeout_ms": 1000
})),
))
.await
.expect("wait_agent should succeed");
let (content, success) = expect_text_output(output);
let result: crate::tools::handlers::multi_agents_v2::wait::WaitAgentResult =
serde_json::from_str(&content).expect("wait_agent result should be json");
assert_eq!(
result,
crate::tools::handlers::multi_agents_v2::wait::WaitAgentResult {
message: "Wait completed.".to_string(),
timed_out: false,
}
);
assert!(!content.contains("sensitive child output"));
assert_eq!(success, None);
}
#[tokio::test]
async fn close_agent_submits_shutdown_and_returns_previous_status() {
let (mut session, turn) = make_session_and_context().await;

View File

@@ -35,21 +35,12 @@ impl ToolHandler for Handler {
let args: WaitArgs = parse_arguments(&arguments)?;
let receiver_thread_ids = resolve_agent_targets(&session, &turn, args.targets).await?;
let mut receiver_agents = Vec::with_capacity(receiver_thread_ids.len());
let mut target_by_thread_id = HashMap::with_capacity(receiver_thread_ids.len());
for receiver_thread_id in &receiver_thread_ids {
let agent_metadata = session
.services
.agent_control
.get_agent_metadata(*receiver_thread_id)
.unwrap_or_default();
target_by_thread_id.insert(
*receiver_thread_id,
agent_metadata
.agent_path
.as_ref()
.map(ToString::to_string)
.unwrap_or_else(|| receiver_thread_id.to_string()),
);
receiver_agents.push(CollabAgentRef {
thread_id: *receiver_thread_id,
agent_nickname: agent_metadata.agent_nickname,
@@ -152,18 +143,7 @@ impl ToolHandler for Handler {
let timed_out = statuses.is_empty();
let statuses_by_id = statuses.clone().into_iter().collect::<HashMap<_, _>>();
let agent_statuses = build_wait_agent_statuses(&statuses_by_id, &receiver_agents);
let result = WaitAgentResult {
status: statuses
.into_iter()
.filter_map(|(thread_id, status)| {
target_by_thread_id
.get(&thread_id)
.cloned()
.map(|target| (target, status))
})
.collect(),
timed_out,
};
let result = WaitAgentResult::from_timed_out(timed_out);
session
.send_event(
@@ -191,10 +171,24 @@ struct WaitArgs {
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)]
pub(crate) struct WaitAgentResult {
pub(crate) status: HashMap<String, AgentStatus>,
pub(crate) message: String,
pub(crate) timed_out: bool,
}
impl WaitAgentResult {
fn from_timed_out(timed_out: bool) -> Self {
let message = if timed_out {
"Wait timed out."
} else {
"Wait completed."
};
Self {
message: message.to_string(),
timed_out,
}
}
}
impl ToolOutput for WaitAgentResult {
fn log_preview(&self) -> String {
tool_output_json_text(self, "wait_agent")

View File

@@ -178,23 +178,41 @@ fn resume_agent_output_schema() -> JsonValue {
})
}
fn wait_output_schema() -> JsonValue {
json!({
"type": "object",
"properties": {
"status": {
"type": "object",
"description": "Final statuses keyed by canonical task name when available, otherwise by agent id.",
"additionalProperties": agent_status_output_schema()
fn wait_output_schema(multi_agent_v2: bool) -> JsonValue {
if multi_agent_v2 {
json!({
"type": "object",
"properties": {
"message": {
"type": "string",
"description": "Brief wait summary without the agent's final content."
},
"timed_out": {
"type": "boolean",
"description": "Whether the wait call returned due to timeout before any agent reached a final status."
}
},
"timed_out": {
"type": "boolean",
"description": "Whether the wait call returned due to timeout before any agent reached a final status."
}
},
"required": ["status", "timed_out"],
"additionalProperties": false
})
"required": ["message", "timed_out"],
"additionalProperties": false
})
} else {
json!({
"type": "object",
"properties": {
"status": {
"type": "object",
"description": "Final statuses keyed by canonical task name when available, otherwise by agent id.",
"additionalProperties": agent_status_output_schema()
},
"timed_out": {
"type": "boolean",
"description": "Whether the wait call returned due to timeout before any agent reached a final status."
}
},
"required": ["status", "timed_out"],
"additionalProperties": false
})
}
}
fn close_agent_output_schema() -> JsonValue {
@@ -1422,7 +1440,7 @@ fn create_resume_agent_tool() -> ToolSpec {
})
}
fn create_wait_agent_tool() -> ToolSpec {
fn create_wait_agent_tool(multi_agent_v2: bool) -> ToolSpec {
let mut properties = BTreeMap::new();
properties.insert(
"targets".to_string(),
@@ -1445,8 +1463,13 @@ fn create_wait_agent_tool() -> ToolSpec {
ToolSpec::Function(ResponsesApiTool {
name: "wait_agent".to_string(),
description: "Wait for agents to reach a final status. Completed statuses may include the agent's final message. Returns empty status when timed out. Once the agent reaches a final status, a notification message will be received containing the same completed status."
.to_string(),
description: if multi_agent_v2 {
"Wait for agents to reach a final status. Returns a brief wait summary instead of the agent's final content. Returns a timeout summary when no agent reaches a final status before the deadline."
.to_string()
} else {
"Wait for agents to reach a final status. Completed statuses may include the agent's final message. Returns empty status when timed out. Once the agent reaches a final status, a notification message will be received containing the same completed status."
.to_string()
},
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
@@ -1454,7 +1477,7 @@ fn create_wait_agent_tool() -> ToolSpec {
required: Some(vec!["targets".to_string()]),
additional_properties: Some(false.into()),
},
output_schema: Some(wait_output_schema()),
output_schema: Some(wait_output_schema(multi_agent_v2)),
})
}
@@ -3006,7 +3029,7 @@ pub(crate) fn build_specs_with_discoverable_tools(
}
push_tool_spec(
&mut builder,
create_wait_agent_tool(),
create_wait_agent_tool(config.multi_agent_v2),
/*supports_parallel_tool_calls*/ false,
config.code_mode_enabled,
);

View File

@@ -469,7 +469,7 @@ fn test_full_toolset_specs_for_gpt5_codex_unified_exec_web_search() {
create_view_image_tool(config.can_request_original_image_detail),
create_spawn_agent_tool(&config),
create_send_input_tool(),
create_wait_agent_tool(),
create_wait_agent_tool(config.multi_agent_v2),
create_close_agent_tool(),
] {
expected.insert(tool_name(&spec).to_string(), spec);
@@ -607,8 +607,8 @@ fn test_build_specs_multi_agent_v2_uses_task_names_and_hides_resume() {
.as_ref()
.expect("wait_agent should define output schema");
assert_eq!(
output_schema["properties"]["status"]["description"],
json!("Final statuses keyed by canonical task name when available, otherwise by agent id.")
output_schema["properties"]["message"]["description"],
json!("Brief wait summary without the agent's final content.")
);
assert_lacks_tool_name(&tools, "resume_agent");
}

View File

@@ -12,6 +12,7 @@ use super::compact::FIRST_REPLY;
use super::compact::SUMMARY_TEXT;
use anyhow::Result;
use codex_core::CodexThread;
use codex_core::ForkSnapshot;
use codex_core::ThreadManager;
use codex_core::compact::SUMMARIZATION_PROMPT;
use codex_core::config::Config;
@@ -383,8 +384,13 @@ async fn compact_resume_after_second_compaction_preserves_history() -> Result<()
let seeded_user_prefix = &first_request_user_texts[..first_turn_user_index];
let summary_after_second_compact =
extract_summary_user_text(&requests[requests.len() - 3], SUMMARY_TEXT);
let mut expected_after_second_compact_user_texts =
vec!["AFTER_FORK".to_string(), summary_after_second_compact];
let mut expected_after_second_compact_user_texts = vec![
"hello world".to_string(),
"AFTER_COMPACT".to_string(),
"AFTER_RESUME".to_string(),
"AFTER_FORK".to_string(),
summary_after_second_compact,
];
expected_after_second_compact_user_texts.extend_from_slice(seeded_user_prefix);
expected_after_second_compact_user_texts.push("AFTER_COMPACT_2".to_string());
let final_user_texts = json_message_input_texts(&requests[requests.len() - 1], "user");
@@ -841,8 +847,14 @@ async fn fork_thread(
path: std::path::PathBuf,
nth_user_message: usize,
) -> Arc<CodexThread> {
Box::pin(manager.fork_thread(nth_user_message, config.clone(), path, false, None))
.await
.expect("fork conversation")
.thread
Box::pin(manager.fork_thread(
ForkSnapshot::TruncateBeforeNthUserMessage(nth_user_message),
config.clone(),
path,
/*persist_extended_history*/ false,
/*parent_trace*/ None,
))
.await
.expect("fork conversation")
.thread
}

View File

@@ -1,3 +1,4 @@
use codex_core::ForkSnapshot;
use codex_core::NewThread;
use codex_core::parse_turn_item;
use codex_protocol::items::TurnItem;
@@ -110,7 +111,13 @@ async fn fork_thread_twice_drops_to_first_message() {
thread: codex_fork1,
..
} = thread_manager
.fork_thread(1, config_for_fork.clone(), base_path.clone(), false, None)
.fork_thread(
ForkSnapshot::TruncateBeforeNthUserMessage(1),
config_for_fork.clone(),
base_path.clone(),
/*persist_extended_history*/ false,
/*parent_trace*/ None,
)
.await
.expect("fork 1");
@@ -129,7 +136,13 @@ async fn fork_thread_twice_drops_to_first_message() {
thread: codex_fork2,
..
} = thread_manager
.fork_thread(0, config_for_fork.clone(), fork1_path.clone(), false, None)
.fork_thread(
ForkSnapshot::TruncateBeforeNthUserMessage(0),
config_for_fork.clone(),
fork1_path.clone(),
/*persist_extended_history*/ false,
/*parent_trace*/ None,
)
.await
.expect("fork 2");

View File

@@ -1,4 +1,5 @@
use anyhow::Result;
use codex_core::ForkSnapshot;
use codex_core::config::Constrained;
use codex_execpolicy::Policy;
use codex_protocol::models::DeveloperInstructions;
@@ -419,7 +420,13 @@ async fn resume_and_fork_append_permissions_messages() -> Result<()> {
fork_config.permissions.approval_policy = Constrained::allow_any(AskForApproval::UnlessTrusted);
let forked = initial
.thread_manager
.fork_thread(usize::MAX, fork_config, rollout_path, false, None)
.fork_thread(
ForkSnapshot::Interrupted,
fork_config,
rollout_path,
/*persist_extended_history*/ false,
/*parent_trace*/ None,
)
.await?;
forked
.thread

View File

@@ -531,7 +531,9 @@ async fn shell_command_snapshot_still_intercepts_apply_patch() -> Result<()> {
let script = "apply_patch <<'EOF'\n*** Begin Patch\n*** Add File: snapshot-apply.txt\n+hello from snapshot\n*** End Patch\nEOF\n";
let args = json!({
"command": script,
"timeout_ms": 1_000,
// The intercepted apply_patch path self-invokes codex, which can take
// longer than a second in Bazel macOS test environments.
"timeout_ms": 5_000,
});
let call_id = "shell-snapshot-apply-patch";
let responses = vec![

View File

@@ -0,0 +1,148 @@
---
name: plugin-creator
description: Create and scaffold plugin directories for Codex with a required `.codex-plugin/plugin.json`, optional plugin folders/files, and baseline placeholders you can edit before publishing or testing. Use when Codex needs to create a new local plugin, add optional plugin structure, or generate or update repo-root `.agents/plugins/marketplace.json` entries for plugin ordering and availability metadata.
---
# Plugin Creator
## Quick Start
1. Run the scaffold script:
```bash
# Plugin names are normalized to lower-case hyphen-case and must be <= 64 chars.
# The generated folder and plugin.json name are always the same.
# Run from repo root (or replace .agents/... with the absolute path to this SKILL).
# By default creates in <repo_root>/plugins/<plugin-name>.
python3 .agents/skills/plugin-creator/scripts/create_basic_plugin.py <plugin-name>
```
2. Open `<plugin-path>/.codex-plugin/plugin.json` and replace `[TODO: ...]` placeholders.
3. Generate or update the repo marketplace entry when the plugin should appear in Codex UI ordering:
```bash
# marketplace.json always lives at <repo-root>/.agents/plugins/marketplace.json
python3 .agents/skills/plugin-creator/scripts/create_basic_plugin.py my-plugin --with-marketplace
```
4. Generate/adjust optional companion folders as needed:
```bash
python3 .agents/skills/plugin-creator/scripts/create_basic_plugin.py my-plugin --path <parent-plugin-directory> \
--with-skills --with-hooks --with-scripts --with-assets --with-mcp --with-apps --with-marketplace
```
`<parent-plugin-directory>` is the directory where the plugin folder `<plugin-name>` will be created (for example `~/code/plugins`).
## What this skill creates
- Creates plugin root at `/<parent-plugin-directory>/<plugin-name>/`.
- Always creates `/<parent-plugin-directory>/<plugin-name>/.codex-plugin/plugin.json`.
- Fills the manifest with the full schema shape, placeholder values, and the complete `interface` section.
- Creates or updates `<repo-root>/.agents/plugins/marketplace.json` when `--with-marketplace` is set.
- If the marketplace file does not exist yet, seed top-level `name` plus `interface.displayName` placeholders before adding the first plugin entry.
- `<plugin-name>` is normalized using skill-creator naming rules:
- `My Plugin` → `my-plugin`
- `My--Plugin` → `my-plugin`
- underscores, spaces, and punctuation are converted to `-`
- result is lower-case hyphen-delimited with consecutive hyphens collapsed
- Supports optional creation of:
- `skills/`
- `hooks/`
- `scripts/`
- `assets/`
- `.mcp.json`
- `.app.json`
## Marketplace workflow
- `marketplace.json` always lives at `<repo-root>/.agents/plugins/marketplace.json`.
- Marketplace root metadata supports top-level `name` plus optional `interface.displayName`.
- Treat plugin order in `plugins[]` as render order in Codex. Append new entries unless a user explicitly asks to reorder the list.
- `displayName` belongs inside the marketplace `interface` object, not individual `plugins[]` entries.
- Each generated marketplace entry must include all of:
- `policy.installation`
- `policy.authentication`
- `category`
- Default new entries to:
- `policy.installation: "AVAILABLE"`
- `policy.authentication: "ON_INSTALL"`
- Override defaults only when the user explicitly specifies another allowed value.
- Allowed `policy.installation` values:
- `NOT_AVAILABLE`
- `AVAILABLE`
- `INSTALLED_BY_DEFAULT`
- Allowed `policy.authentication` values:
- `ON_INSTALL`
- `ON_USE`
- Treat `policy.products` as an override. Omit it unless the user explicitly requests product gating.
- The generated plugin entry shape is:
```json
{
"name": "plugin-name",
"source": {
"source": "local",
"path": "./plugins/plugin-name"
},
"policy": {
"installation": "AVAILABLE",
"authentication": "ON_INSTALL"
},
"category": "Productivity"
}
```
- Use `--force` only when intentionally replacing an existing marketplace entry for the same plugin name.
- If `<repo-root>/.agents/plugins/marketplace.json` does not exist yet, create it with top-level `"name"`, an `"interface"` object containing `"displayName"`, and a `plugins` array, then add the new entry.
- For a brand-new marketplace file, the root object should look like:
```json
{
"name": "[TODO: marketplace-name]",
"interface": {
"displayName": "[TODO: Marketplace Display Name]"
},
"plugins": [
{
"name": "plugin-name",
"source": {
"source": "local",
"path": "./plugins/plugin-name"
},
"policy": {
"installation": "AVAILABLE",
"authentication": "ON_INSTALL"
},
"category": "Productivity"
}
]
}
```
## Required behavior
- Outer folder name and `plugin.json` `"name"` are always the same normalized plugin name.
- Do not remove required structure; keep `.codex-plugin/plugin.json` present.
- Keep manifest values as placeholders until a human or follow-up step explicitly fills them.
- If creating files inside an existing plugin path, use `--force` only when overwrite is intentional.
- Preserve any existing marketplace `interface.displayName`.
- When generating marketplace entries, always write `policy.installation`, `policy.authentication`, and `category` even if their values are defaults.
- Add `policy.products` only when the user explicitly asks for that override.
- Keep marketplace `source.path` relative to repo root as `./plugins/<plugin-name>`.
## Reference to exact spec sample
For the exact canonical sample JSON for both plugin manifests and marketplace entries, use:
- `references/plugin-json-spec.md`
## Validation
After editing `SKILL.md`, run:
```bash
python3 <path-to-skill-creator>/scripts/quick_validate.py .agents/skills/plugin-creator
```

View File

@@ -0,0 +1,6 @@
interface:
display_name: "Plugin Creator"
short_description: "Scaffold plugins and marketplace entries"
default_prompt: "Use $plugin-creator to scaffold a plugin with placeholder plugin.json, optional structure, and a marketplace.json entry."
icon_small: "./assets/plugin-creator-small.svg"
icon_large: "./assets/plugin-creator.png"

View File

@@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 20 20">
<path fill="#0D0D0D" d="M12.03 4.113a3.612 3.612 0 0 1 5.108 5.108l-6.292 6.29c-.324.324-.56.561-.791.752l-.235.176c-.205.14-.422.261-.65.36l-.229.093a4.136 4.136 0 0 1-.586.16l-.764.134-2.394.4c-.142.024-.294.05-.423.06-.098.007-.232.01-.378-.026l-.149-.05a1.081 1.081 0 0 1-.521-.474l-.046-.093a1.104 1.104 0 0 1-.075-.527c.01-.129.035-.28.06-.422l.398-2.394c.1-.602.162-.987.295-1.35l.093-.23c.1-.228.22-.445.36-.65l.176-.235c.19-.232.428-.467.751-.79l6.292-6.292Zm-5.35 7.232c-.35.35-.534.535-.66.688l-.11.147a2.67 2.67 0 0 0-.24.433l-.062.154c-.08.22-.124.462-.232 1.112l-.398 2.394-.001.001h.003l2.393-.399.717-.126a2.63 2.63 0 0 0 .394-.105l.154-.063a2.65 2.65 0 0 0 .433-.24l.147-.11c.153-.126.339-.31.688-.66l4.988-4.988-3.227-3.226-4.987 4.988Zm9.517-6.291a2.281 2.281 0 0 0-3.225 0l-.364.362 3.226 3.227.363-.364c.89-.89.89-2.334 0-3.225ZM4.583 1.783a.3.3 0 0 1 .294.241c.117.585.347 1.092.707 1.48.357.385.859.668 1.549.783a.3.3 0 0 1 0 .592c-.69.115-1.192.398-1.549.783-.315.34-.53.77-.657 1.265l-.05.215a.3.3 0 0 1-.588 0c-.117-.585-.347-1.092-.707-1.48-.357-.384-.859-.668-1.549-.783a.3.3 0 0 1 0-.592c.69-.115 1.192-.398 1.549-.783.36-.388.59-.895.707-1.48l.015-.05a.3.3 0 0 1 .279-.19Z"/>
</svg>

After

Width:  |  Height:  |  Size: 1.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

View File

@@ -0,0 +1,166 @@
# Plugin JSON sample spec
```json
{
"name": "plugin-name",
"version": "1.2.0",
"description": "Brief plugin description",
"author": {
"name": "Author Name",
"email": "author@example.com",
"url": "https://github.com/author"
},
"homepage": "https://docs.example.com/plugin",
"repository": "https://github.com/author/plugin",
"license": "MIT",
"keywords": ["keyword1", "keyword2"],
"skills": "./skills/",
"hooks": "./hooks.json",
"mcpServers": "./.mcp.json",
"apps": "./.app.json",
"interface": {
"displayName": "Plugin Display Name",
"shortDescription": "Short description for subtitle",
"longDescription": "Long description for details page",
"developerName": "OpenAI",
"category": "Productivity",
"capabilities": ["Interactive", "Write"],
"websiteURL": "https://openai.com/",
"privacyPolicyURL": "https://openai.com/policies/row-privacy-policy/",
"termsOfServiceURL": "https://openai.com/policies/row-terms-of-use/",
"defaultPrompt": [
"Summarize my inbox and draft replies for me.",
"Find open bugs and turn them into Linear tickets.",
"Review today's meetings and flag scheduling gaps."
],
"brandColor": "#3B82F6",
"composerIcon": "./assets/icon.png",
"logo": "./assets/logo.png",
"screenshots": [
"./assets/screenshot1.png",
"./assets/screenshot2.png",
"./assets/screenshot3.png"
]
}
}
```
## Field guide
### Top-level fields
- `name` (`string`): Plugin identifier (kebab-case, no spaces). Required if `plugin.json` is provided and used as manifest name and component namespace.
- `version` (`string`): Plugin semantic version.
- `description` (`string`): Short purpose summary.
- `author` (`object`): Publisher identity.
- `name` (`string`): Author or team name.
- `email` (`string`): Contact email.
- `url` (`string`): Author/team homepage or profile URL.
- `homepage` (`string`): Documentation URL for plugin usage.
- `repository` (`string`): Source code URL.
- `license` (`string`): License identifier (for example `MIT`, `Apache-2.0`).
- `keywords` (`array` of `string`): Search/discovery tags.
- `skills` (`string`): Relative path to skill directories/files.
- `hooks` (`string`): Hook config path.
- `mcpServers` (`string`): MCP config path.
- `apps` (`string`): App manifest path for plugin integrations.
- `interface` (`object`): Interface/UX metadata block for plugin presentation.
### `interface` fields
- `displayName` (`string`): User-facing title shown for the plugin.
- `shortDescription` (`string`): Brief subtitle used in compact views.
- `longDescription` (`string`): Longer description used on details screens.
- `developerName` (`string`): Human-readable publisher name.
- `category` (`string`): Plugin category bucket.
- `capabilities` (`array` of `string`): Capability list from implementation.
- `websiteURL` (`string`): Public website for the plugin.
- `privacyPolicyURL` (`string`): Privacy policy URL.
- `termsOfServiceURL` (`string`): Terms of service URL.
- `defaultPrompt` (`array` of `string`): Starter prompts shown in composer/UX context.
- Include at most 3 strings. Entries after the first 3 are ignored and will not be included.
- Each string is capped at 128 characters. Longer entries are truncated.
- Prefer short starter prompts around 50 characters so they scan well in the UI.
- `brandColor` (`string`): Theme color for the plugin card.
- `composerIcon` (`string`): Path to icon asset.
- `logo` (`string`): Path to logo asset.
- `screenshots` (`array` of `string`): List of screenshot asset paths.
- Screenshot entries must be PNG filenames and stored under `./assets/`.
- Keep file paths relative to plugin root.
### Path conventions and defaults
- Path values should be relative and begin with `./`.
- `skills`, `hooks`, and `mcpServers` are supplemented on top of default component discovery; they do not replace defaults.
- Custom path values must follow the plugin root convention and naming/namespacing rules.
- This repo's scaffold writes `.codex-plugin/plugin.json`; treat that as the manifest location this skill generates.
# Marketplace JSON sample spec
`marketplace.json` depends on where the plugin should live:
- Repo plugin: `<repo-root>/.agents/plugins/marketplace.json`
- Local plugin: `~/.agents/plugins/marketplace.json`
```json
{
"name": "openai-curated",
"interface": {
"displayName": "ChatGPT Official"
},
"plugins": [
{
"name": "linear",
"source": {
"source": "local",
"path": "./plugins/linear"
},
"policy": {
    "installation": "AVAILABLE",
    "authentication": "ON_INSTALL"
},
"category": "Productivity"
}
]
}
```
## Marketplace field guide
### Top-level fields
- `name` (`string`): Marketplace identifier or catalog name.
- `interface` (`object`, optional): Marketplace presentation metadata.
- `plugins` (`array`): Ordered plugin entries. This order determines how Codex renders plugins.
### `interface` fields
- `displayName` (`string`, optional): User-facing marketplace title.
### Plugin entry fields
- `name` (`string`): Plugin identifier. Match the plugin folder name and `plugin.json` `name`.
- `source` (`object`): Plugin source descriptor.
- `source` (`string`): Use `local` for this repo workflow.
- `path` (`string`): Relative plugin path based on the marketplace root.
- Repo plugin: `./plugins/<plugin-name>`
- Local plugin in `~/.agents/plugins/marketplace.json`: `./.codex/plugins/<plugin-name>`
- `policy` (`object`): Marketplace policy block. Always include it.
- `installation` (`string`): Availability policy.
- Allowed values: `NOT_AVAILABLE`, `AVAILABLE`, `INSTALLED_BY_DEFAULT`
- Default for new entries: `AVAILABLE`
- `authentication` (`string`): Authentication timing policy.
- Allowed values: `ON_INSTALL`, `ON_USE`
- Default for new entries: `ON_INSTALL`
- `products` (`array` of `string`, optional): Product override for this plugin entry. Omit it unless product gating is explicitly requested.
- `category` (`string`): Display category bucket. Always include it.
### Marketplace generation rules
- `displayName` belongs under the top-level `interface` object, not individual plugin entries.
- When creating a new marketplace file from scratch, seed `interface.displayName` alongside top-level `name`.
- Always include `policy.installation`, `policy.authentication`, and `category` on every generated or updated plugin entry.
- Treat `policy.products` as an override and omit it unless explicitly requested.
- Append new entries unless the user explicitly requests reordering.
- Replace an existing entry for the same plugin only when overwrite is intentional.
- Choose marketplace location to match the plugin destination:
- Repo plugin: `<repo-root>/.agents/plugins/marketplace.json`
- Local plugin: `~/.agents/plugins/marketplace.json`

View File

@@ -0,0 +1,291 @@
#!/usr/bin/env python3
"""Scaffold a plugin directory and optionally update marketplace.json."""
from __future__ import annotations
import argparse
import json
import re
from pathlib import Path
from typing import Any
# Hard cap on normalized plugin names; validate_plugin_name enforces it.
MAX_PLUGIN_NAME_LENGTH = 64
# Default parent directory for new plugin folders: <cwd>/plugins/<plugin-name>.
DEFAULT_PLUGIN_PARENT = Path.cwd() / "plugins"
# Canonical marketplace location relative to the repo root (current directory).
DEFAULT_MARKETPLACE_PATH = Path.cwd() / ".agents" / "plugins" / "marketplace.json"
# Defaults written into new marketplace entries unless explicitly overridden.
DEFAULT_INSTALL_POLICY = "AVAILABLE"
DEFAULT_AUTH_POLICY = "ON_INSTALL"
DEFAULT_CATEGORY = "Productivity"
# Placeholder seeded when a brand-new marketplace.json is created from scratch.
DEFAULT_MARKETPLACE_DISPLAY_NAME = "[TODO: Marketplace Display Name]"
# Allowed values for marketplace policy fields.
VALID_INSTALL_POLICIES = {"NOT_AVAILABLE", "AVAILABLE", "INSTALLED_BY_DEFAULT"}
VALID_AUTH_POLICIES = {"ON_INSTALL", "ON_USE"}
def normalize_plugin_name(plugin_name: str) -> str:
    """Return ``plugin_name`` normalized to lowercase hyphen-case.

    Runs of characters outside ``[a-z0-9]`` collapse to a single hyphen,
    and any leading/trailing hyphens are stripped.
    """
    lowered = plugin_name.strip().lower()
    hyphenated = re.sub(r"[^a-z0-9]+", "-", lowered).strip("-")
    # Defensive collapse of any remaining hyphen runs.
    return re.sub(r"-{2,}", "-", hyphenated)
def validate_plugin_name(plugin_name: str) -> None:
if not plugin_name:
raise ValueError("Plugin name must include at least one letter or digit.")
if len(plugin_name) > MAX_PLUGIN_NAME_LENGTH:
raise ValueError(
f"Plugin name '{plugin_name}' is too long ({len(plugin_name)} characters). "
f"Maximum is {MAX_PLUGIN_NAME_LENGTH} characters."
)
def build_plugin_json(plugin_name: str) -> dict:
return {
"name": plugin_name,
"version": "[TODO: 1.2.0]",
"description": "[TODO: Brief plugin description]",
"author": {
"name": "[TODO: Author Name]",
"email": "[TODO: author@example.com]",
"url": "[TODO: https://github.com/author]",
},
"homepage": "[TODO: https://docs.example.com/plugin]",
"repository": "[TODO: https://github.com/author/plugin]",
"license": "[TODO: MIT]",
"keywords": ["[TODO: keyword1]", "[TODO: keyword2]"],
"skills": "[TODO: ./skills/]",
"hooks": "[TODO: ./hooks.json]",
"mcpServers": "[TODO: ./.mcp.json]",
"apps": "[TODO: ./.app.json]",
"interface": {
"displayName": "[TODO: Plugin Display Name]",
"shortDescription": "[TODO: Short description for subtitle]",
"longDescription": "[TODO: Long description for details page]",
"developerName": "[TODO: OpenAI]",
"category": "[TODO: Productivity]",
"capabilities": ["[TODO: Interactive]", "[TODO: Write]"],
"websiteURL": "[TODO: https://openai.com/]",
"privacyPolicyURL": "[TODO: https://openai.com/policies/row-privacy-policy/]",
"termsOfServiceURL": "[TODO: https://openai.com/policies/row-terms-of-use/]",
"defaultPrompt": [
"[TODO: Summarize my inbox and draft replies for me.]",
"[TODO: Find open bugs and turn them into tickets.]",
"[TODO: Review today's meetings and flag gaps.]",
],
"brandColor": "[TODO: #3B82F6]",
"composerIcon": "[TODO: ./assets/icon.png]",
"logo": "[TODO: ./assets/logo.png]",
"screenshots": [
"[TODO: ./assets/screenshot1.png]",
"[TODO: ./assets/screenshot2.png]",
"[TODO: ./assets/screenshot3.png]",
],
},
}
def build_marketplace_entry(
plugin_name: str,
install_policy: str,
auth_policy: str,
category: str,
) -> dict[str, Any]:
return {
"name": plugin_name,
"source": {
"source": "local",
"path": f"./plugins/{plugin_name}",
},
"policy": {
"installation": install_policy,
"authentication": auth_policy,
},
"category": category,
}
def load_json(path: Path) -> dict[str, Any]:
with path.open() as handle:
return json.load(handle)
def build_default_marketplace() -> dict[str, Any]:
return {
"name": "[TODO: marketplace-name]",
"interface": {
"displayName": DEFAULT_MARKETPLACE_DISPLAY_NAME,
},
"plugins": [],
}
def validate_marketplace_interface(payload: dict[str, Any]) -> None:
interface = payload.get("interface")
if interface is not None and not isinstance(interface, dict):
raise ValueError("marketplace.json field 'interface' must be an object.")
def update_marketplace_json(
marketplace_path: Path,
plugin_name: str,
install_policy: str,
auth_policy: str,
category: str,
force: bool,
) -> None:
if marketplace_path.exists():
payload = load_json(marketplace_path)
else:
payload = build_default_marketplace()
if not isinstance(payload, dict):
raise ValueError(f"{marketplace_path} must contain a JSON object.")
validate_marketplace_interface(payload)
plugins = payload.setdefault("plugins", [])
if not isinstance(plugins, list):
raise ValueError(f"{marketplace_path} field 'plugins' must be an array.")
new_entry = build_marketplace_entry(plugin_name, install_policy, auth_policy, category)
for index, entry in enumerate(plugins):
if isinstance(entry, dict) and entry.get("name") == plugin_name:
if not force:
raise FileExistsError(
f"Marketplace entry '{plugin_name}' already exists in {marketplace_path}. "
"Use --force to overwrite that entry."
)
plugins[index] = new_entry
break
else:
plugins.append(new_entry)
write_json(marketplace_path, payload, force=True)
def write_json(path: Path, data: dict, force: bool) -> None:
if path.exists() and not force:
raise FileExistsError(f"{path} already exists. Use --force to overwrite.")
path.parent.mkdir(parents=True, exist_ok=True)
with path.open("w") as handle:
json.dump(data, handle, indent=2)
handle.write("\n")
def create_stub_file(path: Path, payload: dict, force: bool) -> None:
if path.exists() and not force:
return
path.parent.mkdir(parents=True, exist_ok=True)
with path.open("w") as handle:
json.dump(payload, handle, indent=2)
handle.write("\n")
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Create a plugin skeleton with placeholder plugin.json."
)
parser.add_argument("plugin_name")
parser.add_argument(
"--path",
default=str(DEFAULT_PLUGIN_PARENT),
help="Parent directory for plugin creation (defaults to <cwd>/plugins)",
)
parser.add_argument("--with-skills", action="store_true", help="Create skills/ directory")
parser.add_argument("--with-hooks", action="store_true", help="Create hooks/ directory")
parser.add_argument("--with-scripts", action="store_true", help="Create scripts/ directory")
parser.add_argument("--with-assets", action="store_true", help="Create assets/ directory")
parser.add_argument("--with-mcp", action="store_true", help="Create .mcp.json placeholder")
parser.add_argument("--with-apps", action="store_true", help="Create .app.json placeholder")
parser.add_argument(
"--with-marketplace",
action="store_true",
help="Create or update <cwd>/.agents/plugins/marketplace.json",
)
parser.add_argument(
"--marketplace-path",
default=str(DEFAULT_MARKETPLACE_PATH),
help="Path to marketplace.json (defaults to <cwd>/.agents/plugins/marketplace.json)",
)
parser.add_argument(
"--install-policy",
default=DEFAULT_INSTALL_POLICY,
choices=sorted(VALID_INSTALL_POLICIES),
help="Marketplace policy.installation value",
)
parser.add_argument(
"--auth-policy",
default=DEFAULT_AUTH_POLICY,
choices=sorted(VALID_AUTH_POLICIES),
help="Marketplace policy.authentication value",
)
parser.add_argument(
"--category",
default=DEFAULT_CATEGORY,
help="Marketplace category value",
)
parser.add_argument("--force", action="store_true", help="Overwrite existing files")
return parser.parse_args()
def main() -> None:
args = parse_args()
raw_plugin_name = args.plugin_name
plugin_name = normalize_plugin_name(raw_plugin_name)
if plugin_name != raw_plugin_name:
print(f"Note: Normalized plugin name from '{raw_plugin_name}' to '{plugin_name}'.")
validate_plugin_name(plugin_name)
plugin_root = (Path(args.path).expanduser().resolve() / plugin_name)
plugin_root.mkdir(parents=True, exist_ok=True)
plugin_json_path = plugin_root / ".codex-plugin" / "plugin.json"
write_json(plugin_json_path, build_plugin_json(plugin_name), args.force)
optional_directories = {
"skills": args.with_skills,
"hooks": args.with_hooks,
"scripts": args.with_scripts,
"assets": args.with_assets,
}
for folder, enabled in optional_directories.items():
if enabled:
(plugin_root / folder).mkdir(parents=True, exist_ok=True)
if args.with_mcp:
create_stub_file(
plugin_root / ".mcp.json",
{"mcpServers": {}},
args.force,
)
if args.with_apps:
create_stub_file(
plugin_root / ".app.json",
{
"apps": {},
},
args.force,
)
if args.with_marketplace:
marketplace_path = Path(args.marketplace_path).expanduser().resolve()
update_marketplace_json(
marketplace_path,
plugin_name,
args.install_policy,
args.auth_policy,
args.category,
args.force,
)
print(f"Created plugin scaffold: {plugin_root}")
print(f"plugin manifest: {plugin_json_path}")
if args.with_marketplace:
print(f"marketplace manifest: {marketplace_path}")
if __name__ == "__main__":
main()

View File

@@ -57,6 +57,7 @@ use codex_app_server_protocol::RequestId;
use codex_arg0::Arg0DispatchPaths;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::ForkSnapshot;
use codex_core::ThreadManager;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
@@ -2502,7 +2503,7 @@ impl App {
);
let forked = thread_manager
.fork_thread(
usize::MAX,
ForkSnapshot::Interrupted,
config.clone(),
target_session.path.clone(),
/*persist_extended_history*/ false,
@@ -2925,7 +2926,7 @@ impl App {
match self
.server
.fork_thread(
usize::MAX,
ForkSnapshot::Interrupted,
self.config.clone(),
path.clone(),
/*persist_extended_history*/ false,