Compare commits

...

1 Commit

Author SHA1 Message Date
rka-oai
a9c7852206 Integrate migrate-cli workflow 2025-11-06 10:34:15 -08:00
19 changed files with 1137 additions and 0 deletions

View File

@@ -80,6 +80,7 @@ Codex CLI supports a rich set of configuration options, with preferences stored
- [Example prompts](./docs/getting-started.md#example-prompts)
- [Custom prompts](./docs/prompts.md)
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [**Migrations**](./docs/migrations.md)
- [**Configuration**](./docs/config.md)
- [Example config](./docs/example-config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)

3
codex-rs/Cargo.lock generated
View File

@@ -967,6 +967,7 @@ dependencies = [
"anyhow",
"assert_cmd",
"assert_matches",
"chrono",
"clap",
"clap_complete",
"codex-app-server",
@@ -989,8 +990,10 @@ dependencies = [
"codex-windows-sandbox",
"ctor 0.5.0",
"owo-colors",
"pathdiff",
"predicates",
"pretty_assertions",
"serde",
"serde_json",
"supports-color",
"tempfile",

View File

@@ -7,6 +7,10 @@ version = { workspace = true }
name = "codex"
path = "src/main.rs"
[[bin]]
name = "migrate-cli"
path = "src/bin/migrate_cli.rs"
[lib]
name = "codex_cli"
path = "src/lib.rs"
@@ -16,6 +20,7 @@ workspace = true
[dependencies]
anyhow = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive"] }
clap_complete = { workspace = true }
codex-app-server = { workspace = true }
@@ -37,6 +42,8 @@ codex-stdio-to-uds = { workspace = true }
codex-tui = { workspace = true }
ctor = { workspace = true }
owo-colors = { workspace = true }
pathdiff = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
supports-color = { workspace = true }
toml = { workspace = true }

View File

@@ -0,0 +1,9 @@
use clap::Parser;
use codex_cli::migrate::MigrateCli;
/// Entry point for the standalone `migrate-cli` binary.
///
/// Parses process arguments into a [`MigrateCli`] and runs it; any failure
/// is reported on stderr (debug form, including the anyhow context chain)
/// and the process exits with status 1.
fn main() {
    match MigrateCli::parse().run() {
        Ok(()) => {}
        Err(err) => {
            eprintln!("{err:?}");
            std::process::exit(1);
        }
    }
}

View File

@@ -1,6 +1,7 @@
pub mod debug_sandbox;
mod exit_status;
pub mod login;
pub mod migrate;
use clap::Parser;
use codex_common::CliConfigOverrides;

View File

@@ -28,6 +28,7 @@ use supports_color::Stream;
mod mcp_cmd;
use crate::mcp_cmd::McpCli;
use codex_cli::migrate::MigrateCli;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::features::is_known_feature_key;
@@ -73,6 +74,9 @@ enum Subcommand {
/// Remove stored authentication credentials.
Logout(LogoutCommand),
/// Manage Codex migration workstreams.
Migrate(MigrateCli),
/// [experimental] Run Codex as an MCP server and manage MCP servers.
Mcp(McpCli),
@@ -443,6 +447,9 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
);
run_logout(logout_cli.config_overrides).await;
}
Some(Subcommand::Migrate(migrate_cli)) => {
migrate_cli.run()?;
}
Some(Subcommand::Completion(completion_cli)) => {
print_completion(completion_cli);
}

782
codex-rs/cli/src/migrate.rs Normal file
View File

@@ -0,0 +1,782 @@
use std::fs;
use std::fs::OpenOptions;
use std::io::Write as _;
use std::path::Path;
use std::path::PathBuf;
use anyhow::Context;
use anyhow::Result;
use chrono::Local;
use chrono::Utc;
use clap::Parser;
use clap::Subcommand;
use clap::ValueEnum;
use codex_tui::migration::MigrationWorkspace;
use codex_tui::migration::create_migration_workspace;
use pathdiff::diff_paths;
use serde::Deserialize;
use serde::Serialize;
const STATE_DIR: &str = ".codex/migrate";
const INDEX_FILE: &str = "index.json";
const MIGRATIONS_DIR: &str = "migrations";
const TASKS_FILE: &str = "tasks.json";
const RUNS_DIR: &str = "runs";
const STATE_VERSION: u32 = 1;
const INDEX_VERSION: u32 = 1;
// Argument parser for `migrate-cli` (also mounted as the `codex migrate`
// subcommand). NOTE: added comments are `//` rather than `///` on purpose —
// clap derives turn doc comments into help text, and we must not change
// the generated --help output.
#[derive(Debug, Parser)]
pub struct MigrateCli {
    /// Root of the repository / workspace that owns the migration artifacts.
    #[arg(long = "root", value_name = "DIR", default_value = ".")]
    root: PathBuf,
    // Which subcommand to dispatch (`plan` or `execute`).
    #[command(subcommand)]
    command: MigrateCommand,
}
// Top-level subcommands understood by `migrate-cli`. The `///` lines are
// clap help text and are left untouched.
#[derive(Debug, Subcommand)]
enum MigrateCommand {
    /// Create a migration workspace and seed a task graph.
    Plan(PlanCommand),
    /// Execute or update a migration task.
    Execute(ExecuteCommand),
}
// Arguments for `migrate-cli plan`. Additions use `//` so clap help output
// is unchanged.
#[derive(Debug, Parser)]
struct PlanCommand {
    /// Short description for the migration (used to name the workspace).
    #[arg(value_name = "DESCRIPTION")]
    summary: String,
    /// How many explorer workstreams should be created for parallel agents.
    // Clamped to 1..=8 in run_plan before use.
    #[arg(long = "parallel", value_name = "COUNT", default_value_t = 2)]
    parallel_scouts: usize,
}
// Arguments for `migrate-cli execute`. Additions use `//` so clap help
// output is unchanged.
#[derive(Debug, Parser)]
struct ExecuteCommand {
    /// Specific task id to update. Omit to pick the next runnable task.
    #[arg(value_name = "TASK_ID")]
    task_id: Option<String>,
    /// Name (or path) of the migration workspace to operate on.
    // When omitted, resolve_workspace falls back to the most recently
    // updated entry in the index.
    #[arg(long = "workspace", value_name = "PATH")]
    workspace: Option<String>,
    /// Explicitly set a task's status instead of starting it.
    // clap's `requires` makes `--status` valid only with an explicit task id.
    #[arg(long = "status", value_enum, requires = "task_id")]
    status: Option<TaskStatus>,
    /// Append a short note to journal.md after updating the task.
    #[arg(long = "note", value_name = "TEXT")]
    note: Option<String>,
}
impl MigrateCli {
    /// Resolve the workspace root and dispatch the parsed subcommand.
    ///
    /// The `--root` path is canonicalized when possible; if canonicalization
    /// fails (e.g. the directory does not exist yet) the path is used as
    /// given.
    pub fn run(self) -> Result<()> {
        let Self { root, command } = self;
        let root = match root.canonicalize() {
            Ok(canonical) => canonical,
            Err(_) => root,
        };
        match command {
            MigrateCommand::Plan(cmd) => run_plan(&root, cmd),
            MigrateCommand::Execute(cmd) => run_execute(&root, cmd),
        }
    }
}
/// Handle `migrate-cli plan`: scaffold a new workspace under
/// `<root>/migrations/`, seed its task graph, write a README, and register
/// the workspace in the shared index under `.codex/migrate/`.
fn run_plan(root: &Path, cmd: PlanCommand) -> Result<()> {
    // Ensure `.codex/migrate` exists so refresh_index can write later.
    fs::create_dir_all(package_dir(root))?;
    let migrations_dir = root.join(MIGRATIONS_DIR);
    // Creates `migrations/migration_<slug>/` with plan.md / journal.md
    // rendered from the templates in codex_tui::migration.
    let workspace = create_migration_workspace(&migrations_dir, cmd.summary.as_str())
        .with_context(|| {
            format!(
                "failed to create migration workspace inside {}",
                migrations_dir.display()
            )
        })?;
    // Keep the number of parallel scout tasks within a sane range.
    let parallel = cmd.parallel_scouts.clamp(1, 8);
    let state = MigrationState::new(cmd.summary.clone(), &workspace, parallel);
    state.save()?;
    write_workspace_readme(&workspace, cmd.summary.as_str())?;
    // Prefer a root-relative path for display; fall back to the raw path
    // when no relative form exists.
    let workspace_rel = diff_paths(&workspace.dir_path, root)
        .unwrap_or_else(|| workspace.dir_path.clone())
        .display()
        .to_string();
    refresh_index(root, &state)?;
    println!(
        "Created migration workspace `{}` in {workspace_rel}",
        workspace.dir_name
    );
    println!("- Plan: {}", rel_to_root(&workspace.plan_path, root));
    println!("- Journal: {}", rel_to_root(&workspace.journal_path, root));
    println!(
        "Next: open this repo in Codex, run /migrate, and let the agent follow up with `migrate-cli execute` to begin running tasks."
    );
    Ok(())
}
/// Handle `migrate-cli execute`: select (or accept) a task, update its
/// status, emit a runbook when starting fresh work, and refresh the
/// journal and index so other agents see the change.
fn run_execute(root: &Path, cmd: ExecuteCommand) -> Result<()> {
    let workspace_dir = resolve_workspace(root, cmd.workspace.as_deref())?;
    let mut state = MigrationState::load(workspace_dir)?;
    // Use the explicit task id when given; otherwise pick the first pending
    // task whose dependencies are all done.
    let task_id = if let Some(id) = cmd.task_id {
        id
    } else if let Some(id) = state.next_runnable_task_id() {
        id
    } else {
        println!("All tasks are complete. Specify --task-id to override.");
        return Ok(());
    };
    // Starting a task (no --status) requires its dependencies to be done;
    // passing --status explicitly acts as an override.
    if !state.can_start(&task_id) && cmd.status.is_none() {
        anyhow::bail!(
            "Task `{task_id}` is blocked by its dependencies. Complete the prerequisites or pass --status to override."
        );
    }
    // No --status means "start this task and print its brief".
    let describe_task = cmd.status.is_none();
    // Snapshot the task before mutation so the brief reflects its metadata.
    let task_snapshot = if describe_task {
        Some(
            state
                .task(&task_id)
                .cloned()
                .with_context(|| format!("unknown task id `{task_id}`"))?,
        )
    } else {
        None
    };
    let new_status = cmd.status.unwrap_or(TaskStatus::Running);
    state.set_status(&task_id, new_status)?;
    let mut run_file = None;
    // Only generate a runbook when actually starting work on the task.
    if new_status == TaskStatus::Running && describe_task {
        run_file = Some(write_run_file(root, &state, &task_id)?);
    }
    state.save()?;
    if let Some(note) = cmd.note {
        append_journal(&state, &task_id, new_status, note.as_str())?;
    }
    refresh_index(root, &state)?;
    if describe_task {
        if let Some(task) = task_snapshot.as_ref() {
            print_task_brief(&state, task, root);
        }
        if let Some(path) = run_file {
            println!("Runbook prepared at {path}");
        }
        println!(
            "When you finish, mark it done with `migrate-cli execute --task-id {task_id} --status done --note \"<summary>\"` and run `migrate-cli execute` again for the next task."
        );
    } else {
        // Explicit status change: just confirm the transition.
        println!("Task `{task_id}` status -> {new_status}");
        if let Some(path) = run_file {
            println!("Runbook prepared at {path}");
        }
    }
    Ok(())
}
/// Determine which workspace directory to operate on.
///
/// With an explicit `provided` value, tries (in order) the absolute path,
/// `<root>/<provided>`, then `<root>/migrations/<provided>`, accepting the
/// candidate that contains a `tasks.json`. Without one, falls back to the
/// most recently updated entry in the migration index.
fn resolve_workspace(root: &Path, provided: Option<&str>) -> Result<PathBuf> {
    if let Some(input) = provided {
        let direct = PathBuf::from(input);
        let candidate = if direct.is_absolute() {
            direct
        } else {
            // Prefer a path relative to the root; otherwise assume the
            // input names a workspace inside `migrations/`.
            let joined = root.join(&direct);
            if joined.join(TASKS_FILE).exists() {
                joined
            } else {
                root.join(MIGRATIONS_DIR).join(&direct)
            }
        };
        // A workspace is recognized by the presence of its tasks file.
        if candidate.join(TASKS_FILE).exists() {
            return Ok(candidate);
        }
        anyhow::bail!("No migration workspace found at {}", candidate.display());
    }
    // No workspace given: pick the most recently touched migration on record.
    let index = load_index(&index_path(root))?;
    let latest = index
        .migrations
        .iter()
        .max_by_key(|entry| entry.updated_at_epoch)
        .context("No recorded migrations. Run `migrate-cli plan` first.")?;
    // Index entries may store relative or absolute workspace paths.
    let rel = PathBuf::from(&latest.workspace);
    let path = if rel.is_absolute() {
        rel
    } else {
        root.join(rel)
    };
    Ok(path)
}
/// Drop a README.md into a freshly created workspace that explains the
/// directory layout and how to drive the migration with `migrate-cli`.
fn write_workspace_readme(workspace: &MigrationWorkspace, summary: &str) -> Result<()> {
    let name = workspace.dir_name.as_str();
    let contents = format!(
        "# {name}\n\n{summary}\n\n- `plan.md` canonical blueprint\n- `journal.md` publish progress + hand-offs\n- `tasks.json` orchestration metadata\n- `runs/` generated runbooks per task\n\nUse `migrate-cli execute --workspace {name}` to advance tasks or open this folder in Codex and run `/migrate`.\n"
    );
    let readme_path = workspace.dir_path.join("README.md");
    fs::write(readme_path, contents)?;
    Ok(())
}
/// Append a markdown table row to the workspace journal recording a task
/// status change.
///
/// The row matches the table scaffolded in `journal.md`
/// (timestamp | agent | update | blockers | next action); the blockers
/// column is intentionally left blank here.
fn append_journal(
    state: &MigrationState,
    task_id: &str,
    status: TaskStatus,
    note: &str,
) -> Result<()> {
    // Append-only: never truncate the shared journal.
    let mut file = OpenOptions::new()
        .append(true)
        .open(state.journal_path())
        .with_context(|| format!("failed to open {}", state.journal_path().display()))?;
    // Local time keeps journal entries readable for humans at the keyboard.
    let timestamp = Local::now().format("%Y-%m-%d %H:%M %Z");
    writeln!(
        file,
        "| {timestamp} | migrate::execute | Task {task_id} -> {status} | | {note} |"
    )?;
    Ok(())
}
/// Materialize a timestamped runbook for `task_id` under the workspace's
/// `runs/` directory and return its root-relative path for display.
///
/// # Errors
/// Fails when the task id is unknown or the file cannot be written.
fn write_run_file(root: &Path, state: &MigrationState, task_id: &str) -> Result<String> {
    let task = state
        .task(task_id)
        .with_context(|| format!("unknown task id `{task_id}`"))?;
    let runs_dir = state.workspace_dir().join(RUNS_DIR);
    fs::create_dir_all(&runs_dir)?;
    // UTC timestamps keep runbook names sortable and collision-resistant.
    let timestamp = Utc::now().format("%Y%m%d-%H%M%S");
    let file_name = format!("{task_id}-{timestamp}.md");
    let path = runs_dir.join(&file_name);
    let plan = rel_to_root(&state.plan_path(), root);
    let journal = rel_to_root(&state.journal_path(), root);
    let mut body = format!(
        "# Task {task_id}: {}\n\n{}\n\n## Checkpoints\n",
        task.title, task.description
    );
    for checkpoint in &task.checkpoints {
        body.push_str(&format!("- {checkpoint}\n"));
    }
    body.push_str(&format!(
        "\nPublish updates to `{journal}`. Mirror final scope into `{plan}` when it changes.\n"
    ));
    fs::write(&path, body)?;
    Ok(rel_to_root(&path, root))
}
/// Print a human-readable brief for `task` to stdout: workspace locations,
/// the task's description, and its coordination metadata.
fn print_task_brief(state: &MigrationState, task: &MigrationTask, root: &Path) {
    println!("--- migrate::execute ---");
    println!("Workspace: {}", state.workspace_dir_string(root));
    println!("Plan: {}", rel_to_root(&state.plan_path(), root));
    println!("Journal: {}", rel_to_root(&state.journal_path(), root));
    println!();
    println!("Task `{}` {}", task.id, task.title);
    println!("{}", task.description);
    // Optional metadata lines are omitted when empty/absent.
    if !task.depends_on.is_empty() {
        println!("Depends on: {}", task.depends_on.join(", "));
    }
    if let Some(group) = &task.parallel_group {
        println!("Parallel track: {group}");
    }
    if let Some(owner) = &task.owner_hint {
        println!("Suggested owner: {owner}");
    }
    if !task.publish_to.is_empty() {
        println!("Publish updates to: {}", task.publish_to.join(", "));
    }
    if !task.checkpoints.is_empty() {
        println!("Checkpoints:");
        for checkpoint in &task.checkpoints {
            println!(" - {checkpoint}");
        }
    }
    println!(
        "Document findings in journal.md, reflect scope changes back into plan.md, and keep runbooks inside runs/."
    );
}
/// `<root>/.codex/migrate` — directory holding the cross-workspace CLI state.
fn package_dir(root: &Path) -> PathBuf {
    root.join(STATE_DIR)
}
/// `<root>/.codex/migrate/index.json` — location of the migration index.
fn index_path(root: &Path) -> PathBuf {
    package_dir(root).join(INDEX_FILE)
}
/// Render `path` relative to `root` for display, falling back to the
/// original path when no relative form exists (e.g. different prefixes).
fn rel_to_root(path: &Path, root: &Path) -> String {
    let shown = match diff_paths(path, root) {
        Some(relative) => relative,
        None => path.to_path_buf(),
    };
    shown.display().to_string()
}
/// Serialize `value` as pretty-printed JSON and write it to `path`.
///
/// # Errors
/// Fails when serialization or the file write fails; the write error is
/// annotated with the destination path, matching the error style used by
/// the rest of this module.
fn write_pretty_json(path: &Path, value: &impl Serialize) -> Result<()> {
    let text = serde_json::to_string_pretty(value)?;
    fs::write(path, text).with_context(|| format!("failed to write {}", path.display()))?;
    Ok(())
}
/// One migration's summary row in the on-disk index (`index.json`).
#[derive(Debug, Serialize, Deserialize, Clone)]
struct MigrationIndexEntry {
    /// Workspace directory name, e.g. `migration_<slug>`.
    slug: String,
    /// Human-readable migration description.
    summary: String,
    /// Workspace path, stored relative to the root when possible.
    workspace: String,
    /// Root-relative path to `plan.md`.
    plan: String,
    /// Root-relative path to `journal.md`.
    journal: String,
    /// Root-relative path to `tasks.json`.
    tasks_path: String,
    /// Number of tasks still pending.
    pending_tasks: usize,
    /// Number of tasks currently running.
    running_tasks: usize,
    /// Number of tasks marked blocked.
    blocked_tasks: usize,
    /// Ids of parallel-group tasks that are pending with dependencies met.
    ready_parallel_tasks: Vec<String>,
    /// Coarse phase derived from the task counts.
    status: IndexStatus,
    /// RFC 3339 timestamp of the last index refresh.
    updated_at: String,
    /// Same instant as seconds since the Unix epoch; used to find the
    /// most recently updated migration.
    updated_at_epoch: i64,
}
/// Coarse lifecycle phase of a migration, derived from its task counts
/// (see `MigrationState::to_index_entry`).
#[derive(Debug, Serialize, Deserialize, Clone, Copy)]
#[serde(rename_all = "snake_case")]
enum IndexStatus {
    /// No task is running yet.
    Planning,
    /// At least one task is running.
    Executing,
    /// No pending, running, or blocked tasks remain.
    Complete,
}
/// Top-level structure of `index.json`: a schema version plus one entry
/// per known migration workspace.
#[derive(Debug, Serialize, Deserialize)]
struct MigrationIndex {
    /// Schema version; written as `INDEX_VERSION`.
    version: u32,
    /// All recorded migrations; refreshed entries are appended last.
    migrations: Vec<MigrationIndexEntry>,
}
impl Default for MigrationIndex {
    /// An empty index at the current schema version; used when no
    /// `index.json` exists yet.
    fn default() -> Self {
        Self {
            version: INDEX_VERSION,
            migrations: Vec::new(),
        }
    }
}
fn load_index(path: &Path) -> Result<MigrationIndex> {
if path.exists() {
let text = fs::read_to_string(path)?;
Ok(serde_json::from_str(&text)?)
} else {
Ok(MigrationIndex::default())
}
}
/// Insert or replace `state`'s entry in the on-disk migration index.
fn refresh_index(root: &Path, state: &MigrationState) -> Result<()> {
    fs::create_dir_all(package_dir(root))?;
    // Compute the index location once; it is read and then rewritten below.
    let path = index_path(root);
    let mut index = load_index(&path)?;
    let entry = state.to_index_entry(root);
    // Drop any stale entry for the same workspace before appending the
    // fresh one.
    index
        .migrations
        .retain(|existing| existing.slug != entry.slug || existing.workspace != entry.workspace);
    index.migrations.push(entry);
    write_pretty_json(&path, &index)
}
// Lifecycle state of a single migration task; stored in `tasks.json` and
// accepted on the command line via `--status`. The previously separate
// `#[derive(Default)]` attribute is merged into the main derive list.
// Variant comments use `//` so the clap ValueEnum help text is unchanged.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize, ValueEnum)]
#[serde(rename_all = "snake_case")]
enum TaskStatus {
    // Not yet started (the initial and serde-default state).
    #[default]
    Pending,
    // Currently being worked on.
    Running,
    // Cannot proceed until a blocker is resolved.
    Blocked,
    // Finished.
    Done,
}
impl std::fmt::Display for TaskStatus {
    /// Render the status as its lowercase label, matching the serde
    /// snake_case wire form.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            TaskStatus::Pending => "pending",
            TaskStatus::Running => "running",
            TaskStatus::Blocked => "blocked",
            TaskStatus::Done => "done",
        })
    }
}
/// A single node in the migration task graph, persisted in `tasks.json`.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct MigrationTask {
    /// Stable identifier used on the CLI (`--task-id` / positional arg).
    id: String,
    /// Short human-readable title.
    title: String,
    /// Longer description shown in task briefs and runbooks.
    description: String,
    /// Current lifecycle state; `pending` when absent from the JSON.
    #[serde(default)]
    status: TaskStatus,
    /// Ids of tasks that must be `done` before this one can start.
    #[serde(default)]
    depends_on: Vec<String>,
    /// Optional parallel track name (e.g. `exploration`, `stabilization`).
    #[serde(default)]
    parallel_group: Option<String>,
    /// Suggested owner / skillset for the task, if any.
    #[serde(default)]
    owner_hint: Option<String>,
    /// Artifacts (e.g. `plan.md`, `journal.md`) the task should update.
    #[serde(default)]
    publish_to: Vec<String>,
    /// Checklist items reproduced in the task's generated runbook.
    #[serde(default)]
    checkpoints: Vec<String>,
}
/// Serialized contents of a workspace's `tasks.json`.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct MigrationStateFile {
    /// Schema version; written as `STATE_VERSION`.
    version: u32,
    /// Human-readable migration description.
    summary: String,
    /// Workspace directory name.
    slug: String,
    /// Plan file path, relative to the workspace directory.
    plan_path: String,
    /// Journal file path, relative to the workspace directory.
    journal_path: String,
    /// The full task graph.
    tasks: Vec<MigrationTask>,
}
/// In-memory handle to one migration workspace: the parsed `tasks.json`
/// plus the directory it belongs to.
struct MigrationState {
    /// Parsed contents of `tasks.json`.
    file: MigrationStateFile,
    /// Directory containing the workspace artifacts.
    workspace_dir: PathBuf,
}
impl MigrationState {
    /// Build a fresh state for a newly created workspace, seeding the
    /// default task graph with `parallel` scout workstreams.
    fn new(summary: String, workspace: &MigrationWorkspace, parallel: usize) -> Self {
        let tasks = seed_tasks(&summary, parallel);
        Self {
            file: MigrationStateFile {
                version: STATE_VERSION,
                summary,
                slug: workspace.dir_name.clone(),
                plan_path: "plan.md".to_string(),
                journal_path: "journal.md".to_string(),
                tasks,
            },
            workspace_dir: workspace.dir_path.clone(),
        }
    }
    /// Load state from `<workspace_dir>/tasks.json`.
    fn load(workspace_dir: PathBuf) -> Result<Self> {
        let data_path = workspace_dir.join(TASKS_FILE);
        let text = fs::read_to_string(&data_path)
            .with_context(|| format!("missing tasks file at {}", data_path.display()))?;
        let file: MigrationStateFile = serde_json::from_str(&text)?;
        Ok(Self {
            file,
            workspace_dir,
        })
    }
    /// Persist the current state back to `tasks.json` (pretty-printed).
    fn save(&self) -> Result<()> {
        write_pretty_json(&self.workspace_dir.join(TASKS_FILE), &self.file)
    }
    /// Directory containing this workspace's artifacts.
    fn workspace_dir(&self) -> &Path {
        &self.workspace_dir
    }
    /// Full path to the workspace's plan file.
    fn plan_path(&self) -> PathBuf {
        self.workspace_dir.join(&self.file.plan_path)
    }
    /// Full path to the workspace's journal file.
    fn journal_path(&self) -> PathBuf {
        self.workspace_dir.join(&self.file.journal_path)
    }
    /// Look up a task by id.
    fn task(&self, id: &str) -> Option<&MigrationTask> {
        self.file.tasks.iter().find(|task| task.id == id)
    }
    /// Mutable task lookup by id.
    fn task_mut(&mut self, id: &str) -> Option<&mut MigrationTask> {
        self.file.tasks.iter_mut().find(|task| task.id == id)
    }
    /// Set a task's status, failing on an unknown id.
    fn set_status(&mut self, id: &str, status: TaskStatus) -> Result<()> {
        let task = self
            .task_mut(id)
            .with_context(|| format!("unknown task id `{id}`"))?;
        task.status = status;
        Ok(())
    }
    /// First pending task (in declaration order) whose dependencies are
    /// all done, or `None` when nothing is runnable.
    fn next_runnable_task_id(&self) -> Option<String> {
        self.file
            .tasks
            .iter()
            .find(|task| task.status == TaskStatus::Pending && self.dependencies_met(task))
            .map(|task| task.id.clone())
    }
    /// True when every dependency of `task` is `done`. A dependency id
    /// missing from the graph counts as unmet.
    fn dependencies_met(&self, task: &MigrationTask) -> bool {
        task.depends_on.iter().all(|dep| {
            self.file
                .tasks
                .iter()
                .find(|t| &t.id == dep)
                .map(|t| t.status == TaskStatus::Done)
                .unwrap_or(false)
        })
    }
    /// True when `id` names a known task whose dependencies are all done.
    fn can_start(&self, id: &str) -> bool {
        self.task(id)
            .map(|task| self.dependencies_met(task))
            .unwrap_or(false)
    }
    /// Workspace directory rendered relative to `root` for display.
    fn workspace_dir_string(&self, root: &Path) -> String {
        rel_to_root(&self.workspace_dir, root)
    }
    /// Ids of parallel-group tasks that are pending with all dependencies
    /// met — i.e. the set an orchestrator could hand out concurrently now.
    fn ready_parallel_tasks(&self) -> Vec<String> {
        self.file
            .tasks
            .iter()
            .filter(|task| task.parallel_group.is_some())
            .filter(|task| task.status == TaskStatus::Pending)
            .filter(|task| self.dependencies_met(task))
            .map(|task| task.id.clone())
            .collect()
    }
    /// Tally tasks by status: `(pending, running, blocked, done)`.
    fn status_counts(&self) -> (usize, usize, usize, usize) {
        let mut pending = 0;
        let mut running = 0;
        let mut blocked = 0;
        let mut done = 0;
        for task in &self.file.tasks {
            match task.status {
                TaskStatus::Pending => pending += 1,
                TaskStatus::Running => running += 1,
                TaskStatus::Blocked => blocked += 1,
                TaskStatus::Done => done += 1,
            }
        }
        (pending, running, blocked, done)
    }
    /// Build this workspace's row for the on-disk index, stamped with the
    /// current UTC time.
    fn to_index_entry(&self, root: &Path) -> MigrationIndexEntry {
        let (pending, running, blocked, _done) = self.status_counts();
        let ready_parallel_tasks = self.ready_parallel_tasks();
        // Phase heuristic: nothing outstanding => complete; anything
        // running => executing; otherwise still planning.
        let status = if pending == 0 && running == 0 && blocked == 0 {
            IndexStatus::Complete
        } else if running > 0 {
            IndexStatus::Executing
        } else {
            IndexStatus::Planning
        };
        let now = Utc::now();
        MigrationIndexEntry {
            slug: self.file.slug.clone(),
            summary: self.file.summary.clone(),
            workspace: self.workspace_dir_string(root),
            plan: rel_to_root(&self.plan_path(), root),
            journal: rel_to_root(&self.journal_path(), root),
            tasks_path: rel_to_root(&self.workspace_dir.join(TASKS_FILE), root),
            pending_tasks: pending,
            running_tasks: running,
            blocked_tasks: blocked,
            ready_parallel_tasks,
            status,
            updated_at: now.to_rfc3339(),
            updated_at_epoch: now.timestamp(),
        }
    }
}
/// Build the default task graph for a new migration.
///
/// Layout: three sequential planning tasks (`plan-baseline` ->
/// `plan-guardrails` -> `plan-blueprint`), a fan-out of `parallel` scout
/// deep-dives (at least one) plus two stabilization tracks, and a final
/// `plan-cutover` task that depends on everything before it.
fn seed_tasks(summary: &str, parallel: usize) -> Vec<MigrationTask> {
    let mut tasks = Vec::new();
    // Default artifacts every seeded task publishes to.
    let plan_targets = vec!["plan.md".to_string(), "journal.md".to_string()];
    tasks.push(MigrationTask {
        id: "plan-baseline".to_string(),
        title: "Map current + target states".to_string(),
        description: format!(
            "Capture why `{summary}` is needed, current system contracts, and the desired end state in `plan.md`."
        ),
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Document repositories, services, and owners".to_string(),
            "List non-negotiable constraints".to_string(),
        ],
        ..Default::default()
    });
    tasks.push(MigrationTask {
        id: "plan-guardrails".to_string(),
        title: "Design guardrails + approvals".to_string(),
        description: "Spell out kill-switches, approvals, and telemetry gating.".to_string(),
        depends_on: vec!["plan-baseline".to_string()],
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Define approval owners".to_string(),
            "List telemetry + alerting hooks".to_string(),
        ],
        ..Default::default()
    });
    tasks.push(MigrationTask {
        id: "plan-blueprint".to_string(),
        title: "Lock incremental rollout plan".to_string(),
        description: "Lay out the numbered steps and decision records for the migration."
            .to_string(),
        depends_on: vec!["plan-guardrails".to_string()],
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Identify sequencing + dependencies".to_string(),
            "Assign owners to each increment".to_string(),
        ],
        ..Default::default()
    });
    // `1..=parallel.max(1)` always yields at least one workstream, so the
    // previous empty-list fallback was dead code and has been removed.
    let sources: Vec<String> = (1..=parallel.max(1))
        .map(|i| format!("workstream #{i}"))
        .collect();
    for (idx, source) in sources.iter().enumerate() {
        tasks.push(MigrationTask {
            id: format!("parallel-scout-{}", idx + 1),
            title: format!("Deep-dive: {source}"),
            description: format!(
                "Inventory blockers, data contracts, and automation opportunities for `{source}`. Feed findings into journal.md and update plan.md if scope shifts."
            ),
            depends_on: vec!["plan-blueprint".to_string()],
            parallel_group: Some("exploration".to_string()),
            owner_hint: Some("domain expert".to_string()),
            publish_to: plan_targets.clone(),
            checkpoints: vec![
                "Publish progress + artifacts to journal.md".to_string(),
                "Flag shared learnings for other workstreams".to_string(),
            ],
            ..Default::default()
        });
    }
    tasks.push(MigrationTask {
        id: "parallel-telemetry".to_string(),
        title: "Build shared telemetry + rehearsal harness".to_string(),
        description:
            "Codify validation scripts, load tests, and dashboards each workstream will reuse."
                .to_string(),
        depends_on: vec!["plan-blueprint".to_string()],
        parallel_group: Some("stabilization".to_string()),
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Link dashboards in journal.md".to_string(),
            "Tag required signals per task".to_string(),
        ],
        ..Default::default()
    });
    tasks.push(MigrationTask {
        id: "parallel-backfill".to_string(),
        title: "Design data backfill + rollback story".to_string(),
        description: "Document backfill tooling, rehearsal cadence, and rollback triggers."
            .to_string(),
        depends_on: vec!["plan-blueprint".to_string()],
        parallel_group: Some("stabilization".to_string()),
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Note dry-run schedule in journal.md".to_string(),
            "List reversibility safeguards".to_string(),
        ],
        ..Default::default()
    });
    // The final cutover waits on every planning + stabilization task...
    let mut cutover_dependencies = vec![
        "plan-baseline".to_string(),
        "plan-guardrails".to_string(),
        "plan-blueprint".to_string(),
        "parallel-telemetry".to_string(),
        "parallel-backfill".to_string(),
    ];
    // ...and on each scout deep-dive created above (ids are 1-based).
    cutover_dependencies.extend((1..=sources.len()).map(|idx| format!("parallel-scout-{idx}")));
    tasks.push(MigrationTask {
        id: "plan-cutover".to_string(),
        title: "Execute rollout + capture learnings".to_string(),
        description: "Drive the migration, capture deviations, and publish the final hand-off."
            .to_string(),
        depends_on: cutover_dependencies,
        publish_to: plan_targets,
        checkpoints: vec![
            "Attach final verification evidence".to_string(),
            "Document kill-switch + rollback state".to_string(),
        ],
        ..Default::default()
    });
    tasks
}
impl Default for MigrationTask {
    /// An empty task shell: blank id/title/description, `Pending` status,
    /// and no dependencies, group, owner hint, publish targets, or
    /// checkpoints. Used as the base for struct-update syntax in
    /// `seed_tasks`.
    fn default() -> Self {
        Self {
            id: String::default(),
            title: String::default(),
            description: String::default(),
            status: TaskStatus::Pending,
            depends_on: Vec::default(),
            parallel_group: Option::default(),
            owner_hint: Option::default(),
            publish_to: Vec::default(),
            checkpoints: Vec::default(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    /// Completing a task's dependencies makes the dependent task the next
    /// runnable one.
    #[test]
    fn next_task_unlocked_after_dependencies_complete() {
        let tmp = TempDir::new().unwrap();
        // Build a workspace by hand rather than via create_migration_workspace
        // so the test does not depend on the template files.
        let workspace = MigrationWorkspace {
            dir_path: tmp.path().to_path_buf(),
            dir_name: "migration_demo".to_string(),
            plan_path: tmp.path().join("plan.md"),
            journal_path: tmp.path().join("journal.md"),
        };
        fs::write(&workspace.plan_path, "plan").unwrap();
        fs::write(&workspace.journal_path, "journal").unwrap();
        let mut state = MigrationState::new("Demo".to_string(), &workspace, 1);
        assert_eq!(
            state.next_runnable_task_id().as_deref(),
            Some("plan-baseline")
        );
        state.set_status("plan-baseline", TaskStatus::Done).unwrap();
        state
            .set_status("plan-guardrails", TaskStatus::Done)
            .unwrap();
        assert_eq!(
            state.next_runnable_task_id().as_deref(),
            Some("plan-blueprint")
        );
    }
    /// Parallel-group tasks stay unavailable until `plan-blueprint` (and
    /// its predecessors) are done, then all become ready at once.
    #[test]
    fn ready_parallel_tasks_wait_for_blueprint() {
        let tmp = TempDir::new().unwrap();
        let workspace = MigrationWorkspace {
            dir_path: tmp.path().to_path_buf(),
            dir_name: "migration_demo".to_string(),
            plan_path: tmp.path().join("plan.md"),
            journal_path: tmp.path().join("journal.md"),
        };
        fs::write(&workspace.plan_path, "plan").unwrap();
        fs::write(&workspace.journal_path, "journal").unwrap();
        let mut state = MigrationState::new("Demo".to_string(), &workspace, 2);
        assert!(state.ready_parallel_tasks().is_empty());
        state.set_status("plan-baseline", TaskStatus::Done).unwrap();
        state
            .set_status("plan-guardrails", TaskStatus::Done)
            .unwrap();
        state
            .set_status("plan-blueprint", TaskStatus::Done)
            .unwrap();
        let ready = state.ready_parallel_tasks();
        // Order-insensitive comparison: readiness is a set, not a sequence.
        let ready_set: std::collections::HashSet<_> = ready.into_iter().collect();
        let expected = std::collections::HashSet::from([
            "parallel-scout-1".to_string(),
            "parallel-scout-2".to_string(),
            "parallel-telemetry".to_string(),
            "parallel-backfill".to_string(),
        ]);
        assert_eq!(ready_set, expected);
    }
}

View File

@@ -0,0 +1,16 @@
# Migration journal {{MIGRATION_SUMMARY}}
> Workspace: `{{WORKSPACE_NAME}}`
> Created: {{CREATED_AT}}
Use this log for async updates, agent hand-offs, and to publish what was learned during each workstream. Keep entries concise and focused on signals other collaborators need.
## Logging guidance
- Start each entry with a timestamp and author/agent/workstream name.
- Capture what changed, how it was validated, links to diffs/tests, and any open questions.
- Highlight blockers, decisions needed, or knowledge that other agents should adopt.
- Update the plan (`plan.md`) when scope changes; use this journal for progress + lessons.
| Timestamp | Agent / Workstream | Update / Learnings | Blockers & Risks | Next action / owner |
| --------- | ------------------ | ------------------ | ---------------- | ------------------- |
| | | | | |

View File

@@ -0,0 +1,35 @@
# Migration plan {{MIGRATION_SUMMARY}}
> Workspace: `{{WORKSPACE_NAME}}`
> Generated: {{CREATED_AT}}
Use this document as the single source of truth for the migration effort. Keep it updated so any engineer (or agent) can jump in mid-flight.
## 1. Context & stakes
- Current state snapshot
- Target end state and deadline/launch windows
- Guardrails, SLAs, compliance/regulatory constraints
## 2. Incremental plan (numbered)
1. `[Step name]` — Purpose, scope, primary owner/skillset, upstream/downstream dependencies, validation & rollback signals.
2. `…`
Each step must explain:
- Preconditions & artifacts required before starting
- Specific code/data/infrastructure changes
- Telemetry, tests, or dry-runs that prove success
## 3. Parallel workstreams
| Workstream | Objective | Inputs & dependencies | Ownership / skills | Progress & telemetry hooks |
| ---------- | --------- | --------------------- | ------------------ | ------------------------- |
| _Fill in during planning_ | | | | |
## 4. Data + rollout considerations
- Data migration / backfill plan
- Environment readiness, feature flags, or config toggles
- Rollout plan (phases, smoke tests, canaries) and explicit rollback/kill-switch criteria
## 5. Risks, decisions, and follow-ups
- Top risks with mitigation owners
- Open questions / decisions with DRI and due date
- Handoff expectations (reference `journal.md` for ongoing updates)

View File

@@ -0,0 +1,13 @@
You are resuming the active migration. Stay in the repository root and coordinate through the CLI tool so every agent shares the same state.
1. Run `migrate-cli execute`.
- It selects the next runnable task, marks it `running`, prints a detailed brief, and drops a runbook under `runs/`.
- Note the workspace path plus the plan/journal locations from the CLI output.
2. Follow the brief:
- Read any referenced files, services, or dashboards.
- Update `plan.md` when scope changes and log progress plus artifacts in `journal.md`.
- Keep the checkpoints in the runbook so other agents can audit what happened.
3. When you finish, record the result with `migrate-cli execute --task-id <TASK_ID> --status done --note "short summary"`, then run `migrate-cli execute` again to fetch the next task.
4. If you discover blockers, use `--status blocked --note "context"` so the index reflects reality.
Always make the artifacts inside the migration workspace the source of truth: `plan.md` for decisions and sequencing, `journal.md` for hand-offs, `tasks.json`/`runs/` for orchestration metadata.

View File

@@ -0,0 +1,21 @@
You are the migration showrunner for "{{MIGRATION_SUMMARY}}". Spin up the shared tooling and produce a plan that other agents can execute safely.
1. From the repo root run `migrate-cli plan "{{MIGRATION_SUMMARY}}"`.
- It creates `migrations/migration_<slug>/` with `plan.md`, `journal.md`, `tasks.json`, and a `runs/` folder.
- Inspect the CLI output to learn the workspace path.
2. Study the codebase, dependencies, deployment gates, and data contracts. Pull in any diagrams or docs already in the repo.
3. Populate `plan.md` with:
- An executive overview describing the current vs. target state, risks, and unknowns.
- A numbered incremental plan (1., 2., 3., …) that lists owners/skillsets, dependencies, validation steps, and rollback/kill-switch guidance.
- A section detailing how multiple agents can work in parallel, where they should publish progress, and how learnings flow between streams.
- Guardrails for telemetry, backfills, dry runs, and approvals.
4. Keep `journal.md` as the live log for progress, blockers, data snapshots, and hand-offs.
5. When the plan is solid, remind collaborators to run `/continue-migration` (which triggers `migrate-cli execute`) whenever they are ready for the next task brief.
General guidance:
- Call out missing information and request the files/owners you need.
- Prefer automation, reproducible scripts, and links to existing tooling over prose.
- Explicitly document how agents publish updates (journal.md) versus canonical decisions (plan.md).
- Organize tasks so multiple agents can operate concurrently while sharing artifacts.
After sharing the plan in chat, mirror the structure into `plan.md` using `apply_patch` or an editor, and seed `journal.md` with the first entry that summarizes current status and next checkpoints.

View File

@@ -450,6 +450,12 @@ impl App {
AppEvent::OpenReviewCustomPrompt => {
self.chat_widget.show_review_custom_prompt();
}
AppEvent::StartMigration { summary } => {
self.chat_widget.start_migration(summary);
}
AppEvent::ContinueMigration => {
self.chat_widget.continue_migration();
}
AppEvent::FullScreenApprovalRequest(request) => match request {
ApprovalRequest::ApplyPatch { cwd, changes, .. } => {
let _ = tui.enter_alt_screen();

View File

@@ -102,6 +102,14 @@ pub(crate) enum AppEvent {
/// Open the custom prompt option from the review popup.
OpenReviewCustomPrompt,
/// Kick off the `/migrate` workflow after the user names the migration.
StartMigration {
summary: String,
},
/// Prompt Codex to resume a migration via migrate-cli execute.
ContinueMigration,
/// Open the approval popup.
FullScreenApprovalRequest(ApprovalRequest),

View File

@@ -86,6 +86,8 @@ use crate::history_cell::AgentMessageCell;
use crate::history_cell::HistoryCell;
use crate::history_cell::McpToolCallCell;
use crate::markdown::append_markdown;
use crate::migration::build_continue_migration_prompt;
use crate::migration::build_migration_prompt;
#[cfg(target_os = "windows")]
use crate::onboarding::WSL_INSTRUCTIONS;
use crate::render::Insets;
@@ -1234,6 +1236,12 @@ impl ChatWidget {
const INIT_PROMPT: &str = include_str!("../prompt_for_init_command.md");
self.submit_user_message(INIT_PROMPT.to_string().into());
}
SlashCommand::Migrate => {
self.open_migrate_prompt();
}
SlashCommand::ContinueMigration => {
self.app_event_tx.send(AppEvent::ContinueMigration);
}
SlashCommand::Compact => {
self.clear_token_usage();
self.app_event_tx.send(AppEvent::CodexOp(Op::Compact));
@@ -2422,6 +2430,50 @@ impl ChatWidget {
self.bottom_pane.show_view(Box::new(view));
}
/// Begin the `/migrate` flow: announce the plan command in the history
/// and submit a pre-built prompt asking Codex to run `migrate-cli plan`.
pub(crate) fn start_migration(&mut self, summary: String) {
    // Ignore empty / whitespace-only submissions from the prompt view.
    let summary = summary.trim();
    if summary.is_empty() {
        return;
    }
    let prompt = build_migration_prompt(summary);
    self.add_info_message(
        format!("Prompting Codex to run `migrate-cli plan \"{summary}\"`."),
        Some(
            "The tool scaffolds migrations/migration_<slug> with plan.md, journal.md, tasks.json, and runs/."
                .to_string(),
        ),
    );
    self.submit_user_message(prompt.into());
}
/// Handle `/continue-migration`: announce the resume in the history and
/// submit the canned prompt that drives `migrate-cli execute`.
pub(crate) fn continue_migration(&mut self) {
    self.add_info_message(
        "Prompting Codex to resume the active migration via `migrate-cli execute`.".to_string(),
        Some(
            "It will print the next task brief, mark it running, and remind you to update plan.md + journal.md."
                .to_string(),
        ),
    );
    self.submit_user_message(build_continue_migration_prompt().into());
}
/// Show the free-form prompt view asking the user to describe the migration.
/// A non-empty, trimmed answer is forwarded as `AppEvent::StartMigration`.
fn open_migrate_prompt(&mut self) {
    let events = self.app_event_tx.clone();
    let on_submit = Box::new(move |prompt: String| {
        let summary = prompt.trim().to_string();
        if !summary.is_empty() {
            events.send(AppEvent::StartMigration { summary });
        }
    });
    let view = CustomPromptView::new(
        "Describe the migration".to_string(),
        "Example: Phase 2 move billing to Postgres".to_string(),
        Some("We'll ask Codex to run `migrate-cli plan` once you press Enter.".to_string()),
        on_submit,
    );
    self.bottom_pane.show_view(Box::new(view));
}
pub(crate) fn token_usage(&self) -> TokenUsage {
self.token_info
.as_ref()

View File

@@ -54,6 +54,7 @@ pub mod live_wrap;
mod markdown;
mod markdown_render;
mod markdown_stream;
pub mod migration;
pub mod onboarding;
mod pager_overlay;
pub mod public_widgets;

View File

@@ -0,0 +1,123 @@
use chrono::Local;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
pub const MIGRATION_PROMPT_TEMPLATE: &str = include_str!("../prompt_for_migrate_command.md");
pub const CONTINUE_MIGRATION_PROMPT_TEMPLATE: &str =
include_str!("../prompt_for_continue_migration_command.md");
const MIGRATION_PLAN_TEMPLATE: &str = include_str!("../migration_plan_template.md");
const MIGRATION_JOURNAL_TEMPLATE: &str = include_str!("../migration_journal_template.md");
/// Paths produced when a new migration workspace is scaffolded on disk
/// by [`create_migration_workspace`].
#[derive(Debug, Clone)]
pub struct MigrationWorkspace {
    // Directory that holds the migration artifacts.
    pub dir_path: PathBuf,
    // Final directory name, `migration_<slug>` (possibly suffixed `_NN`
    // when earlier attempts already exist).
    pub dir_name: String,
    // Path to the generated `plan.md` inside `dir_path`.
    pub plan_path: PathBuf,
    // Path to the generated `journal.md` inside `dir_path`.
    pub journal_path: PathBuf,
}
/// Scaffold a migration workspace under `base_dir` and seed its files.
///
/// Creates `base_dir` if needed, derives a slug from `summary`, picks the
/// first unused `migration_<slug>[_NN]` directory, and writes `plan.md` and
/// `journal.md` from the bundled templates with the summary, workspace name,
/// and creation timestamp substituted in.
///
/// Returns the paths of everything it created, or the first I/O error hit.
pub fn create_migration_workspace(
    base_dir: &Path,
    summary: &str,
) -> Result<MigrationWorkspace, std::io::Error> {
    fs::create_dir_all(base_dir)?;

    let base_name = format!("migration_{}", sanitize_migration_slug(summary));
    let (dir_path, dir_name) = next_available_migration_dir(base_dir, &base_name);
    fs::create_dir_all(&dir_path)?;

    let timestamp = Local::now().format("%Y-%m-%d %H:%M %Z").to_string();
    let substitutions = [
        ("{{MIGRATION_SUMMARY}}", summary),
        ("{{WORKSPACE_NAME}}", dir_name.as_str()),
        ("{{CREATED_AT}}", timestamp.as_str()),
    ];

    let plan_path = dir_path.join("plan.md");
    fs::write(&plan_path, fill_template(MIGRATION_PLAN_TEMPLATE, &substitutions))?;
    let journal_path = dir_path.join("journal.md");
    fs::write(
        &journal_path,
        fill_template(MIGRATION_JOURNAL_TEMPLATE, &substitutions),
    )?;

    Ok(MigrationWorkspace {
        dir_path,
        dir_name,
        plan_path,
        journal_path,
    })
}
/// Render the `/migrate` prompt by substituting the user's summary into the
/// bundled prompt template.
pub fn build_migration_prompt(summary: &str) -> String {
    let substitutions = [("{{MIGRATION_SUMMARY}}", summary)];
    fill_template(MIGRATION_PROMPT_TEMPLATE, &substitutions)
}
/// The `/continue-migration` prompt takes no substitutions; return the
/// bundled template verbatim.
pub fn build_continue_migration_prompt() -> String {
    String::from(CONTINUE_MIGRATION_PROMPT_TEMPLATE)
}
/// Turn a free-form migration summary into a filesystem-friendly slug.
///
/// Lowercases the input, keeps ASCII alphanumerics, collapses every run of
/// other characters into a single `-`, never emits a leading or trailing
/// dash, and caps the result at 48 characters. An input with no usable
/// characters falls back to a timestamp-based `plan-...` slug so the caller
/// always gets a non-empty name.
pub fn sanitize_migration_slug(summary: &str) -> String {
    let lowered = summary.trim().to_lowercase();
    let mut slug = String::new();
    let mut pending_separator = false;
    for ch in lowered.chars() {
        if ch.is_ascii_alphanumeric() {
            // Deferring the dash until the next keeper guarantees the slug
            // neither starts nor ends with `-`.
            if pending_separator && !slug.is_empty() {
                slug.push('-');
            }
            slug.push(ch);
            pending_separator = false;
        } else {
            pending_separator = true;
        }
    }
    if slug.len() > 48 {
        // Truncation can expose a trailing dash, so trim once more.
        slug = slug
            .chars()
            .take(48)
            .collect::<String>()
            .trim_matches('-')
            .to_string();
    }
    if slug.is_empty() {
        Local::now().format("plan-%Y%m%d-%H%M%S").to_string()
    } else {
        slug
    }
}
/// Find the first directory under `base_dir` that does not exist yet,
/// starting with `base_name` itself and then `base_name_02`, `base_name_03`,
/// and so on. Returns the chosen path together with its final name.
fn next_available_migration_dir(base_dir: &Path, base_name: &str) -> (PathBuf, String) {
    let mut name = base_name.to_string();
    let mut counter = 2;
    loop {
        let path = base_dir.join(&name);
        if !path.exists() {
            return (path, name);
        }
        // Collision: retry with a zero-padded numeric suffix.
        name = format!("{base_name}_{counter:02}");
        counter += 1;
    }
}
/// Apply each `(needle, value)` substitution to `template`, in order.
/// Later replacements see the output of earlier ones, matching the order of
/// the slice the caller passes in.
fn fill_template(template: &str, replacements: &[(&str, &str)]) -> String {
    replacements
        .iter()
        .fold(template.to_string(), |acc, (needle, value)| {
            acc.replace(needle, value)
        })
}
#[cfg(test)]
mod tests {
    use super::sanitize_migration_slug;

    // Emoji, punctuation, and repeated separators collapse into single
    // dashes; the result is lowercase with no leading/trailing dash.
    #[test]
    fn slug_sanitizes_whitespace_and_length() {
        let slug = sanitize_migration_slug(" Launch 🚀 Phase #2 migration :: Big Refactor ");
        assert_eq!(slug, "launch-phase-2-migration-big-refactor");
    }

    // A summary with no ASCII-alphanumeric characters falls back to a
    // timestamped `plan-...` slug so the directory name is never empty.
    #[test]
    fn slug_falls_back_to_timestamp() {
        let slug = sanitize_migration_slug(" ");
        assert!(slug.starts_with("plan-"));
        assert!(slug.len() > 10);
    }
}

View File

@@ -17,6 +17,8 @@ pub enum SlashCommand {
Review,
New,
Init,
Migrate,
ContinueMigration,
Compact,
Undo,
Diff,
@@ -39,6 +41,8 @@ impl SlashCommand {
SlashCommand::New => "start a new chat during a conversation",
SlashCommand::Init => "create an AGENTS.md file with instructions for Codex",
SlashCommand::Compact => "summarize conversation to prevent hitting the context limit",
SlashCommand::Migrate => "ask Codex to run migrate-cli plan and build the workspace",
SlashCommand::ContinueMigration => "resume the migration with migrate-cli execute",
SlashCommand::Review => "review my current changes and find issues",
SlashCommand::Undo => "ask Codex to undo a turn",
SlashCommand::Quit | SlashCommand::Exit => "exit Codex",
@@ -65,6 +69,8 @@ impl SlashCommand {
match self {
SlashCommand::New
| SlashCommand::Init
| SlashCommand::Migrate
| SlashCommand::ContinueMigration
| SlashCommand::Compact
| SlashCommand::Undo
| SlashCommand::Model

44
docs/migrations.md Normal file
View File

@@ -0,0 +1,44 @@
# Codex migrations
Codex ships a purpose-built `migrate-cli` binary plus slash commands so every migration follows the same playbook. The CLI manages workspaces under `migrations/migration_<slug>/`, keeps `.codex/migrate/index.json` updated, and prints detailed task briefs that Codex can execute.
## CLI quickstart
> Run the binary directly (`migrate-cli plan ...`) or via Cargo while developing (`cargo run -p codex-cli --bin migrate-cli -- plan ...`).
### `migrate-cli plan "<description>"`
* Creates `migrations/migration_<slug>/` with:
  * `plan.md` — the canonical blueprint.
  * `journal.md` — a running log of progress, hand-offs, and blockers.
  * `tasks.json` — orchestration metadata and dependencies.
  * `runs/` — runbooks generated per task when execution starts.
* Seeds a dependency-aware task graph so you can parallelize safely.
* Updates `.codex/migrate/index.json` so dashboards (or other agents) discover the workspace.
Use this command whenever you kick off a new initiative. After it runs, open the repo in Codex and use `/migrate` so the agent runs the same command and fills out `plan.md`/`journal.md` automatically.
### `migrate-cli execute [TASK_ID] [--status <state>] [--note "..."]`
* With no arguments it picks the next runnable task, marks it `running`, prints a task brief (workspace path, plan/journal locations, checkpoints), and drops a runbook under `runs/`.
* Use `--task-id <id> --status done --note "summary"` when you finish so the CLI records the journal entry and advances the graph.
* Use `--status blocked` to flag issues, or pass `--workspace <path>` if you are not working on the most recent migration.
Every invocation refreshes `.codex/migrate/index.json`, so team members and tools always see accurate status.
## Slash commands inside Codex
| Command | Purpose |
| --- | --- |
| `/migrate` | Ask Codex to run `migrate-cli plan` with your description, gather context, and populate `plan.md`/`journal.md`. |
| `/continue-migration` | Ask Codex to run `migrate-cli execute`, accept the next task brief, and push that task forward. |
Because the CLI writes the real artifacts, the slash commands simply queue up the right instructions so the agent runs the tool for you.
## Recommended workflow to share with your team
1. **Plan** — Open the repo in Codex and run `/migrate`. Codex will run `migrate-cli plan "<description>"`, scaffold the workspace, and fill in the executive overview plus incremental plan inside `plan.md` and `journal.md`.
2. **Execute** — Whenever you want the next piece of work, run `/continue-migration`. Codex runs `migrate-cli execute`, receives the task brief, and uses it (plus repo context) to do the work. When done, it should mark the task complete with `migrate-cli execute --task-id <ID> --status done --note "summary"`.
3. **Repeat** — Continue using `/continue-migration` to keep the task graph flowing. `tasks.json` and `.codex/migrate/index.json` stay up to date automatically, and `runs/` accumulates runbooks for auditability.
Since everything lives in the repo, you can commit `plan.md`, `journal.md`, `tasks.json`, and `runs/` so asynchronous contributors (human or agent) always have the latest state.

View File

@@ -17,6 +17,8 @@ Control Codex's behavior during an interactive session with slash commands.
| `/review` | review my current changes and find issues |
| `/new` | start a new chat during a conversation |
| `/init` | create an AGENTS.md file with instructions for Codex |
| `/migrate` | ask Codex to run `migrate-cli plan` and populate the migration workspace |
| `/continue-migration` | ask Codex to run `migrate-cli execute` and work the next task |
| `/compact` | summarize conversation to prevent hitting the context limit |
| `/undo` | ask Codex to undo a turn |
| `/diff` | show git diff (including untracked files) |