Compare commits

...

6 Commits

Author SHA1 Message Date
Ahmed Ibrahim
3a585c0b96 codex: fix CI failure on PR #15023
Remove the accidental mcp-core workspace leak from the rollout branch and trim dependencies left behind by the split.

Co-authored-by: Codex <noreply@openai.com>
2026-03-18 05:11:15 +00:00
Ahmed Ibrahim
90ffb3fb4a Split rollout crate out of codex-core
Co-authored-by: Codex <noreply@openai.com>
2026-03-18 04:51:40 +00:00
Ahmed Ibrahim
1cf68f940c codex: fix CI failure on PR #15010
Co-authored-by: Codex <noreply@openai.com>
2026-03-18 04:05:05 +00:00
Ahmed Ibrahim
0f406c3de0 codex: address PR review feedback (#15010)
Co-authored-by: Codex <noreply@openai.com>
2026-03-18 03:49:51 +00:00
Ahmed Ibrahim
8b3fc35e0b fix: unblock config loader split CI
Co-authored-by: Codex <noreply@openai.com>
2026-03-18 03:34:46 +00:00
Ahmed Ibrahim
38a28973a8 refactor: move config loader internals into codex-config
Extract config-layer IO and managed requirements loading into codex-config so codex-core keeps a thinner config loader facade.

Co-authored-by: Codex <noreply@openai.com>
2026-03-18 02:30:22 +00:00
49 changed files with 1608 additions and 1067 deletions

34
codex-rs/Cargo.lock generated
View File

@@ -1433,6 +1433,7 @@ dependencies = [
"codex-otel",
"codex-protocol",
"codex-rmcp-client",
"codex-rollout",
"codex-shell-command",
"codex-state",
"codex-utils-absolute-path",
@@ -1786,10 +1787,12 @@ name = "codex-config"
version = "0.0.0"
dependencies = [
"anyhow",
"base64 0.22.1",
"codex-app-server-protocol",
"codex-execpolicy",
"codex-protocol",
"codex-utils-absolute-path",
"core-foundation 0.9.4",
"futures",
"multimap",
"pretty_assertions",
@@ -1802,6 +1805,7 @@ dependencies = [
"toml 0.9.11+spec-1.1.0",
"toml_edit 0.24.0+spec-1.1.0",
"tracing",
"windows-sys 0.52.0",
]
[[package]]
@@ -1842,7 +1846,6 @@ dependencies = [
"codex-config",
"codex-connectors",
"codex-execpolicy",
"codex-file-search",
"codex-git",
"codex-hooks",
"codex-keyring-store",
@@ -1850,6 +1853,7 @@ dependencies = [
"codex-otel",
"codex-protocol",
"codex-rmcp-client",
"codex-rollout",
"codex-secrets",
"codex-shell-command",
"codex-shell-escalation",
@@ -1866,7 +1870,6 @@ dependencies = [
"codex-utils-stream-parser",
"codex-utils-string",
"codex-windows-sandbox",
"core-foundation 0.9.4",
"core_test_support",
"csv",
"ctor 0.6.3",
@@ -1911,7 +1914,6 @@ dependencies = [
"test-case",
"test-log",
"thiserror 2.0.18",
"time",
"tokio",
"tokio-tungstenite",
"tokio-util",
@@ -1926,7 +1928,6 @@ dependencies = [
"walkdir",
"which",
"wildmatch",
"windows-sys 0.52.0",
"wiremock",
"zip",
"zstd",
@@ -1960,6 +1961,7 @@ dependencies = [
"codex-feedback",
"codex-otel",
"codex-protocol",
"codex-rollout",
"codex-utils-absolute-path",
"codex-utils-cargo-bin",
"codex-utils-cli",
@@ -2378,6 +2380,28 @@ dependencies = [
"which",
]
[[package]]
name = "codex-rollout"
version = "0.0.0"
dependencies = [
"anyhow",
"async-trait",
"chrono",
"codex-file-search",
"codex-otel",
"codex-protocol",
"codex-state",
"codex-utils-absolute-path",
"pretty_assertions",
"serde",
"serde_json",
"tempfile",
"time",
"tokio",
"tracing",
"uuid",
]
[[package]]
name = "codex-secrets"
version = "0.0.0"
@@ -2513,6 +2537,7 @@ dependencies = [
"codex-login",
"codex-otel",
"codex-protocol",
"codex-rollout",
"codex-shell-command",
"codex-state",
"codex-tui-app-server",
@@ -2604,6 +2629,7 @@ dependencies = [
"codex-login",
"codex-otel",
"codex-protocol",
"codex-rollout",
"codex-shell-command",
"codex-state",
"codex-utils-absolute-path",

View File

@@ -41,6 +41,7 @@ members = [
"responses-api-proxy",
"stdio-to-uds",
"otel",
"rollout",
"tui",
"tui_app_server",
"utils/absolute-path",
@@ -120,6 +121,7 @@ codex-ollama = { path = "ollama" }
codex-otel = { path = "otel" }
codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-rollout = { path = "rollout" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-secrets = { path = "secrets" }

View File

@@ -43,6 +43,7 @@ codex-login = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-feedback = { workspace = true }
codex-rollout = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-state = { workspace = true }
codex-utils-absolute-path = { workspace = true }

View File

@@ -103,7 +103,6 @@ use codex_app_server_protocol::build_turns_from_rollout_items;
use codex_app_server_protocol::convert_patch_changes;
use codex_core::CodexThread;
use codex_core::ThreadManager;
use codex_core::find_thread_name_by_id;
use codex_core::review_format::format_review_findings_block;
use codex_core::review_prompts;
use codex_core::sandboxing::intersect_permission_profiles;
@@ -131,6 +130,7 @@ use codex_protocol::request_permissions::RequestPermissionProfile as CoreRequest
use codex_protocol::request_permissions::RequestPermissionsResponse as CoreRequestPermissionsResponse;
use codex_protocol::request_user_input::RequestUserInputAnswer as CoreRequestUserInputAnswer;
use codex_protocol::request_user_input::RequestUserInputResponse as CoreRequestUserInputResponse;
use codex_rollout::find_thread_name_by_id;
use codex_shell_command::parse_command::shlex_join;
use std::collections::HashMap;
use std::convert::TryFrom;

View File

@@ -178,14 +178,10 @@ use codex_cloud_requirements::cloud_requirements_loader;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::CodexThread;
use codex_core::Cursor as RolloutCursor;
use codex_core::NewThread;
use codex_core::RolloutRecorder;
use codex_core::SessionMeta;
use codex_core::SteerInputError;
use codex_core::ThreadConfigSnapshot;
use codex_core::ThreadManager;
use codex_core::ThreadSortKey as CoreThreadSortKey;
use codex_core::auth::AuthMode as CoreAuthMode;
use codex_core::auth::CLIENT_ID;
use codex_core::auth::login_with_api_key;
@@ -208,17 +204,12 @@ use codex_core::exec_env::create_env;
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use codex_core::features::Stage;
use codex_core::find_archived_thread_path_by_id_str;
use codex_core::find_thread_name_by_id;
use codex_core::find_thread_names_by_ids;
use codex_core::find_thread_path_by_id_str;
use codex_core::git_info::git_diff_to_remote;
use codex_core::mcp::auth::discover_supported_scopes;
use codex_core::mcp::auth::resolve_oauth_scopes;
use codex_core::mcp::collect_mcp_snapshot;
use codex_core::mcp::group_tools_by_server;
use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
use codex_core::parse_cursor;
use codex_core::plugins::MarketplaceError;
use codex_core::plugins::MarketplacePluginSourceSummary;
use codex_core::plugins::PluginInstallError as CorePluginInstallError;
@@ -226,13 +217,7 @@ use codex_core::plugins::PluginInstallRequest;
use codex_core::plugins::PluginReadRequest;
use codex_core::plugins::PluginUninstallError as CorePluginUninstallError;
use codex_core::plugins::load_plugin_apps;
use codex_core::read_head_for_summary;
use codex_core::read_session_meta_line;
use codex_core::rollout_date_parts;
use codex_core::sandboxing::SandboxPermissions;
use codex_core::state_db::StateDbHandle;
use codex_core::state_db::get_state_db;
use codex_core::state_db::reconcile_rollout;
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
use codex_core::windows_sandbox::WindowsSandboxSetupMode as CoreWindowsSandboxSetupMode;
use codex_core::windows_sandbox::WindowsSandboxSetupRequest;
@@ -270,6 +255,24 @@ use codex_protocol::protocol::W3cTraceContext;
use codex_protocol::user_input::MAX_USER_INPUT_TEXT_CHARS;
use codex_protocol::user_input::UserInput as CoreInputItem;
use codex_rmcp_client::perform_oauth_login_return_url;
use codex_rollout::ARCHIVED_SESSIONS_SUBDIR;
use codex_rollout::Cursor as RolloutCursor;
use codex_rollout::RolloutRecorder;
use codex_rollout::SESSIONS_SUBDIR;
use codex_rollout::SessionMeta;
use codex_rollout::ThreadItem;
use codex_rollout::ThreadSortKey as CoreThreadSortKey;
use codex_rollout::find_archived_thread_path_by_id_str;
use codex_rollout::find_thread_name_by_id;
use codex_rollout::find_thread_names_by_ids;
use codex_rollout::find_thread_path_by_id_str;
use codex_rollout::parse_cursor;
use codex_rollout::read_head_for_summary;
use codex_rollout::read_session_meta_line;
use codex_rollout::rollout_date_parts;
use codex_rollout::state_db::StateDbHandle;
use codex_rollout::state_db::get_state_db;
use codex_rollout::state_db::reconcile_rollout;
use codex_state::StateRuntime;
use codex_state::ThreadMetadataBuilder;
use codex_state::log_db::LogDbLayer;
@@ -2698,10 +2701,7 @@ impl CodexMessageProcessor {
let rollout_path_display = archived_path.display().to_string();
let fallback_provider = self.config.model_provider_id.clone();
let state_db_ctx = get_state_db(&self.config).await;
let archived_folder = self
.config
.codex_home
.join(codex_core::ARCHIVED_SESSIONS_SUBDIR);
let archived_folder = self.config.codex_home.join(ARCHIVED_SESSIONS_SUBDIR);
let result: Result<Thread, JSONRPCErrorError> = async {
let canonical_archived_dir = tokio::fs::canonicalize(&archived_folder).await.map_err(
@@ -2759,7 +2759,7 @@ impl CodexMessageProcessor {
});
};
let sessions_folder = self.config.codex_home.join(codex_core::SESSIONS_SUBDIR);
let sessions_folder = self.config.codex_home.join(SESSIONS_SUBDIR);
let dest_dir = sessions_folder.join(year).join(month).join(day);
let restored_path = dest_dir.join(&file_name);
tokio::fs::create_dir_all(&dest_dir)
@@ -4109,7 +4109,7 @@ impl CodexMessageProcessor {
}
}
GetConversationSummaryParams::ThreadId { conversation_id } => {
match codex_core::find_thread_path_by_id_str(
match find_thread_path_by_id_str(
&self.config.codex_home,
&conversation_id.to_string(),
)
@@ -4961,7 +4961,7 @@ impl CodexMessageProcessor {
rollout_path: &Path,
) -> Result<(), JSONRPCErrorError> {
// Verify rollout_path is under sessions dir.
let rollout_folder = self.config.codex_home.join(codex_core::SESSIONS_SUBDIR);
let rollout_folder = self.config.codex_home.join(SESSIONS_SUBDIR);
let canonical_sessions_dir = match tokio::fs::canonicalize(&rollout_folder).await {
Ok(path) => path,
@@ -5046,10 +5046,7 @@ impl CodexMessageProcessor {
// Move the rollout file to archived.
let result: std::io::Result<()> = async move {
let archive_folder = self
.config
.codex_home
.join(codex_core::ARCHIVED_SESSIONS_SUBDIR);
let archive_folder = self.config.codex_home.join(ARCHIVED_SESSIONS_SUBDIR);
tokio::fs::create_dir_all(&archive_folder).await?;
let archived_path = archive_folder.join(&file_name);
tokio::fs::rename(&canonical_rollout_path, &archived_path).await?;
@@ -7720,7 +7717,7 @@ async fn read_summary_from_state_db_context_by_thread_id(
}
async fn summary_from_thread_list_item(
it: codex_core::ThreadItem,
it: ThreadItem,
fallback_provider: &str,
state_db_ctx: Option<&StateDbHandle>,
) -> Option<ConversationSummary> {

View File

@@ -1,7 +1,7 @@
use codex_app_server_protocol::ThreadSourceKind;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SubAgentSource as CoreSubAgentSource;
use codex_rollout::INTERACTIVE_SESSION_SOURCES;
pub(crate) fn compute_source_filters(
source_kinds: Option<Vec<ThreadSourceKind>>,

View File

@@ -18,8 +18,8 @@ use codex_app_server_protocol::ThreadUnarchiveResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_core::find_thread_path_by_id_str;
use codex_rollout::ARCHIVED_SESSIONS_SUBDIR;
use codex_rollout::find_thread_path_by_id_str;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;

View File

@@ -22,13 +22,13 @@ use codex_app_server_protocol::ThreadStatus;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_protocol::ThreadId;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SubAgentSource;
use codex_rollout::ARCHIVED_SESSIONS_SUBDIR;
use core_test_support::responses;
use pretty_assertions::assert_eq;
use std::cmp::Reverse;

View File

@@ -18,10 +18,10 @@ use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStatus;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_core::state_db::reconcile_rollout;
use codex_protocol::ThreadId;
use codex_protocol::protocol::GitInfo as RolloutGitInfo;
use codex_rollout::ARCHIVED_SESSIONS_SUBDIR;
use codex_rollout::state_db::reconcile_rollout;
use codex_state::StateRuntime;
use pretty_assertions::assert_eq;
use serde_json::Value;

View File

@@ -15,8 +15,8 @@ use codex_app_server_protocol::ThreadUnarchivedNotification;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput;
use codex_core::find_archived_thread_path_by_id_str;
use codex_core::find_thread_path_by_id_str;
use codex_rollout::find_archived_thread_path_by_id_str;
use codex_rollout::find_thread_path_by_id_str;
use pretty_assertions::assert_eq;
use serde_json::Value;
use std::fs::FileTimes;

View File

@@ -4,10 +4,14 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[lib]
doctest = false
[lints]
workspace = true
[dependencies]
base64 = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-execpolicy = { workspace = true }
codex-protocol = { workspace = true }
@@ -24,6 +28,16 @@ toml = { workspace = true }
toml_edit = { workspace = true }
tracing = { workspace = true }
[target.'cfg(target_os = "macos")'.dependencies]
core-foundation = "0.9"
[target.'cfg(target_os = "windows")'.dependencies]
windows-sys = { version = "0.52", features = [
"Win32_Foundation",
"Win32_System_Com",
"Win32_UI_Shell",
] }
[dev-dependencies]
anyhow = { workspace = true }
pretty_assertions = { workspace = true }

View File

@@ -1,10 +1,10 @@
use super::LoaderOverrides;
use crate::LoaderOverrides;
use crate::config_error_from_toml;
use crate::io_error_from_config_error;
#[cfg(target_os = "macos")]
use super::macos::ManagedAdminConfigLayer;
use crate::macos::ManagedAdminConfigLayer;
#[cfg(target_os = "macos")]
use super::macos::load_managed_admin_config_layer;
use codex_config::config_error_from_toml;
use codex_config::io_error_from_config_error;
use crate::macos::load_managed_admin_config_layer;
use codex_utils_absolute_path::AbsolutePathBuf;
use std::io;
use std::path::Path;
@@ -16,26 +16,26 @@ use toml::Value as TomlValue;
const CODEX_MANAGED_CONFIG_SYSTEM_PATH: &str = "/etc/codex/managed_config.toml";
#[derive(Debug, Clone)]
pub(super) struct MangedConfigFromFile {
pub struct ManagedConfigFromFile {
pub managed_config: TomlValue,
pub file: AbsolutePathBuf,
}
#[derive(Debug, Clone)]
pub(super) struct ManagedConfigFromMdm {
pub struct ManagedConfigFromMdm {
pub managed_config: TomlValue,
pub raw_toml: String,
}
#[derive(Debug, Clone)]
pub(super) struct LoadedConfigLayers {
pub struct LoadedConfigLayers {
/// If present, data read from a file such as `/etc/codex/managed_config.toml`.
pub managed_config: Option<MangedConfigFromFile>,
pub managed_config: Option<ManagedConfigFromFile>,
/// If present, data read from managed preferences (macOS only).
pub managed_config_from_mdm: Option<ManagedConfigFromMdm>,
}
pub(super) async fn load_config_layers_internal(
pub async fn load_config_layers_internal(
codex_home: &Path,
overrides: LoaderOverrides,
) -> io::Result<LoadedConfigLayers> {
@@ -59,7 +59,7 @@ pub(super) async fn load_config_layers_internal(
let managed_config =
read_config_from_path(&managed_config_path, /*log_missing_as_info*/ false)
.await?
.map(|managed_config| MangedConfigFromFile {
.map(|managed_config| ManagedConfigFromFile {
managed_config,
file: managed_config_path.clone(),
});
@@ -88,7 +88,7 @@ fn map_managed_admin_layer(layer: ManagedAdminConfigLayer) -> ManagedConfigFromM
}
}
pub(super) async fn read_config_from_path(
async fn read_config_from_path(
path: impl AsRef<Path>,
log_missing_as_info: bool,
) -> io::Result<Option<TomlValue>> {
@@ -120,8 +120,7 @@ pub(super) async fn read_config_from_path(
}
}
/// Return the default managed config path.
pub(super) fn managed_config_default_path(codex_home: &Path) -> PathBuf {
fn managed_config_default_path(codex_home: &Path) -> PathBuf {
#[cfg(unix)]
{
let _ = codex_home;

View File

@@ -3,6 +3,10 @@ mod config_requirements;
mod constraint;
mod diagnostics;
mod fingerprint;
mod layer_io;
mod loader;
#[cfg(target_os = "macos")]
mod macos;
mod merge;
mod overrides;
mod requirements_exec_policy;
@@ -44,6 +48,15 @@ pub use diagnostics::format_config_error;
pub use diagnostics::format_config_error_with_source;
pub use diagnostics::io_error_from_config_error;
pub use fingerprint::version_for_toml;
pub use layer_io::LoadedConfigLayers;
pub use layer_io::ManagedConfigFromFile;
pub use layer_io::ManagedConfigFromMdm;
pub use layer_io::load_config_layers_internal;
pub use loader::load_managed_admin_requirements;
pub use loader::load_requirements_from_legacy_scheme;
pub use loader::load_requirements_toml;
pub use loader::system_config_toml_file;
pub use loader::system_requirements_toml_file;
pub use merge::merge_toml_values;
pub use overrides::build_cli_overrides_layer;
pub use requirements_exec_policy::RequirementsExecPolicy;

View File

@@ -0,0 +1,236 @@
use crate::ConfigRequirementsToml;
use crate::ConfigRequirementsWithSources;
use crate::LoadedConfigLayers;
use crate::RequirementSource;
#[cfg(target_os = "macos")]
use crate::macos::load_managed_admin_requirements_toml;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::protocol::AskForApproval;
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use std::io;
use std::path::Path;
#[cfg(windows)]
use std::path::PathBuf;
#[cfg(unix)]
pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml";
#[cfg(windows)]
const DEFAULT_PROGRAM_DATA_DIR_WINDOWS: &str = r"C:\ProgramData";
/// Merge requirements from `requirements_toml_file` into
/// `config_requirements_toml`, filling in only fields that are still unset.
///
/// A missing file is not an error; read and parse failures are surfaced as
/// `io::Error`s that name the offending path.
pub async fn load_requirements_toml(
    config_requirements_toml: &mut ConfigRequirementsWithSources,
    requirements_toml_file: impl AsRef<Path>,
) -> io::Result<()> {
    let requirements_toml_file =
        AbsolutePathBuf::from_absolute_path(requirements_toml_file.as_ref())?;

    // Read the file up front; an absent file simply means "no requirements".
    let contents = match tokio::fs::read_to_string(&requirements_toml_file).await {
        Ok(contents) => contents,
        Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(()),
        Err(err) => {
            return Err(io::Error::new(
                err.kind(),
                format!(
                    "Failed to read requirements file {}: {err}",
                    requirements_toml_file.as_ref().display(),
                ),
            ));
        }
    };

    let requirements_config: ConfigRequirementsToml =
        toml::from_str(&contents).map_err(|err| {
            io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "Error parsing requirements file {}: {err}",
                    requirements_toml_file.as_ref().display(),
                ),
            )
        })?;

    config_requirements_toml.merge_unset_fields(
        RequirementSource::SystemRequirementsToml {
            file: requirements_toml_file.clone(),
        },
        requirements_config,
    );
    Ok(())
}
/// Fold managed-admin (MDM) requirements into `config_requirements_toml`.
///
/// On macOS this defers to `load_managed_admin_requirements_toml`, optionally
/// seeded from `managed_config_requirements_base64`; on every other platform
/// it is a deliberate no-op that still returns `Ok(())`.
pub async fn load_managed_admin_requirements(
    config_requirements_toml: &mut ConfigRequirementsWithSources,
    managed_config_requirements_base64: Option<&str>,
) -> io::Result<()> {
    #[cfg(target_os = "macos")]
    {
        load_managed_admin_requirements_toml(
            config_requirements_toml,
            managed_config_requirements_base64,
        )
        .await
    }
    #[cfg(not(target_os = "macos"))]
    {
        // Explicitly discard the parameters to silence unused-variable
        // warnings on non-macOS builds.
        let _ = config_requirements_toml;
        let _ = managed_config_requirements_base64;
        Ok(())
    }
}
/// Absolute path of the system-wide `requirements.toml` on Unix
/// (`/etc/codex/requirements.toml`).
#[cfg(unix)]
pub fn system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
    AbsolutePathBuf::from_absolute_path(Path::new("/etc/codex/requirements.toml"))
}
/// Absolute path of the system-wide `requirements.toml` on Windows
/// (resolved under the ProgramData-based Codex system directory).
#[cfg(windows)]
pub fn system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
    windows_system_requirements_toml_file()
}
/// Absolute path of the system-wide `config.toml` on Unix
/// (see `SYSTEM_CONFIG_TOML_FILE_UNIX`).
#[cfg(unix)]
pub fn system_config_toml_file() -> io::Result<AbsolutePathBuf> {
    AbsolutePathBuf::from_absolute_path(Path::new(SYSTEM_CONFIG_TOML_FILE_UNIX))
}
/// Absolute path of the system-wide `config.toml` on Windows
/// (resolved under the ProgramData-based Codex system directory).
#[cfg(windows)]
pub fn system_config_toml_file() -> io::Result<AbsolutePathBuf> {
    windows_system_config_toml_file()
}
/// Root of Codex's system-wide configuration on Windows:
/// `<ProgramData>\OpenAI\Codex`, falling back to the conventional
/// `C:\ProgramData` when the known-folder lookup fails.
#[cfg(windows)]
fn windows_codex_system_dir() -> PathBuf {
    let program_data = match windows_program_data_dir_from_known_folder() {
        Ok(dir) => dir,
        Err(err) => {
            // Resolution failure is non-fatal: log it and use the default.
            tracing::warn!(
                error = %err,
                "Failed to resolve ProgramData known folder; using default path"
            );
            PathBuf::from(DEFAULT_PROGRAM_DATA_DIR_WINDOWS)
        }
    };
    program_data.join("OpenAI").join("Codex")
}
/// Windows location of the system `requirements.toml`:
/// `<ProgramData>\OpenAI\Codex\requirements.toml`.
#[cfg(windows)]
fn windows_system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
    AbsolutePathBuf::try_from(windows_codex_system_dir().join("requirements.toml"))
}
/// Windows location of the system `config.toml`:
/// `<ProgramData>\OpenAI\Codex\config.toml`.
#[cfg(windows)]
fn windows_system_config_toml_file() -> io::Result<AbsolutePathBuf> {
    AbsolutePathBuf::try_from(windows_codex_system_dir().join("config.toml"))
}
/// Resolve the Windows `ProgramData` directory via `SHGetKnownFolderPath`.
///
/// # Errors
/// Returns an `io::Error` when the shell API reports a failing HRESULT or
/// hands back a null path pointer.
#[cfg(windows)]
fn windows_program_data_dir_from_known_folder() -> io::Result<PathBuf> {
    use std::ffi::OsString;
    use std::os::windows::ffi::OsStringExt;
    use windows_sys::Win32::System::Com::CoTaskMemFree;
    use windows_sys::Win32::UI::Shell::FOLDERID_ProgramData;
    use windows_sys::Win32::UI::Shell::KF_FLAG_DEFAULT;
    use windows_sys::Win32::UI::Shell::SHGetKnownFolderPath;
    let mut path_ptr = std::ptr::null_mut::<u16>();
    let known_folder_flags = u32::try_from(KF_FLAG_DEFAULT).map_err(|_| {
        io::Error::other(format!(
            "KF_FLAG_DEFAULT did not fit in u32: {KF_FLAG_DEFAULT}"
        ))
    })?;
    // Known folder IDs reference:
    // https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid
    // SAFETY: all arguments are valid; path_ptr is an out-parameter the API
    // fills with a CoTaskMem-allocated, NUL-terminated wide string.
    let hr = unsafe {
        SHGetKnownFolderPath(&FOLDERID_ProgramData, known_folder_flags, 0, &mut path_ptr)
    };
    if hr != 0 {
        // Per the documented API contract the out pointer must be released
        // with CoTaskMemFree whether the call succeeds or fails, so free it
        // here to avoid leaking on the error path.
        if !path_ptr.is_null() {
            // SAFETY: path_ptr was written by SHGetKnownFolderPath and is
            // CoTaskMem-allocated; it is freed exactly once.
            unsafe { CoTaskMemFree(path_ptr.cast()) };
        }
        return Err(io::Error::other(format!(
            "SHGetKnownFolderPath(FOLDERID_ProgramData) failed with HRESULT {hr:#010x}"
        )));
    }
    if path_ptr.is_null() {
        return Err(io::Error::other(
            "SHGetKnownFolderPath(FOLDERID_ProgramData) returned a null pointer",
        ));
    }
    // SAFETY: on success path_ptr points at a valid NUL-terminated UTF-16
    // string. We measure its length up to the terminator, copy the contents
    // into an owned PathBuf, then free the allocation exactly once.
    let path = unsafe {
        let mut len = 0usize;
        while *path_ptr.add(len) != 0 {
            len += 1;
        }
        let wide = std::slice::from_raw_parts(path_ptr, len);
        let path = PathBuf::from(OsString::from_wide(wide));
        CoTaskMemFree(path_ptr.cast());
        path
    };
    Ok(path)
}
/// Best-effort support for the legacy `managed_config.toml` scheme:
/// interpret already-loaded managed config layers as requirement
/// specifications and merge any fields that are still unset.
///
/// Layers are processed in a fixed order: the MDM-delivered config first,
/// then the on-disk managed config file.
pub async fn load_requirements_from_legacy_scheme(
    config_requirements_toml: &mut ConfigRequirementsWithSources,
    loaded_config_layers: LoadedConfigLayers,
) -> io::Result<()> {
    let LoadedConfigLayers {
        managed_config,
        managed_config_from_mdm,
    } = loaded_config_layers;

    // Collect (source, raw TOML) pairs in processing order.
    let mut layers = Vec::new();
    if let Some(config) = managed_config_from_mdm {
        layers.push((
            RequirementSource::LegacyManagedConfigTomlFromMdm,
            config.managed_config,
        ));
    }
    if let Some(config) = managed_config {
        layers.push((
            RequirementSource::LegacyManagedConfigTomlFromFile { file: config.file },
            config.managed_config,
        ));
    }

    for (source, config) in layers {
        let legacy_config: LegacyManagedConfigToml =
            config.try_into().map_err(|err: toml::de::Error| {
                io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Failed to parse config requirements as TOML: {err}"),
                )
            })?;
        let requirements = ConfigRequirementsToml::from(legacy_config);
        config_requirements_toml.merge_unset_fields(source, requirements);
    }
    Ok(())
}
/// The subset of the legacy `managed_config.toml` schema that can be mapped
/// onto modern config requirements. Keys outside this subset are ignored
/// during deserialization (serde's default for unknown fields).
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
struct LegacyManagedConfigToml {
    // Legacy required approval policy; mapped to a one-element allow-list.
    approval_policy: Option<AskForApproval>,
    // Legacy required sandbox mode; mapped to an allowed-modes list that
    // always includes ReadOnly (see the From impl below).
    sandbox_mode: Option<SandboxMode>,
}
impl From<LegacyManagedConfigToml> for ConfigRequirementsToml {
fn from(legacy: LegacyManagedConfigToml) -> Self {
let mut config_requirements_toml = ConfigRequirementsToml::default();
let LegacyManagedConfigToml {
approval_policy,
sandbox_mode,
} = legacy;
if let Some(approval_policy) = approval_policy {
config_requirements_toml.allowed_approval_policies = Some(vec![approval_policy]);
}
if let Some(sandbox_mode) = sandbox_mode {
let required_mode = sandbox_mode.into();
let mut allowed_modes = vec![crate::SandboxModeRequirement::ReadOnly];
if required_mode != crate::SandboxModeRequirement::ReadOnly {
allowed_modes.push(required_mode);
}
config_requirements_toml.allowed_sandbox_modes = Some(allowed_modes);
}
config_requirements_toml
}
}

View File

@@ -1,6 +1,6 @@
use super::ConfigRequirementsToml;
use super::ConfigRequirementsWithSources;
use super::RequirementSource;
use crate::ConfigRequirementsToml;
use crate::ConfigRequirementsWithSources;
use crate::RequirementSource;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use core_foundation::base::TCFType;
@@ -16,19 +16,19 @@ const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";
const MANAGED_PREFERENCES_REQUIREMENTS_KEY: &str = "requirements_toml_base64";
#[derive(Debug, Clone)]
pub(super) struct ManagedAdminConfigLayer {
pub struct ManagedAdminConfigLayer {
pub config: TomlValue,
pub raw_toml: String,
}
pub(super) fn managed_preferences_requirements_source() -> RequirementSource {
fn managed_preferences_requirements_source() -> RequirementSource {
RequirementSource::MdmManagedPreferences {
domain: MANAGED_PREFERENCES_APPLICATION_ID.to_string(),
key: MANAGED_PREFERENCES_REQUIREMENTS_KEY.to_string(),
}
}
pub(crate) async fn load_managed_admin_config_layer(
pub async fn load_managed_admin_config_layer(
override_base64: Option<&str>,
) -> io::Result<Option<ManagedAdminConfigLayer>> {
if let Some(encoded) = override_base64 {
@@ -61,7 +61,7 @@ fn load_managed_admin_config() -> io::Result<Option<ManagedAdminConfigLayer>> {
.transpose()
}
pub(crate) async fn load_managed_admin_requirements_toml(
pub async fn load_managed_admin_requirements_toml(
target: &mut ConfigRequirementsWithSources,
override_base64: Option<&str>,
) -> io::Result<()> {

View File

@@ -37,7 +37,6 @@ codex-config = { workspace = true }
codex-shell-command = { workspace = true }
codex-skills = { workspace = true }
codex-execpolicy = { workspace = true }
codex-file-search = { workspace = true }
codex-git = { workspace = true }
codex-hooks = { workspace = true }
codex-keyring-store = { workspace = true }
@@ -45,6 +44,7 @@ codex-network-proxy = { workspace = true }
codex-otel = { workspace = true }
codex-artifacts = { workspace = true }
codex-protocol = { workspace = true }
codex-rollout = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-state = { workspace = true }
codex-utils-absolute-path = { workspace = true }
@@ -93,12 +93,6 @@ similar = { workspace = true }
tempfile = { workspace = true }
test-log = { workspace = true }
thiserror = { workspace = true }
time = { workspace = true, features = [
"formatting",
"parsing",
"local-offset",
"macros",
] }
tokio = { workspace = true, features = [
"io-std",
"macros",
@@ -123,7 +117,6 @@ landlock = { workspace = true }
seccompiler = { workspace = true }
[target.'cfg(target_os = "macos")'.dependencies]
core-foundation = "0.9"
keyring = { workspace = true, features = ["apple-native"] }
# Build OpenSSL from source for musl builds.
@@ -136,11 +129,6 @@ openssl-sys = { workspace = true, features = ["vendored"] }
[target.'cfg(target_os = "windows")'.dependencies]
keyring = { workspace = true, features = ["windows-native"] }
windows-sys = { version = "0.52", features = [
"Win32_Foundation",
"Win32_System_Com",
"Win32_UI_Shell",
] }
[target.'cfg(any(target_os = "freebsd", target_os = "openbsd"))'.dependencies]
keyring = { workspace = true, features = ["sync-secret-service"] }

View File

@@ -41,9 +41,9 @@ use crate::protocol::TokenUsageInfo;
use crate::protocol::TurnCompleteEvent;
use crate::protocol::TurnStartedEvent;
use crate::protocol::UserMessageEvent;
use crate::rollout::RolloutRecorder;
use crate::rollout::RolloutRecorderParams;
use crate::rollout::policy::EventPersistenceMode;
use crate::rollout::recorder::RolloutRecorder;
use crate::rollout::recorder::RolloutRecorderParams;
use crate::state::TaskKind;
use crate::tasks::SessionTask;
use crate::tasks::SessionTaskContext;

View File

@@ -1,27 +1,18 @@
mod layer_io;
#[cfg(target_os = "macos")]
mod macos;
#[cfg(test)]
mod tests;
use crate::config::ConfigToml;
use crate::config_loader::layer_io::LoadedConfigLayers;
use crate::git_info::resolve_root_git_project_for_trust;
use codex_app_server_protocol::ConfigLayerSource;
use codex_config::CONFIG_TOML_FILE;
use codex_config::ConfigRequirementsWithSources;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::protocol::AskForApproval;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_absolute_path::AbsolutePathBufGuard;
use dunce::canonicalize as normalize_path;
use serde::Deserialize;
use std::io;
use std::path::Path;
#[cfg(windows)]
use std::path::PathBuf;
use toml::Value as TomlValue;
pub use codex_config::AppRequirementToml;
@@ -38,6 +29,7 @@ pub use codex_config::ConfigRequirements;
pub use codex_config::ConfigRequirementsToml;
pub use codex_config::ConstrainedWithSource;
pub use codex_config::FeatureRequirementsToml;
use codex_config::LoadedConfigLayers;
pub use codex_config::LoaderOverrides;
pub use codex_config::McpServerIdentity;
pub use codex_config::McpServerRequirement;
@@ -55,18 +47,16 @@ pub(crate) use codex_config::config_error_from_toml;
pub use codex_config::format_config_error;
pub use codex_config::format_config_error_with_source;
pub(crate) use codex_config::io_error_from_config_error;
use codex_config::load_config_layers_internal;
use codex_config::load_managed_admin_requirements;
use codex_config::load_requirements_from_legacy_scheme;
pub(crate) use codex_config::load_requirements_toml;
pub use codex_config::merge_toml_values;
use codex_config::system_config_toml_file;
use codex_config::system_requirements_toml_file;
#[cfg(test)]
pub(crate) use codex_config::version_for_toml;
/// On Unix systems, load default settings from this file path, if present.
/// Note that /etc/codex/ is treated as a "config folder," so subfolders such
/// as skills/ and rules/ will also be honored.
pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml";
#[cfg(windows)]
const DEFAULT_PROGRAM_DATA_DIR_WINDOWS: &str = r"C:\ProgramData";
const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"];
pub(crate) async fn first_layer_config_error(layers: &ConfigLayerStack) -> Option<ConfigError> {
@@ -125,8 +115,7 @@ pub async fn load_config_layers_state(
.merge_unset_fields(RequirementSource::CloudRequirements, requirements);
}
#[cfg(target_os = "macos")]
macos::load_managed_admin_requirements_toml(
load_managed_admin_requirements(
&mut config_requirements_toml,
overrides
.macos_managed_config_requirements_base64
@@ -140,7 +129,7 @@ pub async fn load_config_layers_state(
// Make a best-effort to support the legacy `managed_config.toml` as a
// requirements specification.
let loaded_config_layers = layer_io::load_config_layers_internal(codex_home, overrides).await?;
let loaded_config_layers = load_config_layers_internal(codex_home, overrides).await?;
load_requirements_from_legacy_scheme(
&mut config_requirements_toml,
loaded_config_layers.clone(),
@@ -343,185 +332,6 @@ async fn load_config_toml_for_required_layer(
Ok(create_entry(toml_value))
}
/// If available, apply requirements from the platform system
/// `requirements.toml` location to `config_requirements_toml` by filling in
/// any unset fields.
async fn load_requirements_toml(
config_requirements_toml: &mut ConfigRequirementsWithSources,
requirements_toml_file: impl AsRef<Path>,
) -> io::Result<()> {
let requirements_toml_file =
AbsolutePathBuf::from_absolute_path(requirements_toml_file.as_ref())?;
match tokio::fs::read_to_string(&requirements_toml_file).await {
Ok(contents) => {
let requirements_config: ConfigRequirementsToml =
toml::from_str(&contents).map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Error parsing requirements file {}: {e}",
requirements_toml_file.as_ref().display(),
),
)
})?;
config_requirements_toml.merge_unset_fields(
RequirementSource::SystemRequirementsToml {
file: requirements_toml_file.clone(),
},
requirements_config,
);
}
Err(e) => {
if e.kind() != io::ErrorKind::NotFound {
return Err(io::Error::new(
e.kind(),
format!(
"Failed to read requirements file {}: {e}",
requirements_toml_file.as_ref().display(),
),
));
}
}
}
Ok(())
}
/// Fixed location of the admin-managed system `requirements.toml` on Unix.
#[cfg(unix)]
fn system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
    AbsolutePathBuf::from_absolute_path(Path::new("/etc/codex/requirements.toml"))
}
/// On Windows the system `requirements.toml` lives under the ProgramData
/// Codex directory; delegate to the Windows-specific resolver.
#[cfg(windows)]
fn system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
    windows_system_requirements_toml_file()
}
/// Fixed location of the admin-managed system `config.toml` on Unix,
/// given by `SYSTEM_CONFIG_TOML_FILE_UNIX`.
#[cfg(unix)]
fn system_config_toml_file() -> io::Result<AbsolutePathBuf> {
    AbsolutePathBuf::from_absolute_path(Path::new(SYSTEM_CONFIG_TOML_FILE_UNIX))
}
/// On Windows the system `config.toml` lives under the ProgramData Codex
/// directory; delegate to the Windows-specific resolver.
#[cfg(windows)]
fn system_config_toml_file() -> io::Result<AbsolutePathBuf> {
    windows_system_config_toml_file()
}
/// Root directory for system-wide Codex files on Windows:
/// `<ProgramData>\OpenAI\Codex`. Falls back to the default ProgramData path
/// (with a warning) when the known-folder lookup fails.
#[cfg(windows)]
fn windows_codex_system_dir() -> PathBuf {
    let program_data = match windows_program_data_dir_from_known_folder() {
        Ok(dir) => dir,
        Err(err) => {
            tracing::warn!(
                error = %err,
                "Failed to resolve ProgramData known folder; using default path"
            );
            PathBuf::from(DEFAULT_PROGRAM_DATA_DIR_WINDOWS)
        }
    };
    program_data.join("OpenAI").join("Codex")
}
/// `<ProgramData>\OpenAI\Codex\requirements.toml` as an absolute path.
#[cfg(windows)]
fn windows_system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
    let requirements_toml_file = windows_codex_system_dir().join("requirements.toml");
    AbsolutePathBuf::try_from(requirements_toml_file)
}
/// `<ProgramData>\OpenAI\Codex\config.toml` as an absolute path.
#[cfg(windows)]
fn windows_system_config_toml_file() -> io::Result<AbsolutePathBuf> {
    let config_toml_file = windows_codex_system_dir().join("config.toml");
    AbsolutePathBuf::try_from(config_toml_file)
}
/// Resolve the Windows `ProgramData` directory via the known-folder API.
///
/// # Errors
/// Returns an error when `SHGetKnownFolderPath` fails or hands back a null
/// pointer.
#[cfg(windows)]
fn windows_program_data_dir_from_known_folder() -> io::Result<PathBuf> {
    use std::ffi::OsString;
    use std::os::windows::ffi::OsStringExt;
    use windows_sys::Win32::System::Com::CoTaskMemFree;
    use windows_sys::Win32::UI::Shell::FOLDERID_ProgramData;
    use windows_sys::Win32::UI::Shell::KF_FLAG_DEFAULT;
    use windows_sys::Win32::UI::Shell::SHGetKnownFolderPath;
    let mut path_ptr = std::ptr::null_mut::<u16>();
    let known_folder_flags = u32::try_from(KF_FLAG_DEFAULT).map_err(|_| {
        io::Error::other(format!(
            "KF_FLAG_DEFAULT did not fit in u32: {KF_FLAG_DEFAULT}"
        ))
    })?;
    // Known folder IDs reference:
    // https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid
    // SAFETY: SHGetKnownFolderPath initializes path_ptr with a CoTaskMem-allocated,
    // null-terminated UTF-16 string on success.
    let hr = unsafe {
        SHGetKnownFolderPath(&FOLDERID_ProgramData, known_folder_flags, 0, &mut path_ptr)
    };
    if hr != 0 {
        // Per the SHGetKnownFolderPath contract the out pointer must be
        // released with CoTaskMemFree whether the call succeeds or not;
        // skipping this leaked the allocation on the failure path.
        // SAFETY: CoTaskMemFree accepts the (possibly null) pointer written
        // by SHGetKnownFolderPath; freeing null is a no-op.
        unsafe { CoTaskMemFree(path_ptr.cast()) };
        return Err(io::Error::other(format!(
            "SHGetKnownFolderPath(FOLDERID_ProgramData) failed with HRESULT {hr:#010x}"
        )));
    }
    if path_ptr.is_null() {
        return Err(io::Error::other(
            "SHGetKnownFolderPath(FOLDERID_ProgramData) returned a null pointer",
        ));
    }
    // SAFETY: path_ptr is a valid null-terminated UTF-16 string allocated by
    // SHGetKnownFolderPath and must be freed with CoTaskMemFree.
    let path = unsafe {
        // Manual wcslen: count UTF-16 units up to the NUL terminator.
        let mut len = 0usize;
        while *path_ptr.add(len) != 0 {
            len += 1;
        }
        let wide = std::slice::from_raw_parts(path_ptr, len);
        let path = PathBuf::from(OsString::from_wide(wide));
        CoTaskMemFree(path_ptr.cast());
        path
    };
    Ok(path)
}
/// Back-compat path: re-interpret legacy `managed_config.toml` layers as
/// requirements, filling only fields that no higher-precedence source has
/// already set.
async fn load_requirements_from_legacy_scheme(
    config_requirements_toml: &mut ConfigRequirementsWithSources,
    loaded_config_layers: LoadedConfigLayers,
) -> io::Result<()> {
    let LoadedConfigLayers {
        managed_config,
        managed_config_from_mdm,
    } = loaded_config_layers;
    // Merging never overwrites fields set by an earlier iteration, so the
    // MDM-provided config is queued first: it has the highest precedence.
    let mut legacy_sources = Vec::new();
    if let Some(config) = managed_config_from_mdm {
        legacy_sources.push((
            RequirementSource::LegacyManagedConfigTomlFromMdm,
            config.managed_config,
        ));
    }
    if let Some(config) = managed_config {
        legacy_sources.push((
            RequirementSource::LegacyManagedConfigTomlFromFile { file: config.file },
            config.managed_config,
        ));
    }
    for (source, config) in legacy_sources {
        let legacy_config: LegacyManagedConfigToml =
            config.try_into().map_err(|err: toml::de::Error| {
                io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Failed to parse config requirements as TOML: {err}"),
                )
            })?;
        let new_requirements_toml = ConfigRequirementsToml::from(legacy_config);
        config_requirements_toml.merge_unset_fields(source, new_requirements_toml);
    }
    Ok(())
}
/// Reads `project_root_markers` from the [toml::Value] produced by merging
/// `config.toml` from the config layers in the stack preceding
/// [ConfigLayerSource::Project].
@@ -895,51 +705,12 @@ async fn load_project_layers(
Ok(layers)
}
/// The legacy mechanism for specifying admin-enforced configuration is to read
/// from a file like `/etc/codex/managed_config.toml` that has the same
/// structure as `config.toml` where fields like `approval_policy` can specify
/// exactly one value rather than a list of allowed values.
///
/// If present, re-interpret `managed_config.toml` as a `requirements.toml`
/// where each specified field is treated as a constraint allowing only that
/// value.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
struct LegacyManagedConfigToml {
    // Single enforced approval policy, if the admin specified one.
    approval_policy: Option<AskForApproval>,
    // Single enforced sandbox mode, if the admin specified one.
    sandbox_mode: Option<SandboxMode>,
}
impl From<LegacyManagedConfigToml> for ConfigRequirementsToml {
    /// Re-interpret a single-valued legacy config as allow-lists.
    fn from(legacy: LegacyManagedConfigToml) -> Self {
        let LegacyManagedConfigToml {
            approval_policy,
            sandbox_mode,
        } = legacy;
        let mut requirements = ConfigRequirementsToml::default();
        requirements.allowed_approval_policies = approval_policy.map(|policy| vec![policy]);
        if let Some(sandbox_mode) = sandbox_mode {
            let required_mode: SandboxModeRequirement = sandbox_mode.into();
            // Allowing read-only is a requirement for Codex to function
            // correctly, so this backfill always includes it alongside the
            // admin-required mode (when that mode is distinct).
            let allowed_modes = if required_mode == SandboxModeRequirement::ReadOnly {
                vec![SandboxModeRequirement::ReadOnly]
            } else {
                vec![SandboxModeRequirement::ReadOnly, required_mode]
            };
            requirements.allowed_sandbox_modes = Some(allowed_modes);
        }
        requirements
    }
}
// Cannot name this `mod tests` because of tests.rs in this folder.
#[cfg(test)]
mod unit_tests {
use super::*;
#[cfg(windows)]
use std::path::Path;
use codex_config::ManagedConfigFromFile;
use codex_protocol::protocol::SandboxPolicy;
use tempfile::tempdir;
#[test]
@@ -979,65 +750,81 @@ foo = "xyzzy"
Ok(())
}
#[test]
fn legacy_managed_config_backfill_includes_read_only_sandbox_mode() {
let legacy = LegacyManagedConfigToml {
approval_policy: None,
sandbox_mode: Some(SandboxMode::WorkspaceWrite),
#[tokio::test]
async fn legacy_managed_config_backfill_includes_read_only_sandbox_mode() {
let tmp = tempdir().expect("tempdir");
let managed_path = AbsolutePathBuf::try_from(tmp.path().join("managed_config.toml"))
.expect("managed path");
let loaded_layers = LoadedConfigLayers {
managed_config: Some(ManagedConfigFromFile {
managed_config: toml::toml! {
sandbox_mode = "workspace-write"
}
.into(),
file: managed_path.clone(),
}),
managed_config_from_mdm: None,
};
let requirements = ConfigRequirementsToml::from(legacy);
let mut requirements_with_sources = ConfigRequirementsWithSources::default();
load_requirements_from_legacy_scheme(&mut requirements_with_sources, loaded_layers)
.await
.expect("load legacy requirements");
let requirements: ConfigRequirements = requirements_with_sources
.try_into()
.expect("requirements parse");
assert_eq!(
requirements.allowed_sandbox_modes,
Some(vec![
SandboxModeRequirement::ReadOnly,
SandboxModeRequirement::WorkspaceWrite
])
requirements.sandbox_policy.get(),
&SandboxPolicy::new_read_only_policy()
);
assert!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::new_workspace_write_policy())
.is_ok()
);
assert_eq!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::DangerFullAccess),
Err(codex_config::ConstraintError::InvalidValue {
field_name: "sandbox_mode",
candidate: "DangerFullAccess".into(),
allowed: "[ReadOnly, WorkspaceWrite]".into(),
requirement_source: RequirementSource::LegacyManagedConfigTomlFromFile {
file: managed_path,
},
})
);
}
#[cfg(windows)]
#[test]
fn windows_system_requirements_toml_file_uses_expected_suffix() {
let expected = windows_program_data_dir_from_known_folder()
.unwrap_or_else(|_| PathBuf::from(DEFAULT_PROGRAM_DATA_DIR_WINDOWS))
.join("OpenAI")
.join("Codex")
.join("requirements.toml");
assert_eq!(
windows_system_requirements_toml_file()
.expect("requirements.toml path")
.as_path(),
expected.as_path()
);
assert!(
windows_system_requirements_toml_file()
system_requirements_toml_file()
.expect("requirements.toml path")
.as_path()
.ends_with(Path::new("OpenAI").join("Codex").join("requirements.toml"))
.ends_with(
std::path::Path::new("OpenAI")
.join("Codex")
.join("requirements.toml")
)
);
}
#[cfg(windows)]
#[test]
fn windows_system_config_toml_file_uses_expected_suffix() {
let expected = windows_program_data_dir_from_known_folder()
.unwrap_or_else(|_| PathBuf::from(DEFAULT_PROGRAM_DATA_DIR_WINDOWS))
.join("OpenAI")
.join("Codex")
.join("config.toml");
assert_eq!(
windows_system_config_toml_file()
.expect("config.toml path")
.as_path(),
expected.as_path()
);
assert!(
windows_system_config_toml_file()
system_config_toml_file()
.expect("config.toml path")
.as_path()
.ends_with(Path::new("OpenAI").join("Codex").join("config.toml"))
.ends_with(
std::path::Path::new("OpenAI")
.join("Codex")
.join("config.toml")
)
);
}
}

View File

@@ -33,7 +33,7 @@ use crate::config::types::McpServerConfig;
use crate::features::Feature;
use crate::model_provider_info::ModelProviderInfo;
use crate::protocol::SandboxPolicy;
use crate::rollout::recorder::RolloutRecorder;
use crate::rollout::RolloutRecorder;
use super::GUARDIAN_REVIEW_TIMEOUT;
use super::GUARDIAN_REVIEWER_NAME;

View File

@@ -0,0 +1,53 @@
use crate::config::Config;
pub use codex_rollout::ARCHIVED_SESSIONS_SUBDIR;
pub use codex_rollout::INTERACTIVE_SESSION_SOURCES;
pub use codex_rollout::RolloutRecorder;
pub use codex_rollout::RolloutRecorderParams;
pub use codex_rollout::SESSIONS_SUBDIR;
pub use codex_rollout::SessionMeta;
pub use codex_rollout::append_thread_name;
pub use codex_rollout::find_archived_thread_path_by_id_str;
#[allow(deprecated)]
pub use codex_rollout::find_conversation_path_by_id_str;
pub use codex_rollout::find_thread_name_by_id;
pub use codex_rollout::find_thread_path_by_id_str;
pub use codex_rollout::find_thread_path_by_name_str;
pub use codex_rollout::list;
pub use codex_rollout::metadata;
pub use codex_rollout::policy;
pub use codex_rollout::rollout_date_parts;
pub use codex_rollout::session_index;
mod error;
pub(crate) mod truncation;
pub(crate) use error::map_session_init_error;
// Bridge core's `Config` into `codex-rollout`'s config abstraction so rollout
// code can read the paths/provider id without depending on core's `Config`.
impl codex_rollout::StateDbConfig for Config {
    fn codex_home(&self) -> &std::path::Path {
        self.codex_home.as_path()
    }
    fn sqlite_home(&self) -> &std::path::Path {
        self.sqlite_home.as_path()
    }
    fn model_provider_id(&self) -> &str {
        self.model_provider_id.as_str()
    }
}
// Rollout-specific accessors for core's `Config`; the originator comes from
// core's default client rather than a stored field.
impl codex_rollout::RolloutConfig for Config {
    fn cwd(&self) -> &std::path::Path {
        self.cwd.as_path()
    }
    fn generate_memories(&self) -> bool {
        self.memories.generate_memories
    }
    fn originator(&self) -> String {
        crate::default_client::originator().value
    }
}

View File

@@ -1,547 +1 @@
use crate::config::Config;
use crate::path_utils::normalize_for_path_comparison;
use crate::rollout::list::Cursor;
use crate::rollout::list::ThreadSortKey;
use crate::rollout::metadata;
use chrono::DateTime;
use chrono::NaiveDateTime;
use chrono::Timelike;
use chrono::Utc;
use codex_protocol::ThreadId;
use codex_protocol::dynamic_tools::DynamicToolSpec;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionSource;
pub use codex_state::LogEntry;
use codex_state::ThreadMetadataBuilder;
use serde_json::Value;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use tracing::warn;
use uuid::Uuid;
/// Core-facing handle to the SQLite-backed state runtime.
///
/// `Arc`-wrapped, so clones are cheap and all holders share one runtime.
pub type StateDbHandle = Arc<codex_state::StateRuntime>;
/// Initialize the state runtime for thread state persistence and backfill checks. To only be used
/// inside `core`. The initialization should not be done anywhere else.
///
/// Returns `None` (after logging a warning) when the runtime cannot be
/// initialized or its backfill state cannot be read.
pub(crate) async fn init(config: &Config) -> Option<StateDbHandle> {
    let runtime = match codex_state::StateRuntime::init(
        config.sqlite_home.clone(),
        config.model_provider_id.clone(),
    )
    .await
    {
        Ok(runtime) => runtime,
        Err(err) => {
            warn!(
                "failed to initialize state runtime at {}: {err}",
                config.sqlite_home.display()
            );
            return None;
        }
    };
    let backfill_state = match runtime.get_backfill_state().await {
        Ok(state) => state,
        Err(err) => {
            warn!(
                "failed to read backfill state at {}: {err}",
                config.codex_home.display()
            );
            return None;
        }
    };
    if backfill_state.status != codex_state::BackfillStatus::Complete {
        // Backfill incomplete: kick off the session backfill in the
        // background while returning the runtime handle immediately.
        let runtime_for_backfill = runtime.clone();
        let config = config.clone();
        tokio::spawn(async move {
            metadata::backfill_sessions(runtime_for_backfill.as_ref(), &config).await;
        });
    }
    Some(runtime)
}
/// Get the DB if the feature is enabled and the DB exists.
///
/// Returns `None` when the SQLite file is absent, initialization fails, or
/// the backfill has not completed.
pub async fn get_state_db(config: &Config) -> Option<StateDbHandle> {
    let db_path = codex_state::state_db_path(config.sqlite_home.as_path());
    let db_exists = tokio::fs::try_exists(&db_path).await.unwrap_or(false);
    if !db_exists {
        return None;
    }
    let runtime = codex_state::StateRuntime::init(
        config.sqlite_home.clone(),
        config.model_provider_id.clone(),
    )
    .await
    .ok()?;
    require_backfill_complete(runtime, config.sqlite_home.as_path()).await
}
/// Open the state runtime when the SQLite file exists, without feature gating.
///
/// This is used for parity checks during the SQLite migration phase.
pub async fn open_if_present(codex_home: &Path, default_provider: &str) -> Option<StateDbHandle> {
    let db_path = codex_state::state_db_path(codex_home);
    match tokio::fs::try_exists(&db_path).await {
        Ok(true) => {}
        // Missing file or probe error both mean "no DB to open".
        _ => return None,
    }
    let runtime =
        codex_state::StateRuntime::init(codex_home.to_path_buf(), default_provider.to_string())
            .await
            .ok()?;
    require_backfill_complete(runtime, codex_home).await
}
/// Return the runtime only when its backfill has completed; otherwise log why
/// it is unusable and yield `None`.
async fn require_backfill_complete(
    runtime: StateDbHandle,
    codex_home: &Path,
) -> Option<StateDbHandle> {
    let state = match runtime.get_backfill_state().await {
        Ok(state) => state,
        Err(err) => {
            warn!(
                "failed to read backfill state at {}: {err}",
                codex_home.display()
            );
            return None;
        }
    };
    if state.status == codex_state::BackfillStatus::Complete {
        Some(runtime)
    } else {
        warn!(
            "state db backfill not complete at {} (status: {})",
            codex_home.display(),
            state.status.as_str()
        );
        None
    }
}
/// Convert a pagination cursor serialized as `<timestamp>|<uuid>` into a
/// state-DB anchor, returning `None` for any malformed input.
fn cursor_to_anchor(cursor: Option<&Cursor>) -> Option<codex_state::Anchor> {
    let value = serde_json::to_value(cursor?).ok()?;
    let (ts_str, id_str) = value.as_str()?.split_once('|')?;
    // A second separator means the cursor is not in the two-part form.
    if id_str.contains('|') {
        return None;
    }
    let id = Uuid::parse_str(id_str).ok()?;
    // Accept both the filename-style timestamp and RFC 3339; either way the
    // sub-second part is zeroed for stable comparisons.
    let ts = match NaiveDateTime::parse_from_str(ts_str, "%Y-%m-%dT%H-%M-%S") {
        Ok(naive) => DateTime::<Utc>::from_naive_utc_and_offset(naive, Utc),
        Err(_) => DateTime::parse_from_rfc3339(ts_str).ok()?.with_timezone(&Utc),
    }
    .with_nanosecond(0)?;
    Some(codex_state::Anchor { ts, id })
}
/// Normalize a cwd before storing/comparing it in the state DB, falling back
/// to the raw path when normalization fails (e.g. the directory is gone).
pub(crate) fn normalize_cwd_for_state_db(cwd: &Path) -> PathBuf {
    normalize_for_path_comparison(cwd).unwrap_or_else(|_| cwd.to_path_buf())
}
/// List thread ids from SQLite for parity checks without rollout scanning.
///
/// Returns `None` when no runtime is available or the query fails; `stage`
/// tags the warning with the caller's phase for diagnostics.
#[allow(clippy::too_many_arguments)]
pub async fn list_thread_ids_db(
    context: Option<&codex_state::StateRuntime>,
    codex_home: &Path,
    page_size: usize,
    cursor: Option<&Cursor>,
    sort_key: ThreadSortKey,
    allowed_sources: &[SessionSource],
    model_providers: Option<&[String]>,
    archived_only: bool,
    stage: &str,
) -> Option<Vec<ThreadId>> {
    let ctx = context?;
    // Sanity check only: a mismatch is logged but the query still proceeds.
    if ctx.codex_home() != codex_home {
        warn!(
            "state db codex_home mismatch: expected {}, got {}",
            ctx.codex_home().display(),
            codex_home.display()
        );
    }
    let anchor = cursor_to_anchor(cursor);
    // Serialize each source to its JSON string form for the DB-side filter;
    // unserializable values degrade to an empty string.
    let allowed_sources: Vec<String> = allowed_sources
        .iter()
        .map(|value| match serde_json::to_value(value) {
            Ok(Value::String(s)) => s,
            Ok(other) => other.to_string(),
            Err(_) => String::new(),
        })
        .collect();
    let model_providers = model_providers.map(<[String]>::to_vec);
    match ctx
        .list_thread_ids(
            page_size,
            anchor.as_ref(),
            match sort_key {
                ThreadSortKey::CreatedAt => codex_state::SortKey::CreatedAt,
                ThreadSortKey::UpdatedAt => codex_state::SortKey::UpdatedAt,
            },
            allowed_sources.as_slice(),
            model_providers.as_deref(),
            archived_only,
        )
        .await
    {
        Ok(ids) => Some(ids),
        Err(err) => {
            warn!("state db list_thread_ids failed during {stage}: {err}");
            None
        }
    }
}
/// List thread metadata from SQLite without rollout directory traversal.
///
/// Items whose rollout file no longer exists on disk are dropped from the
/// returned page and their DB rows deleted best-effort.
#[allow(clippy::too_many_arguments)]
pub async fn list_threads_db(
    context: Option<&codex_state::StateRuntime>,
    codex_home: &Path,
    page_size: usize,
    cursor: Option<&Cursor>,
    sort_key: ThreadSortKey,
    allowed_sources: &[SessionSource],
    model_providers: Option<&[String]>,
    archived: bool,
    search_term: Option<&str>,
) -> Option<codex_state::ThreadsPage> {
    let ctx = context?;
    // Sanity check only: a mismatch is logged but the query still proceeds.
    if ctx.codex_home() != codex_home {
        warn!(
            "state db codex_home mismatch: expected {}, got {}",
            ctx.codex_home().display(),
            codex_home.display()
        );
    }
    let anchor = cursor_to_anchor(cursor);
    // Serialize each source to its JSON string form for the DB-side filter.
    let allowed_sources: Vec<String> = allowed_sources
        .iter()
        .map(|value| match serde_json::to_value(value) {
            Ok(Value::String(s)) => s,
            Ok(other) => other.to_string(),
            Err(_) => String::new(),
        })
        .collect();
    let model_providers = model_providers.map(<[String]>::to_vec);
    match ctx
        .list_threads(
            page_size,
            anchor.as_ref(),
            match sort_key {
                ThreadSortKey::CreatedAt => codex_state::SortKey::CreatedAt,
                ThreadSortKey::UpdatedAt => codex_state::SortKey::UpdatedAt,
            },
            allowed_sources.as_slice(),
            model_providers.as_deref(),
            archived,
            search_term,
        )
        .await
    {
        Ok(mut page) => {
            // Prune rows whose rollout file has vanished; deleting the stale
            // row lets the DB heal instead of repeating the discrepancy.
            let mut valid_items = Vec::with_capacity(page.items.len());
            for item in page.items {
                if tokio::fs::try_exists(&item.rollout_path)
                    .await
                    .unwrap_or(false)
                {
                    valid_items.push(item);
                } else {
                    warn!(
                        "state db list_threads returned stale rollout path for thread {}: {}",
                        item.id,
                        item.rollout_path.display()
                    );
                    warn!("state db discrepancy during list_threads_db: stale_db_path_dropped");
                    let _ = ctx.delete_thread(item.id).await;
                }
            }
            page.items = valid_items;
            Some(page)
        }
        Err(err) => {
            warn!("state db list_threads failed: {err}");
            None
        }
    }
}
/// Look up the rollout path for a thread id using SQLite.
///
/// Returns `None` when no runtime is available, the thread is unknown, or
/// the lookup fails (logged with `stage`).
pub async fn find_rollout_path_by_id(
    context: Option<&codex_state::StateRuntime>,
    thread_id: ThreadId,
    archived_only: Option<bool>,
    stage: &str,
) -> Option<PathBuf> {
    let ctx = context?;
    match ctx.find_rollout_path_by_id(thread_id, archived_only).await {
        Ok(path) => path,
        Err(err) => {
            warn!("state db find_rollout_path_by_id failed during {stage}: {err}");
            None
        }
    }
}
/// Get dynamic tools for a thread id using SQLite.
///
/// Query failures are logged (tagged with `stage`) and mapped to `None`.
pub async fn get_dynamic_tools(
    context: Option<&codex_state::StateRuntime>,
    thread_id: ThreadId,
    stage: &str,
) -> Option<Vec<DynamicToolSpec>> {
    let ctx = context?;
    ctx.get_dynamic_tools(thread_id).await.unwrap_or_else(|err| {
        warn!("state db get_dynamic_tools failed during {stage}: {err}");
        None
    })
}
/// Persist dynamic tools for a thread id using SQLite, if none exist yet.
pub async fn persist_dynamic_tools(
context: Option<&codex_state::StateRuntime>,
thread_id: ThreadId,
tools: Option<&[DynamicToolSpec]>,
stage: &str,
) {
let Some(ctx) = context else {
return;
};
if let Err(err) = ctx.persist_dynamic_tools(thread_id, tools).await {
warn!("state db persist_dynamic_tools failed during {stage}: {err}");
}
}
pub async fn mark_thread_memory_mode_polluted(
context: Option<&codex_state::StateRuntime>,
thread_id: ThreadId,
stage: &str,
) {
let Some(ctx) = context else {
return;
};
if let Err(err) = ctx.mark_thread_memory_mode_polluted(thread_id).await {
warn!("state db mark_thread_memory_mode_polluted failed during {stage}: {err}");
}
}
/// Reconcile rollout items into SQLite, falling back to scanning the rollout file.
///
/// When `builder` or `items` are supplied they are applied incrementally;
/// otherwise metadata is re-extracted from the rollout file on disk. All
/// failures are logged and swallowed (best-effort persistence).
pub async fn reconcile_rollout(
    context: Option<&codex_state::StateRuntime>,
    rollout_path: &Path,
    default_provider: &str,
    builder: Option<&ThreadMetadataBuilder>,
    items: &[RolloutItem],
    archived_only: Option<bool>,
    new_thread_memory_mode: Option<&str>,
) {
    let Some(ctx) = context else {
        return;
    };
    // Fast path: the caller already holds the data to apply incrementally.
    if builder.is_some() || !items.is_empty() {
        apply_rollout_items(
            Some(ctx),
            rollout_path,
            default_provider,
            builder,
            items,
            "reconcile_rollout",
            new_thread_memory_mode,
            /*updated_at_override*/ None,
        )
        .await;
        return;
    }
    // Slow path: rebuild metadata by scanning the rollout file itself.
    let outcome =
        match metadata::extract_metadata_from_rollout(rollout_path, default_provider).await {
            Ok(outcome) => outcome,
            Err(err) => {
                warn!(
                    "state db reconcile_rollout extraction failed {}: {err}",
                    rollout_path.display()
                );
                return;
            }
        };
    let mut metadata = outcome.metadata;
    // Memory mode defaults to "enabled" when the rollout does not record one.
    let memory_mode = outcome.memory_mode.unwrap_or_else(|| "enabled".to_string());
    metadata.cwd = normalize_cwd_for_state_db(&metadata.cwd);
    // Keep previously recorded git info when the DB already has this thread.
    if let Ok(Some(existing_metadata)) = ctx.get_thread(metadata.id).await {
        metadata.prefer_existing_git_info(&existing_metadata);
    }
    // Force the archived flag to match the caller's expectation, when given.
    match archived_only {
        Some(true) if metadata.archived_at.is_none() => {
            metadata.archived_at = Some(metadata.updated_at);
        }
        Some(false) => {
            metadata.archived_at = None;
        }
        Some(true) | None => {}
    }
    if let Err(err) = ctx.upsert_thread(&metadata).await {
        warn!(
            "state db reconcile_rollout upsert failed {}: {err}",
            rollout_path.display()
        );
        return;
    }
    if let Err(err) = ctx
        .set_thread_memory_mode(metadata.id, memory_mode.as_str())
        .await
    {
        warn!(
            "state db reconcile_rollout memory_mode update failed {}: {err}",
            rollout_path.display()
        );
        return;
    }
    // Dynamic tools live on the session meta line; persist them if readable.
    if let Ok(meta_line) = crate::rollout::list::read_session_meta_line(rollout_path).await {
        persist_dynamic_tools(
            Some(ctx),
            meta_line.meta.id,
            meta_line.meta.dynamic_tools.as_deref(),
            "reconcile_rollout",
        )
        .await;
    } else {
        warn!(
            "state db reconcile_rollout missing session meta {}",
            rollout_path.display()
        );
    }
}
/// Repair a thread's rollout path after filesystem fallback succeeds.
///
/// Best-effort: failures are logged and swallowed, and no write is issued
/// when the repair computes no effective change.
pub async fn read_repair_rollout_path(
    context: Option<&codex_state::StateRuntime>,
    thread_id: Option<ThreadId>,
    archived_only: Option<bool>,
    rollout_path: &Path,
) {
    let Some(ctx) = context else {
        return;
    };
    // Fast path: update an existing metadata row in place, but avoid writes when
    // read-repair computes no effective change.
    let mut saw_existing_metadata = false;
    if let Some(thread_id) = thread_id
        && let Ok(Some(metadata)) = ctx.get_thread(thread_id).await
    {
        saw_existing_metadata = true;
        let mut repaired = metadata.clone();
        repaired.rollout_path = rollout_path.to_path_buf();
        repaired.cwd = normalize_cwd_for_state_db(&repaired.cwd);
        // Align the archived flag with what the caller observed on disk.
        match archived_only {
            Some(true) if repaired.archived_at.is_none() => {
                repaired.archived_at = Some(repaired.updated_at);
            }
            Some(false) => {
                repaired.archived_at = None;
            }
            Some(true) | None => {}
        }
        if repaired == metadata {
            // Row already matches: nothing to repair.
            return;
        }
        warn!("state db discrepancy during read_repair_rollout_path: upsert_needed (fast path)");
        if let Err(err) = ctx.upsert_thread(&repaired).await {
            warn!(
                "state db read-repair upsert failed for {}: {err}",
                rollout_path.display()
            );
        } else {
            return;
        }
    }
    // Slow path: when the row is missing/unreadable (or direct upsert failed),
    // rebuild metadata from rollout contents and reconcile it into SQLite.
    if !saw_existing_metadata {
        warn!("state db discrepancy during read_repair_rollout_path: upsert_needed (slow path)");
    }
    // Recover the provider id from the rollout's session meta line, if any.
    let default_provider = crate::rollout::list::read_session_meta_line(rollout_path)
        .await
        .ok()
        .and_then(|meta| meta.meta.model_provider)
        .unwrap_or_default();
    reconcile_rollout(
        Some(ctx),
        rollout_path,
        default_provider.as_str(),
        /*builder*/ None,
        &[],
        archived_only,
        /*new_thread_memory_mode*/ None,
    )
    .await;
}
/// Apply rollout items incrementally to SQLite.
///
/// When `builder` is absent it is reconstructed from `items`; if that also
/// fails, the discrepancy is logged and nothing is written (best-effort).
#[allow(clippy::too_many_arguments)]
pub async fn apply_rollout_items(
    context: Option<&codex_state::StateRuntime>,
    rollout_path: &Path,
    _default_provider: &str,
    builder: Option<&ThreadMetadataBuilder>,
    items: &[RolloutItem],
    stage: &str,
    new_thread_memory_mode: Option<&str>,
    updated_at_override: Option<DateTime<Utc>>,
) {
    let Some(ctx) = context else {
        return;
    };
    let mut builder = match builder {
        Some(builder) => builder.clone(),
        None => match metadata::builder_from_items(items, rollout_path) {
            Some(builder) => builder,
            None => {
                warn!(
                    "state db apply_rollout_items missing builder during {stage}: {}",
                    rollout_path.display()
                );
                warn!("state db discrepancy during apply_rollout_items: {stage}, missing_builder");
                return;
            }
        },
    };
    // Pin the builder to this rollout and normalize its cwd before writing.
    builder.rollout_path = rollout_path.to_path_buf();
    builder.cwd = normalize_cwd_for_state_db(&builder.cwd);
    if let Err(err) = ctx
        .apply_rollout_items(&builder, items, new_thread_memory_mode, updated_at_override)
        .await
    {
        warn!(
            "state db apply_rollout_items failed during {stage} for {}: {err}",
            rollout_path.display()
        );
    }
}
/// Update a thread's `updated_at` timestamp in SQLite.
///
/// Returns `false` when no runtime/thread id is available or the write
/// fails (logged with `stage`).
pub async fn touch_thread_updated_at(
    context: Option<&codex_state::StateRuntime>,
    thread_id: Option<ThreadId>,
    updated_at: DateTime<Utc>,
    stage: &str,
) -> bool {
    let (Some(ctx), Some(thread_id)) = (context, thread_id) else {
        return false;
    };
    match ctx.touch_thread_updated_at(thread_id, updated_at).await {
        Ok(touched) => touched,
        Err(err) => {
            warn!("state db touch_thread_updated_at failed during {stage} for {thread_id}: {err}");
            false
        }
    }
}
#[cfg(test)]
#[path = "state_db_tests.rs"]
mod tests;
pub use codex_rollout::state_db::*;

View File

@@ -26,6 +26,7 @@ codex-core = { workspace = true }
codex-feedback = { workspace = true }
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
codex-rollout = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-cli = { workspace = true }
codex-utils-elapsed = { workspace = true }

View File

@@ -100,8 +100,9 @@ use crate::event_processor::CodexStatus;
use crate::event_processor::EventProcessor;
use codex_core::default_client::set_default_client_residency_requirement;
use codex_core::default_client::set_default_originator;
use codex_core::find_thread_path_by_id_str;
use codex_core::find_thread_path_by_name_str;
use codex_rollout::RolloutRecorder;
use codex_rollout::find_thread_path_by_id_str;
use codex_rollout::find_thread_path_by_name_str;
const DEFAULT_ANALYTICS_ENABLED: bool = true;
@@ -1406,11 +1407,11 @@ async fn resolve_resume_path(
} else {
Some(config.cwd.as_path())
};
match codex_core::RolloutRecorder::find_latest_thread_path(
match RolloutRecorder::find_latest_thread_path(
config,
/*page_size*/ 1,
/*cursor*/ None,
codex_core::ThreadSortKey::UpdatedAt,
codex_rollout::ThreadSortKey::UpdatedAt,
&[],
Some(default_provider_filter.as_slice()),
&config.model_provider_id,

View File

@@ -0,0 +1,6 @@
# Bazel target for the extracted `codex-rollout` crate.
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
    name = "rollout",
    crate_name = "codex_rollout",
)

View File

@@ -0,0 +1,44 @@
[package]
name = "codex-rollout"
version.workspace = true
edition.workspace = true
license.workspace = true
[lib]
name = "codex_rollout"
path = "src/lib.rs"
[lints]
workspace = true
[dependencies]
anyhow = { workspace = true }
async-trait = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
codex-file-search = { workspace = true }
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
codex-state = { workspace = true }
codex-utils-absolute-path = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
time = { workspace = true, features = [
"formatting",
"local-offset",
"macros",
] }
tokio = { workspace = true, features = [
"fs",
"io-util",
"macros",
"process",
"rt-multi-thread",
"sync",
"time",
] }
tracing = { workspace = true, features = ["log"] }
uuid = { workspace = true, features = ["serde", "v4", "v5"] }
[dev-dependencies]
pretty_assertions = { workspace = true }
tempfile = { workspace = true }

View File

@@ -0,0 +1,65 @@
use codex_protocol::protocol::GitInfo;
use std::path::Path;
use tokio::process::Command;
use tokio::time::Duration as TokioDuration;
use tokio::time::timeout;
// Upper bound applied to every git subprocess invocation below.
const GIT_COMMAND_TIMEOUT: TokioDuration = TokioDuration::from_secs(5);
/// Best-effort collection of git metadata (commit hash, branch, origin URL)
/// for `cwd`. Returns `None` when `cwd` is not inside a git repository or the
/// initial probe fails/times out; individual lookup failures leave the
/// corresponding field as `None`.
pub(crate) async fn collect_git_info(cwd: &Path) -> Option<GitInfo> {
    // Probe for a repository first and bail out early when there is none.
    let probe = run_git_command_with_timeout(&["rev-parse", "--git-dir"], cwd).await?;
    if !probe.status.success() {
        return None;
    }
    // The remaining lookups are independent, so issue them concurrently.
    let (commit_result, branch_result, url_result) = tokio::join!(
        run_git_command_with_timeout(&["rev-parse", "HEAD"], cwd),
        run_git_command_with_timeout(&["rev-parse", "--abbrev-ref", "HEAD"], cwd),
        run_git_command_with_timeout(&["remote", "get-url", "origin"], cwd)
    );
    // Trimmed stdout of a successful command, or None.
    fn stdout_text(output: Option<std::process::Output>) -> Option<String> {
        let output = output?;
        if !output.status.success() {
            return None;
        }
        String::from_utf8(output.stdout)
            .ok()
            .map(|text| text.trim().to_string())
    }
    Some(GitInfo {
        commit_hash: stdout_text(commit_result),
        // "HEAD" means a detached HEAD, i.e. no meaningful branch name.
        branch: stdout_text(branch_result).filter(|branch| branch.as_str() != "HEAD"),
        repository_url: stdout_text(url_result),
    })
}
/// Run `git <args>` in `cwd`, bounded by [`GIT_COMMAND_TIMEOUT`].
/// Returns `None` on timeout or on a spawn/IO failure.
async fn run_git_command_with_timeout(args: &[&str], cwd: &Path) -> Option<std::process::Output> {
    let result = timeout(
        GIT_COMMAND_TIMEOUT,
        Command::new("git").args(args).current_dir(cwd).output(),
    )
    .await;
    match result {
        Ok(Ok(output)) => Some(output),
        // Timed out, or the process could not be spawned/read.
        _ => None,
    }
}

154
codex-rs/rollout/src/lib.rs Normal file
View File

@@ -0,0 +1,154 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]
use std::path::Path;
use std::path::PathBuf;
mod git_info;
mod path_utils;
pub mod rollout;
pub mod state_db;
mod truncate;
pub(crate) use codex_protocol::protocol;
/// Minimal view of configuration needed to open the SQLite state DB.
/// Implemented by core's `Config` and by `StateDbConfigSnapshot`.
pub trait StateDbConfig {
    /// Root Codex home directory.
    fn codex_home(&self) -> &Path;
    /// Directory that contains the SQLite database file.
    fn sqlite_home(&self) -> &Path;
    /// Identifier of the configured model provider.
    fn model_provider_id(&self) -> &str;
}
/// Configuration surface required for rollout recording, extending
/// `StateDbConfig` with session-level settings.
pub trait RolloutConfig: StateDbConfig {
    /// Working directory of the session.
    fn cwd(&self) -> &Path;
    /// Whether memory generation is enabled.
    fn generate_memories(&self) -> bool;
    /// Originator string identifying the client; returned owned because
    /// implementations may compute it on demand.
    fn originator(&self) -> String;
}
/// Owned snapshot of the [`StateDbConfig`] values, usable without holding a
/// borrow of the original config.
#[derive(Clone, Debug)]
pub struct StateDbConfigSnapshot {
    codex_home: PathBuf,
    sqlite_home: PathBuf,
    model_provider_id: String,
}
impl StateDbConfigSnapshot {
    /// Copy the relevant fields out of any [`StateDbConfig`] implementation.
    pub fn new(config: &(impl StateDbConfig + ?Sized)) -> Self {
        Self {
            codex_home: config.codex_home().to_path_buf(),
            sqlite_home: config.sqlite_home().to_path_buf(),
            model_provider_id: config.model_provider_id().to_string(),
        }
    }
    /// Build a snapshot directly from already-owned values.
    pub fn from_parts(
        codex_home: PathBuf,
        sqlite_home: PathBuf,
        model_provider_id: String,
    ) -> Self {
        Self {
            codex_home,
            sqlite_home,
            model_provider_id,
        }
    }
}
// A snapshot trivially satisfies the trait by handing out its own fields.
impl StateDbConfig for StateDbConfigSnapshot {
    fn codex_home(&self) -> &Path {
        self.codex_home.as_path()
    }
    fn sqlite_home(&self) -> &Path {
        self.sqlite_home.as_path()
    }
    fn model_provider_id(&self) -> &str {
        self.model_provider_id.as_str()
    }
}
/// Owned snapshot of the [`RolloutConfig`] values; embeds a
/// [`StateDbConfigSnapshot`] for the base-trait fields.
#[derive(Clone, Debug)]
pub struct RolloutConfigSnapshot {
    state_db: StateDbConfigSnapshot,
    cwd: PathBuf,
    generate_memories: bool,
    originator: String,
}
impl RolloutConfigSnapshot {
    /// Copy the relevant fields out of any [`RolloutConfig`] implementation.
    pub fn new(config: &(impl RolloutConfig + ?Sized)) -> Self {
        Self {
            state_db: StateDbConfigSnapshot::new(config),
            cwd: config.cwd().to_path_buf(),
            generate_memories: config.generate_memories(),
            originator: config.originator(),
        }
    }
    /// Build a snapshot directly from already-owned values.
    pub fn from_parts(
        codex_home: PathBuf,
        sqlite_home: PathBuf,
        cwd: PathBuf,
        model_provider_id: String,
        generate_memories: bool,
        originator: String,
    ) -> Self {
        Self {
            state_db: StateDbConfigSnapshot::from_parts(codex_home, sqlite_home, model_provider_id),
            cwd,
            generate_memories,
            originator,
        }
    }
}
// Base-trait accessors delegate to the embedded state-db snapshot.
impl StateDbConfig for RolloutConfigSnapshot {
    fn codex_home(&self) -> &Path {
        self.state_db.codex_home()
    }
    fn sqlite_home(&self) -> &Path {
        self.state_db.sqlite_home()
    }
    fn model_provider_id(&self) -> &str {
        self.state_db.model_provider_id()
    }
}
impl RolloutConfig for RolloutConfigSnapshot {
    fn cwd(&self) -> &Path {
        self.cwd.as_path()
    }
    fn generate_memories(&self) -> bool {
        self.generate_memories
    }
    // Clones because the trait hands back an owned `String`.
    fn originator(&self) -> String {
        self.originator.clone()
    }
}
pub use rollout::ARCHIVED_SESSIONS_SUBDIR;
pub use rollout::INTERACTIVE_SESSION_SOURCES;
pub use rollout::RolloutRecorder;
pub use rollout::RolloutRecorderParams;
pub use rollout::SESSIONS_SUBDIR;
pub use rollout::SessionMeta;
pub use rollout::append_thread_name;
pub use rollout::find_archived_thread_path_by_id_str;
#[allow(deprecated)]
pub use rollout::find_conversation_path_by_id_str;
pub use rollout::find_thread_name_by_id;
pub use rollout::find_thread_path_by_id_str;
pub use rollout::find_thread_path_by_name_str;
pub use rollout::list::Cursor;
pub use rollout::list::ThreadItem;
pub use rollout::list::ThreadSortKey;
pub use rollout::list::ThreadsPage;
pub use rollout::list::parse_cursor;
pub use rollout::list::read_head_for_summary;
pub use rollout::list::read_session_meta_line;
pub use rollout::policy::EventPersistenceMode;
pub use rollout::rollout_date_parts;
pub use rollout::session_index::find_thread_names_by_ids;

View File

@@ -0,0 +1,97 @@
use codex_utils_absolute_path::AbsolutePathBuf;
use std::path::Path;
use std::path::PathBuf;
/// Canonicalize `path` and apply WSL case normalization so two spellings of
/// the same on-disk location compare equal. Fails with the canonicalization
/// error when the path cannot be resolved.
pub(crate) fn normalize_for_path_comparison(path: impl AsRef<Path>) -> std::io::Result<PathBuf> {
    Ok(normalize_for_wsl(path.as_ref().canonicalize()?))
}
/// Lower-case `/mnt/<drive>` paths when running under WSL (those mounts are
/// case-insensitive); every other path is returned unchanged.
fn normalize_for_wsl(path: PathBuf) -> PathBuf {
    if is_wsl() && is_wsl_case_insensitive_path(&path) {
        lower_ascii_path(path)
    } else {
        path
    }
}
/// Detect Windows Subsystem for Linux: first via the `WSL_DISTRO_NAME`
/// environment variable, then by looking for a "microsoft" marker in
/// `/proc/version`. Always `false` on non-Linux targets.
fn is_wsl() -> bool {
    #[cfg(target_os = "linux")]
    {
        std::env::var_os("WSL_DISTRO_NAME").is_some()
            || std::fs::read_to_string("/proc/version")
                .map(|version| version.to_lowercase().contains("microsoft"))
                .unwrap_or(false)
    }
    #[cfg(not(target_os = "linux"))]
    {
        false
    }
}
/// True for absolute paths of the shape `/mnt/<single ASCII letter>/...`,
/// which WSL mounts case-insensitively. Always `false` on non-Linux targets.
fn is_wsl_case_insensitive_path(path: &Path) -> bool {
    #[cfg(target_os = "linux")]
    {
        use std::os::unix::ffi::OsStrExt;
        use std::path::Component;
        let mut parts = path.components();
        // Must be absolute: the first component has to be the root.
        if !matches!(parts.next(), Some(Component::RootDir)) {
            return false;
        }
        // Second component must be "mnt" (compared case-insensitively).
        match parts.next() {
            Some(Component::Normal(mnt)) if ascii_eq_ignore_case(mnt.as_bytes(), b"mnt") => {}
            _ => return false,
        }
        // Third component must be a single ASCII drive letter.
        match parts.next() {
            Some(Component::Normal(drive)) => {
                let bytes = drive.as_bytes();
                bytes.len() == 1 && bytes[0].is_ascii_alphabetic()
            }
            _ => false,
        }
    }
    #[cfg(not(target_os = "linux"))]
    {
        let _ = path;
        false
    }
}
#[cfg(target_os = "linux")]
/// Case-insensitive ASCII equality of two byte slices.
///
/// The hand-rolled version lower-cased only the left-hand side, so it was only
/// correct when `right` was already lowercase; delegate to the standard
/// library's symmetric `eq_ignore_ascii_case` instead.
fn ascii_eq_ignore_case(left: &[u8], right: &[u8]) -> bool {
    left.eq_ignore_ascii_case(right)
}
#[cfg(target_os = "linux")]
/// Lower-case every ASCII byte of `path`'s OS-string representation.
///
/// NOTE(review): the round-trip through `AbsolutePathBuf` presumably yields a
/// normalized absolute form — confirm in `codex_utils_absolute_path`; on
/// failure the original path is lower-cased as-is.
fn lower_ascii_path(path: PathBuf) -> PathBuf {
    use std::ffi::OsString;
    use std::os::unix::ffi::OsStrExt;
    use std::os::unix::ffi::OsStringExt;
    let absolute_path = AbsolutePathBuf::from_absolute_path(&path)
        .map(AbsolutePathBuf::into_path_buf)
        .unwrap_or(path);
    let lowered: Vec<u8> = absolute_path
        .as_os_str()
        .as_bytes()
        .iter()
        .map(u8::to_ascii_lowercase)
        .collect();
    PathBuf::from(OsString::from_vec(lowered))
}
#[cfg(not(target_os = "linux"))]
/// Non-Linux fallback: WSL normalization never applies, so the path is
/// returned unchanged.
fn lower_ascii_path(path: PathBuf) -> PathBuf {
    path
}

View File

@@ -111,16 +111,16 @@ pub enum ThreadSortKey {
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ThreadListLayout {
pub enum ThreadListLayout {
NestedByDate,
Flat,
}
pub(crate) struct ThreadListConfig<'a> {
pub(crate) allowed_sources: &'a [SessionSource],
pub(crate) model_providers: Option<&'a [String]>,
pub(crate) default_provider: &'a str,
pub(crate) layout: ThreadListLayout,
pub struct ThreadListConfig<'a> {
pub allowed_sources: &'a [SessionSource],
pub model_providers: Option<&'a [String]>,
pub default_provider: &'a str,
pub layout: ThreadListLayout,
}
/// Pagination cursor identifying a file by timestamp and UUID.
@@ -300,7 +300,7 @@ impl From<codex_state::Anchor> for Cursor {
/// can be supplied on the next call to resume after the last returned item, resilient to
/// concurrent new sessions being appended. Ordering is stable by the requested sort key
/// (timestamp desc, then UUID desc).
pub(crate) async fn get_threads(
pub async fn get_threads(
codex_home: &Path,
page_size: usize,
cursor: Option<&Cursor>,
@@ -325,7 +325,7 @@ pub(crate) async fn get_threads(
.await
}
pub(crate) async fn get_threads_in_root(
pub async fn get_threads_in_root(
root: PathBuf,
page_size: usize,
cursor: Option<&Cursor>,

View File

@@ -1,4 +1,4 @@
use crate::config::Config;
use crate::StateDbConfig;
use crate::rollout;
use crate::rollout::list::parse_timestamp_uuid_from_filename;
use crate::rollout::recorder::RolloutRecorder;
@@ -35,7 +35,7 @@ const BACKFILL_LEASE_SECONDS: i64 = 900;
#[cfg(test)]
const BACKFILL_LEASE_SECONDS: i64 = 1;
pub(crate) fn builder_from_session_meta(
pub fn builder_from_session_meta(
session_meta: &SessionMetaLine,
rollout_path: &Path,
) -> Option<ThreadMetadataBuilder> {
@@ -61,7 +61,7 @@ pub(crate) fn builder_from_session_meta(
Some(builder)
}
pub(crate) fn builder_from_items(
pub fn builder_from_items(
items: &[RolloutItem],
rollout_path: &Path,
) -> Option<ThreadMetadataBuilder> {
@@ -130,7 +130,10 @@ pub(crate) async fn extract_metadata_from_rollout(
})
}
pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, config: &Config) {
pub async fn backfill_sessions(
runtime: &codex_state::StateRuntime,
config: &(impl StateDbConfig + ?Sized),
) {
let metric_client = codex_otel::metrics::global();
let timer = metric_client
.as_ref()
@@ -140,7 +143,7 @@ pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, confi
Err(err) => {
warn!(
"failed to read backfill state at {}: {err}",
config.codex_home.display()
config.codex_home().display()
);
BackfillState::default()
}
@@ -153,7 +156,7 @@ pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, confi
Err(err) => {
warn!(
"failed to claim backfill worker at {}: {err}",
config.codex_home.display()
config.codex_home().display()
);
return;
}
@@ -161,7 +164,7 @@ pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, confi
if !claimed {
info!(
"state db backfill already running at {}; skipping duplicate worker",
config.codex_home.display()
config.codex_home().display()
);
return;
}
@@ -170,7 +173,7 @@ pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, confi
Err(err) => {
warn!(
"failed to read claimed backfill state at {}: {err}",
config.codex_home.display()
config.codex_home().display()
);
BackfillState {
status: BackfillStatus::Running,
@@ -182,15 +185,15 @@ pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, confi
if let Err(err) = runtime.mark_backfill_running().await {
warn!(
"failed to mark backfill running at {}: {err}",
config.codex_home.display()
config.codex_home().display()
);
} else {
backfill_state.status = BackfillStatus::Running;
}
}
let sessions_root = config.codex_home.join(rollout::SESSIONS_SUBDIR);
let archived_root = config.codex_home.join(rollout::ARCHIVED_SESSIONS_SUBDIR);
let sessions_root = config.codex_home().join(rollout::SESSIONS_SUBDIR);
let archived_root = config.codex_home().join(rollout::ARCHIVED_SESSIONS_SUBDIR);
let mut rollout_paths: Vec<BackfillRolloutPath> = Vec::new();
for (root, archived) in [(sessions_root, false), (archived_root, true)] {
if !tokio::fs::try_exists(&root).await.unwrap_or(false) {
@@ -199,7 +202,7 @@ pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, confi
match collect_rollout_paths(&root).await {
Ok(paths) => {
rollout_paths.extend(paths.into_iter().map(|path| BackfillRolloutPath {
watermark: backfill_watermark_for_path(config.codex_home.as_path(), &path),
watermark: backfill_watermark_for_path(config.codex_home(), &path),
path,
archived,
}));
@@ -226,9 +229,7 @@ pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, confi
for batch in rollout_paths.chunks(BACKFILL_BATCH_SIZE) {
for rollout in batch {
stats.scanned = stats.scanned.saturating_add(1);
match extract_metadata_from_rollout(&rollout.path, config.model_provider_id.as_str())
.await
{
match extract_metadata_from_rollout(&rollout.path, config.model_provider_id()).await {
Ok(outcome) => {
if outcome.parse_errors > 0
&& let Some(ref metric_client) = metric_client
@@ -307,7 +308,7 @@ pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, confi
{
warn!(
"failed to checkpoint backfill at {}: {err}",
config.codex_home.display()
config.codex_home().display()
);
} else {
last_watermark = Some(last_entry.watermark.clone());
@@ -320,7 +321,7 @@ pub(crate) async fn backfill_sessions(runtime: &codex_state::StateRuntime, confi
{
warn!(
"failed to mark backfill complete at {}: {err}",
config.codex_home.display()
config.codex_home().display()
);
}

View File

@@ -1,4 +1,5 @@
use super::*;
use crate::StateDbConfigSnapshot;
use chrono::DateTime;
use chrono::NaiveDateTime;
use chrono::Timelike;
@@ -21,6 +22,14 @@ use std::path::PathBuf;
use tempfile::tempdir;
use uuid::Uuid;
fn test_state_db_config(codex_home: &Path) -> StateDbConfigSnapshot {
StateDbConfigSnapshot::from_parts(
codex_home.to_path_buf(),
codex_home.to_path_buf(),
"test-provider".to_string(),
)
}
#[tokio::test]
async fn extract_metadata_from_rollout_uses_session_meta() {
let dir = tempdir().expect("tempdir");
@@ -195,9 +204,7 @@ async fn backfill_sessions_resumes_from_watermark_and_marks_complete() {
))
.await;
let mut config = crate::config::test_config();
config.codex_home = codex_home.clone();
config.model_provider_id = "test-provider".to_string();
let config = test_state_db_config(codex_home.as_path());
backfill_sessions(runtime.as_ref(), &config).await;
let first_id = ThreadId::from_string(&first_uuid.to_string()).expect("first thread id");
@@ -265,9 +272,7 @@ async fn backfill_sessions_preserves_existing_git_branch_and_fills_missing_git_f
.await
.expect("existing metadata upsert");
let mut config = crate::config::test_config();
config.codex_home = codex_home.clone();
config.model_provider_id = "test-provider".to_string();
let config = test_state_db_config(codex_home.as_path());
backfill_sessions(runtime.as_ref(), &config).await;
let persisted = runtime
@@ -302,9 +307,7 @@ async fn backfill_sessions_normalizes_cwd_before_upsert() {
.await
.expect("initialize runtime");
let mut config = crate::config::test_config();
config.codex_home = codex_home.clone();
config.model_provider_id = "test-provider".to_string();
let config = test_state_db_config(codex_home.as_path());
backfill_sessions(runtime.as_ref(), &config).await;
let thread_id = ThreadId::from_string(&thread_uuid.to_string()).expect("thread id");

View File

@@ -7,16 +7,13 @@ pub const ARCHIVED_SESSIONS_SUBDIR: &str = "archived_sessions";
pub const INTERACTIVE_SESSION_SOURCES: &[SessionSource] =
&[SessionSource::Cli, SessionSource::VSCode];
pub(crate) mod error;
pub mod list;
pub(crate) mod metadata;
pub(crate) mod policy;
pub mod metadata;
pub mod policy;
pub mod recorder;
pub(crate) mod session_index;
pub(crate) mod truncation;
pub mod session_index;
pub use codex_protocol::protocol::SessionMeta;
pub(crate) use error::map_session_init_error;
pub use list::find_archived_thread_path_by_id_str;
pub use list::find_thread_path_by_id_str;
#[deprecated(note = "use find_thread_path_by_id_str")]

View File

@@ -46,7 +46,7 @@ pub(crate) fn should_persist_response_item(item: &ResponseItem) -> bool {
/// Whether a `ResponseItem` should be persisted for the memories.
#[inline]
pub(crate) fn should_persist_response_item_for_memories(item: &ResponseItem) -> bool {
pub fn should_persist_response_item_for_memories(item: &ResponseItem) -> bool {
match item {
ResponseItem::Message { role, .. } => role != "developer",
ResponseItem::LocalShellCall { .. }

View File

@@ -39,8 +39,9 @@ use super::list::parse_timestamp_uuid_from_filename;
use super::metadata;
use super::policy::EventPersistenceMode;
use super::policy::is_persisted_response_item;
use crate::config::Config;
use crate::default_client::originator;
use crate::RolloutConfig;
use crate::RolloutConfigSnapshot;
use crate::StateDbConfig;
use crate::git_info::collect_git_info;
use crate::path_utils;
use crate::state_db;
@@ -163,7 +164,7 @@ impl RolloutRecorder {
/// List threads (rollout files) under the provided Codex home directory.
#[allow(clippy::too_many_arguments)]
pub async fn list_threads(
config: &Config,
config: &(impl StateDbConfig + ?Sized),
page_size: usize,
cursor: Option<&Cursor>,
sort_key: ThreadSortKey,
@@ -189,7 +190,7 @@ impl RolloutRecorder {
/// List archived threads (rollout files) under the archived sessions directory.
#[allow(clippy::too_many_arguments)]
pub async fn list_archived_threads(
config: &Config,
config: &(impl StateDbConfig + ?Sized),
page_size: usize,
cursor: Option<&Cursor>,
sort_key: ThreadSortKey,
@@ -214,7 +215,7 @@ impl RolloutRecorder {
#[allow(clippy::too_many_arguments)]
async fn list_threads_with_db_fallback(
config: &Config,
config: &(impl StateDbConfig + ?Sized),
page_size: usize,
cursor: Option<&Cursor>,
sort_key: ThreadSortKey,
@@ -224,7 +225,7 @@ impl RolloutRecorder {
archived: bool,
search_term: Option<&str>,
) -> std::io::Result<ThreadsPage> {
let codex_home = config.codex_home.as_path();
let codex_home = config.codex_home();
// Filesystem-first listing intentionally overfetches so we can repair stale/missing
// SQLite rollout paths before the final DB-backed page is returned.
let fs_page_size = page_size.saturating_mul(2).max(page_size);
@@ -298,7 +299,7 @@ impl RolloutRecorder {
/// Find the newest recorded thread path, optionally filtering to a matching cwd.
#[allow(clippy::too_many_arguments)]
pub async fn find_latest_thread_path(
config: &Config,
config: &(impl StateDbConfig + ?Sized),
page_size: usize,
cursor: Option<&Cursor>,
sort_key: ThreadSortKey,
@@ -307,7 +308,7 @@ impl RolloutRecorder {
default_provider: &str,
filter_cwd: Option<&Path>,
) -> std::io::Result<Option<PathBuf>> {
let codex_home = config.codex_home.as_path();
let codex_home = config.codex_home();
let state_db_ctx = state_db::get_state_db(config).await;
if state_db_ctx.is_some() {
let mut db_cursor = cursor.cloned();
@@ -368,11 +369,12 @@ impl RolloutRecorder {
///
/// For resumed sessions, this immediately opens the existing rollout file.
pub async fn new(
config: &Config,
config: &(impl RolloutConfig + ?Sized),
params: RolloutRecorderParams,
state_db_ctx: Option<StateDbHandle>,
state_builder: Option<ThreadMetadataBuilder>,
) -> std::io::Result<Self> {
let config_snapshot = RolloutConfigSnapshot::new(config);
let (file, deferred_log_file_info, rollout_path, meta, event_persistence_mode) =
match params {
RolloutRecorderParams::Create {
@@ -383,7 +385,8 @@ impl RolloutRecorder {
dynamic_tools,
event_persistence_mode,
} => {
let log_file_info = precompute_log_file_info(config, conversation_id)?;
let log_file_info =
precompute_log_file_info(&config_snapshot, conversation_id)?;
let path = log_file_info.path.clone();
let session_id = log_file_info.conversation_id;
let started_at = log_file_info.timestamp;
@@ -400,20 +403,20 @@ impl RolloutRecorder {
id: session_id,
forked_from_id,
timestamp,
cwd: config.cwd.clone(),
originator: originator().value,
cwd: config_snapshot.cwd().to_path_buf(),
originator: config_snapshot.originator(),
cli_version: env!("CARGO_PKG_VERSION").to_string(),
agent_nickname: source.get_nickname(),
agent_role: source.get_agent_role(),
source,
model_provider: Some(config.model_provider_id.clone()),
model_provider: Some(config_snapshot.model_provider_id().to_string()),
base_instructions: Some(base_instructions),
dynamic_tools: if dynamic_tools.is_empty() {
None
} else {
Some(dynamic_tools)
},
memory_mode: (!config.memories.generate_memories)
memory_mode: (!config_snapshot.generate_memories())
.then_some("disabled".to_string()),
};
@@ -443,7 +446,7 @@ impl RolloutRecorder {
};
// Clone the cwd for the spawned task to collect git info asynchronously
let cwd = config.cwd.clone();
let cwd = config_snapshot.cwd().to_path_buf();
// A reasonably-sized bounded channel. If the buffer fills up the send
// future will yield, which is fine we only need to ensure we do not
@@ -461,8 +464,8 @@ impl RolloutRecorder {
rollout_path.clone(),
state_db_ctx.clone(),
state_builder,
config.model_provider_id.clone(),
config.memories.generate_memories,
config_snapshot.model_provider_id().to_string(),
config_snapshot.generate_memories(),
));
Ok(Self {
@@ -481,7 +484,7 @@ impl RolloutRecorder {
self.state_db.clone()
}
pub(crate) async fn record_items(&self, items: &[RolloutItem]) -> std::io::Result<()> {
pub async fn record_items(&self, items: &[RolloutItem]) -> std::io::Result<()> {
let mut filtered = Vec::new();
for item in items {
// Note that function calls may look a bit strange if they are
@@ -527,7 +530,7 @@ impl RolloutRecorder {
.map_err(|e| IoError::other(format!("failed waiting for rollout flush: {e}")))
}
pub(crate) async fn load_rollout_items(
pub async fn load_rollout_items(
path: &Path,
) -> std::io::Result<(Vec<RolloutItem>, Option<ThreadId>, usize)> {
trace!("Resuming rollout from {path:?}");
@@ -659,13 +662,13 @@ struct LogFileInfo {
}
fn precompute_log_file_info(
config: &Config,
config: &(impl StateDbConfig + ?Sized),
conversation_id: ThreadId,
) -> std::io::Result<LogFileInfo> {
// Resolve ~/.codex/sessions/YYYY/MM/DD path.
let timestamp = OffsetDateTime::now_local()
.map_err(|e| IoError::other(format!("failed to get local time: {e}")))?;
let mut dir = config.codex_home.clone();
let mut dir = config.codex_home().to_path_buf();
dir.push(SESSIONS_SUBDIR);
dir.push(timestamp.year().to_string());
dir.push(format!("{:02}", u8::from(timestamp.month())));

View File

@@ -1,6 +1,5 @@
use super::*;
use crate::config::ConfigBuilder;
use crate::features::Feature;
use crate::RolloutConfigSnapshot;
use chrono::TimeZone;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::protocol::AgentMessageEvent;
@@ -19,6 +18,19 @@ use std::time::Duration;
use tempfile::TempDir;
use uuid::Uuid;
const TEST_PROVIDER: &str = "test-provider";
fn test_rollout_config(codex_home: &Path) -> RolloutConfigSnapshot {
RolloutConfigSnapshot::from_parts(
codex_home.to_path_buf(),
codex_home.to_path_buf(),
codex_home.to_path_buf(),
TEST_PROVIDER.to_string(),
true,
"test-originator".to_string(),
)
}
fn write_session_file(root: &Path, ts: &str, uuid: Uuid) -> std::io::Result<PathBuf> {
let day_dir = root.join("sessions/2025/01/03");
fs::create_dir_all(&day_dir)?;
@@ -54,10 +66,7 @@ fn write_session_file(root: &Path, ts: &str, uuid: Uuid) -> std::io::Result<Path
#[tokio::test]
async fn recorder_materializes_only_after_explicit_persist() -> std::io::Result<()> {
let home = TempDir::new().expect("temp dir");
let config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.build()
.await?;
let config = test_rollout_config(home.path());
let thread_id = ThreadId::new();
let recorder = RolloutRecorder::new(
&config,
@@ -140,16 +149,9 @@ async fn recorder_materializes_only_after_explicit_persist() -> std::io::Result<
#[tokio::test]
async fn metadata_irrelevant_events_touch_state_db_updated_at() -> std::io::Result<()> {
let home = TempDir::new().expect("temp dir");
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.build()
.await?;
config
.features
.enable(Feature::Sqlite)
.expect("test config should allow sqlite");
let config = test_rollout_config(home.path());
let state_db = StateRuntime::init(home.path().to_path_buf(), config.model_provider_id.clone())
let state_db = StateRuntime::init(home.path().to_path_buf(), TEST_PROVIDER.to_string())
.await
.expect("state db should initialize");
state_db
@@ -227,16 +229,8 @@ async fn metadata_irrelevant_events_touch_state_db_updated_at() -> std::io::Resu
async fn metadata_irrelevant_events_fall_back_to_upsert_when_thread_missing() -> std::io::Result<()>
{
let home = TempDir::new().expect("temp dir");
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.build()
.await?;
config
.features
.enable(Feature::Sqlite)
.expect("test config should allow sqlite");
let state_db = StateRuntime::init(home.path().to_path_buf(), config.model_provider_id.clone())
let state_db = StateRuntime::init(home.path().to_path_buf(), TEST_PROVIDER.to_string())
.await
.expect("state db should initialize");
let thread_id = ThreadId::new();
@@ -259,7 +253,7 @@ async fn metadata_irrelevant_events_fall_back_to_upsert_when_thread_missing() ->
rollout_path.as_path(),
Some(&builder),
items.as_slice(),
config.model_provider_id.as_str(),
TEST_PROVIDER,
None,
)
.await;
@@ -277,20 +271,12 @@ async fn metadata_irrelevant_events_fall_back_to_upsert_when_thread_missing() ->
#[tokio::test]
async fn list_threads_db_disabled_does_not_skip_paginated_items() -> std::io::Result<()> {
let home = TempDir::new().expect("temp dir");
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.build()
.await?;
config
.features
.disable(Feature::Sqlite)
.expect("test config should allow sqlite to be disabled");
let config = test_rollout_config(home.path());
let newest = write_session_file(home.path(), "2025-01-03T12-00-00", Uuid::from_u128(9001))?;
let middle = write_session_file(home.path(), "2025-01-02T12-00-00", Uuid::from_u128(9002))?;
let _oldest = write_session_file(home.path(), "2025-01-01T12-00-00", Uuid::from_u128(9003))?;
let default_provider = config.model_provider_id.clone();
let page1 = RolloutRecorder::list_threads(
&config,
1,
@@ -298,7 +284,7 @@ async fn list_threads_db_disabled_does_not_skip_paginated_items() -> std::io::Re
ThreadSortKey::CreatedAt,
&[],
None,
default_provider.as_str(),
TEST_PROVIDER,
None,
)
.await?;
@@ -313,7 +299,7 @@ async fn list_threads_db_disabled_does_not_skip_paginated_items() -> std::io::Re
ThreadSortKey::CreatedAt,
&[],
None,
default_provider.as_str(),
TEST_PROVIDER,
None,
)
.await?;
@@ -325,14 +311,7 @@ async fn list_threads_db_disabled_does_not_skip_paginated_items() -> std::io::Re
#[tokio::test]
async fn list_threads_db_enabled_drops_missing_rollout_paths() -> std::io::Result<()> {
let home = TempDir::new().expect("temp dir");
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.build()
.await?;
config
.features
.enable(Feature::Sqlite)
.expect("test config should allow sqlite");
let config = test_rollout_config(home.path());
let uuid = Uuid::from_u128(9010);
let thread_id = ThreadId::from_string(&uuid.to_string()).expect("valid thread id");
@@ -340,12 +319,10 @@ async fn list_threads_db_enabled_drops_missing_rollout_paths() -> std::io::Resul
"sessions/2099/01/01/rollout-2099-01-01T00-00-00-{uuid}.jsonl"
));
let runtime = codex_state::StateRuntime::init(
home.path().to_path_buf(),
config.model_provider_id.clone(),
)
.await
.expect("state db should initialize");
let runtime =
codex_state::StateRuntime::init(home.path().to_path_buf(), TEST_PROVIDER.to_string())
.await
.expect("state db should initialize");
runtime
.mark_backfill_complete(None)
.await
@@ -360,16 +337,15 @@ async fn list_threads_db_enabled_drops_missing_rollout_paths() -> std::io::Resul
created_at,
SessionSource::Cli,
);
builder.model_provider = Some(config.model_provider_id.clone());
builder.model_provider = Some(TEST_PROVIDER.to_string());
builder.cwd = home.path().to_path_buf();
let mut metadata = builder.build(config.model_provider_id.as_str());
let mut metadata = builder.build(TEST_PROVIDER);
metadata.first_user_message = Some("Hello from user".to_string());
runtime
.upsert_thread(&metadata)
.await
.expect("state db upsert should succeed");
let default_provider = config.model_provider_id.clone();
let page = RolloutRecorder::list_threads(
&config,
10,
@@ -377,7 +353,7 @@ async fn list_threads_db_enabled_drops_missing_rollout_paths() -> std::io::Resul
ThreadSortKey::CreatedAt,
&[],
None,
default_provider.as_str(),
TEST_PROVIDER,
None,
)
.await?;
@@ -393,14 +369,7 @@ async fn list_threads_db_enabled_drops_missing_rollout_paths() -> std::io::Resul
#[tokio::test]
async fn list_threads_db_enabled_repairs_stale_rollout_paths() -> std::io::Result<()> {
let home = TempDir::new().expect("temp dir");
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.build()
.await?;
config
.features
.enable(Feature::Sqlite)
.expect("test config should allow sqlite");
let config = test_rollout_config(home.path());
let uuid = Uuid::from_u128(9011);
let thread_id = ThreadId::from_string(&uuid.to_string()).expect("valid thread id");
@@ -409,12 +378,10 @@ async fn list_threads_db_enabled_repairs_stale_rollout_paths() -> std::io::Resul
"sessions/2099/01/01/rollout-2099-01-01T00-00-00-{uuid}.jsonl"
));
let runtime = codex_state::StateRuntime::init(
home.path().to_path_buf(),
config.model_provider_id.clone(),
)
.await
.expect("state db should initialize");
let runtime =
codex_state::StateRuntime::init(home.path().to_path_buf(), TEST_PROVIDER.to_string())
.await
.expect("state db should initialize");
runtime
.mark_backfill_complete(None)
.await
@@ -429,16 +396,15 @@ async fn list_threads_db_enabled_repairs_stale_rollout_paths() -> std::io::Resul
created_at,
SessionSource::Cli,
);
builder.model_provider = Some(config.model_provider_id.clone());
builder.model_provider = Some(TEST_PROVIDER.to_string());
builder.cwd = home.path().to_path_buf();
let mut metadata = builder.build(config.model_provider_id.as_str());
let mut metadata = builder.build(TEST_PROVIDER);
metadata.first_user_message = Some("Hello from user".to_string());
runtime
.upsert_thread(&metadata)
.await
.expect("state db upsert should succeed");
let default_provider = config.model_provider_id.clone();
let page = RolloutRecorder::list_threads(
&config,
1,
@@ -446,7 +412,7 @@ async fn list_threads_db_enabled_repairs_stale_rollout_paths() -> std::io::Resul
ThreadSortKey::CreatedAt,
&[],
None,
default_provider.as_str(),
TEST_PROVIDER,
None,
)
.await?;

View File

@@ -0,0 +1,548 @@
use crate::StateDbConfig;
use crate::StateDbConfigSnapshot;
use crate::path_utils::normalize_for_path_comparison;
use crate::rollout::list::Cursor;
use crate::rollout::list::ThreadSortKey;
use crate::rollout::metadata;
use chrono::DateTime;
use chrono::NaiveDateTime;
use chrono::Timelike;
use chrono::Utc;
use codex_protocol::ThreadId;
use codex_protocol::dynamic_tools::DynamicToolSpec;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionSource;
pub use codex_state::LogEntry;
use codex_state::ThreadMetadataBuilder;
use serde_json::Value;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use tracing::warn;
use uuid::Uuid;
/// Core-facing handle to the SQLite-backed state runtime.
pub type StateDbHandle = Arc<codex_state::StateRuntime>;
/// Initialize the state runtime for thread state persistence and backfill checks. To only be used
/// inside `core`. The initialization should not be done anywhere else.
///
/// Returns `None` (after logging a warning) when the runtime cannot be
/// initialized or the backfill state cannot be read. If backfill has not
/// completed, a background task is spawned to run it against an owned config
/// snapshot; the runtime is still returned in that case.
pub async fn init(config: &(impl StateDbConfig + ?Sized)) -> Option<StateDbHandle> {
    let runtime = match codex_state::StateRuntime::init(
        config.sqlite_home().to_path_buf(),
        config.model_provider_id().to_string(),
    )
    .await
    {
        Ok(runtime) => runtime,
        Err(err) => {
            warn!(
                "failed to initialize state runtime at {}: {err}",
                config.sqlite_home().display()
            );
            return None;
        }
    };
    let backfill_state = match runtime.get_backfill_state().await {
        Ok(state) => state,
        Err(err) => {
            warn!(
                "failed to read backfill state at {}: {err}",
                config.codex_home().display()
            );
            return None;
        }
    };
    if backfill_state.status != codex_state::BackfillStatus::Complete {
        // Snapshot the config so the spawned task owns its data and does not
        // borrow `config` across the await.
        let runtime_for_backfill = runtime.clone();
        let config = StateDbConfigSnapshot::new(config);
        tokio::spawn(async move {
            metadata::backfill_sessions(runtime_for_backfill.as_ref(), &config).await;
        });
    }
    Some(runtime)
}
/// Get the DB if the feature is enabled and the DB exists.
///
/// Returns `None` when the SQLite file is absent, initialization fails, or
/// backfill has not yet completed.
pub async fn get_state_db(config: &(impl StateDbConfig + ?Sized)) -> Option<StateDbHandle> {
    let sqlite_home = config.sqlite_home();
    let state_path = codex_state::state_db_path(sqlite_home);
    let exists = tokio::fs::try_exists(&state_path).await.unwrap_or(false);
    if !exists {
        return None;
    }
    let runtime = codex_state::StateRuntime::init(
        sqlite_home.to_path_buf(),
        config.model_provider_id().to_string(),
    )
    .await
    .ok()?;
    require_backfill_complete(runtime, sqlite_home).await
}
/// Open the state runtime when the SQLite file exists, without feature gating.
///
/// This is used for parity checks during the SQLite migration phase.
pub async fn open_if_present(codex_home: &Path, default_provider: &str) -> Option<StateDbHandle> {
    let db_path = codex_state::state_db_path(codex_home);
    // Any error probing the file is treated the same as "not present".
    match tokio::fs::try_exists(&db_path).await {
        Ok(true) => {}
        _ => return None,
    }
    let runtime =
        codex_state::StateRuntime::init(codex_home.to_path_buf(), default_provider.to_string())
            .await
            .ok()?;
    require_backfill_complete(runtime, codex_home).await
}
/// Gate a freshly opened runtime on backfill completion: the handle is only
/// returned when the recorded status is `Complete`; otherwise a warning is
/// logged and `None` is returned.
async fn require_backfill_complete(
    runtime: StateDbHandle,
    codex_home: &Path,
) -> Option<StateDbHandle> {
    let state = match runtime.get_backfill_state().await {
        Ok(state) => state,
        Err(err) => {
            warn!(
                "failed to read backfill state at {}: {err}",
                codex_home.display()
            );
            return None;
        }
    };
    if state.status == codex_state::BackfillStatus::Complete {
        return Some(runtime);
    }
    warn!(
        "state db backfill not complete at {} (status: {})",
        codex_home.display(),
        state.status.as_str()
    );
    None
}
/// Convert a listing `Cursor` into a SQLite `Anchor`.
///
/// Cursors serialize to a `"<timestamp>|<uuid>"` string; both the
/// `%Y-%m-%dT%H-%M-%S` filename form and RFC 3339 timestamps are accepted.
/// Sub-second precision is truncated. Anything malformed yields `None`.
fn cursor_to_anchor(cursor: Option<&Cursor>) -> Option<codex_state::Anchor> {
    let value = serde_json::to_value(cursor?).ok()?;
    let (ts_str, id_str) = value.as_str()?.split_once('|')?;
    // More than one separator means the cursor is malformed.
    if id_str.contains('|') {
        return None;
    }
    let id = Uuid::parse_str(id_str).ok()?;
    let parsed = NaiveDateTime::parse_from_str(ts_str, "%Y-%m-%dT%H-%M-%S")
        .map(|naive| DateTime::<Utc>::from_naive_utc_and_offset(naive, Utc))
        .or_else(|_| DateTime::parse_from_rfc3339(ts_str).map(|dt| dt.with_timezone(&Utc)))
        .ok()?;
    let ts = parsed.with_nanosecond(0)?;
    Some(codex_state::Anchor { ts, id })
}
/// Best-effort normalization of a working directory for state-db comparisons;
/// falls back to the original path when canonicalization fails.
pub(crate) fn normalize_cwd_for_state_db(cwd: &Path) -> PathBuf {
    match normalize_for_path_comparison(cwd) {
        Ok(normalized) => normalized,
        Err(_) => cwd.to_path_buf(),
    }
}
/// List thread ids from SQLite for parity checks without rollout scanning.
///
/// Returns `None` when no runtime is available or the query fails. A
/// `codex_home` mismatch between the runtime and the caller is logged but
/// does not abort the query.
#[allow(clippy::too_many_arguments)]
pub async fn list_thread_ids_db(
    context: Option<&codex_state::StateRuntime>,
    codex_home: &Path,
    page_size: usize,
    cursor: Option<&Cursor>,
    sort_key: ThreadSortKey,
    allowed_sources: &[SessionSource],
    model_providers: Option<&[String]>,
    archived_only: bool,
    stage: &str,
) -> Option<Vec<ThreadId>> {
    let ctx = context?;
    if ctx.codex_home() != codex_home {
        warn!(
            "state db codex_home mismatch: expected {}, got {}",
            ctx.codex_home().display(),
            codex_home.display()
        );
    }
    let anchor = cursor_to_anchor(cursor);
    // Sources are stored as their JSON string form; non-string encodings fall
    // back to raw JSON text, and serialization failures map to an empty string.
    let allowed_sources: Vec<String> = allowed_sources
        .iter()
        .map(|value| match serde_json::to_value(value) {
            Ok(Value::String(s)) => s,
            Ok(other) => other.to_string(),
            Err(_) => String::new(),
        })
        .collect();
    let model_providers = model_providers.map(<[String]>::to_vec);
    match ctx
        .list_thread_ids(
            page_size,
            anchor.as_ref(),
            // Translate the rollout-layer sort key to the state-db enum.
            match sort_key {
                ThreadSortKey::CreatedAt => codex_state::SortKey::CreatedAt,
                ThreadSortKey::UpdatedAt => codex_state::SortKey::UpdatedAt,
            },
            allowed_sources.as_slice(),
            model_providers.as_deref(),
            archived_only,
        )
        .await
    {
        Ok(ids) => Some(ids),
        Err(err) => {
            warn!("state db list_thread_ids failed during {stage}: {err}");
            None
        }
    }
}
/// List thread metadata from SQLite without rollout directory traversal.
///
/// Returns `None` when no runtime is available or the query fails. Items
/// whose recorded rollout file no longer exists on disk are dropped from the
/// page and deleted from the database (best-effort), so the returned page may
/// contain fewer than `page_size` items.
#[allow(clippy::too_many_arguments)]
pub async fn list_threads_db(
    context: Option<&codex_state::StateRuntime>,
    codex_home: &Path,
    page_size: usize,
    cursor: Option<&Cursor>,
    sort_key: ThreadSortKey,
    allowed_sources: &[SessionSource],
    model_providers: Option<&[String]>,
    archived: bool,
    search_term: Option<&str>,
) -> Option<codex_state::ThreadsPage> {
    let ctx = context?;
    if ctx.codex_home() != codex_home {
        warn!(
            "state db codex_home mismatch: expected {}, got {}",
            ctx.codex_home().display(),
            codex_home.display()
        );
    }
    let anchor = cursor_to_anchor(cursor);
    // Sources are stored as their JSON string form; non-string encodings fall
    // back to raw JSON text, and serialization failures map to an empty string.
    let allowed_sources: Vec<String> = allowed_sources
        .iter()
        .map(|value| match serde_json::to_value(value) {
            Ok(Value::String(s)) => s,
            Ok(other) => other.to_string(),
            Err(_) => String::new(),
        })
        .collect();
    let model_providers = model_providers.map(<[String]>::to_vec);
    match ctx
        .list_threads(
            page_size,
            anchor.as_ref(),
            // Translate the rollout-layer sort key to the state-db enum.
            match sort_key {
                ThreadSortKey::CreatedAt => codex_state::SortKey::CreatedAt,
                ThreadSortKey::UpdatedAt => codex_state::SortKey::UpdatedAt,
            },
            allowed_sources.as_slice(),
            model_providers.as_deref(),
            archived,
            search_term,
        )
        .await
    {
        Ok(mut page) => {
            // Repair pass: keep only items whose rollout file still exists;
            // stale rows are logged and removed from the database.
            let mut valid_items = Vec::with_capacity(page.items.len());
            for item in page.items {
                if tokio::fs::try_exists(&item.rollout_path)
                    .await
                    .unwrap_or(false)
                {
                    valid_items.push(item);
                } else {
                    warn!(
                        "state db list_threads returned stale rollout path for thread {}: {}",
                        item.id,
                        item.rollout_path.display()
                    );
                    warn!("state db discrepancy during list_threads_db: stale_db_path_dropped");
                    // Best-effort cleanup; a delete failure is ignored.
                    let _ = ctx.delete_thread(item.id).await;
                }
            }
            page.items = valid_items;
            Some(page)
        }
        Err(err) => {
            warn!("state db list_threads failed: {err}");
            None
        }
    }
}
/// Look up the rollout path for a thread id using SQLite.
///
/// Yields `None` when no runtime is configured, the id is unknown, or the
/// lookup itself errors (logged with the caller-supplied `stage` label).
pub async fn find_rollout_path_by_id(
    context: Option<&codex_state::StateRuntime>,
    thread_id: ThreadId,
    archived_only: Option<bool>,
    stage: &str,
) -> Option<PathBuf> {
    let ctx = context?;
    match ctx.find_rollout_path_by_id(thread_id, archived_only).await {
        Ok(path) => path,
        Err(err) => {
            warn!("state db find_rollout_path_by_id failed during {stage}: {err}");
            None
        }
    }
}
/// Get dynamic tools for a thread id using SQLite.
///
/// Returns `None` when no runtime is configured, no tools are stored, or the
/// query fails (logged with the caller-supplied `stage` label).
pub async fn get_dynamic_tools(
    context: Option<&codex_state::StateRuntime>,
    thread_id: ThreadId,
    stage: &str,
) -> Option<Vec<DynamicToolSpec>> {
    let ctx = context?;
    ctx.get_dynamic_tools(thread_id)
        .await
        .unwrap_or_else(|err| {
            warn!("state db get_dynamic_tools failed during {stage}: {err}");
            None
        })
}
/// Persist dynamic tools for a thread id using SQLite, if none exist yet.
pub async fn persist_dynamic_tools(
context: Option<&codex_state::StateRuntime>,
thread_id: ThreadId,
tools: Option<&[DynamicToolSpec]>,
stage: &str,
) {
let Some(ctx) = context else {
return;
};
if let Err(err) = ctx.persist_dynamic_tools(thread_id, tools).await {
warn!("state db persist_dynamic_tools failed during {stage}: {err}");
}
}
pub async fn mark_thread_memory_mode_polluted(
context: Option<&codex_state::StateRuntime>,
thread_id: ThreadId,
stage: &str,
) {
let Some(ctx) = context else {
return;
};
if let Err(err) = ctx.mark_thread_memory_mode_polluted(thread_id).await {
warn!("state db mark_thread_memory_mode_polluted failed during {stage}: {err}");
}
}
/// Reconcile rollout items into SQLite, falling back to scanning the rollout file.
///
/// When a builder or any items are supplied, the incremental
/// `apply_rollout_items` path is taken. Otherwise the thread metadata is
/// rebuilt by reading the rollout file on disk. Best-effort throughout: every
/// failure is logged and swallowed.
pub async fn reconcile_rollout(
    context: Option<&codex_state::StateRuntime>,
    rollout_path: &Path,
    default_provider: &str,
    builder: Option<&ThreadMetadataBuilder>,
    items: &[RolloutItem],
    archived_only: Option<bool>,
    new_thread_memory_mode: Option<&str>,
) {
    let Some(ctx) = context else {
        return;
    };
    // Fast path: the caller already has data to apply incrementally.
    if builder.is_some() || !items.is_empty() {
        apply_rollout_items(
            Some(ctx),
            rollout_path,
            default_provider,
            builder,
            items,
            "reconcile_rollout",
            new_thread_memory_mode,
            /*updated_at_override*/ None,
        )
        .await;
        return;
    }
    // Slow path: rebuild metadata by scanning the rollout file on disk.
    let outcome =
        match metadata::extract_metadata_from_rollout(rollout_path, default_provider).await {
            Ok(outcome) => outcome,
            Err(err) => {
                warn!(
                    "state db reconcile_rollout extraction failed {}: {err}",
                    rollout_path.display()
                );
                return;
            }
        };
    let mut metadata = outcome.metadata;
    // A missing memory mode in the extraction outcome defaults to "enabled".
    let memory_mode = outcome.memory_mode.unwrap_or_else(|| "enabled".to_string());
    metadata.cwd = normalize_cwd_for_state_db(&metadata.cwd);
    // Prefer git info already recorded in the DB over freshly extracted values.
    if let Ok(Some(existing_metadata)) = ctx.get_thread(metadata.id).await {
        metadata.prefer_existing_git_info(&existing_metadata);
    }
    // Force the archived flag to match the caller's expectation, when given.
    match archived_only {
        Some(true) if metadata.archived_at.is_none() => {
            metadata.archived_at = Some(metadata.updated_at);
        }
        Some(false) => {
            metadata.archived_at = None;
        }
        Some(true) | None => {}
    }
    if let Err(err) = ctx.upsert_thread(&metadata).await {
        warn!(
            "state db reconcile_rollout upsert failed {}: {err}",
            rollout_path.display()
        );
        return;
    }
    if let Err(err) = ctx
        .set_thread_memory_mode(metadata.id, memory_mode.as_str())
        .await
    {
        warn!(
            "state db reconcile_rollout memory_mode update failed {}: {err}",
            rollout_path.display()
        );
        return;
    }
    // Dynamic tools live on the session meta line; persist them when readable.
    if let Ok(meta_line) = crate::rollout::list::read_session_meta_line(rollout_path).await {
        persist_dynamic_tools(
            Some(ctx),
            meta_line.meta.id,
            meta_line.meta.dynamic_tools.as_deref(),
            "reconcile_rollout",
        )
        .await;
    } else {
        warn!(
            "state db reconcile_rollout missing session meta {}",
            rollout_path.display()
        );
    }
}
/// Repair a thread's rollout path after filesystem fallback succeeds.
///
/// Called when a rollout file was located on disk even though the DB row was
/// missing or pointed elsewhere. Best-effort: all failures are logged and
/// swallowed.
pub async fn read_repair_rollout_path(
    context: Option<&codex_state::StateRuntime>,
    thread_id: Option<ThreadId>,
    archived_only: Option<bool>,
    rollout_path: &Path,
) {
    let Some(ctx) = context else {
        return;
    };
    // Fast path: update an existing metadata row in place, but avoid writes when
    // read-repair computes no effective change.
    let mut saw_existing_metadata = false;
    if let Some(thread_id) = thread_id
        && let Ok(Some(metadata)) = ctx.get_thread(thread_id).await
    {
        saw_existing_metadata = true;
        let mut repaired = metadata.clone();
        repaired.rollout_path = rollout_path.to_path_buf();
        repaired.cwd = normalize_cwd_for_state_db(&repaired.cwd);
        // Align the archived flag with what the caller observed, when given.
        match archived_only {
            Some(true) if repaired.archived_at.is_none() => {
                repaired.archived_at = Some(repaired.updated_at);
            }
            Some(false) => {
                repaired.archived_at = None;
            }
            Some(true) | None => {}
        }
        // No effective change: nothing to write.
        if repaired == metadata {
            return;
        }
        warn!("state db discrepancy during read_repair_rollout_path: upsert_needed (fast path)");
        if let Err(err) = ctx.upsert_thread(&repaired).await {
            warn!(
                "state db read-repair upsert failed for {}: {err}",
                rollout_path.display()
            );
        } else {
            return;
        }
    }
    // Slow path: when the row is missing/unreadable (or direct upsert failed),
    // rebuild metadata from rollout contents and reconcile it into SQLite.
    if !saw_existing_metadata {
        warn!("state db discrepancy during read_repair_rollout_path: upsert_needed (slow path)");
    }
    let default_provider = crate::rollout::list::read_session_meta_line(rollout_path)
        .await
        .ok()
        .and_then(|meta| meta.meta.model_provider)
        .unwrap_or_default();
    reconcile_rollout(
        Some(ctx),
        rollout_path,
        default_provider.as_str(),
        /*builder*/ None,
        &[],
        archived_only,
        /*new_thread_memory_mode*/ None,
    )
    .await;
}
/// Apply rollout items incrementally to SQLite.
///
/// Resolves a metadata builder (preferring the caller-supplied one, otherwise
/// deriving one from `items`), normalizes its path and cwd, and forwards
/// everything to the state runtime. Best-effort: failures are logged and
/// swallowed.
#[allow(clippy::too_many_arguments)]
pub async fn apply_rollout_items(
    context: Option<&codex_state::StateRuntime>,
    rollout_path: &Path,
    _default_provider: &str,
    builder: Option<&ThreadMetadataBuilder>,
    items: &[RolloutItem],
    stage: &str,
    new_thread_memory_mode: Option<&str>,
    updated_at_override: Option<DateTime<Utc>>,
) {
    let Some(ctx) = context else {
        return;
    };
    // Prefer the caller-provided builder; fall back to deriving one from items.
    let resolved = builder
        .cloned()
        .or_else(|| metadata::builder_from_items(items, rollout_path));
    let Some(mut builder) = resolved else {
        warn!(
            "state db apply_rollout_items missing builder during {stage}: {}",
            rollout_path.display()
        );
        warn!("state db discrepancy during apply_rollout_items: {stage}, missing_builder");
        return;
    };
    builder.rollout_path = rollout_path.to_path_buf();
    builder.cwd = normalize_cwd_for_state_db(&builder.cwd);
    if let Err(err) = ctx
        .apply_rollout_items(&builder, items, new_thread_memory_mode, updated_at_override)
        .await
    {
        warn!(
            "state db apply_rollout_items failed during {stage} for {}: {err}",
            rollout_path.display()
        );
    }
}
/// Bump a thread's `updated_at` timestamp in SQLite.
///
/// Returns `true` only when the runtime reports a successful touch; a missing
/// runtime, missing thread id, or DB error (logged with `stage`) yields
/// `false`.
pub async fn touch_thread_updated_at(
    context: Option<&codex_state::StateRuntime>,
    thread_id: Option<ThreadId>,
    updated_at: DateTime<Utc>,
    stage: &str,
) -> bool {
    let (Some(ctx), Some(thread_id)) = (context, thread_id) else {
        return false;
    };
    match ctx.touch_thread_updated_at(thread_id, updated_at).await {
        Ok(touched) => touched,
        Err(err) => {
            warn!("state db touch_thread_updated_at failed during {stage} for {thread_id}: {err}");
            false
        }
    }
}
// Unit tests live in a sibling file to keep this module focused on the
// state-db glue itself.
#[cfg(test)]
#[path = "state_db_tests.rs"]
mod tests;

View File

@@ -0,0 +1,83 @@
use codex_protocol::protocol::TruncationPolicy as ProtocolTruncationPolicy;
/// Budget used when truncating text.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) enum TruncationPolicy {
    // Keep at most this many bytes of the original text.
    Bytes(usize),
    // Keep roughly this many tokens; `truncate_text` approximates a token
    // as 4 bytes.
    Tokens(usize),
}
impl From<TruncationPolicy> for ProtocolTruncationPolicy {
fn from(value: TruncationPolicy) -> Self {
match value {
TruncationPolicy::Bytes(bytes) => Self::Bytes(bytes),
TruncationPolicy::Tokens(tokens) => Self::Tokens(tokens),
}
}
}
/// Truncate `content` according to `policy`.
///
/// Token budgets are approximated as 4 bytes per token; both variants then
/// delegate to the byte-budget truncation.
pub(crate) fn truncate_text(content: &str, policy: TruncationPolicy) -> String {
    let max_bytes = match policy {
        TruncationPolicy::Bytes(bytes) => bytes,
        TruncationPolicy::Tokens(tokens) => tokens.saturating_mul(4),
    };
    truncate_with_byte_budget(content, max_bytes)
}
/// Keep roughly the first and last `max_bytes / 2` bytes of `s`, replacing the
/// removed middle with an "N chars truncated…" marker. Short inputs are
/// returned unchanged; a zero budget yields just the marker.
fn truncate_with_byte_budget(s: &str, max_bytes: usize) -> String {
    if s.is_empty() {
        return String::new();
    }
    // With no byte budget at all, the output is only the marker.
    if max_bytes == 0 {
        return format!("{} chars truncated…", s.chars().count());
    }
    // Fits entirely within the budget: return it untouched.
    if s.len() <= max_bytes {
        return s.to_string();
    }
    let total_chars = s.chars().count();
    let head_budget = max_bytes / 2;
    let tail_budget = max_bytes - head_budget;
    let (removed_chars, head, tail) = split_string(s, head_budget, tail_budget);
    // Report every character that survived into neither head nor tail.
    let kept = head.chars().count() + tail.chars().count();
    let dropped = total_chars.saturating_sub(kept + removed_chars) + removed_chars;
    let marker = format!("{} chars truncated…", dropped);
    let mut result = String::with_capacity(head.len() + marker.len() + tail.len());
    result.push_str(head);
    result.push_str(&marker);
    result.push_str(tail);
    result
}

/// Split `s` into a UTF-8-safe prefix of at most `beginning_bytes` bytes and a
/// suffix of at most `end_bytes` bytes, returning
/// `(chars_in_between, prefix, suffix)`. A character straddling a byte
/// boundary is dropped rather than split.
fn split_string(s: &str, beginning_bytes: usize, end_bytes: usize) -> (usize, &str, &str) {
    let len = s.len();
    let tail_start_target = len.saturating_sub(end_bytes);
    let mut prefix_end = 0usize;
    let mut suffix_start = len;
    let mut suffix_started = false;
    let mut removed_chars = 0usize;
    for (idx, ch) in s.char_indices() {
        let char_end = idx + ch.len_utf8();
        if char_end <= beginning_bytes {
            // Whole character fits inside the prefix budget.
            prefix_end = char_end;
        } else if idx >= tail_start_target {
            // First character at/after the tail target opens the suffix.
            if !suffix_started {
                suffix_start = idx;
                suffix_started = true;
            }
        } else {
            // Middle character: dropped and counted.
            removed_chars = removed_chars.saturating_add(1);
        }
    }
    // Guard against overlapping windows when the budgets cover the string.
    let suffix_start = suffix_start.max(prefix_end);
    (removed_chars, &s[..prefix_end], &s[suffix_start..])
}

View File

@@ -41,6 +41,7 @@ codex-file-search = { workspace = true }
codex-login = { workspace = true }
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
codex-rollout = { workspace = true }
codex-shell-command = { workspace = true }
codex-state = { workspace = true }
codex-tui-app-server = { workspace = true }

View File

@@ -62,7 +62,6 @@ use codex_core::config::types::WindowsSandboxModeToml;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use codex_core::find_thread_name_by_id;
use codex_core::git_info::current_branch_name;
use codex_core::git_info::get_git_repo_root;
use codex_core::git_info::local_git_branches;
@@ -150,6 +149,7 @@ use codex_protocol::request_permissions::RequestPermissionsEvent;
use codex_protocol::request_user_input::RequestUserInputEvent;
use codex_protocol::user_input::TextElement;
use codex_protocol::user_input::UserInput;
use codex_rollout::find_thread_name_by_id;
use codex_utils_sleep_inhibitor::SleepInhibitor;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;

View File

@@ -10,9 +10,6 @@ pub use app::ExitReason;
use codex_cloud_requirements::cloud_requirements_loader;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_core::RolloutRecorder;
use codex_core::ThreadSortKey;
use codex_core::auth::AuthMode;
use codex_core::auth::enforce_login_restrictions;
use codex_core::check_execpolicy_for_warnings;
@@ -27,12 +24,8 @@ use codex_core::config_loader::ConfigLoadError;
use codex_core::config_loader::LoaderOverrides;
use codex_core::config_loader::format_config_error_with_source;
use codex_core::default_client::set_default_client_residency_requirement;
use codex_core::find_thread_path_by_id_str;
use codex_core::find_thread_path_by_name_str;
use codex_core::format_exec_policy_error_with_source;
use codex_core::path_utils;
use codex_core::read_session_meta_line;
use codex_core::state_db::get_state_db;
use codex_core::terminal::Multiplexer;
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
use codex_protocol::ThreadId;
@@ -42,6 +35,13 @@ use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;
use codex_rollout::INTERACTIVE_SESSION_SOURCES;
use codex_rollout::RolloutRecorder;
use codex_rollout::ThreadSortKey;
use codex_rollout::find_thread_path_by_id_str;
use codex_rollout::find_thread_path_by_name_str;
use codex_rollout::read_session_meta_line;
use codex_rollout::state_db::get_state_db;
use codex_state::log_db;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_oss::ensure_oss_provider_ready;
@@ -533,7 +533,7 @@ pub async fn run_main(
let otel_tracing_layer = otel.as_ref().and_then(|o| o.tracing_layer());
let log_db_layer = codex_core::state_db::get_state_db(&config)
let log_db_layer = get_state_db(&config)
.await
.map(|db| log_db::start(db).with_filter(env_filter()));

View File

@@ -12,16 +12,16 @@ use crate::tui::Tui;
use crate::tui::TuiEvent;
use chrono::DateTime;
use chrono::Utc;
use codex_core::Cursor;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_core::RolloutRecorder;
use codex_core::ThreadItem;
use codex_core::ThreadSortKey;
use codex_core::ThreadsPage;
use codex_core::config::Config;
use codex_core::find_thread_names_by_ids;
use codex_core::path_utils;
use codex_protocol::ThreadId;
use codex_rollout::Cursor;
use codex_rollout::INTERACTIVE_SESSION_SOURCES;
use codex_rollout::RolloutRecorder;
use codex_rollout::ThreadItem;
use codex_rollout::ThreadSortKey;
use codex_rollout::ThreadsPage;
use codex_rollout::find_thread_names_by_ids;
use color_eyre::eyre::Result;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;

View File

@@ -46,6 +46,7 @@ codex-file-search = { workspace = true }
codex-login = { workspace = true }
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
codex-rollout = { workspace = true }
codex-shell-command = { workspace = true }
codex-state = { workspace = true }
codex-utils-approval-presets = { workspace = true }

View File

@@ -63,7 +63,6 @@ use codex_core::config::types::WindowsSandboxModeToml;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use codex_core::find_thread_name_by_id;
use codex_core::git_info::current_branch_name;
use codex_core::git_info::get_git_repo_root;
use codex_core::git_info::local_git_branches;
@@ -148,6 +147,7 @@ use codex_protocol::request_permissions::RequestPermissionsEvent;
use codex_protocol::request_user_input::RequestUserInputEvent;
use codex_protocol::user_input::TextElement;
use codex_protocol::user_input::UserInput;
use codex_rollout::find_thread_name_by_id;
use codex_utils_sleep_inhibitor::SleepInhibitor;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;

View File

@@ -36,8 +36,6 @@ use codex_core::config_loader::format_config_error_with_source;
use codex_core::default_client::set_default_client_residency_requirement;
use codex_core::format_exec_policy_error_with_source;
use codex_core::path_utils;
use codex_core::read_session_meta_line;
use codex_core::state_db::get_state_db;
use codex_core::terminal::Multiplexer;
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
use codex_protocol::ThreadId;
@@ -47,6 +45,8 @@ use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;
use codex_rollout::read_session_meta_line;
use codex_rollout::state_db::get_state_db;
use codex_state::log_db;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_oss::ensure_oss_provider_ready;
@@ -857,7 +857,7 @@ pub async fn run_main(
let otel_tracing_layer = otel.as_ref().and_then(|o| o.tracing_layer());
let log_db_layer = codex_core::state_db::get_state_db(&config)
let log_db_layer = get_state_db(&config)
.await
.map(|db| log_db::start(db).with_filter(env_filter()));

View File

@@ -17,16 +17,16 @@ use codex_app_server_protocol::Thread;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadSortKey as AppServerThreadSortKey;
use codex_app_server_protocol::ThreadSourceKind;
use codex_core::Cursor;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_core::RolloutRecorder;
use codex_core::ThreadItem;
use codex_core::ThreadSortKey;
use codex_core::ThreadsPage;
use codex_core::config::Config;
use codex_core::find_thread_names_by_ids;
use codex_core::path_utils;
use codex_protocol::ThreadId;
use codex_rollout::Cursor;
use codex_rollout::INTERACTIVE_SESSION_SOURCES;
use codex_rollout::RolloutRecorder;
use codex_rollout::ThreadItem;
use codex_rollout::ThreadSortKey;
use codex_rollout::ThreadsPage;
use codex_rollout::find_thread_names_by_ids;
use color_eyre::eyre::Result;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;