Compare commits

...

14 Commits

Author SHA1 Message Date
Charles Cunningham
4de870eebc Skip cwd prompt for shared forks 2026-01-29 15:23:16 -08:00
Charles Cunningham
33a313899d Fix remote fork cwd + enforce shared ownership 2026-01-29 15:23:16 -08:00
Charles Cunningham
32dbd27fe5 lint 2026-01-29 15:23:16 -08:00
Charles Cunningham
36b60fb613 lint 2026-01-29 15:23:16 -08:00
Charles Cunningham
300460675c Check metadata even when the rollout blob is missing, and enforce ownership in that case 2026-01-29 15:23:15 -08:00
Charles Cunningham
bf35720298 Prefer current cwd if remote downloaded 2026-01-29 15:23:15 -08:00
Charles Cunningham
0c9260770b Preserve query params when building HTTP object URLs 2026-01-29 15:23:15 -08:00
Charles Cunningham
e5b3750be2 Fix /share user input handling after rebase 2026-01-29 15:23:15 -08:00
Charles Cunningham
365864fe95 Fix cargo deny 2026-01-29 15:23:15 -08:00
Charles Cunningham
9b4ab55274 Remove AWS support for now to make CI happy 2026-01-29 15:23:15 -08:00
Charles Cunningham
5f19e0d9ba Lint and fix 2026-01-29 15:23:15 -08:00
Charles Cunningham
45c1118890 Fork remotely stored session 2026-01-29 15:23:15 -08:00
Charles Cunningham
af04bd2324 Lint 2026-01-29 15:23:15 -08:00
Charles Cunningham
d7d08ec4a2 Enterprise session sharing via object storage (/share) 2026-01-29 15:23:15 -08:00
15 changed files with 2244 additions and 905 deletions

2061
codex-rs/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -120,6 +120,8 @@ async-channel = "2.3.1"
async-stream = "0.3.6"
async-trait = "0.1.89"
axum = { version = "0.8", default-features = false }
azure_core = "0.21"
azure_identity = "0.21"
base64 = "0.22.1"
bytes = "1.10.1"
chardetng = "0.1.17"

View File

@@ -21,6 +21,8 @@ anyhow = { workspace = true }
arc-swap = "1.8.0"
async-channel = { workspace = true }
async-trait = { workspace = true }
azure_core = { workspace = true }
azure_identity = { workspace = true }
base64 = { workspace = true }
chardetng = { workspace = true }
chrono = { workspace = true, features = ["serde"] }

View File

@@ -1488,6 +1488,10 @@
],
"description": "Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`."
},
"session_object_storage_url": {
"description": "Base URL for the object store used by /share (enterprise/self-hosted).",
"type": "string"
},
"shell_environment_policy": {
"allOf": [
{

View File

@@ -299,6 +299,9 @@ pub struct Config {
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: String,
/// Optional base URL for storing shared session rollouts in an object store.
pub session_object_storage_url: Option<String>,
/// When set, restricts ChatGPT login to a specific workspace identifier.
pub forced_chatgpt_workspace_id: Option<String>,
@@ -841,6 +844,9 @@ pub struct ConfigToml {
/// When unset, Codex will bind to an ephemeral port chosen by the OS.
pub mcp_oauth_callback_port: Option<u16>,
/// Base URL for the object store used by /share (enterprise/self-hosted).
pub session_object_storage_url: Option<String>,
/// User-defined provider entries that extend/override the built-in list.
#[serde(default)]
pub model_providers: HashMap<String, ModelProviderInfo>,
@@ -1445,6 +1451,16 @@ impl Config {
}
});
let session_object_storage_url =
cfg.session_object_storage_url.as_ref().and_then(|value| {
let trimmed = value.trim();
if trimmed.is_empty() {
None
} else {
Some(trimmed.to_string())
}
});
let forced_login_method = cfg.forced_login_method;
let model = model.or(config_profile.model).or(cfg.model);
@@ -1574,6 +1590,7 @@ impl Config {
.chatgpt_base_url
.or(cfg.chatgpt_base_url)
.unwrap_or("https://chatgpt.com/backend-api/".to_string()),
session_object_storage_url,
forced_chatgpt_workspace_id,
forced_login_method,
include_apply_patch_tool: include_apply_patch_tool_flag,
@@ -3777,6 +3794,7 @@ model_verbosity = "high"
model_verbosity: None,
model_personality: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
session_object_storage_url: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
@@ -3861,6 +3879,7 @@ model_verbosity = "high"
model_verbosity: None,
model_personality: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
session_object_storage_url: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
@@ -3960,6 +3979,7 @@ model_verbosity = "high"
model_verbosity: None,
model_personality: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
session_object_storage_url: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
@@ -4045,6 +4065,7 @@ model_verbosity = "high"
model_verbosity: Some(Verbosity::High),
model_personality: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
session_object_storage_url: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,

View File

@@ -51,6 +51,7 @@ pub mod path_utils;
pub mod powershell;
pub mod sandboxing;
mod session_prefix;
pub mod session_share;
mod stream_events_utils;
mod text_encoding;
pub mod token_data;

View File

@@ -0,0 +1,633 @@
use std::ffi::OsStr;
use std::ffi::OsString;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::Context;
use azure_core::auth::TokenCredential;
use azure_identity::AzureCliCredential;
use codex_protocol::ThreadId;
use reqwest::StatusCode;
use reqwest::Url;
use serde::Deserialize;
use serde::Serialize;
use time::OffsetDateTime;
use time::format_description::FormatItem;
use time::macros::format_description;
// Object-key layout: all shared sessions live under this prefix in the store.
const SHARE_OBJECT_PREFIX: &str = "sessions";
// Suffix of the rollout blob itself (JSON Lines).
const SHARE_OBJECT_SUFFIX: &str = ".jsonl";
// Suffix of the sidecar ownership-metadata object.
const SHARE_META_SUFFIX: &str = ".meta.json";
// OAuth scope requested when authenticating to Azure Blob Storage.
const AZURE_STORAGE_SCOPE: &str = "https://storage.azure.com/.default";

/// Outcome of a successful share upload: the id a user can pass to
/// `codex fork`, plus the URL of the uploaded rollout object.
#[derive(Debug, Clone)]
pub struct SessionShareResult {
    pub remote_id: ThreadId,
    pub object_url: Url,
}

/// Backend selected from `session_object_storage_url`: a generic HTTP(S)
/// object store, or Azure Blob Storage.
#[derive(Debug, Clone)]
enum SessionObjectStore {
    Http(HttpObjectStore),
    Azure(AzureObjectStore),
}

/// Generic HTTP(S) object store addressed as `base_url` joined with a key.
#[derive(Debug, Clone)]
struct HttpObjectStore {
    base_url: Url,
    client: reqwest::Client,
}

/// Azure Blob Storage target. Authentication is either a SAS token carried
/// in `sas_query` or a token credential (Azure CLI) — exactly one is set.
#[derive(Debug, Clone)]
struct AzureObjectStore {
    endpoint: Url,
    container: String,
    prefix: String,
    sas_query: Option<String>,
    client: reqwest::Client,
    credential: Option<Arc<dyn TokenCredential>>,
}

/// Sidecar metadata stored next to the rollout blob; records who shared the
/// session and when (unix timestamps).
#[derive(Debug, Clone, Serialize, Deserialize)]
struct SessionShareMeta {
    owner: String,
    created_at: i64,
    updated_at: i64,
}
impl SessionObjectStore {
    /// Parses `base_url` (the `session_object_storage_url` setting) and picks
    /// a backend: `az://` URLs and `*.blob.core.windows.net` hosts go to
    /// Azure; other http(s) URLs use the generic HTTP store. A trailing slash
    /// is enforced on HTTP base URLs so key joins resolve under the base path.
    pub async fn new(base_url: &str) -> anyhow::Result<Self> {
        let mut url = Url::parse(base_url)
            .with_context(|| format!("invalid session_object_storage_url: {base_url}"))?;
        match url.scheme() {
            "az" => Ok(SessionObjectStore::Azure(AzureObjectStore::new_from_az(
                &url,
            )?)),
            "http" | "https" => {
                if is_azure_blob_url(&url) {
                    Ok(SessionObjectStore::Azure(AzureObjectStore::new(&url)?))
                } else {
                    ensure_trailing_slash(&mut url);
                    Ok(SessionObjectStore::Http(HttpObjectStore {
                        base_url: url,
                        client: reqwest::Client::new(),
                    }))
                }
            }
            other => Err(anyhow::anyhow!(
                "unsupported session_object_storage_url scheme {other}"
            )),
        }
    }

    /// Full URL of the object stored under `key`.
    fn object_url(&self, key: &str) -> anyhow::Result<Url> {
        match self {
            SessionObjectStore::Http(store) => store.object_url(key),
            SessionObjectStore::Azure(store) => store.object_url(key),
        }
    }

    /// Whether an object exists under `key`.
    async fn object_exists(&self, key: &str) -> anyhow::Result<bool> {
        match self {
            SessionObjectStore::Http(store) => store.object_exists(key).await,
            SessionObjectStore::Azure(store) => store.object_exists(key).await,
        }
    }

    /// Uploads `data` under `key` with the given content type.
    async fn put_object(&self, key: &str, data: Vec<u8>, content_type: &str) -> anyhow::Result<()> {
        match self {
            SessionObjectStore::Http(store) => store.put_object(key, data, content_type).await,
            SessionObjectStore::Azure(store) => store.put_object(key, data, content_type).await,
        }
    }

    /// Fetches the object under `key`; `Ok(None)` when it does not exist.
    async fn get_object_bytes(&self, key: &str) -> anyhow::Result<Option<Vec<u8>>> {
        match self {
            SessionObjectStore::Http(store) => store.get_object_bytes(key).await,
            SessionObjectStore::Azure(store) => store.get_object_bytes(key).await,
        }
    }
}
/// Uploads the rollout at `rollout_path` to the shared object store under
/// `session_id`, enforcing that only the recorded `owner` may overwrite an
/// existing share.
///
/// Write ordering:
/// - When ownership metadata already exists (and matches `owner`), the
///   rollout blob is uploaded first and the metadata refreshed afterwards.
/// - When metadata is missing (fresh share, or recovery from a previously
///   failed metadata upload), metadata is written first so ownership is
///   recorded before the rollout blob is (re)written.
///
/// The previous implementation matched on `(rollout_exists, meta)` with four
/// near-identical arms; the blob's existence only affects the wording of the
/// ownership error, so we match on the metadata alone.
pub async fn upload_rollout_with_owner(
    base_url: &str,
    session_id: ThreadId,
    owner: &str,
    rollout_path: &Path,
) -> anyhow::Result<SessionShareResult> {
    let data = tokio::fs::read(rollout_path)
        .await
        .with_context(|| format!("failed to read rollout at {}", rollout_path.display()))?;
    let store = SessionObjectStore::new(base_url).await?;
    let key = object_key(session_id);
    let meta_key = meta_key(session_id);
    let rollout_exists = store.object_exists(&key).await?;
    let now = OffsetDateTime::now_utc().unix_timestamp();
    match fetch_meta(&store, &meta_key).await? {
        Some(meta) => {
            // Ownership check happens before any write.
            if meta.owner != owner {
                let what = if rollout_exists {
                    "remote session"
                } else {
                    "remote session metadata"
                };
                return Err(anyhow::anyhow!(
                    "{what} already exists and belongs to another user"
                ));
            }
            store
                .put_object(&key, data, "application/x-ndjson")
                .await
                .with_context(|| format!("failed to upload rollout for id {session_id}"))?;
            // Preserve the original creation time; only bump `updated_at`.
            let updated = SessionShareMeta {
                owner: meta.owner,
                created_at: meta.created_at,
                updated_at: now,
            };
            upload_meta(&store, &meta_key, &updated).await?;
        }
        None => {
            let meta = SessionShareMeta {
                owner: owner.to_string(),
                created_at: now,
                updated_at: now,
            };
            upload_meta(&store, &meta_key, &meta).await?;
            store
                .put_object(&key, data, "application/x-ndjson")
                .await
                .with_context(|| format!("failed to upload rollout for id {session_id}"))?;
        }
    }
    let object_url = store.object_url(&key)?;
    Ok(SessionShareResult {
        remote_id: session_id,
        object_url,
    })
}
/// Downloads the shared rollout for `session_id` into the local sessions
/// directory under `codex_home`, mirroring the remote share metadata to a
/// local sidecar file. Returns `Ok(None)` when the remote rollout blob does
/// not exist.
pub async fn download_rollout_if_available(
    base_url: &str,
    session_id: ThreadId,
    codex_home: &Path,
) -> anyhow::Result<Option<PathBuf>> {
    let store = SessionObjectStore::new(base_url).await?;
    let key = object_key(session_id);
    let meta_key = meta_key(session_id);
    let Some(data) = store.get_object_bytes(&key).await? else {
        return Ok(None);
    };
    let path = build_rollout_download_path(codex_home, session_id)?;
    let parent = path
        .parent()
        .ok_or_else(|| anyhow::anyhow!("failed to resolve rollout directory"))?;
    tokio::fs::create_dir_all(parent)
        .await
        .with_context(|| format!("failed to create rollout directory {}", parent.display()))?;
    tokio::fs::write(&path, data)
        .await
        .with_context(|| format!("failed to write rollout file {}", path.display()))?;
    let meta_path = share_meta_path_for_rollout_path(&path);
    match fetch_meta(&store, &meta_key).await? {
        Some(meta) => {
            // Persist remote ownership locally so later resume/fork flows can
            // run the ownership check without hitting the store again.
            let payload =
                serde_json::to_vec(&meta).with_context(|| "failed to serialize metadata")?;
            tokio::fs::write(&meta_path, payload)
                .await
                .with_context(|| {
                    format!("failed to write share metadata {}", meta_path.display())
                })?;
        }
        None => {
            // No remote metadata: best-effort removal of any stale sidecar so
            // local checks don't act on outdated ownership information.
            let _ = tokio::fs::remove_file(&meta_path).await;
        }
    }
    Ok(Some(path))
}
/// Storage key of the rollout blob for `id` (e.g. `sessions/<id>.jsonl`).
fn object_key(id: ThreadId) -> String {
    [SHARE_OBJECT_PREFIX, "/", &id.to_string(), SHARE_OBJECT_SUFFIX].concat()
}
/// Storage key of the ownership metadata for `id` (e.g. `sessions/<id>.meta.json`).
fn meta_key(id: ThreadId) -> String {
    [SHARE_OBJECT_PREFIX, "/", &id.to_string(), SHARE_META_SUFFIX].concat()
}
/// Reads the owner recorded in the local share-metadata sidecar next to
/// `rollout_path`. Returns `Ok(None)` when no sidecar exists; other read or
/// parse failures are reported as errors.
pub fn local_share_owner(rollout_path: &Path) -> anyhow::Result<Option<String>> {
    let meta_path = share_meta_path_for_rollout_path(rollout_path);
    match std::fs::read(&meta_path) {
        Ok(bytes) => {
            let meta: SessionShareMeta = serde_json::from_slice(&bytes)
                .with_context(|| "failed to parse session share metadata")?;
            Ok(Some(meta.owner))
        }
        // Missing sidecar simply means the session was never shared locally.
        Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(None),
        Err(err) => Err(err)
            .with_context(|| format!("failed to read share metadata {}", meta_path.display())),
    }
}
/// Fetches and parses the remote share metadata under `key`; `Ok(None)` when
/// the object is absent.
async fn fetch_meta(
    store: &SessionObjectStore,
    key: &str,
) -> anyhow::Result<Option<SessionShareMeta>> {
    match store.get_object_bytes(key).await? {
        None => Ok(None),
        Some(bytes) => serde_json::from_slice::<SessionShareMeta>(&bytes)
            .map(Some)
            .with_context(|| "failed to parse session share metadata"),
    }
}
/// Serializes `meta` as JSON and uploads it under `key`.
async fn upload_meta(
    store: &SessionObjectStore,
    key: &str,
    meta: &SessionShareMeta,
) -> anyhow::Result<()> {
    let payload = serde_json::to_vec(meta).with_context(|| "failed to serialize metadata")?;
    store.put_object(key, payload, "application/json").await
}
/// Computes the local destination path for a downloaded rollout:
/// `<codex_home>/<sessions>/<YYYY>/<MM>/<DD>/rollout-<timestamp>-<id>.jsonl`.
/// Uses local (not UTC) time — presumably to match locally recorded rollout
/// filenames; TODO(review): confirm against the rollout recorder.
fn build_rollout_download_path(codex_home: &Path, session_id: ThreadId) -> anyhow::Result<PathBuf> {
    // `now_local` can fail when the local UTC offset cannot be determined.
    let timestamp = OffsetDateTime::now_local()
        .map_err(|e| anyhow::anyhow!("failed to get local time: {e}"))?;
    let format: &[FormatItem] =
        format_description!("[year]-[month]-[day]T[hour]-[minute]-[second]");
    let date_str = timestamp
        .format(format)
        .map_err(|e| anyhow::anyhow!("failed to format timestamp: {e}"))?;
    let mut dir = codex_home.to_path_buf();
    dir.push(crate::rollout::SESSIONS_SUBDIR);
    dir.push(timestamp.year().to_string());
    // Zero-padded month/day directories, e.g. `2026/01/29`.
    dir.push(format!("{:02}", u8::from(timestamp.month())));
    dir.push(format!("{:02}", timestamp.day()));
    let filename = format!("rollout-{date_str}-{session_id}.jsonl");
    Ok(dir.join(filename))
}
/// Path of the local share-metadata sidecar for a rollout file: the rollout's
/// file name with `.share-meta.json` appended (falls back to `session` when
/// the path has no file name).
fn share_meta_path_for_rollout_path(path: &Path) -> PathBuf {
    let mut sidecar_name = OsString::from(path.file_name().unwrap_or_else(|| OsStr::new("session")));
    sidecar_name.push(".share-meta.json");
    path.with_file_name(sidecar_name)
}
impl HttpObjectStore {
    /// Builds the object URL by joining `key` onto `base_url`, preserving any
    /// query string from the base URL (e.g. a pre-signed token).
    fn object_url(&self, key: &str) -> anyhow::Result<Url> {
        let mut base = self.base_url.clone();
        // `Url::join` would drop the base's query string, so stash it and
        // re-apply it to the joined URL.
        let query = base.query().map(str::to_string);
        base.set_query(None);
        let mut joined = base
            .join(key)
            .with_context(|| format!("failed to build object URL for key {key}"))?;
        if let Some(query) = query {
            joined.set_query(Some(&query));
        }
        Ok(joined)
    }

    /// Probes for the object with HEAD; servers that reject HEAD (405/501)
    /// are retried with a ranged GET.
    async fn object_exists(&self, key: &str) -> anyhow::Result<bool> {
        let url = self.object_url(key)?;
        let response = self.client.head(url).send().await?;
        match response.status() {
            StatusCode::NOT_FOUND => Ok(false),
            StatusCode::METHOD_NOT_ALLOWED | StatusCode::NOT_IMPLEMENTED => {
                self.object_exists_via_get(key).await
            }
            status if status.is_success() => Ok(true),
            status => Err(anyhow::anyhow!(
                "object store HEAD failed with status {status}"
            )),
        }
    }

    /// Existence probe via `GET` with `Range: bytes=0-0` so at most one byte
    /// is transferred; accepts 200 for servers that ignore the range header.
    async fn object_exists_via_get(&self, key: &str) -> anyhow::Result<bool> {
        let url = self.object_url(key)?;
        let response = self
            .client
            .get(url)
            .header(reqwest::header::RANGE, "bytes=0-0")
            .send()
            .await?;
        match response.status() {
            StatusCode::NOT_FOUND => Ok(false),
            StatusCode::PARTIAL_CONTENT | StatusCode::OK => Ok(true),
            status => Err(anyhow::anyhow!(
                "object store GET probe failed with status {status}"
            )),
        }
    }

    /// Uploads `data` with a plain HTTP PUT to the object URL.
    async fn put_object(&self, key: &str, data: Vec<u8>, content_type: &str) -> anyhow::Result<()> {
        let url = self.object_url(key)?;
        let response = self
            .client
            .put(url)
            .header(reqwest::header::CONTENT_TYPE, content_type)
            .body(data)
            .send()
            .await?;
        if response.status().is_success() {
            Ok(())
        } else {
            Err(anyhow::anyhow!(
                "object store PUT failed with status {}",
                response.status()
            ))
        }
    }

    /// Downloads the object; `Ok(None)` on 404, error on any other failure.
    async fn get_object_bytes(&self, key: &str) -> anyhow::Result<Option<Vec<u8>>> {
        let url = self.object_url(key)?;
        let response = self.client.get(url).send().await?;
        match response.status() {
            StatusCode::NOT_FOUND => Ok(None),
            status if status.is_success() => {
                let bytes = response.bytes().await?;
                Ok(Some(bytes.to_vec()))
            }
            status => Err(anyhow::anyhow!(
                "object store GET failed with status {status}"
            )),
        }
    }
}
impl AzureObjectStore {
    /// Builds a store from an `https://<account>.blob.core.windows.net/...`
    /// URL. A SAS token carried in the query string takes precedence; when
    /// absent, requests are authenticated via the Azure CLI credential.
    fn new(url: &Url) -> anyhow::Result<Self> {
        let endpoint = azure_endpoint(url)?;
        let (container, prefix) = azure_container_and_prefix(url)?;
        let sas_query = url.query().map(str::to_string);
        let credential = if sas_query.is_some() {
            None
        } else {
            let credential: Arc<dyn TokenCredential> = Arc::new(AzureCliCredential::new());
            Some(credential)
        };
        Ok(Self {
            endpoint,
            container,
            prefix,
            sas_query,
            client: reqwest::Client::new(),
            credential,
        })
    }

    /// Builds a store from an `az://<account>/<container>[/prefix]` URL; the
    /// account name maps to the `<account>.blob.core.windows.net` endpoint.
    fn new_from_az(url: &Url) -> anyhow::Result<Self> {
        let account = url
            .host_str()
            .ok_or_else(|| anyhow::anyhow!("az url missing account name"))?;
        let endpoint = azure_endpoint_for_account(account)?;
        let (container, prefix) = azure_container_and_prefix(url)?;
        let sas_query = url.query().map(str::to_string);
        let credential = if sas_query.is_some() {
            None
        } else {
            let credential: Arc<dyn TokenCredential> = Arc::new(AzureCliCredential::new());
            Some(credential)
        };
        Ok(Self {
            endpoint,
            container,
            prefix,
            sas_query,
            client: reqwest::Client::new(),
            credential,
        })
    }

    /// Blob URL for `key` under the configured container and prefix, with the
    /// SAS query re-attached when present.
    fn object_url(&self, key: &str) -> anyhow::Result<Url> {
        let full_key = join_prefix(&self.prefix, key);
        let mut url = self.endpoint.clone();
        if full_key.is_empty() {
            url.set_path(&format!("/{}", self.container));
        } else {
            url.set_path(&format!("/{}/{}", self.container, full_key));
        }
        if let Some(query) = &self.sas_query {
            url.set_query(Some(query));
        }
        Ok(url)
    }

    /// HEAD probe for blob existence; failures include Azure diagnostic
    /// headers for easier troubleshooting.
    async fn object_exists(&self, key: &str) -> anyhow::Result<bool> {
        let url = self.object_url(key)?;
        let response = self
            .authorized_request(self.client.head(url))
            .await?
            .send()
            .await?;
        match response.status() {
            StatusCode::NOT_FOUND => Ok(false),
            status if status.is_success() => Ok(true),
            status => Err(anyhow::anyhow!(
                "azure blob HEAD failed with status {status}{}",
                azure_response_context(response.headers())
            )),
        }
    }

    /// Uploads `data` as a block blob (`x-ms-blob-type: BlockBlob`). On
    /// failure the error carries Azure diagnostic headers plus a bounded
    /// snippet of the response body.
    async fn put_object(&self, key: &str, data: Vec<u8>, content_type: &str) -> anyhow::Result<()> {
        let url = self.object_url(key)?;
        let response = self
            .authorized_request(
                self.client
                    .put(url)
                    .header("x-ms-blob-type", "BlockBlob")
                    .header(reqwest::header::CONTENT_TYPE, content_type)
                    .body(data),
            )
            .await?
            .send()
            .await?;
        if response.status().is_success() {
            Ok(())
        } else {
            // Capture status/headers before consuming the body for the snippet.
            let status = response.status();
            let headers = azure_response_context(response.headers());
            let body = response.text().await.unwrap_or_default();
            let body_snippet = azure_response_body_snippet(&body);
            Err(anyhow::anyhow!(
                "azure blob PUT failed with status {status}{headers}{body_snippet}"
            ))
        }
    }

    /// Downloads the blob; `Ok(None)` on 404, error (with Azure diagnostic
    /// headers) on any other failure.
    async fn get_object_bytes(&self, key: &str) -> anyhow::Result<Option<Vec<u8>>> {
        let url = self.object_url(key)?;
        let response = self
            .authorized_request(self.client.get(url))
            .await?
            .send()
            .await?;
        match response.status() {
            StatusCode::NOT_FOUND => Ok(None),
            status if status.is_success() => {
                let bytes = response.bytes().await?;
                Ok(Some(bytes.to_vec()))
            }
            status => Err(anyhow::anyhow!(
                "azure blob GET failed with status {status}{}",
                azure_response_context(response.headers())
            )),
        }
    }
}
/// Ensures the URL path ends with `/` so later `Url::join(key)` calls resolve
/// relative to the base path instead of replacing its last segment.
///
/// The previous version trimmed trailing slashes before re-appending one, but
/// that branch is only reached when the path has no trailing slash, so the
/// trim was dead code; appending directly is equivalent.
fn ensure_trailing_slash(url: &mut Url) {
    if !url.path().ends_with('/') {
        let new_path = format!("{}/", url.path());
        url.set_path(&new_path);
    }
}
/// Joins an optional store prefix with an object key using `/`; an empty
/// prefix yields the key unchanged.
fn join_prefix(prefix: &str, key: &str) -> String {
    match prefix {
        "" => key.to_owned(),
        _ => [prefix, key].join("/"),
    }
}
/// True when the URL host is an Azure Blob Storage endpoint
/// (`*.blob.core.windows.net`); URLs without a host are never Azure.
fn is_azure_blob_url(url: &Url) -> bool {
    url.host_str()
        .is_some_and(|host| host.ends_with(".blob.core.windows.net"))
}
/// Derives the bare account endpoint from a blob URL by stripping the path,
/// query, and fragment (keeping only scheme + host).
fn azure_endpoint(url: &Url) -> anyhow::Result<Url> {
    let mut endpoint = url.clone();
    endpoint.set_query(None);
    endpoint.set_fragment(None);
    endpoint.set_path("/");
    Ok(endpoint)
}
/// Splits the URL path into (container, key prefix), ignoring empty path
/// segments (e.g. doubled or trailing slashes).
fn azure_container_and_prefix(url: &Url) -> anyhow::Result<(String, String)> {
    let mut segments: Vec<&str> = Vec::new();
    if let Some(iter) = url.path_segments() {
        segments.extend(iter.filter(|segment| !segment.is_empty()));
    }
    azure_container_and_prefix_from_segments(&segments)
}
/// First path segment is the container; the rest (possibly empty) form the
/// `/`-joined key prefix. Errors when no segments are present.
fn azure_container_and_prefix_from_segments(segments: &[&str]) -> anyhow::Result<(String, String)> {
    let Some((container, rest)) = segments.split_first() else {
        return Err(anyhow::anyhow!(
            "azure blob url must include a container name"
        ));
    };
    Ok(((*container).to_string(), rest.join("/")))
}
/// Stamps the Azure Storage REST API version header (`x-ms-version`) onto a
/// request; applied to every Azure call via `authorized_request`.
fn azure_request(builder: reqwest::RequestBuilder) -> reqwest::RequestBuilder {
    builder.header("x-ms-version", "2021-08-06")
}
/// Canonical blob endpoint for a storage account name
/// (`https://<account>.blob.core.windows.net/`).
fn azure_endpoint_for_account(account: &str) -> anyhow::Result<Url> {
    Url::parse(&format!("https://{account}.blob.core.windows.net/"))
        .with_context(|| "failed to build azure blob endpoint")
}
impl AzureObjectStore {
    /// Adds the API-version header and, when no SAS token is configured,
    /// attaches a bearer token acquired from the credential. SAS-based stores
    /// carry auth in the URL query, so the builder is returned unchanged.
    async fn authorized_request(
        &self,
        builder: reqwest::RequestBuilder,
    ) -> anyhow::Result<reqwest::RequestBuilder> {
        let builder = azure_request(builder);
        let Some(credential) = &self.credential else {
            return Ok(builder);
        };
        let token = credential
            .get_token(&[AZURE_STORAGE_SCOPE])
            .await
            .with_context(|| "failed to acquire azure blob access token")?;
        Ok(builder.bearer_auth(token.token.secret()))
    }
}
/// Formats Azure diagnostic headers (error code, request id, auth challenge)
/// as a ` (k=v, ...)` suffix for error messages; empty string when none are
/// present.
fn azure_response_context(headers: &reqwest::header::HeaderMap) -> String {
    const DIAGNOSTIC_HEADERS: [&str; 3] =
        ["x-ms-error-code", "x-ms-request-id", "www-authenticate"];
    let parts: Vec<String> = DIAGNOSTIC_HEADERS
        .iter()
        .filter_map(|name| {
            azure_header_value(headers, name).map(|value| format!("{name}={value}"))
        })
        .collect();
    if parts.is_empty() {
        String::new()
    } else {
        format!(" ({})", parts.join(", "))
    }
}
/// Looks up `name` in the headers and returns its value as an owned string;
/// `None` when absent or not valid UTF-8/visible-ASCII.
fn azure_header_value(headers: &reqwest::header::HeaderMap, name: &str) -> Option<String> {
    let value = headers.get(name)?;
    value.to_str().ok().map(str::to_owned)
}
/// Formats a bounded ` (body=...)` suffix from an error-response body for
/// diagnostics; empty string when the body is blank. Bodies longer than 512
/// characters are truncated with a `...` marker.
///
/// Fix: the previous version compared the *byte* length against 512 but
/// truncated by *characters*, so a multibyte body of ≤512 chars (but >512
/// bytes) was returned whole with a spurious `...` appended. Both the check
/// and the cut are now character-based (on a char boundary).
fn azure_response_body_snippet(body: &str) -> String {
    let trimmed = body.trim();
    if trimmed.is_empty() {
        return String::new();
    }
    // `nth(512)` yields the byte offset of the 513th char, if any — a safe
    // boundary to slice on; `None` means the body fits within the cap.
    match trimmed.char_indices().nth(512) {
        Some((cut, _)) => format!(" (body={}...)", &trimmed[..cut]),
        None => format!(" (body={trimmed})"),
    }
}

View File

@@ -74,6 +74,7 @@ ignore = [
{ id = "RUSTSEC-2025-0057", reason = "fxhash is unmaintained; pulled in via starlark_map/starlark v0.13.0 used by execpolicy/cli/core; no fixed release yet" },
{ id = "RUSTSEC-2024-0436", reason = "paste is unmaintained; pulled in via ratatui/rmcp/starlark used by tui/execpolicy; no fixed release yet" },
{ id = "RUSTSEC-2025-0134", reason = "rustls-pemfile is unmaintained; pulled in via rama-tls-rustls used by codex-network-proxy; no safe upgrade until rama removes the dependency" },
{ id = "RUSTSEC-2024-0384", reason = "instant is unmaintained; pulled in via http-types -> futures-lite used by Azure auth; no safe upgrade available yet" },
# TODO(joshka, nornagon): remove this exception when once we update the ratatui fork to a version that uses lru 0.13+.
{ id = "RUSTSEC-2026-0002", reason = "lru 0.12.5 is pulled in via ratatui fork; cannot upgrade until the fork is updated" },
]

View File

@@ -1300,6 +1300,7 @@ impl App {
&path,
CwdPromptAction::Resume,
true,
false,
)
.await?
{
@@ -1490,7 +1491,16 @@ impl App {
return Ok(AppRunControl::Exit(ExitReason::Fatal(message)));
}
AppEvent::CodexOp(op) => {
self.chat_widget.submit_op(op);
if let Op::UserInputAnswer { id, response } = op {
if crate::chatwidget::ChatWidget::is_share_request_id(&id) {
self.chat_widget.handle_share_response(response);
return Ok(AppRunControl::Continue);
}
self.chat_widget
.submit_op(Op::UserInputAnswer { id, response });
} else {
self.chat_widget.submit_op(op);
}
}
AppEvent::DiffResult(text) => {
// Clear the in-progress state in the bottom pane

View File

@@ -105,7 +105,11 @@ use codex_protocol::config_types::Settings;
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::models::local_image_label_text;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::request_user_input::RequestUserInputAnswer;
use codex_protocol::request_user_input::RequestUserInputEvent;
use codex_protocol::request_user_input::RequestUserInputQuestion;
use codex_protocol::request_user_input::RequestUserInputQuestionOption;
use codex_protocol::request_user_input::RequestUserInputResponse;
use codex_protocol::user_input::TextElement;
use codex_protocol::user_input::UserInput;
use crossterm::event::KeyCode;
@@ -131,6 +135,11 @@ const PLAN_IMPLEMENTATION_TITLE: &str = "Implement this plan?";
const PLAN_IMPLEMENTATION_YES: &str = "Yes, implement this plan";
const PLAN_IMPLEMENTATION_NO: &str = "No, stay in Plan mode";
const PLAN_IMPLEMENTATION_CODING_MESSAGE: &str = "Implement the plan.";
const SHARE_REQUEST_PREFIX: &str = "share:";
const SHARE_SCOPE_QUESTION_ID: &str = "share_scope";
const SHARE_EMAILS_QUESTION_ID: &str = "share_emails";
const SHARE_SCOPE_EVERYONE_LABEL: &str = "Everyone using the same blob store";
const SHARE_SCOPE_EMAIL_LABEL: &str = "Specific emails";
use crate::app_event::AppEvent;
use crate::app_event::ConnectorsSnapshot;
@@ -368,6 +377,95 @@ pub(crate) fn get_limits_duration(windows_minutes: i64) -> String {
}
}
/// Audience selected in the /share prompt. Email-restricted sharing is
/// display-only for now (the prompt itself says "not enforced yet").
#[derive(Debug, Clone, Copy)]
enum ShareScope {
    Everyone,
    Emails,
}

impl ShareScope {
    /// Human-readable label matching the prompt's option labels.
    fn label(self) -> &'static str {
        match self {
            ShareScope::Everyone => SHARE_SCOPE_EVERYONE_LABEL,
            ShareScope::Emails => SHARE_SCOPE_EMAIL_LABEL,
        }
    }
}
/// Resolves the share scope from the user's answers: the "Specific emails"
/// option selects `Emails`; any other (or missing) selection defaults to
/// `Everyone`.
fn parse_share_scope(response: &RequestUserInputResponse) -> ShareScope {
    let selected = response
        .answers
        .get(SHARE_SCOPE_QUESTION_ID)
        .and_then(selected_label);
    match selected.as_deref() {
        Some(SHARE_SCOPE_EMAIL_LABEL) => ShareScope::Emails,
        _ => ShareScope::Everyone,
    }
}
/// Extracts the comma-separated email list from the user's free-text answer,
/// trimming whitespace and dropping empty entries; empty when no note was
/// provided.
fn parse_share_emails(response: &RequestUserInputResponse) -> Vec<String> {
    match response
        .answers
        .get(SHARE_EMAILS_QUESTION_ID)
        .and_then(extract_user_note)
    {
        Some(note) => note
            .split(',')
            .filter_map(|entry| {
                let entry = entry.trim();
                (!entry.is_empty()).then(|| entry.to_string())
            })
            .collect(),
        None => Vec::new(),
    }
}
/// First answer entry that is an option label (i.e. not a `user_note: `
/// free-text entry), if any.
fn selected_label(answer: &RequestUserInputAnswer) -> Option<String> {
    for entry in &answer.answers {
        if !entry.starts_with("user_note: ") {
            return Some(entry.clone());
        }
    }
    None
}
/// First `user_note: `-prefixed answer entry, with the prefix stripped.
fn extract_user_note(answer: &RequestUserInputAnswer) -> Option<String> {
    for entry in &answer.answers {
        if let Some(note) = entry.strip_prefix("user_note: ") {
            return Some(note.to_string());
        }
    }
    None
}
/// Builds the history lines shown after a successful share: the access
/// scope, the optional email list, the `codex fork <id>` hint, and the
/// remote session id.
fn share_success_lines(
    remote_id: ThreadId,
    scope: ShareScope,
    emails: &[String],
) -> Vec<Line<'static>> {
    let mut lines: Vec<Line<'static>> = Vec::new();
    lines.push(vec!["".dim(), "Session shared.".into()].into());
    lines.push(vec![" ".into(), "Access: ".dim(), scope.label().into()].into());
    if matches!(scope, ShareScope::Emails) {
        // Email list is informational only; restriction is not enforced yet.
        let email_list = if emails.is_empty() {
            "(none provided)".to_string()
        } else {
            emails.join(", ")
        };
        lines.push(vec![" ".into(), "Emails: ".dim(), email_list.into()].into());
    }
    let fork_command = format!("codex fork {remote_id}");
    lines.push(vec![" ".into(), "Fork with: ".dim(), fork_command.cyan()].into());
    lines.push(
        vec![
            " ".into(),
            "Remote session id: ".dim(),
            remote_id.to_string().cyan(),
        ]
        .into(),
    );
    lines
}
/// Common initialization parameters shared by all `ChatWidget` constructors.
pub(crate) struct ChatWidgetInit {
pub(crate) config: Config,
@@ -2643,6 +2741,9 @@ impl ChatWidget {
SlashCommand::Fork => {
self.app_event_tx.send(AppEvent::ForkCurrentSession);
}
SlashCommand::Share => {
self.start_share_flow();
}
SlashCommand::Init => {
let init_target = self.config.cwd.join(DEFAULT_PROJECT_DOC_FILENAME);
if init_target.exists() {
@@ -2864,6 +2965,117 @@ impl ChatWidget {
}
}
/// Entry point for the `/share` slash command: validates that the session
/// has a thread id, a rollout file, and a configured object store, then
/// prompts the user for the share audience via the user-input request UI.
fn start_share_flow(&mut self) {
    let Some(thread_id) = self.thread_id else {
        self.add_error_message("Current session is not ready to share yet.".to_string());
        return;
    };
    if self.rollout_path().is_none() {
        self.add_error_message("Current session is not ready to share yet.".to_string());
        return;
    }
    if self.config.session_object_storage_url.is_none() {
        self.add_error_message(
            "Sharing requires `session_object_storage_url` in config.toml.".to_string(),
        );
        return;
    }
    // Prefixed request id so the app layer can route the answer back to
    // `handle_share_response` instead of the agent (see `is_share_request_id`).
    let request_id = format!("{SHARE_REQUEST_PREFIX}{thread_id}");
    let questions = vec![
        RequestUserInputQuestion {
            id: SHARE_SCOPE_QUESTION_ID.to_string(),
            header: "Share".to_string(),
            question: "Who should be able to open this shared session?".to_string(),
            is_other: false,
            options: Some(vec![
                RequestUserInputQuestionOption {
                    label: SHARE_SCOPE_EVERYONE_LABEL.to_string(),
                    description: "Anyone with access to the same object store.".to_string(),
                },
                RequestUserInputQuestionOption {
                    label: SHARE_SCOPE_EMAIL_LABEL.to_string(),
                    description: "Restrict to a list of emails (not enforced yet).".to_string(),
                },
            ]),
        },
        RequestUserInputQuestion {
            id: SHARE_EMAILS_QUESTION_ID.to_string(),
            header: "Emails".to_string(),
            question: "If sharing with specific emails, list them (comma-separated)."
                .to_string(),
            is_other: false,
            options: None,
        },
    ];
    self.bottom_pane
        .push_user_input_request(RequestUserInputEvent {
            call_id: request_id.clone(),
            turn_id: request_id,
            questions,
        });
    self.request_redraw();
}
/// True when `id` was minted by `start_share_flow`; the app layer uses this
/// to intercept the answer and route it to `handle_share_response` instead
/// of forwarding it to the agent.
pub(crate) fn is_share_request_id(id: &str) -> bool {
    id.starts_with(SHARE_REQUEST_PREFIX)
}
/// Handles the user's answers to the /share prompt: resolves scope and email
/// list, re-validates preconditions (storage URL, signed-in owner email,
/// thread id, rollout path), then uploads the rollout on a background task
/// and reports success or failure as a history cell.
pub(crate) fn handle_share_response(&mut self, response: RequestUserInputResponse) {
    let scope = parse_share_scope(&response);
    let emails = parse_share_emails(&response);
    // TODO: Enforce email-based access once we have a server-side auth check.
    let Some(storage_url) = self.config.session_object_storage_url.clone() else {
        self.add_error_message(
            "Sharing requires `session_object_storage_url` in config.toml.".to_string(),
        );
        return;
    };
    // The account email doubles as the remote ownership record.
    let owner = self
        .auth_manager
        .auth_cached()
        .and_then(|auth| auth.get_account_email());
    let Some(owner) = owner else {
        self.add_error_message(
            "Sharing requires a signed-in ChatGPT account with an email.".to_string(),
        );
        return;
    };
    let Some(thread_id) = self.thread_id else {
        self.add_error_message("Current session is not ready to share yet.".to_string());
        return;
    };
    let Some(rollout_path) = self.rollout_path() else {
        self.add_error_message("Current session is not ready to share yet.".to_string());
        return;
    };
    self.add_info_message("Sharing current session…".to_string(), None);
    // Upload off the UI thread; results come back via the app event channel.
    let app_event_tx = self.app_event_tx.clone();
    tokio::spawn(async move {
        let result = codex_core::session_share::upload_rollout_with_owner(
            &storage_url,
            thread_id,
            &owner,
            &rollout_path,
        )
        .await;
        let cell = match result {
            Ok(result) => {
                let lines = share_success_lines(result.remote_id, scope, &emails);
                PlainHistoryCell::new(lines)
            }
            Err(err) => {
                history_cell::new_error_event(format!("Failed to share session: {err}"))
            }
        };
        app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(cell)));
    });
}
/// Forwards pasted text to the bottom pane (composer) for insertion.
pub(crate) fn handle_paste(&mut self, text: String) {
    self.bottom_pane.handle_paste(text);
}

View File

@@ -29,8 +29,11 @@ use codex_core::find_thread_path_by_id_str;
use codex_core::path_utils;
use codex_core::protocol::AskForApproval;
use codex_core::read_session_meta_line;
use codex_core::session_share::download_rollout_if_available;
use codex_core::session_share::local_share_owner;
use codex_core::terminal::Multiplexer;
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
use codex_protocol::ThreadId;
use codex_protocol::config_types::AltScreenMode;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::WindowsSandboxLevel;
@@ -476,8 +479,8 @@ async fn run_ratatui_app(
None
}
};
let mut missing_session_exit = |id_str: &str, action: &str| {
error!("Error finding conversation path: {id_str}");
let fatal_exit = |tui: &mut Tui, message: String| {
error!("{message}");
restore();
session_log::log_session_end();
let _ = tui.terminal.clear();
@@ -485,18 +488,47 @@ async fn run_ratatui_app(
token_usage: codex_core::protocol::TokenUsage::default(),
thread_id: None,
update_action: None,
exit_reason: ExitReason::Fatal(format!(
"No saved session found with ID {id_str}. Run `codex {action}` without an ID to choose from existing sessions."
)),
exit_reason: ExitReason::Fatal(message),
})
};
let missing_session_exit = |tui: &mut Tui, id_str: &str, action: &str| {
fatal_exit(
tui,
format!(
"No saved session found with ID {id_str}. Run `codex {action}` without an ID to choose from existing sessions."
),
)
};
let use_fork = cli.fork_picker || cli.fork_last || cli.fork_session_id.is_some();
let mut remote_fork_downloaded = false;
let session_selection = if use_fork {
if let Some(id_str) = cli.fork_session_id.as_deref() {
match find_thread_path_by_id_str(&config.codex_home, id_str).await? {
Some(path) => resume_picker::SessionSelection::Fork(path),
None => return missing_session_exit(id_str, "fork"),
None => {
let Some(storage_url) = config.session_object_storage_url.as_deref() else {
return missing_session_exit(&mut tui, id_str, "fork");
};
let Ok(session_id) = ThreadId::from_string(id_str) else {
return missing_session_exit(&mut tui, id_str, "fork");
};
match download_rollout_if_available(storage_url, session_id, &config.codex_home)
.await
{
Ok(Some(path)) => {
remote_fork_downloaded = true;
resume_picker::SessionSelection::Fork(path)
}
Ok(None) => return missing_session_exit(&mut tui, id_str, "fork"),
Err(err) => {
return fatal_exit(
&mut tui,
format!("Failed to fetch remote session {id_str}: {err}"),
);
}
}
}
}
} else if cli.fork_last {
let provider_filter = vec![config.model_provider_id.clone()];
@@ -545,7 +577,7 @@ async fn run_ratatui_app(
} else if let Some(id_str) = cli.resume_session_id.as_deref() {
match find_thread_path_by_id_str(&config.codex_home, id_str).await? {
Some(path) => resume_picker::SessionSelection::Resume(path),
None => return missing_session_exit(id_str, "resume"),
None => return missing_session_exit(&mut tui, id_str, "resume"),
}
} else if cli.resume_last {
let provider_filter = vec![config.model_provider_id.clone()];
@@ -594,6 +626,43 @@ async fn run_ratatui_app(
resume_picker::SessionSelection::StartFresh
};
let current_owner = auth_manager
.auth_cached()
.and_then(|auth| auth.get_account_email());
if let resume_picker::SessionSelection::Resume(path) = &session_selection {
match local_share_owner(path) {
Ok(Some(owner)) => {
if current_owner.as_deref() != Some(owner.as_str()) {
let session_id = read_session_meta_line(path)
.await
.ok()
.map(|meta| meta.meta.id.to_string());
let (id_display, fork_hint) = match session_id {
Some(id) => (id.clone(), format!("Use `codex fork {id}` instead.")),
None => (
"this session".to_string(),
"Use `codex fork` to select it instead.".to_string(),
),
};
return fatal_exit(
&mut tui,
format!(
"Cannot resume shared session {id_display} owned by {owner}. {fork_hint}"
),
);
}
}
Ok(None) => {}
Err(err) => {
return fatal_exit(
&mut tui,
format!("Failed to read shared session metadata: {err}"),
);
}
}
}
let current_cwd = config.cwd.clone();
let allow_prompt = cli.cwd.is_none();
let action_and_path_if_resume_or_fork = match &session_selection {
@@ -603,8 +672,25 @@ async fn run_ratatui_app(
};
let fallback_cwd = match action_and_path_if_resume_or_fork {
Some((action, path)) => {
resolve_cwd_for_resume_or_fork(&mut tui, &current_cwd, path, action, allow_prompt)
.await?
let shared_owner = match action {
CwdPromptAction::Fork => local_share_owner(path).ok().flatten(),
CwdPromptAction::Resume => None,
};
let shared_by_other = shared_owner
.as_deref()
.map(|owner| current_owner.as_deref() != Some(owner))
.unwrap_or(false);
let prefer_current_cwd =
(remote_fork_downloaded || shared_by_other) && action == CwdPromptAction::Fork;
resolve_cwd_for_resume_or_fork(
&mut tui,
&current_cwd,
path,
action,
allow_prompt,
prefer_current_cwd,
)
.await?
}
None => None,
};
@@ -712,10 +798,17 @@ pub(crate) async fn resolve_cwd_for_resume_or_fork(
path: &Path,
action: CwdPromptAction,
allow_prompt: bool,
prefer_current_cwd: bool,
) -> color_eyre::Result<Option<PathBuf>> {
if prefer_current_cwd {
return Ok(Some(current_cwd.to_path_buf()));
}
let Some(history_cwd) = read_session_cwd(path).await else {
return Ok(None);
};
if action == CwdPromptAction::Fork && !history_cwd.exists() {
return Ok(Some(current_cwd.to_path_buf()));
}
if allow_prompt && cwds_differ(current_cwd, &history_cwd) {
let selection =
cwd_prompt::run_cwd_selection_prompt(tui, action, current_cwd, &history_cwd).await?;

View File

@@ -23,6 +23,7 @@ pub enum SlashCommand {
New,
Resume,
Fork,
Share,
Init,
Compact,
Collab,
@@ -54,6 +55,7 @@ impl SlashCommand {
SlashCommand::Review => "review my current changes and find issues",
SlashCommand::Resume => "resume a saved chat",
SlashCommand::Fork => "fork the current chat",
SlashCommand::Share => "share the current chat via object storage",
// SlashCommand::Undo => "ask Codex to undo a turn",
SlashCommand::Quit | SlashCommand::Exit => "exit Codex",
SlashCommand::Diff => "show git diff (including untracked files)",
@@ -89,6 +91,7 @@ impl SlashCommand {
SlashCommand::New
| SlashCommand::Resume
| SlashCommand::Fork
| SlashCommand::Share
| SlashCommand::Init
| SlashCommand::Compact
// | SlashCommand::Undo

View File

@@ -8,6 +8,6 @@ license.workspace = true
workspace = true
[dependencies]
assert_cmd = { workspace = true }
path-absolutize = { workspace = true }
runfiles = { workspace = true }
thiserror = { workspace = true }

View File

@@ -1,3 +1,6 @@
use path_absolutize::Absolutize;
use std::env;
use std::ffi;
use std::ffi::OsString;
use std::io;
use std::path::Path;
@@ -42,27 +45,13 @@ pub fn cargo_bin(name: &str) -> Result<PathBuf, CargoBinError> {
return resolve_bin_from_env(key, value);
}
}
match assert_cmd::Command::cargo_bin(name) {
Ok(cmd) => {
let mut path = PathBuf::from(cmd.get_program());
if !path.is_absolute() {
path = std::env::current_dir()
.map_err(|source| CargoBinError::CurrentDir { source })?
.join(path);
}
if path.exists() {
Ok(path)
} else {
Err(CargoBinError::ResolvedPathDoesNotExist {
key: "assert_cmd::Command::cargo_bin".to_owned(),
path,
})
}
}
match resolve_bin_from_target_dir(name) {
Ok(path) => Ok(path),
Err(err) => Err(CargoBinError::NotFound {
name: name.to_owned(),
env_keys,
fallback: format!("assert_cmd fallback failed: {err}"),
fallback: format!("target dir fallback failed: {err}"),
}),
}
}
@@ -105,6 +94,41 @@ fn resolve_bin_from_env(key: &str, value: OsString) -> Result<PathBuf, CargoBinE
})
}
/// Locate the binary `name` (with the platform executable suffix appended)
/// directly inside the inferred cargo target directory.
///
/// Returns the absolutized path when the file exists; otherwise reports the
/// (pre-absolutize) candidate path in `ResolvedPathDoesNotExist`.
fn resolve_bin_from_target_dir(name: &str) -> Result<PathBuf, CargoBinError> {
    let mut candidate = cargo_target_dir()?;
    candidate.push(format!("{name}{}", env::consts::EXE_SUFFIX));
    // Clone: the error arm below needs the original candidate path.
    let abs = absolutize_from_buck_or_cwd(candidate.clone())?;
    if !abs.exists() {
        return Err(CargoBinError::ResolvedPathDoesNotExist {
            key: "target_dir".to_owned(),
            path: candidate,
        });
    }
    Ok(abs)
}
/// Infer the cargo output directory from the running executable's location:
/// the parent directory of `current_exe`, dropping a trailing `deps`
/// component when the binary was placed in `target/<profile>/deps`.
fn cargo_target_dir() -> Result<PathBuf, CargoBinError> {
    let exe = env::current_exe().map_err(|source| CargoBinError::CurrentExe { source })?;
    let mut dir = match exe.parent() {
        Some(parent) => parent.to_path_buf(),
        // A path with no parent (e.g. a bare root) cannot name a target dir.
        None => {
            return Err(CargoBinError::ResolvedPathDoesNotExist {
                key: "current_exe".to_owned(),
                path: exe,
            });
        }
    };
    if dir.ends_with("deps") {
        dir.pop();
    }
    Ok(dir)
}
/// Make `path` absolute via `path_absolutize` (relative paths are resolved
/// against the process working directory — which, under Buck, is the test's
/// runfiles root), mapping I/O failures to `CargoBinError::CurrentDir`.
fn absolutize_from_buck_or_cwd(path: PathBuf) -> Result<PathBuf, CargoBinError> {
    match path.absolutize() {
        Ok(abs) => Ok(abs.into_owned()),
        Err(source) => Err(CargoBinError::CurrentDir { source }),
    }
}
/// Macro that derives the path to a test resource at runtime, the value of
/// which depends on whether Cargo or Bazel is being used to build and run a
/// test. Note the return value may be a relative or absolute path.

View File

@@ -24,6 +24,30 @@ Codex can run a notification hook when the agent finishes a turn. See the config
- https://developers.openai.com/codex/config-reference
## Session sharing (enterprise)
To enable `/share` for enterprise or self-hosted storage, configure:
```
session_object_storage_url = "https://your-object-store.example.com/codex-sessions/"
```
You can also use Azure Blob Storage with a SAS URL, either as a standard HTTPS URL or the shorthand `az://` form:
```
session_object_storage_url = "https://<account>.blob.core.windows.net/<container>/codex-sessions?<sas>"
```
```
session_object_storage_url = "az://<account>/<container>/codex-sessions?<sas>"
```
For Azure, the SAS token must allow read and write access to blob objects under the prefix. Listing is not required.
If you omit the SAS token, Codex falls back to Azure CLI authentication (`az login`) and requests storage-scoped access tokens for the Blob service API.
For plain HTTP/HTTPS URLs, the endpoint must support `HEAD` (existence checks) and `PUT` (uploads) for individual objects. When sharing, Codex uploads the session rollout (a `.jsonl` file) under this prefix.
## JSON Schema
The generated JSON Schema for `config.toml` lives at `codex-rs/core/config.schema.json`.